if_iwn.c (r287197 -> r287312)
1/*-
2 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org>
4 * Copyright (c) 2008 Sam Leffler, Errno Consulting
5 * Copyright (c) 2011 Intel Corporation
6 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr>
7 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*
23 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
24 * adapters.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/dev/iwn/if_iwn.c 287197 2015-08-27 08:56:39Z glebius $");
28__FBSDID("$FreeBSD: head/sys/dev/iwn/if_iwn.c 287312 2015-08-30 21:54:33Z adrian $");
29
30#include "opt_wlan.h"
31#include "opt_iwn.h"
32
33#include <sys/param.h>
34#include <sys/sockio.h>
35#include <sys/sysctl.h>
36#include <sys/mbuf.h>
37#include <sys/kernel.h>
38#include <sys/socket.h>
39#include <sys/systm.h>
40#include <sys/malloc.h>
41#include <sys/bus.h>
42#include <sys/conf.h>
43#include <sys/rman.h>
44#include <sys/endian.h>
45#include <sys/firmware.h>
46#include <sys/limits.h>
47#include <sys/module.h>
48#include <sys/priv.h>
49#include <sys/queue.h>
50#include <sys/taskqueue.h>
51
52#include <machine/bus.h>
53#include <machine/resource.h>
54#include <machine/clock.h>
55
56#include <dev/pci/pcireg.h>
57#include <dev/pci/pcivar.h>
58
59#include <net/bpf.h>
60#include <net/if.h>
61#include <net/if_var.h>
62#include <net/if_arp.h>
63#include <net/ethernet.h>
64#include <net/if_dl.h>
65#include <net/if_media.h>
66#include <net/if_types.h>
67
68#include <netinet/in.h>
69#include <netinet/in_systm.h>
70#include <netinet/in_var.h>
71#include <netinet/if_ether.h>
72#include <netinet/ip.h>
73
74#include <net80211/ieee80211_var.h>
75#include <net80211/ieee80211_radiotap.h>
76#include <net80211/ieee80211_regdomain.h>
77#include <net80211/ieee80211_ratectl.h>
78
79#include <dev/iwn/if_iwnreg.h>
80#include <dev/iwn/if_iwnvar.h>
81#include <dev/iwn/if_iwn_devid.h>
82#include <dev/iwn/if_iwn_chip_cfg.h>
83#include <dev/iwn/if_iwn_debug.h>
84#include <dev/iwn/if_iwn_ioctl.h>
85
86struct iwn_ident {
87 uint16_t vendor;
88 uint16_t device;
89 const char *name;
90};
91
92static const struct iwn_ident iwn_ident_table[] = {
93 { 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" },
94 { 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" },
95 { 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" },
96 { 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" },
97 { 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" },
98 { 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" },
99 { 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" },
100 { 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" },
101 { 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" },
102 { 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" },
103 { 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" },
104 { 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" },
105 { 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
106 { 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
107 /* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */
108 { 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" },
109 { 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" },
110 { 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" },
111 { 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" },
112 { 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" },
113 { 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" },
114 { 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105" },
115 { 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105" },
116 { 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135" },
117 { 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135" },
118 { 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" },
119 { 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" },
120 { 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" },
121 { 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" },
122 { 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" },
123 { 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" },
124 { 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" },
125 { 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" },
126 { 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" },
127 { 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" },
128 { 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" },
129 { 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" },
130 { 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" },
131 { 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" },
132 { 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" },
133 { 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" },
134 { 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" },
135 { 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" },
136 { 0, 0, NULL }
137};
138
139static int iwn_probe(device_t);
140static int iwn_attach(device_t);
141static int iwn4965_attach(struct iwn_softc *, uint16_t);
142static int iwn5000_attach(struct iwn_softc *, uint16_t);
143static int iwn_config_specific(struct iwn_softc *, uint16_t);
144static void iwn_radiotap_attach(struct iwn_softc *);
145static void iwn_sysctlattach(struct iwn_softc *);
146static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
147 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
148 const uint8_t [IEEE80211_ADDR_LEN],
149 const uint8_t [IEEE80211_ADDR_LEN]);
150static void iwn_vap_delete(struct ieee80211vap *);
151static int iwn_detach(device_t);
152static int iwn_shutdown(device_t);
153static int iwn_suspend(device_t);
154static int iwn_resume(device_t);
155static int iwn_nic_lock(struct iwn_softc *);
156static int iwn_eeprom_lock(struct iwn_softc *);
157static int iwn_init_otprom(struct iwn_softc *);
158static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
159static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
160static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
161 void **, bus_size_t, bus_size_t);
162static void iwn_dma_contig_free(struct iwn_dma_info *);
163static int iwn_alloc_sched(struct iwn_softc *);
164static void iwn_free_sched(struct iwn_softc *);
165static int iwn_alloc_kw(struct iwn_softc *);
166static void iwn_free_kw(struct iwn_softc *);
167static int iwn_alloc_ict(struct iwn_softc *);
168static void iwn_free_ict(struct iwn_softc *);
169static int iwn_alloc_fwmem(struct iwn_softc *);
170static void iwn_free_fwmem(struct iwn_softc *);
171static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
172static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
173static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
174static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
175 int);
176static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
177static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
178static void iwn5000_ict_reset(struct iwn_softc *);
179static int iwn_read_eeprom(struct iwn_softc *,
180 uint8_t macaddr[IEEE80211_ADDR_LEN]);
181static void iwn4965_read_eeprom(struct iwn_softc *);
182#ifdef IWN_DEBUG
183static void iwn4965_print_power_group(struct iwn_softc *, int);
184#endif
185static void iwn5000_read_eeprom(struct iwn_softc *);
186static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
187static void iwn_read_eeprom_band(struct iwn_softc *, int);
188static void iwn_read_eeprom_ht40(struct iwn_softc *, int);
189static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
190static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
191 struct ieee80211_channel *);
192static int iwn_setregdomain(struct ieee80211com *,
193 struct ieee80211_regdomain *, int,
194 struct ieee80211_channel[]);
195static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
196static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
197 const uint8_t mac[IEEE80211_ADDR_LEN]);
198static void iwn_newassoc(struct ieee80211_node *, int);
199static int iwn_media_change(struct ifnet *);
200static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
201static void iwn_calib_timeout(void *);
202static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
203 struct iwn_rx_data *);
204static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
205 struct iwn_rx_data *);
206static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
207 struct iwn_rx_data *);
208static void iwn5000_rx_calib_results(struct iwn_softc *,
209 struct iwn_rx_desc *, struct iwn_rx_data *);
210static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
211 struct iwn_rx_data *);
212static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
213 struct iwn_rx_data *);
214static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
215 struct iwn_rx_data *);
216static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
217 uint8_t);
218static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, int, void *);
219static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
220static void iwn_notif_intr(struct iwn_softc *);
221static void iwn_wakeup_intr(struct iwn_softc *);
222static void iwn_rftoggle_intr(struct iwn_softc *);
223static void iwn_fatal_intr(struct iwn_softc *);
224static void iwn_intr(void *);
225static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
226 uint16_t);
227static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
228 uint16_t);
229#ifdef notyet
230static void iwn5000_reset_sched(struct iwn_softc *, int, int);
231#endif
232static int iwn_tx_data(struct iwn_softc *, struct mbuf *,
233 struct ieee80211_node *);
234static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
235 struct ieee80211_node *,
236 const struct ieee80211_bpf_params *params);
237static void iwn_xmit_task(void *arg0, int pending);
238static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
239 const struct ieee80211_bpf_params *);
240static int iwn_transmit(struct ieee80211com *, struct mbuf *);
241static void iwn_start_locked(struct iwn_softc *);
242static void iwn_watchdog(void *);
243static int iwn_ioctl(struct ieee80211com *, u_long , void *);
244static void iwn_parent(struct ieee80211com *);
245static int iwn_cmd(struct iwn_softc *, int, const void *, int, int);
246static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
247 int);
248static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
249 int);
250static int iwn_set_link_quality(struct iwn_softc *,
251 struct ieee80211_node *);
252static int iwn_add_broadcast_node(struct iwn_softc *, int);
253static int iwn_updateedca(struct ieee80211com *);
254static void iwn_update_mcast(struct ieee80211com *);
255static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
256static int iwn_set_critical_temp(struct iwn_softc *);
257static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
258static void iwn4965_power_calibration(struct iwn_softc *, int);
259static int iwn4965_set_txpower(struct iwn_softc *,
260 struct ieee80211_channel *, int);
261static int iwn5000_set_txpower(struct iwn_softc *,
262 struct ieee80211_channel *, int);
263static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
264static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
265static int iwn_get_noise(const struct iwn_rx_general_stats *);
266static int iwn4965_get_temperature(struct iwn_softc *);
267static int iwn5000_get_temperature(struct iwn_softc *);
268static int iwn_init_sensitivity(struct iwn_softc *);
269static void iwn_collect_noise(struct iwn_softc *,
270 const struct iwn_rx_general_stats *);
271static int iwn4965_init_gains(struct iwn_softc *);
272static int iwn5000_init_gains(struct iwn_softc *);
273static int iwn4965_set_gains(struct iwn_softc *);
274static int iwn5000_set_gains(struct iwn_softc *);
275static void iwn_tune_sensitivity(struct iwn_softc *,
276 const struct iwn_rx_stats *);
277static void iwn_save_stats_counters(struct iwn_softc *,
278 const struct iwn_stats *);
279static int iwn_send_sensitivity(struct iwn_softc *);
280static void iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *);
281static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
282static int iwn_send_btcoex(struct iwn_softc *);
283static int iwn_send_advanced_btcoex(struct iwn_softc *);
284static int iwn5000_runtime_calib(struct iwn_softc *);
285static int iwn_config(struct iwn_softc *);
286static int iwn_scan(struct iwn_softc *, struct ieee80211vap *,
287 struct ieee80211_scan_state *, struct ieee80211_channel *);
288static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
289static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
290static int iwn_ampdu_rx_start(struct ieee80211_node *,
291 struct ieee80211_rx_ampdu *, int, int, int);
292static void iwn_ampdu_rx_stop(struct ieee80211_node *,
293 struct ieee80211_rx_ampdu *);
294static int iwn_addba_request(struct ieee80211_node *,
295 struct ieee80211_tx_ampdu *, int, int, int);
296static int iwn_addba_response(struct ieee80211_node *,
297 struct ieee80211_tx_ampdu *, int, int, int);
298static int iwn_ampdu_tx_start(struct ieee80211com *,
299 struct ieee80211_node *, uint8_t);
300static void iwn_ampdu_tx_stop(struct ieee80211_node *,
301 struct ieee80211_tx_ampdu *);
302static void iwn4965_ampdu_tx_start(struct iwn_softc *,
303 struct ieee80211_node *, int, uint8_t, uint16_t);
304static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
305 uint8_t, uint16_t);
306static void iwn5000_ampdu_tx_start(struct iwn_softc *,
307 struct ieee80211_node *, int, uint8_t, uint16_t);
308static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
309 uint8_t, uint16_t);
310static int iwn5000_query_calibration(struct iwn_softc *);
311static int iwn5000_send_calibration(struct iwn_softc *);
312static int iwn5000_send_wimax_coex(struct iwn_softc *);
313static int iwn5000_crystal_calib(struct iwn_softc *);
314static int iwn5000_temp_offset_calib(struct iwn_softc *);
315static int iwn5000_temp_offset_calibv2(struct iwn_softc *);
316static int iwn4965_post_alive(struct iwn_softc *);
317static int iwn5000_post_alive(struct iwn_softc *);
318static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
319 int);
320static int iwn4965_load_firmware(struct iwn_softc *);
321static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
322 const uint8_t *, int);
323static int iwn5000_load_firmware(struct iwn_softc *);
324static int iwn_read_firmware_leg(struct iwn_softc *,
325 struct iwn_fw_info *);
326static int iwn_read_firmware_tlv(struct iwn_softc *,
327 struct iwn_fw_info *, uint16_t);
328static int iwn_read_firmware(struct iwn_softc *);
329static int iwn_clock_wait(struct iwn_softc *);
330static int iwn_apm_init(struct iwn_softc *);
331static void iwn_apm_stop_master(struct iwn_softc *);
332static void iwn_apm_stop(struct iwn_softc *);
333static int iwn4965_nic_config(struct iwn_softc *);
334static int iwn5000_nic_config(struct iwn_softc *);
335static int iwn_hw_prepare(struct iwn_softc *);
336static int iwn_hw_init(struct iwn_softc *);
337static void iwn_hw_stop(struct iwn_softc *);
338static void iwn_radio_on(void *, int);
339static void iwn_radio_off(void *, int);
340static void iwn_panicked(void *, int);
341static void iwn_init_locked(struct iwn_softc *);
342static void iwn_init(struct iwn_softc *);
343static void iwn_stop_locked(struct iwn_softc *);
344static void iwn_stop(struct iwn_softc *);
345static void iwn_scan_start(struct ieee80211com *);
346static void iwn_scan_end(struct ieee80211com *);
347static void iwn_set_channel(struct ieee80211com *);
348static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
349static void iwn_scan_mindwell(struct ieee80211_scan_state *);
350static void iwn_hw_reset(void *, int);
351#ifdef IWN_DEBUG
352static char *iwn_get_csr_string(int);
353static void iwn_debug_register(struct iwn_softc *);
354#endif
355
356static device_method_t iwn_methods[] = {
357 /* Device interface */
358 DEVMETHOD(device_probe, iwn_probe),
359 DEVMETHOD(device_attach, iwn_attach),
360 DEVMETHOD(device_detach, iwn_detach),
361 DEVMETHOD(device_shutdown, iwn_shutdown),
362 DEVMETHOD(device_suspend, iwn_suspend),
363 DEVMETHOD(device_resume, iwn_resume),
364
365 DEVMETHOD_END
366};
367
368static driver_t iwn_driver = {
369 "iwn",
370 iwn_methods,
371 sizeof(struct iwn_softc)
372};
373static devclass_t iwn_devclass;
374
375DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL);
376
377MODULE_VERSION(iwn, 1);
378
379MODULE_DEPEND(iwn, firmware, 1, 1, 1);
380MODULE_DEPEND(iwn, pci, 1, 1, 1);
381MODULE_DEPEND(iwn, wlan, 1, 1, 1);
382
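/*
 * Character-device entry points backing the debug ioctl interface;
 * the device node itself is created near the end of iwn_attach().
 */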
383static d_ioctl_t iwn_cdev_ioctl;
384static d_open_t iwn_cdev_open;
385static d_close_t iwn_cdev_close;
386
387static struct cdevsw iwn_cdevsw = {
388 .d_version = D_VERSION,
389 .d_flags = 0,
390 .d_open = iwn_cdev_open,
391 .d_close = iwn_cdev_close,
392 .d_ioctl = iwn_cdev_ioctl,
393 .d_name = "iwn",
394};
395
396static int
397iwn_probe(device_t dev)
398{
399 const struct iwn_ident *ident;
400
401 for (ident = iwn_ident_table; ident->name != NULL; ident++) {
402 if (pci_get_vendor(dev) == ident->vendor &&
403 pci_get_device(dev) == ident->device) {
404 device_set_desc(dev, ident->name);
405 return (BUS_PROBE_DEFAULT);
406 }
407 }
408 return ENXIO;
409}
410
411static int
412iwn_is_3stream_device(struct iwn_softc *sc)
413{
414 /* XXX for now only 5300, until the 5350 can be tested */
415 if (sc->hw_type == IWN_HW_REV_TYPE_5300)
416 return (1);
417 return (0);
418}
419
420static int
421iwn_attach(device_t dev)
422{
423 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
424 struct ieee80211com *ic;
425 int i, error, rid;
426
427 sc->sc_dev = dev;
428
429#ifdef IWN_DEBUG
430 error = resource_int_value(device_get_name(sc->sc_dev),
431 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
432 if (error != 0)
433 sc->sc_debug = 0;
434#else
435 sc->sc_debug = 0;
436#endif
437
438 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__);
439
440 /*
441 * Get the offset of the PCI Express Capability Structure in PCI
442 * Configuration Space.
443 */
444 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
445 if (error != 0) {
446 device_printf(dev, "PCIe capability structure not found!\n");
447 return error;
448 }
449
450 /* Clear device-specific "PCI retry timeout" register (41h). */
451 pci_write_config(dev, 0x41, 0, 1);
452
453 /* Enable bus-mastering. */
454 pci_enable_busmaster(dev);
455
456 rid = PCIR_BAR(0);
457 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
458 RF_ACTIVE);
459 if (sc->mem == NULL) {
460 device_printf(dev, "can't map mem space\n");
461 error = ENOMEM;
462 return error;
463 }
464 sc->sc_st = rman_get_bustag(sc->mem);
465 sc->sc_sh = rman_get_bushandle(sc->mem);
466
467 i = 1;
468 rid = 0;
469 if (pci_alloc_msi(dev, &i) == 0)
470 rid = 1;
471 /* Install interrupt handler. */
472 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
473 (rid != 0 ? 0 : RF_SHAREABLE));
474 if (sc->irq == NULL) {
475 device_printf(dev, "can't map interrupt\n");
476 error = ENOMEM;
477 goto fail;
478 }
479
480 IWN_LOCK_INIT(sc);
481 mbufq_init(&sc->sc_snd, ifqmaxlen);
482
483 /* Read hardware revision and attach. */
484 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT)
485 & IWN_HW_REV_TYPE_MASK;
486 sc->subdevice_id = pci_get_subdevice(dev);
487
488 /*
489 * 4965 versus 5000 and later have different methods.
490 * Let's set those up first.
491 */
492 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
493 error = iwn4965_attach(sc, pci_get_device(dev));
494 else
495 error = iwn5000_attach(sc, pci_get_device(dev));
496 if (error != 0) {
497 device_printf(dev, "could not attach device, error %d\n",
498 error);
499 goto fail;
500 }
501
502 /*
503 * Next, let's setup the various parameters of each NIC.
504 */
505 error = iwn_config_specific(sc, pci_get_device(dev));
506 if (error != 0) {
507 device_printf(dev, "could not attach device, error %d\n",
508 error);
509 goto fail;
510 }
511
512 if ((error = iwn_hw_prepare(sc)) != 0) {
513 device_printf(dev, "hardware not ready, error %d\n", error);
514 goto fail;
515 }
516
517 /* Allocate DMA memory for firmware transfers. */
518 if ((error = iwn_alloc_fwmem(sc)) != 0) {
519 device_printf(dev,
520 "could not allocate memory for firmware, error %d\n",
521 error);
522 goto fail;
523 }
524
525 /* Allocate "Keep Warm" page. */
526 if ((error = iwn_alloc_kw(sc)) != 0) {
527 device_printf(dev,
528 "could not allocate keep warm page, error %d\n", error);
529 goto fail;
530 }
531
532 /* Allocate ICT table for 5000 Series. */
533 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
534 (error = iwn_alloc_ict(sc)) != 0) {
535 device_printf(dev, "could not allocate ICT table, error %d\n",
536 error);
537 goto fail;
538 }
539
540 /* Allocate TX scheduler "rings". */
541 if ((error = iwn_alloc_sched(sc)) != 0) {
542 device_printf(dev,
543 "could not allocate TX scheduler rings, error %d\n", error);
544 goto fail;
545 }
546
547 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
548 for (i = 0; i < sc->ntxqs; i++) {
549 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
550 device_printf(dev,
551 "could not allocate TX ring %d, error %d\n", i,
552 error);
553 goto fail;
554 }
555 }
556
557 /* Allocate RX ring. */
558 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
559 device_printf(dev, "could not allocate RX ring, error %d\n",
560 error);
561 goto fail;
562 }
563
564 /* Clear pending interrupts. */
565 IWN_WRITE(sc, IWN_INT, 0xffffffff);
566
567 ic = &sc->sc_ic;
568 ic->ic_softc = sc;
569 ic->ic_name = device_get_nameunit(dev);
570 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
571 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
572
573 /* Set device capabilities. */
574 ic->ic_caps =
575 IEEE80211_C_STA /* station mode supported */
576 | IEEE80211_C_MONITOR /* monitor mode supported */
577#if 0
578 | IEEE80211_C_BGSCAN /* background scanning */
579#endif
580 | IEEE80211_C_TXPMGT /* tx power management */
581 | IEEE80211_C_SHSLOT /* short slot time supported */
582 | IEEE80211_C_WPA
583 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
584#if 0
585 | IEEE80211_C_IBSS /* ibss/adhoc mode */
586#endif
587 | IEEE80211_C_WME /* WME */
588 | IEEE80211_C_PMGT /* Station-side power mgmt */
589 ;
590
591 /* Read MAC address, channels, etc from EEPROM. */
592 if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) {
593 device_printf(dev, "could not read EEPROM, error %d\n",
594 error);
595 goto fail;
596 }
597
598 /* Count the number of available chains. */
599 sc->ntxchains =
600 ((sc->txchainmask >> 2) & 1) +
601 ((sc->txchainmask >> 1) & 1) +
602 ((sc->txchainmask >> 0) & 1);
603 sc->nrxchains =
604 ((sc->rxchainmask >> 2) & 1) +
605 ((sc->rxchainmask >> 1) & 1) +
606 ((sc->rxchainmask >> 0) & 1);
607 if (bootverbose) {
608 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
609 sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
610 ic->ic_macaddr, ":");
611 }
612
613 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
614 ic->ic_rxstream = sc->nrxchains;
615 ic->ic_txstream = sc->ntxchains;
616
617 /*
618		 * Some of the 3-antenna devices (i.e., the 4965) only support
619		 * 2x2 operation, so correct the number of streams if
620		 * it's not a 3-stream device.
621 */
622 if (! iwn_is_3stream_device(sc)) {
623 if (ic->ic_rxstream > 2)
624 ic->ic_rxstream = 2;
625 if (ic->ic_txstream > 2)
626 ic->ic_txstream = 2;
627 }
628
629 ic->ic_htcaps =
630 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */
631 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
632 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
633 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
634#ifdef notyet
635 | IEEE80211_HTCAP_GREENFIELD
636#if IWN_RBUF_SIZE == 8192
637 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
638#else
639 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
640#endif
641#endif
642 /* s/w capabilities */
643 | IEEE80211_HTC_HT /* HT operation */
644 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
645#ifdef notyet
646 | IEEE80211_HTC_AMSDU /* tx A-MSDU */
647#endif
648 ;
649 }
650
651 ieee80211_ifattach(ic);
652 ic->ic_vap_create = iwn_vap_create;
653 ic->ic_ioctl = iwn_ioctl;
654 ic->ic_parent = iwn_parent;
655 ic->ic_vap_delete = iwn_vap_delete;
656 ic->ic_transmit = iwn_transmit;
657 ic->ic_raw_xmit = iwn_raw_xmit;
658 ic->ic_node_alloc = iwn_node_alloc;
659 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
660 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
661 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
662 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
663 sc->sc_addba_request = ic->ic_addba_request;
664 ic->ic_addba_request = iwn_addba_request;
665 sc->sc_addba_response = ic->ic_addba_response;
666 ic->ic_addba_response = iwn_addba_response;
667 sc->sc_addba_stop = ic->ic_addba_stop;
668 ic->ic_addba_stop = iwn_ampdu_tx_stop;
669 ic->ic_newassoc = iwn_newassoc;
670 ic->ic_wme.wme_update = iwn_updateedca;
671 ic->ic_update_mcast = iwn_update_mcast;
672 ic->ic_scan_start = iwn_scan_start;
673 ic->ic_scan_end = iwn_scan_end;
674 ic->ic_set_channel = iwn_set_channel;
675 ic->ic_scan_curchan = iwn_scan_curchan;
676 ic->ic_scan_mindwell = iwn_scan_mindwell;
677 ic->ic_setregdomain = iwn_setregdomain;
678
679 iwn_radiotap_attach(sc);
680
681 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
682 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
683 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
684 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
685 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
686 TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc);
687 TASK_INIT(&sc->sc_xmit_task, 0, iwn_xmit_task, sc);
688
689 mbufq_init(&sc->sc_xmit_queue, 1024);
690
691 sc->sc_tq = taskqueue_create("iwn_taskq", M_WAITOK,
692 taskqueue_thread_enqueue, &sc->sc_tq);
693 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq");
694 if (error != 0) {
695 device_printf(dev, "can't start threads, error %d\n", error);
696 goto fail;
697 }
698
699 iwn_sysctlattach(sc);
700
701 /*
702 * Hook our interrupt after all initialization is complete.
703 */
704 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
705 NULL, iwn_intr, sc, &sc->sc_ih);
706 if (error != 0) {
707 device_printf(dev, "can't establish interrupt, error %d\n",
708 error);
709 goto fail;
710 }
711
712#if 0
713 device_printf(sc->sc_dev, "%s: rx_stats=%d, rx_stats_bt=%d\n",
714 __func__,
715 sizeof(struct iwn_stats),
716 sizeof(struct iwn_stats_bt));
717#endif
718
719 if (bootverbose)
720 ieee80211_announce(ic);
721 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
722
723 /* Add debug ioctl right at the end */
724 sc->sc_cdev = make_dev(&iwn_cdevsw, device_get_unit(dev),
725 UID_ROOT, GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
726 if (sc->sc_cdev == NULL) {
727 device_printf(dev, "failed to create debug character device\n");
728 } else {
729 sc->sc_cdev->si_drv1 = sc;
730 }
707 return 0;
708fail:
709 iwn_detach(dev);
710 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
711 return error;
712}
713
714/*
715 * Define the specific configuration based on the device id and subdevice id.
716 * pid: PCI device id
717 */
718static int
719iwn_config_specific(struct iwn_softc *sc, uint16_t pid)
720{
721
722 switch (pid) {
723/* 4965 series */
724 case IWN_DID_4965_1:
725 case IWN_DID_4965_2:
726 case IWN_DID_4965_3:
727 case IWN_DID_4965_4:
728 sc->base_params = &iwn4965_base_params;
729 sc->limits = &iwn4965_sensitivity_limits;
730 sc->fwname = "iwn4965fw";
731 /* Override chains masks, ROM is known to be broken. */
732 sc->txchainmask = IWN_ANT_AB;
733 sc->rxchainmask = IWN_ANT_ABC;
734 /* Enable normal btcoex */
735 sc->sc_flags |= IWN_FLAG_BTCOEX;
736 break;
737/* 1000 Series */
738 case IWN_DID_1000_1:
739 case IWN_DID_1000_2:
740 switch(sc->subdevice_id) {
741 case IWN_SDID_1000_1:
742 case IWN_SDID_1000_2:
743 case IWN_SDID_1000_3:
744 case IWN_SDID_1000_4:
745 case IWN_SDID_1000_5:
746 case IWN_SDID_1000_6:
747 case IWN_SDID_1000_7:
748 case IWN_SDID_1000_8:
749 case IWN_SDID_1000_9:
750 case IWN_SDID_1000_10:
751 case IWN_SDID_1000_11:
752 case IWN_SDID_1000_12:
753 sc->limits = &iwn1000_sensitivity_limits;
754 sc->base_params = &iwn1000_base_params;
755 sc->fwname = "iwn1000fw";
756 break;
757 default:
758 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
759 "0x%04x rev %d not supported (subdevice)\n", pid,
760 sc->subdevice_id,sc->hw_type);
761 return ENOTSUP;
762 }
763 break;
764/* 6x00 Series */
765 case IWN_DID_6x00_2:
766 case IWN_DID_6x00_4:
767 case IWN_DID_6x00_1:
768 case IWN_DID_6x00_3:
769 sc->fwname = "iwn6000fw";
770 sc->limits = &iwn6000_sensitivity_limits;
771 switch(sc->subdevice_id) {
772 case IWN_SDID_6x00_1:
773 case IWN_SDID_6x00_2:
774 case IWN_SDID_6x00_8:
775 //iwl6000_3agn_cfg
776 sc->base_params = &iwn_6000_base_params;
777 break;
778 case IWN_SDID_6x00_3:
779 case IWN_SDID_6x00_6:
780 case IWN_SDID_6x00_9:
781 ////iwl6000i_2agn
782 case IWN_SDID_6x00_4:
783 case IWN_SDID_6x00_7:
784 case IWN_SDID_6x00_10:
785 //iwl6000i_2abg_cfg
786 case IWN_SDID_6x00_5:
787 //iwl6000i_2bg_cfg
788 sc->base_params = &iwn_6000i_base_params;
789 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
790 sc->txchainmask = IWN_ANT_BC;
791 sc->rxchainmask = IWN_ANT_BC;
792 break;
793 default:
794 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
795 "0x%04x rev %d not supported (subdevice)\n", pid,
796 sc->subdevice_id,sc->hw_type);
797 return ENOTSUP;
798 }
799 break;
800/* 6x05 Series */
801 case IWN_DID_6x05_1:
802 case IWN_DID_6x05_2:
803 switch(sc->subdevice_id) {
804 case IWN_SDID_6x05_1:
805 case IWN_SDID_6x05_4:
806 case IWN_SDID_6x05_6:
807 //iwl6005_2agn_cfg
808 case IWN_SDID_6x05_2:
809 case IWN_SDID_6x05_5:
810 case IWN_SDID_6x05_7:
811 //iwl6005_2abg_cfg
812 case IWN_SDID_6x05_3:
813 //iwl6005_2bg_cfg
814 case IWN_SDID_6x05_8:
815 case IWN_SDID_6x05_9:
816 //iwl6005_2agn_sff_cfg
817 case IWN_SDID_6x05_10:
818 //iwl6005_2agn_d_cfg
819 case IWN_SDID_6x05_11:
820 //iwl6005_2agn_mow1_cfg
821 case IWN_SDID_6x05_12:
822 //iwl6005_2agn_mow2_cfg
823 sc->fwname = "iwn6000g2afw";
824 sc->limits = &iwn6000_sensitivity_limits;
825 sc->base_params = &iwn_6000g2_base_params;
826 break;
827 default:
828 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
829 "0x%04x rev %d not supported (subdevice)\n", pid,
830 sc->subdevice_id,sc->hw_type);
831 return ENOTSUP;
832 }
833 break;
834/* 6x35 Series */
835 case IWN_DID_6035_1:
836 case IWN_DID_6035_2:
837 switch(sc->subdevice_id) {
838 case IWN_SDID_6035_1:
839 case IWN_SDID_6035_2:
840 case IWN_SDID_6035_3:
841 case IWN_SDID_6035_4:
842 sc->fwname = "iwn6000g2bfw";
843 sc->limits = &iwn6235_sensitivity_limits;
844 sc->base_params = &iwn_6235_base_params;
845 break;
846 default:
847 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
848 "0x%04x rev %d not supported (subdevice)\n", pid,
849 sc->subdevice_id,sc->hw_type);
850 return ENOTSUP;
851 }
852 break;
853/* 6x50 WiFi/WiMax Series */
854 case IWN_DID_6050_1:
855 case IWN_DID_6050_2:
856 switch(sc->subdevice_id) {
857 case IWN_SDID_6050_1:
858 case IWN_SDID_6050_3:
859 case IWN_SDID_6050_5:
860 //iwl6050_2agn_cfg
861 case IWN_SDID_6050_2:
862 case IWN_SDID_6050_4:
863 case IWN_SDID_6050_6:
864 //iwl6050_2abg_cfg
865 sc->fwname = "iwn6050fw";
866 sc->txchainmask = IWN_ANT_AB;
867 sc->rxchainmask = IWN_ANT_AB;
868 sc->limits = &iwn6000_sensitivity_limits;
869 sc->base_params = &iwn_6050_base_params;
870 break;
871 default:
872 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
873 "0x%04x rev %d not supported (subdevice)\n", pid,
874 sc->subdevice_id,sc->hw_type);
875 return ENOTSUP;
876 }
877 break;
878/* 6150 WiFi/WiMax Series */
879 case IWN_DID_6150_1:
880 case IWN_DID_6150_2:
881 switch(sc->subdevice_id) {
882 case IWN_SDID_6150_1:
883 case IWN_SDID_6150_3:
884 case IWN_SDID_6150_5:
885 // iwl6150_bgn_cfg
886 case IWN_SDID_6150_2:
887 case IWN_SDID_6150_4:
888 case IWN_SDID_6150_6:
889 //iwl6150_bg_cfg
890 sc->fwname = "iwn6050fw";
891 sc->limits = &iwn6000_sensitivity_limits;
892 sc->base_params = &iwn_6150_base_params;
893 break;
894 default:
895 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
896 "0x%04x rev %d not supported (subdevice)\n", pid,
897 sc->subdevice_id,sc->hw_type);
898 return ENOTSUP;
899 }
900 break;
901/* 6030 Series and 1030 Series */
902 case IWN_DID_x030_1:
903 case IWN_DID_x030_2:
904 case IWN_DID_x030_3:
905 case IWN_DID_x030_4:
906 switch(sc->subdevice_id) {
907 case IWN_SDID_x030_1:
908 case IWN_SDID_x030_3:
909 case IWN_SDID_x030_5:
910 // iwl1030_bgn_cfg
911 case IWN_SDID_x030_2:
912 case IWN_SDID_x030_4:
913 case IWN_SDID_x030_6:
914 //iwl1030_bg_cfg
915 case IWN_SDID_x030_7:
916 case IWN_SDID_x030_10:
917 case IWN_SDID_x030_14:
918 //iwl6030_2agn_cfg
919 case IWN_SDID_x030_8:
920 case IWN_SDID_x030_11:
921 case IWN_SDID_x030_15:
922 // iwl6030_2bgn_cfg
923 case IWN_SDID_x030_9:
924 case IWN_SDID_x030_12:
925 case IWN_SDID_x030_16:
926 // iwl6030_2abg_cfg
927 case IWN_SDID_x030_13:
928 //iwl6030_2bg_cfg
929 sc->fwname = "iwn6000g2bfw";
930 sc->limits = &iwn6000_sensitivity_limits;
931 sc->base_params = &iwn_6000g2b_base_params;
932 break;
933 default:
934 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
935 "0x%04x rev %d not supported (subdevice)\n", pid,
936 sc->subdevice_id,sc->hw_type);
937 return ENOTSUP;
938 }
939 break;
940/* 130 Series WiFi */
941/* XXX: This series will need adjustment for rate.
942 * see rx_with_siso_diversity in linux kernel
943 */
944 case IWN_DID_130_1:
945 case IWN_DID_130_2:
946 switch(sc->subdevice_id) {
947 case IWN_SDID_130_1:
948 case IWN_SDID_130_3:
949 case IWN_SDID_130_5:
950 //iwl130_bgn_cfg
951 case IWN_SDID_130_2:
952 case IWN_SDID_130_4:
953 case IWN_SDID_130_6:
954 //iwl130_bg_cfg
955 sc->fwname = "iwn6000g2bfw";
956 sc->limits = &iwn6000_sensitivity_limits;
957 sc->base_params = &iwn_6000g2b_base_params;
958 break;
959 default:
960 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
961 "0x%04x rev %d not supported (subdevice)\n", pid,
962 sc->subdevice_id,sc->hw_type);
963 return ENOTSUP;
964 }
965 break;
966/* 100 Series WiFi */
967 case IWN_DID_100_1:
968 case IWN_DID_100_2:
969 switch(sc->subdevice_id) {
970 case IWN_SDID_100_1:
971 case IWN_SDID_100_2:
972 case IWN_SDID_100_3:
973 case IWN_SDID_100_4:
974 case IWN_SDID_100_5:
975 case IWN_SDID_100_6:
976 sc->limits = &iwn1000_sensitivity_limits;
977 sc->base_params = &iwn1000_base_params;
978 sc->fwname = "iwn100fw";
979 break;
980 default:
981 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
982 "0x%04x rev %d not supported (subdevice)\n", pid,
983 sc->subdevice_id,sc->hw_type);
984 return ENOTSUP;
985 }
986 break;
987
988/* 105 Series */
989/* XXX: This series will need adjustment for rate.
990 * see rx_with_siso_diversity in linux kernel
991 */
992 case IWN_DID_105_1:
993 case IWN_DID_105_2:
994 switch(sc->subdevice_id) {
995 case IWN_SDID_105_1:
996 case IWN_SDID_105_2:
997 case IWN_SDID_105_3:
998 //iwl105_bgn_cfg
999 case IWN_SDID_105_4:
1000 //iwl105_bgn_d_cfg
1001 sc->limits = &iwn2030_sensitivity_limits;
1002 sc->base_params = &iwn2000_base_params;
1003 sc->fwname = "iwn105fw";
1004 break;
1005 default:
1006 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1007 "0x%04x rev %d not supported (subdevice)\n", pid,
1008 sc->subdevice_id,sc->hw_type);
1009 return ENOTSUP;
1010 }
1011 break;
1012
1013/* 135 Series */
1014/* XXX: This series will need adjustment for rate.
1015 * see rx_with_siso_diversity in linux kernel
1016 */
1017 case IWN_DID_135_1:
1018 case IWN_DID_135_2:
1019 switch(sc->subdevice_id) {
1020 case IWN_SDID_135_1:
1021 case IWN_SDID_135_2:
1022 case IWN_SDID_135_3:
1023 sc->limits = &iwn2030_sensitivity_limits;
1024 sc->base_params = &iwn2030_base_params;
1025 sc->fwname = "iwn135fw";
1026 break;
1027 default:
1028 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1029 "0x%04x rev %d not supported (subdevice)\n", pid,
1030 sc->subdevice_id,sc->hw_type);
1031 return ENOTSUP;
1032 }
1033 break;
1034
1035/* 2x00 Series */
1036 case IWN_DID_2x00_1:
1037 case IWN_DID_2x00_2:
1038 switch(sc->subdevice_id) {
1039 case IWN_SDID_2x00_1:
1040 case IWN_SDID_2x00_2:
1041 case IWN_SDID_2x00_3:
1042 //iwl2000_2bgn_cfg
1043 case IWN_SDID_2x00_4:
1044 //iwl2000_2bgn_d_cfg
1045 sc->limits = &iwn2030_sensitivity_limits;
1046 sc->base_params = &iwn2000_base_params;
1047 sc->fwname = "iwn2000fw";
1048 break;
1049 default:
1050 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1051 "0x%04x rev %d not supported (subdevice) \n",
1052 pid, sc->subdevice_id, sc->hw_type);
1053 return ENOTSUP;
1054 }
1055 break;
1056/* 2x30 Series */
1057 case IWN_DID_2x30_1:
1058 case IWN_DID_2x30_2:
1059 switch(sc->subdevice_id) {
1060 case IWN_SDID_2x30_1:
1061 case IWN_SDID_2x30_3:
1062 case IWN_SDID_2x30_5:
1063 //iwl100_bgn_cfg
1064 case IWN_SDID_2x30_2:
1065 case IWN_SDID_2x30_4:
1066 case IWN_SDID_2x30_6:
1067 //iwl100_bg_cfg
1068 sc->limits = &iwn2030_sensitivity_limits;
1069 sc->base_params = &iwn2030_base_params;
1070 sc->fwname = "iwn2030fw";
1071 break;
1072 default:
1073 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1074 "0x%04x rev %d not supported (subdevice)\n", pid,
1075 sc->subdevice_id,sc->hw_type);
1076 return ENOTSUP;
1077 }
1078 break;
1079/* 5x00 Series */
1080 case IWN_DID_5x00_1:
1081 case IWN_DID_5x00_2:
1082 case IWN_DID_5x00_3:
1083 case IWN_DID_5x00_4:
1084 sc->limits = &iwn5000_sensitivity_limits;
1085 sc->base_params = &iwn5000_base_params;
1086 sc->fwname = "iwn5000fw";
1087 switch(sc->subdevice_id) {
1088 case IWN_SDID_5x00_1:
1089 case IWN_SDID_5x00_2:
1090 case IWN_SDID_5x00_3:
1091 case IWN_SDID_5x00_4:
1092 case IWN_SDID_5x00_9:
1093 case IWN_SDID_5x00_10:
1094 case IWN_SDID_5x00_11:
1095 case IWN_SDID_5x00_12:
1096 case IWN_SDID_5x00_17:
1097 case IWN_SDID_5x00_18:
1098 case IWN_SDID_5x00_19:
1099 case IWN_SDID_5x00_20:
1100 //iwl5100_agn_cfg
1101 sc->txchainmask = IWN_ANT_B;
1102 sc->rxchainmask = IWN_ANT_AB;
1103 break;
1104 case IWN_SDID_5x00_5:
1105 case IWN_SDID_5x00_6:
1106 case IWN_SDID_5x00_13:
1107 case IWN_SDID_5x00_14:
1108 case IWN_SDID_5x00_21:
1109 case IWN_SDID_5x00_22:
1110 //iwl5100_bgn_cfg
1111 sc->txchainmask = IWN_ANT_B;
1112 sc->rxchainmask = IWN_ANT_AB;
1113 break;
1114 case IWN_SDID_5x00_7:
1115 case IWN_SDID_5x00_8:
1116 case IWN_SDID_5x00_15:
1117 case IWN_SDID_5x00_16:
1118 case IWN_SDID_5x00_23:
1119 case IWN_SDID_5x00_24:
1120 //iwl5100_abg_cfg
1121 sc->txchainmask = IWN_ANT_B;
1122 sc->rxchainmask = IWN_ANT_AB;
1123 break;
1124 case IWN_SDID_5x00_25:
1125 case IWN_SDID_5x00_26:
1126 case IWN_SDID_5x00_27:
1127 case IWN_SDID_5x00_28:
1128 case IWN_SDID_5x00_29:
1129 case IWN_SDID_5x00_30:
1130 case IWN_SDID_5x00_31:
1131 case IWN_SDID_5x00_32:
1132 case IWN_SDID_5x00_33:
1133 case IWN_SDID_5x00_34:
1134 case IWN_SDID_5x00_35:
1135 case IWN_SDID_5x00_36:
1136 //iwl5300_agn_cfg
1137 sc->txchainmask = IWN_ANT_ABC;
1138 sc->rxchainmask = IWN_ANT_ABC;
1139 break;
1140 default:
1141 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1142 "0x%04x rev %d not supported (subdevice)\n", pid,
1143 sc->subdevice_id,sc->hw_type);
1144 return ENOTSUP;
1145 }
1146 break;
1147/* 5x50 Series */
1148 case IWN_DID_5x50_1:
1149 case IWN_DID_5x50_2:
1150 case IWN_DID_5x50_3:
1151 case IWN_DID_5x50_4:
1152 sc->limits = &iwn5000_sensitivity_limits;
1153 sc->base_params = &iwn5000_base_params;
1154 sc->fwname = "iwn5000fw";
1155 switch(sc->subdevice_id) {
1156 case IWN_SDID_5x50_1:
1157 case IWN_SDID_5x50_2:
1158 case IWN_SDID_5x50_3:
1159 //iwl5350_agn_cfg
1160 sc->limits = &iwn5000_sensitivity_limits;
1161 sc->base_params = &iwn5000_base_params;
1162 sc->fwname = "iwn5000fw";
1163 break;
1164 case IWN_SDID_5x50_4:
1165 case IWN_SDID_5x50_5:
1166 case IWN_SDID_5x50_8:
1167 case IWN_SDID_5x50_9:
1168 case IWN_SDID_5x50_10:
1169 case IWN_SDID_5x50_11:
1170 //iwl5150_agn_cfg
1171 case IWN_SDID_5x50_6:
1172 case IWN_SDID_5x50_7:
1173 case IWN_SDID_5x50_12:
1174 case IWN_SDID_5x50_13:
1175 //iwl5150_abg_cfg
1176 sc->limits = &iwn5000_sensitivity_limits;
1177 sc->fwname = "iwn5150fw";
1178 sc->base_params = &iwn_5x50_base_params;
1179 break;
1180 default:
1181 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1182 "0x%04x rev %d not supported (subdevice)\n", pid,
1183 sc->subdevice_id,sc->hw_type);
1184 return ENOTSUP;
1185 }
1186 break;
1187 default:
1188 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x"
1189 "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id,
1190 sc->hw_type);
1191 return ENOTSUP;
1192 }
1193 return 0;
1194}
1195
1196static int
1197iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
1198{
1199 struct iwn_ops *ops = &sc->ops;
1200
1201 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1202 ops->load_firmware = iwn4965_load_firmware;
1203 ops->read_eeprom = iwn4965_read_eeprom;
1204 ops->post_alive = iwn4965_post_alive;
1205 ops->nic_config = iwn4965_nic_config;
1206 ops->update_sched = iwn4965_update_sched;
1207 ops->get_temperature = iwn4965_get_temperature;
1208 ops->get_rssi = iwn4965_get_rssi;
1209 ops->set_txpower = iwn4965_set_txpower;
1210 ops->init_gains = iwn4965_init_gains;
1211 ops->set_gains = iwn4965_set_gains;
1212 ops->add_node = iwn4965_add_node;
1213 ops->tx_done = iwn4965_tx_done;
1214 ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
1215 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
1216 sc->ntxqs = IWN4965_NTXQUEUES;
1217 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
1218 sc->ndmachnls = IWN4965_NDMACHNLS;
1219 sc->broadcast_id = IWN4965_ID_BROADCAST;
1220 sc->rxonsz = IWN4965_RXONSZ;
1221 sc->schedsz = IWN4965_SCHEDSZ;
1222 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
1223 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
1224 sc->fwsz = IWN4965_FWSZ;
1225 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
1226 sc->limits = &iwn4965_sensitivity_limits;
1227 sc->fwname = "iwn4965fw";
1228 /* Override chains masks, ROM is known to be broken. */
1229 sc->txchainmask = IWN_ANT_AB;
1230 sc->rxchainmask = IWN_ANT_ABC;
1231 /* Enable normal btcoex */
1232 sc->sc_flags |= IWN_FLAG_BTCOEX;
1233
1234 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);
1235
1236 return 0;
1237}
1238
1239static int
1240iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
1241{
1242 struct iwn_ops *ops = &sc->ops;
1243
1244 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1245
1246 ops->load_firmware = iwn5000_load_firmware;
1247 ops->read_eeprom = iwn5000_read_eeprom;
1248 ops->post_alive = iwn5000_post_alive;
1249 ops->nic_config = iwn5000_nic_config;
1250 ops->update_sched = iwn5000_update_sched;
1251 ops->get_temperature = iwn5000_get_temperature;
1252 ops->get_rssi = iwn5000_get_rssi;
1253 ops->set_txpower = iwn5000_set_txpower;
1254 ops->init_gains = iwn5000_init_gains;
1255 ops->set_gains = iwn5000_set_gains;
1256 ops->add_node = iwn5000_add_node;
1257 ops->tx_done = iwn5000_tx_done;
1258 ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
1259 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
1260 sc->ntxqs = IWN5000_NTXQUEUES;
1261 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
1262 sc->ndmachnls = IWN5000_NDMACHNLS;
1263 sc->broadcast_id = IWN5000_ID_BROADCAST;
1264 sc->rxonsz = IWN5000_RXONSZ;
1265 sc->schedsz = IWN5000_SCHEDSZ;
1266 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
1267 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
1268 sc->fwsz = IWN5000_FWSZ;
1269 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
1270 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
1271 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
1272
1273 return 0;
1274}
1275
1276/*
1277 * Attach the interface to 802.11 radiotap.
1278 */
1279static void
1280iwn_radiotap_attach(struct iwn_softc *sc)
1281{
1282
1283 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1284 ieee80211_radiotap_attach(&sc->sc_ic,
1285 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
1286 IWN_TX_RADIOTAP_PRESENT,
1287 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
1288 IWN_RX_RADIOTAP_PRESENT);
1289 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1290}
1291
1292static void
1293iwn_sysctlattach(struct iwn_softc *sc)
1294{
1295#ifdef IWN_DEBUG
1296 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
1297 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
1298
1299 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1300 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
1301 "control debugging printfs");
1302#endif
1303}
1304
1305static struct ieee80211vap *
1306iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
1307 enum ieee80211_opmode opmode, int flags,
1308 const uint8_t bssid[IEEE80211_ADDR_LEN],
1309 const uint8_t mac[IEEE80211_ADDR_LEN])
1310{
1311 struct iwn_softc *sc = ic->ic_softc;
1312 struct iwn_vap *ivp;
1313 struct ieee80211vap *vap;
1314
1315 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
1316 return NULL;
1317
1318 ivp = malloc(sizeof(struct iwn_vap), M_80211_VAP, M_WAITOK | M_ZERO);
1319 vap = &ivp->iv_vap;
1320 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
1321 ivp->ctx = IWN_RXON_BSS_CTX;
1322 vap->iv_bmissthreshold = 10; /* override default */
1323 /* Override with driver methods. */
1324 ivp->iv_newstate = vap->iv_newstate;
1325 vap->iv_newstate = iwn_newstate;
1326 sc->ivap[IWN_RXON_BSS_CTX] = vap;
1327
1328 ieee80211_ratectl_init(vap);
1329 /* Complete setup. */
1330 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status,
1331 mac);
1332 ic->ic_opmode = opmode;
1333 return vap;
1334}
1335
1336static void
1337iwn_vap_delete(struct ieee80211vap *vap)
1338{
1339 struct iwn_vap *ivp = IWN_VAP(vap);
1340
1341 ieee80211_ratectl_deinit(vap);
1342 ieee80211_vap_detach(vap);
1343 free(ivp, M_80211_VAP);
1344}
1345
1346static void
1347iwn_xmit_queue_drain(struct iwn_softc *sc)
1348{
1349 struct mbuf *m;
1350 struct ieee80211_node *ni;
1351
1352 IWN_LOCK_ASSERT(sc);
1353 while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
1354 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
1355 ieee80211_free_node(ni);
1356 m_freem(m);
1357 }
1358}
1359
1360static int
1361iwn_xmit_queue_enqueue(struct iwn_softc *sc, struct mbuf *m)
1362{
1363
1364 IWN_LOCK_ASSERT(sc);
1365 return (mbufq_enqueue(&sc->sc_xmit_queue, m));
1366}
1367
1368static int
1369iwn_detach(device_t dev)
1370{
1371 struct iwn_softc *sc = device_get_softc(dev);
1372 int qid;
1373
1374 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1375
1376 if (sc->sc_ic.ic_softc != NULL) {
1377 /* Free the mbuf queue and node references */
1378 IWN_LOCK(sc);
1379 iwn_xmit_queue_drain(sc);
1380 IWN_UNLOCK(sc);
1381
1382 ieee80211_draintask(&sc->sc_ic, &sc->sc_reinit_task);
1383 ieee80211_draintask(&sc->sc_ic, &sc->sc_radioon_task);
1384 ieee80211_draintask(&sc->sc_ic, &sc->sc_radiooff_task);
1385 iwn_stop(sc);
1386
1387 taskqueue_drain_all(sc->sc_tq);
1388 taskqueue_free(sc->sc_tq);
1389
1390 callout_drain(&sc->watchdog_to);
1391 callout_drain(&sc->calib_to);
1392 ieee80211_ifdetach(&sc->sc_ic);
1393 }
1394
1395 mbufq_drain(&sc->sc_snd);
1396
1397 /* Uninstall interrupt handler. */
1398 if (sc->irq != NULL) {
1399 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
1400 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
1401 sc->irq);
1402 pci_release_msi(dev);
1403 }
1404
1405 /* Free DMA resources. */
1406 iwn_free_rx_ring(sc, &sc->rxq);
1407 for (qid = 0; qid < sc->ntxqs; qid++)
1408 iwn_free_tx_ring(sc, &sc->txq[qid]);
1409 iwn_free_sched(sc);
1410 iwn_free_kw(sc);
1411 if (sc->ict != NULL)
1412 iwn_free_ict(sc);
1413 iwn_free_fwmem(sc);
1414
1415 if (sc->mem != NULL)
1416 bus_release_resource(dev, SYS_RES_MEMORY,
1417 rman_get_rid(sc->mem), sc->mem);
1418
731 return 0;
732fail:
733 iwn_detach(dev);
734 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
735 return error;
736}
737
738/*
739 * Define the device-specific configuration based on the PCI device id
740 * (pid) and subdevice id.
741 */
742static int
743iwn_config_specific(struct iwn_softc *sc, uint16_t pid)
744{
745
746 switch (pid) {
747/* 4965 series */
748 case IWN_DID_4965_1:
749 case IWN_DID_4965_2:
750 case IWN_DID_4965_3:
751 case IWN_DID_4965_4:
752 sc->base_params = &iwn4965_base_params;
753 sc->limits = &iwn4965_sensitivity_limits;
754 sc->fwname = "iwn4965fw";
755 /* Override chains masks, ROM is known to be broken. */
756 sc->txchainmask = IWN_ANT_AB;
757 sc->rxchainmask = IWN_ANT_ABC;
758 /* Enable normal btcoex */
759 sc->sc_flags |= IWN_FLAG_BTCOEX;
760 break;
761/* 1000 Series */
762 case IWN_DID_1000_1:
763 case IWN_DID_1000_2:
764 switch(sc->subdevice_id) {
765 case IWN_SDID_1000_1:
766 case IWN_SDID_1000_2:
767 case IWN_SDID_1000_3:
768 case IWN_SDID_1000_4:
769 case IWN_SDID_1000_5:
770 case IWN_SDID_1000_6:
771 case IWN_SDID_1000_7:
772 case IWN_SDID_1000_8:
773 case IWN_SDID_1000_9:
774 case IWN_SDID_1000_10:
775 case IWN_SDID_1000_11:
776 case IWN_SDID_1000_12:
777 sc->limits = &iwn1000_sensitivity_limits;
778 sc->base_params = &iwn1000_base_params;
779 sc->fwname = "iwn1000fw";
780 break;
781 default:
782 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
783 "0x%04x rev %d not supported (subdevice)\n", pid,
784 sc->subdevice_id,sc->hw_type);
785 return ENOTSUP;
786 }
787 break;
788/* 6x00 Series */
789 case IWN_DID_6x00_2:
790 case IWN_DID_6x00_4:
791 case IWN_DID_6x00_1:
792 case IWN_DID_6x00_3:
793 sc->fwname = "iwn6000fw";
794 sc->limits = &iwn6000_sensitivity_limits;
795 switch(sc->subdevice_id) {
796 case IWN_SDID_6x00_1:
797 case IWN_SDID_6x00_2:
798 case IWN_SDID_6x00_8:
799 //iwl6000_3agn_cfg
800 sc->base_params = &iwn_6000_base_params;
801 break;
802 case IWN_SDID_6x00_3:
803 case IWN_SDID_6x00_6:
804 case IWN_SDID_6x00_9:
 805 			//iwl6000i_2agn
806 case IWN_SDID_6x00_4:
807 case IWN_SDID_6x00_7:
808 case IWN_SDID_6x00_10:
809 //iwl6000i_2abg_cfg
810 case IWN_SDID_6x00_5:
811 //iwl6000i_2bg_cfg
812 sc->base_params = &iwn_6000i_base_params;
813 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
814 sc->txchainmask = IWN_ANT_BC;
815 sc->rxchainmask = IWN_ANT_BC;
816 break;
817 default:
818 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
819 "0x%04x rev %d not supported (subdevice)\n", pid,
820 sc->subdevice_id,sc->hw_type);
821 return ENOTSUP;
822 }
823 break;
824/* 6x05 Series */
825 case IWN_DID_6x05_1:
826 case IWN_DID_6x05_2:
827 switch(sc->subdevice_id) {
828 case IWN_SDID_6x05_1:
829 case IWN_SDID_6x05_4:
830 case IWN_SDID_6x05_6:
831 //iwl6005_2agn_cfg
832 case IWN_SDID_6x05_2:
833 case IWN_SDID_6x05_5:
834 case IWN_SDID_6x05_7:
835 //iwl6005_2abg_cfg
836 case IWN_SDID_6x05_3:
837 //iwl6005_2bg_cfg
838 case IWN_SDID_6x05_8:
839 case IWN_SDID_6x05_9:
840 //iwl6005_2agn_sff_cfg
841 case IWN_SDID_6x05_10:
842 //iwl6005_2agn_d_cfg
843 case IWN_SDID_6x05_11:
844 //iwl6005_2agn_mow1_cfg
845 case IWN_SDID_6x05_12:
846 //iwl6005_2agn_mow2_cfg
847 sc->fwname = "iwn6000g2afw";
848 sc->limits = &iwn6000_sensitivity_limits;
849 sc->base_params = &iwn_6000g2_base_params;
850 break;
851 default:
852 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
853 "0x%04x rev %d not supported (subdevice)\n", pid,
854 sc->subdevice_id,sc->hw_type);
855 return ENOTSUP;
856 }
857 break;
858/* 6x35 Series */
859 case IWN_DID_6035_1:
860 case IWN_DID_6035_2:
861 switch(sc->subdevice_id) {
862 case IWN_SDID_6035_1:
863 case IWN_SDID_6035_2:
864 case IWN_SDID_6035_3:
865 case IWN_SDID_6035_4:
866 sc->fwname = "iwn6000g2bfw";
867 sc->limits = &iwn6235_sensitivity_limits;
868 sc->base_params = &iwn_6235_base_params;
869 break;
870 default:
871 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
872 "0x%04x rev %d not supported (subdevice)\n", pid,
873 sc->subdevice_id,sc->hw_type);
874 return ENOTSUP;
875 }
876 break;
877/* 6x50 WiFi/WiMax Series */
878 case IWN_DID_6050_1:
879 case IWN_DID_6050_2:
880 switch(sc->subdevice_id) {
881 case IWN_SDID_6050_1:
882 case IWN_SDID_6050_3:
883 case IWN_SDID_6050_5:
884 //iwl6050_2agn_cfg
885 case IWN_SDID_6050_2:
886 case IWN_SDID_6050_4:
887 case IWN_SDID_6050_6:
888 //iwl6050_2abg_cfg
889 sc->fwname = "iwn6050fw";
890 sc->txchainmask = IWN_ANT_AB;
891 sc->rxchainmask = IWN_ANT_AB;
892 sc->limits = &iwn6000_sensitivity_limits;
893 sc->base_params = &iwn_6050_base_params;
894 break;
895 default:
896 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
897 "0x%04x rev %d not supported (subdevice)\n", pid,
898 sc->subdevice_id,sc->hw_type);
899 return ENOTSUP;
900 }
901 break;
902/* 6150 WiFi/WiMax Series */
903 case IWN_DID_6150_1:
904 case IWN_DID_6150_2:
905 switch(sc->subdevice_id) {
906 case IWN_SDID_6150_1:
907 case IWN_SDID_6150_3:
908 case IWN_SDID_6150_5:
909 // iwl6150_bgn_cfg
910 case IWN_SDID_6150_2:
911 case IWN_SDID_6150_4:
912 case IWN_SDID_6150_6:
913 //iwl6150_bg_cfg
914 sc->fwname = "iwn6050fw";
915 sc->limits = &iwn6000_sensitivity_limits;
916 sc->base_params = &iwn_6150_base_params;
917 break;
918 default:
919 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
920 "0x%04x rev %d not supported (subdevice)\n", pid,
921 sc->subdevice_id,sc->hw_type);
922 return ENOTSUP;
923 }
924 break;
925/* 6030 Series and 1030 Series */
926 case IWN_DID_x030_1:
927 case IWN_DID_x030_2:
928 case IWN_DID_x030_3:
929 case IWN_DID_x030_4:
930 switch(sc->subdevice_id) {
931 case IWN_SDID_x030_1:
932 case IWN_SDID_x030_3:
933 case IWN_SDID_x030_5:
934 // iwl1030_bgn_cfg
935 case IWN_SDID_x030_2:
936 case IWN_SDID_x030_4:
937 case IWN_SDID_x030_6:
938 //iwl1030_bg_cfg
939 case IWN_SDID_x030_7:
940 case IWN_SDID_x030_10:
941 case IWN_SDID_x030_14:
942 //iwl6030_2agn_cfg
943 case IWN_SDID_x030_8:
944 case IWN_SDID_x030_11:
945 case IWN_SDID_x030_15:
946 // iwl6030_2bgn_cfg
947 case IWN_SDID_x030_9:
948 case IWN_SDID_x030_12:
949 case IWN_SDID_x030_16:
950 // iwl6030_2abg_cfg
951 case IWN_SDID_x030_13:
952 //iwl6030_2bg_cfg
953 sc->fwname = "iwn6000g2bfw";
954 sc->limits = &iwn6000_sensitivity_limits;
955 sc->base_params = &iwn_6000g2b_base_params;
956 break;
957 default:
958 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
959 "0x%04x rev %d not supported (subdevice)\n", pid,
960 sc->subdevice_id,sc->hw_type);
961 return ENOTSUP;
962 }
963 break;
964/* 130 Series WiFi */
965/* XXX: This series will need adjustment for rate.
966 * see rx_with_siso_diversity in linux kernel
967 */
968 case IWN_DID_130_1:
969 case IWN_DID_130_2:
970 switch(sc->subdevice_id) {
971 case IWN_SDID_130_1:
972 case IWN_SDID_130_3:
973 case IWN_SDID_130_5:
974 //iwl130_bgn_cfg
975 case IWN_SDID_130_2:
976 case IWN_SDID_130_4:
977 case IWN_SDID_130_6:
978 //iwl130_bg_cfg
979 sc->fwname = "iwn6000g2bfw";
980 sc->limits = &iwn6000_sensitivity_limits;
981 sc->base_params = &iwn_6000g2b_base_params;
982 break;
983 default:
984 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
985 "0x%04x rev %d not supported (subdevice)\n", pid,
986 sc->subdevice_id,sc->hw_type);
987 return ENOTSUP;
988 }
989 break;
990/* 100 Series WiFi */
991 case IWN_DID_100_1:
992 case IWN_DID_100_2:
993 switch(sc->subdevice_id) {
994 case IWN_SDID_100_1:
995 case IWN_SDID_100_2:
996 case IWN_SDID_100_3:
997 case IWN_SDID_100_4:
998 case IWN_SDID_100_5:
999 case IWN_SDID_100_6:
1000 sc->limits = &iwn1000_sensitivity_limits;
1001 sc->base_params = &iwn1000_base_params;
1002 sc->fwname = "iwn100fw";
1003 break;
1004 default:
1005 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1006 "0x%04x rev %d not supported (subdevice)\n", pid,
1007 sc->subdevice_id,sc->hw_type);
1008 return ENOTSUP;
1009 }
1010 break;
1011
1012/* 105 Series */
1013/* XXX: This series will need adjustment for rate.
1014 * see rx_with_siso_diversity in linux kernel
1015 */
1016 case IWN_DID_105_1:
1017 case IWN_DID_105_2:
1018 switch(sc->subdevice_id) {
1019 case IWN_SDID_105_1:
1020 case IWN_SDID_105_2:
1021 case IWN_SDID_105_3:
1022 //iwl105_bgn_cfg
1023 case IWN_SDID_105_4:
1024 //iwl105_bgn_d_cfg
1025 sc->limits = &iwn2030_sensitivity_limits;
1026 sc->base_params = &iwn2000_base_params;
1027 sc->fwname = "iwn105fw";
1028 break;
1029 default:
1030 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1031 "0x%04x rev %d not supported (subdevice)\n", pid,
1032 sc->subdevice_id,sc->hw_type);
1033 return ENOTSUP;
1034 }
1035 break;
1036
1037/* 135 Series */
1038/* XXX: This series will need adjustment for rate.
1039 * see rx_with_siso_diversity in linux kernel
1040 */
1041 case IWN_DID_135_1:
1042 case IWN_DID_135_2:
1043 switch(sc->subdevice_id) {
1044 case IWN_SDID_135_1:
1045 case IWN_SDID_135_2:
1046 case IWN_SDID_135_3:
1047 sc->limits = &iwn2030_sensitivity_limits;
1048 sc->base_params = &iwn2030_base_params;
1049 sc->fwname = "iwn135fw";
1050 break;
1051 default:
1052 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1053 "0x%04x rev %d not supported (subdevice)\n", pid,
1054 sc->subdevice_id,sc->hw_type);
1055 return ENOTSUP;
1056 }
1057 break;
1058
1059/* 2x00 Series */
1060 case IWN_DID_2x00_1:
1061 case IWN_DID_2x00_2:
1062 switch(sc->subdevice_id) {
1063 case IWN_SDID_2x00_1:
1064 case IWN_SDID_2x00_2:
1065 case IWN_SDID_2x00_3:
1066 //iwl2000_2bgn_cfg
1067 case IWN_SDID_2x00_4:
1068 //iwl2000_2bgn_d_cfg
1069 sc->limits = &iwn2030_sensitivity_limits;
1070 sc->base_params = &iwn2000_base_params;
1071 sc->fwname = "iwn2000fw";
1072 break;
1073 default:
1074 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1075 "0x%04x rev %d not supported (subdevice) \n",
1076 pid, sc->subdevice_id, sc->hw_type);
1077 return ENOTSUP;
1078 }
1079 break;
1080/* 2x30 Series */
1081 case IWN_DID_2x30_1:
1082 case IWN_DID_2x30_2:
1083 switch(sc->subdevice_id) {
1084 case IWN_SDID_2x30_1:
1085 case IWN_SDID_2x30_3:
1086 case IWN_SDID_2x30_5:
1087 //iwl100_bgn_cfg
1088 case IWN_SDID_2x30_2:
1089 case IWN_SDID_2x30_4:
1090 case IWN_SDID_2x30_6:
1091 //iwl100_bg_cfg
1092 sc->limits = &iwn2030_sensitivity_limits;
1093 sc->base_params = &iwn2030_base_params;
1094 sc->fwname = "iwn2030fw";
1095 break;
1096 default:
1097 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1098 "0x%04x rev %d not supported (subdevice)\n", pid,
1099 sc->subdevice_id,sc->hw_type);
1100 return ENOTSUP;
1101 }
1102 break;
1103/* 5x00 Series */
1104 case IWN_DID_5x00_1:
1105 case IWN_DID_5x00_2:
1106 case IWN_DID_5x00_3:
1107 case IWN_DID_5x00_4:
1108 sc->limits = &iwn5000_sensitivity_limits;
1109 sc->base_params = &iwn5000_base_params;
1110 sc->fwname = "iwn5000fw";
1111 switch(sc->subdevice_id) {
1112 case IWN_SDID_5x00_1:
1113 case IWN_SDID_5x00_2:
1114 case IWN_SDID_5x00_3:
1115 case IWN_SDID_5x00_4:
1116 case IWN_SDID_5x00_9:
1117 case IWN_SDID_5x00_10:
1118 case IWN_SDID_5x00_11:
1119 case IWN_SDID_5x00_12:
1120 case IWN_SDID_5x00_17:
1121 case IWN_SDID_5x00_18:
1122 case IWN_SDID_5x00_19:
1123 case IWN_SDID_5x00_20:
1124 //iwl5100_agn_cfg
1125 sc->txchainmask = IWN_ANT_B;
1126 sc->rxchainmask = IWN_ANT_AB;
1127 break;
1128 case IWN_SDID_5x00_5:
1129 case IWN_SDID_5x00_6:
1130 case IWN_SDID_5x00_13:
1131 case IWN_SDID_5x00_14:
1132 case IWN_SDID_5x00_21:
1133 case IWN_SDID_5x00_22:
1134 //iwl5100_bgn_cfg
1135 sc->txchainmask = IWN_ANT_B;
1136 sc->rxchainmask = IWN_ANT_AB;
1137 break;
1138 case IWN_SDID_5x00_7:
1139 case IWN_SDID_5x00_8:
1140 case IWN_SDID_5x00_15:
1141 case IWN_SDID_5x00_16:
1142 case IWN_SDID_5x00_23:
1143 case IWN_SDID_5x00_24:
1144 //iwl5100_abg_cfg
1145 sc->txchainmask = IWN_ANT_B;
1146 sc->rxchainmask = IWN_ANT_AB;
1147 break;
1148 case IWN_SDID_5x00_25:
1149 case IWN_SDID_5x00_26:
1150 case IWN_SDID_5x00_27:
1151 case IWN_SDID_5x00_28:
1152 case IWN_SDID_5x00_29:
1153 case IWN_SDID_5x00_30:
1154 case IWN_SDID_5x00_31:
1155 case IWN_SDID_5x00_32:
1156 case IWN_SDID_5x00_33:
1157 case IWN_SDID_5x00_34:
1158 case IWN_SDID_5x00_35:
1159 case IWN_SDID_5x00_36:
1160 //iwl5300_agn_cfg
1161 sc->txchainmask = IWN_ANT_ABC;
1162 sc->rxchainmask = IWN_ANT_ABC;
1163 break;
1164 default:
1165 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1166 "0x%04x rev %d not supported (subdevice)\n", pid,
1167 sc->subdevice_id,sc->hw_type);
1168 return ENOTSUP;
1169 }
1170 break;
1171/* 5x50 Series */
1172 case IWN_DID_5x50_1:
1173 case IWN_DID_5x50_2:
1174 case IWN_DID_5x50_3:
1175 case IWN_DID_5x50_4:
1176 sc->limits = &iwn5000_sensitivity_limits;
1177 sc->base_params = &iwn5000_base_params;
1178 sc->fwname = "iwn5000fw";
1179 switch(sc->subdevice_id) {
1180 case IWN_SDID_5x50_1:
1181 case IWN_SDID_5x50_2:
1182 case IWN_SDID_5x50_3:
1183 //iwl5350_agn_cfg
1184 sc->limits = &iwn5000_sensitivity_limits;
1185 sc->base_params = &iwn5000_base_params;
1186 sc->fwname = "iwn5000fw";
1187 break;
1188 case IWN_SDID_5x50_4:
1189 case IWN_SDID_5x50_5:
1190 case IWN_SDID_5x50_8:
1191 case IWN_SDID_5x50_9:
1192 case IWN_SDID_5x50_10:
1193 case IWN_SDID_5x50_11:
1194 //iwl5150_agn_cfg
1195 case IWN_SDID_5x50_6:
1196 case IWN_SDID_5x50_7:
1197 case IWN_SDID_5x50_12:
1198 case IWN_SDID_5x50_13:
1199 //iwl5150_abg_cfg
1200 sc->limits = &iwn5000_sensitivity_limits;
1201 sc->fwname = "iwn5150fw";
1202 sc->base_params = &iwn_5x50_base_params;
1203 break;
1204 default:
1205 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
1206 "0x%04x rev %d not supported (subdevice)\n", pid,
1207 sc->subdevice_id,sc->hw_type);
1208 return ENOTSUP;
1209 }
1210 break;
1211 default:
1212 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x"
1213	    " rev 0x%08x not supported (device)\n", pid, sc->subdevice_id,
1214 sc->hw_type);
1215 return ENOTSUP;
1216 }
1217 return 0;
1218}
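/*
 * Minimal sketch (hypothetical IWN_DID_NEW_1/IWN_SDID_NEW_1 ids, not real
 * hardware) of how an entry in iwn_config_specific() above is structured:
 * each PCI device id picks sensitivity limits, base parameters and a
 * firmware image, usually keyed further by the PCI subdevice id:
 *
 *	case IWN_DID_NEW_1:
 *		switch (sc->subdevice_id) {
 *		case IWN_SDID_NEW_1:
 *			sc->limits = &iwn1000_sensitivity_limits;
 *			sc->base_params = &iwn1000_base_params;
 *			sc->fwname = "iwn1000fw";
 *			break;
 *		default:
 *			return ENOTSUP;
 *		}
 *		break;
 */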
1219
1220static int
1221iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
1222{
1223 struct iwn_ops *ops = &sc->ops;
1224
1225 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1226 ops->load_firmware = iwn4965_load_firmware;
1227 ops->read_eeprom = iwn4965_read_eeprom;
1228 ops->post_alive = iwn4965_post_alive;
1229 ops->nic_config = iwn4965_nic_config;
1230 ops->update_sched = iwn4965_update_sched;
1231 ops->get_temperature = iwn4965_get_temperature;
1232 ops->get_rssi = iwn4965_get_rssi;
1233 ops->set_txpower = iwn4965_set_txpower;
1234 ops->init_gains = iwn4965_init_gains;
1235 ops->set_gains = iwn4965_set_gains;
1236 ops->add_node = iwn4965_add_node;
1237 ops->tx_done = iwn4965_tx_done;
1238 ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
1239 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
1240 sc->ntxqs = IWN4965_NTXQUEUES;
1241 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
1242 sc->ndmachnls = IWN4965_NDMACHNLS;
1243 sc->broadcast_id = IWN4965_ID_BROADCAST;
1244 sc->rxonsz = IWN4965_RXONSZ;
1245 sc->schedsz = IWN4965_SCHEDSZ;
1246 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
1247 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
1248 sc->fwsz = IWN4965_FWSZ;
1249 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
1250 sc->limits = &iwn4965_sensitivity_limits;
1251 sc->fwname = "iwn4965fw";
1252 /* Override chains masks, ROM is known to be broken. */
1253 sc->txchainmask = IWN_ANT_AB;
1254 sc->rxchainmask = IWN_ANT_ABC;
1255 /* Enable normal btcoex */
1256 sc->sc_flags |= IWN_FLAG_BTCOEX;
1257
1258 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);
1259
1260 return 0;
1261}
1262
1263static int
1264iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
1265{
1266 struct iwn_ops *ops = &sc->ops;
1267
1268 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1269
1270 ops->load_firmware = iwn5000_load_firmware;
1271 ops->read_eeprom = iwn5000_read_eeprom;
1272 ops->post_alive = iwn5000_post_alive;
1273 ops->nic_config = iwn5000_nic_config;
1274 ops->update_sched = iwn5000_update_sched;
1275 ops->get_temperature = iwn5000_get_temperature;
1276 ops->get_rssi = iwn5000_get_rssi;
1277 ops->set_txpower = iwn5000_set_txpower;
1278 ops->init_gains = iwn5000_init_gains;
1279 ops->set_gains = iwn5000_set_gains;
1280 ops->add_node = iwn5000_add_node;
1281 ops->tx_done = iwn5000_tx_done;
1282 ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
1283 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
1284 sc->ntxqs = IWN5000_NTXQUEUES;
1285 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
1286 sc->ndmachnls = IWN5000_NDMACHNLS;
1287 sc->broadcast_id = IWN5000_ID_BROADCAST;
1288 sc->rxonsz = IWN5000_RXONSZ;
1289 sc->schedsz = IWN5000_SCHEDSZ;
1290 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
1291 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
1292 sc->fwsz = IWN5000_FWSZ;
1293 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
1294 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
1295 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
1296
1297 return 0;
1298}
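/*
 * The two attach routines above only fill in the struct iwn_ops vtable and
 * the per-generation constants; the common code later dispatches through
 * sc->ops (e.g. ops->load_firmware, ops->read_eeprom) so the same driver
 * core drives both 4965 and 5000+ parts.
 */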
1299
1300/*
1301 * Attach the interface to 802.11 radiotap.
1302 */
1303static void
1304iwn_radiotap_attach(struct iwn_softc *sc)
1305{
1306
1307 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1308 ieee80211_radiotap_attach(&sc->sc_ic,
1309 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
1310 IWN_TX_RADIOTAP_PRESENT,
1311 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
1312 IWN_RX_RADIOTAP_PRESENT);
1313 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1314}
1315
1316static void
1317iwn_sysctlattach(struct iwn_softc *sc)
1318{
1319#ifdef IWN_DEBUG
1320 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
1321 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
1322
1323 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1324 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
1325 "control debugging printfs");
1326#endif
1327}
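/*
 * When the kernel is built with IWN_DEBUG, the knob registered above shows
 * up as dev.iwn.<unit>.debug and takes a bitmask of the driver's
 * IWN_DEBUG_* flags, e.g. (illustrative, unit 0):
 *
 *	# sysctl dev.iwn.0.debug=1
 */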
1328
1329static struct ieee80211vap *
1330iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
1331 enum ieee80211_opmode opmode, int flags,
1332 const uint8_t bssid[IEEE80211_ADDR_LEN],
1333 const uint8_t mac[IEEE80211_ADDR_LEN])
1334{
1335 struct iwn_softc *sc = ic->ic_softc;
1336 struct iwn_vap *ivp;
1337 struct ieee80211vap *vap;
1338
1339 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
1340 return NULL;
1341
1342 ivp = malloc(sizeof(struct iwn_vap), M_80211_VAP, M_WAITOK | M_ZERO);
1343 vap = &ivp->iv_vap;
1344 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
1345 ivp->ctx = IWN_RXON_BSS_CTX;
1346 vap->iv_bmissthreshold = 10; /* override default */
1347 /* Override with driver methods. */
1348 ivp->iv_newstate = vap->iv_newstate;
1349 vap->iv_newstate = iwn_newstate;
1350 sc->ivap[IWN_RXON_BSS_CTX] = vap;
1351
1352 ieee80211_ratectl_init(vap);
1353 /* Complete setup. */
1354 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status,
1355 mac);
1356 ic->ic_opmode = opmode;
1357 return vap;
1358}
1359
1360static void
1361iwn_vap_delete(struct ieee80211vap *vap)
1362{
1363 struct iwn_vap *ivp = IWN_VAP(vap);
1364
1365 ieee80211_ratectl_deinit(vap);
1366 ieee80211_vap_detach(vap);
1367 free(ivp, M_80211_VAP);
1368}
1369
1370static void
1371iwn_xmit_queue_drain(struct iwn_softc *sc)
1372{
1373 struct mbuf *m;
1374 struct ieee80211_node *ni;
1375
1376 IWN_LOCK_ASSERT(sc);
1377 while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
1378 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
1379 ieee80211_free_node(ni);
1380 m_freem(m);
1381 }
1382}
1383
1384static int
1385iwn_xmit_queue_enqueue(struct iwn_softc *sc, struct mbuf *m)
1386{
1387
1388 IWN_LOCK_ASSERT(sc);
1389 return (mbufq_enqueue(&sc->sc_xmit_queue, m));
1390}
1391
1392static int
1393iwn_detach(device_t dev)
1394{
1395 struct iwn_softc *sc = device_get_softc(dev);
1396 int qid;
1397
1398 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1399
1400 if (sc->sc_ic.ic_softc != NULL) {
1401 /* Free the mbuf queue and node references */
1402 IWN_LOCK(sc);
1403 iwn_xmit_queue_drain(sc);
1404 IWN_UNLOCK(sc);
1405
1406 ieee80211_draintask(&sc->sc_ic, &sc->sc_reinit_task);
1407 ieee80211_draintask(&sc->sc_ic, &sc->sc_radioon_task);
1408 ieee80211_draintask(&sc->sc_ic, &sc->sc_radiooff_task);
1409 iwn_stop(sc);
1410
1411 taskqueue_drain_all(sc->sc_tq);
1412 taskqueue_free(sc->sc_tq);
1413
1414 callout_drain(&sc->watchdog_to);
1415 callout_drain(&sc->calib_to);
1416 ieee80211_ifdetach(&sc->sc_ic);
1417 }
1418
1419 mbufq_drain(&sc->sc_snd);
1420
1421 /* Uninstall interrupt handler. */
1422 if (sc->irq != NULL) {
1423 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
1424 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
1425 sc->irq);
1426 pci_release_msi(dev);
1427 }
1428
1429 /* Free DMA resources. */
1430 iwn_free_rx_ring(sc, &sc->rxq);
1431 for (qid = 0; qid < sc->ntxqs; qid++)
1432 iwn_free_tx_ring(sc, &sc->txq[qid]);
1433 iwn_free_sched(sc);
1434 iwn_free_kw(sc);
1435 if (sc->ict != NULL)
1436 iwn_free_ict(sc);
1437 iwn_free_fwmem(sc);
1438
1439 if (sc->mem != NULL)
1440 bus_release_resource(dev, SYS_RES_MEMORY,
1441 rman_get_rid(sc->mem), sc->mem);
1442
1443 if (sc->sc_cdev) {
1444 destroy_dev(sc->sc_cdev);
1445 sc->sc_cdev = NULL;
1446 }
1447
1419 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
1420 IWN_LOCK_DESTROY(sc);
1421 return 0;
1422}
1423
1424static int
1425iwn_shutdown(device_t dev)
1426{
1427 struct iwn_softc *sc = device_get_softc(dev);
1428
1429 iwn_stop(sc);
1430 return 0;
1431}
1432
1433static int
1434iwn_suspend(device_t dev)
1435{
1436 struct iwn_softc *sc = device_get_softc(dev);
1437
1438 ieee80211_suspend_all(&sc->sc_ic);
1439 return 0;
1440}
1441
1442static int
1443iwn_resume(device_t dev)
1444{
1445 struct iwn_softc *sc = device_get_softc(dev);
1446
1447 /* Clear device-specific "PCI retry timeout" register (41h). */
1448 pci_write_config(dev, 0x41, 0, 1);
1449
1450 ieee80211_resume_all(&sc->sc_ic);
1451 return 0;
1452}
1453
1454static int
1455iwn_nic_lock(struct iwn_softc *sc)
1456{
1457 int ntries;
1458
1459 /* Request exclusive access to NIC. */
1460 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1461
1462 /* Spin until we actually get the lock. */
1463 for (ntries = 0; ntries < 1000; ntries++) {
1464 if ((IWN_READ(sc, IWN_GP_CNTRL) &
1465 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1466 IWN_GP_CNTRL_MAC_ACCESS_ENA)
1467 return 0;
1468 DELAY(10);
1469 }
1470 return ETIMEDOUT;
1471}
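/*
 * The request/grant handshake above polls for at most 1000 * 10us = 10ms
 * for the MAC access grant with the sleep bit clear; every successful
 * iwn_nic_lock() must be paired with iwn_nic_unlock() below, which drops
 * the request bit again.
 */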
1472
1473static __inline void
1474iwn_nic_unlock(struct iwn_softc *sc)
1475{
1476 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1477}
1478
1479static __inline uint32_t
1480iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1481{
1482 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1483 IWN_BARRIER_READ_WRITE(sc);
1484 return IWN_READ(sc, IWN_PRPH_RDATA);
1485}
1486
1487static __inline void
1488iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1489{
1490 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1491 IWN_BARRIER_WRITE(sc);
1492 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1493}
1494
1495static __inline void
1496iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1497{
1498 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1499}
1500
1501static __inline void
1502iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1503{
1504 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1505}
1506
1507static __inline void
1508iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1509 const uint32_t *data, int count)
1510{
1511 for (; count > 0; count--, data++, addr += 4)
1512 iwn_prph_write(sc, addr, *data);
1513}
1514
1515static __inline uint32_t
1516iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1517{
1518 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1519 IWN_BARRIER_READ_WRITE(sc);
1520 return IWN_READ(sc, IWN_MEM_RDATA);
1521}
1522
1523static __inline void
1524iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1525{
1526 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1527 IWN_BARRIER_WRITE(sc);
1528 IWN_WRITE(sc, IWN_MEM_WDATA, data);
1529}
1530
1531static __inline void
1532iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1533{
1534 uint32_t tmp;
1535
1536 tmp = iwn_mem_read(sc, addr & ~3);
1537 if (addr & 3)
1538 tmp = (tmp & 0x0000ffff) | data << 16;
1539 else
1540 tmp = (tmp & 0xffff0000) | data;
1541 iwn_mem_write(sc, addr & ~3, tmp);
1542}
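/*
 * Worked example for iwn_mem_write_2() (values are illustrative): a 16-bit
 * store of 0xbeef at byte address 0x806 reads the containing 32-bit word
 * at 0x804, merges the data into its upper half, i.e.
 * tmp = (tmp & 0x0000ffff) | 0xbeef0000, and writes the word back; a
 * word-aligned address (addr & 3 == 0) patches the lower half instead.
 */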
1543
1544static __inline void
1545iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1546 int count)
1547{
1548 for (; count > 0; count--, addr += 4)
1549 *data++ = iwn_mem_read(sc, addr);
1550}
1551
1552static __inline void
1553iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1554 int count)
1555{
1556 for (; count > 0; count--, addr += 4)
1557 iwn_mem_write(sc, addr, val);
1558}
1559
1560static int
1561iwn_eeprom_lock(struct iwn_softc *sc)
1562{
1563 int i, ntries;
1564
1565 for (i = 0; i < 100; i++) {
1566 /* Request exclusive access to EEPROM. */
1567 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1568 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1569
1570 /* Spin until we actually get the lock. */
1571 for (ntries = 0; ntries < 100; ntries++) {
1572 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1573 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1574 return 0;
1575 DELAY(10);
1576 }
1577 }
1578 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
1579 return ETIMEDOUT;
1580}
1581
1582static __inline void
1583iwn_eeprom_unlock(struct iwn_softc *sc)
1584{
1585 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1586}
1587
1588/*
1589 * Initialize access by host to One Time Programmable ROM.
1590 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1591 */
1592static int
1593iwn_init_otprom(struct iwn_softc *sc)
1594{
1595 uint16_t prev, base, next;
1596 int count, error;
1597
1598 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1599
1600 /* Wait for clock stabilization before accessing prph. */
1601 if ((error = iwn_clock_wait(sc)) != 0)
1602 return error;
1603
1604 if ((error = iwn_nic_lock(sc)) != 0)
1605 return error;
1606 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1607 DELAY(5);
1608 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1609 iwn_nic_unlock(sc);
1610
1611 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1612 if (sc->base_params->shadow_ram_support) {
1613 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1614 IWN_RESET_LINK_PWR_MGMT_DIS);
1615 }
1616 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1617 /* Clear ECC status. */
1618 IWN_SETBITS(sc, IWN_OTP_GP,
1619 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1620
1621 /*
1622 * Find the block before last block (contains the EEPROM image)
1623 * for HW without OTP shadow RAM.
1624 */
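	/*
	 * Illustrative walk (link values made up): with OTP blocks chained
	 * 0x000 -> 0x100 -> 0x200 -> 0x000, the loop below follows the link
	 * word of each block until it reads 0, leaving prev = 0x100 (the
	 * block before the last one) and base = 0x200; prom_base then points
	 * just past prev's link word, so later ROM reads come from that
	 * block.
	 */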
1625 if (! sc->base_params->shadow_ram_support) {
1626 /* Switch to absolute addressing mode. */
1627 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1628 base = prev = 0;
1629 for (count = 0; count < sc->base_params->max_ll_items;
1630 count++) {
1631 error = iwn_read_prom_data(sc, base, &next, 2);
1632 if (error != 0)
1633 return error;
1634 if (next == 0) /* End of linked-list. */
1635 break;
1636 prev = base;
1637 base = le16toh(next);
1638 }
1639 if (count == 0 || count == sc->base_params->max_ll_items)
1640 return EIO;
1641 /* Skip "next" word. */
1642 sc->prom_base = prev + 1;
1643 }
1644
1645 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1646
1647 return 0;
1648}
1649
1650static int
1651iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1652{
1653 uint8_t *out = data;
1654 uint32_t val, tmp;
1655 int ntries;
1656
1657 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1658
1659 addr += sc->prom_base;
1660 for (; count > 0; count -= 2, addr++) {
1661 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1662 for (ntries = 0; ntries < 10; ntries++) {
1663 val = IWN_READ(sc, IWN_EEPROM);
1664 if (val & IWN_EEPROM_READ_VALID)
1665 break;
1666 DELAY(5);
1667 }
1668 if (ntries == 10) {
1669 device_printf(sc->sc_dev,
1670 "timeout reading ROM at 0x%x\n", addr);
1671 return ETIMEDOUT;
1672 }
1673 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1674 /* OTPROM, check for ECC errors. */
1675 tmp = IWN_READ(sc, IWN_OTP_GP);
1676 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1677 device_printf(sc->sc_dev,
1678 "OTPROM ECC error at 0x%x\n", addr);
1679 return EIO;
1680 }
1681 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1682 /* Correctable ECC error, clear bit. */
1683 IWN_SETBITS(sc, IWN_OTP_GP,
1684 IWN_OTP_GP_ECC_CORR_STTS);
1685 }
1686 }
1687 *out++ = val >> 16;
1688 if (count > 1)
1689 *out++ = val >> 24;
1690 }
1691
1692 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1693
1694 return 0;
1695}
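/*
 * iwn_read_prom_data() addresses the ROM in 16-bit words while "count" is
 * in bytes; each poll of IWN_EEPROM returns one word in the upper half of
 * the register, hence the val >> 16 / val >> 24 extraction above.  A
 * typical caller (sketch; the offset constant is assumed to come from the
 * register definitions header) reads the MAC address as
 *
 *	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
 */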
1696
1697static void
1698iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1699{
1700 if (error != 0)
1701 return;
1702 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1703 *(bus_addr_t *)arg = segs[0].ds_addr;
1704}
1705
1706static int
1707iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1708 void **kvap, bus_size_t size, bus_size_t alignment)
1709{
1710 int error;
1711
1712 dma->tag = NULL;
1713 dma->size = size;
1714
1715 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1716 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1717 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1718 if (error != 0)
1719 goto fail;
1720
1721 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1722 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1723 if (error != 0)
1724 goto fail;
1725
1726 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1727 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1728 if (error != 0)
1729 goto fail;
1730
1731 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1732
1733 if (kvap != NULL)
1734 *kvap = dma->vaddr;
1735
1736 return 0;
1737
1738fail: iwn_dma_contig_free(dma);
1739 return error;
1740}
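/*
 * iwn_dma_contig_alloc() hands back a single physically contiguous,
 * zeroed, coherent segment that is 32-bit addressable and has the
 * requested alignment; the bus address ends up in dma->paddr via the
 * iwn_dma_map_addr() callback.  Everything is released again with
 * iwn_dma_contig_free().
 */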
1741
1742static void
1743iwn_dma_contig_free(struct iwn_dma_info *dma)
1744{
1745 if (dma->vaddr != NULL) {
1746 bus_dmamap_sync(dma->tag, dma->map,
1747 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1748 bus_dmamap_unload(dma->tag, dma->map);
1749 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1750 dma->vaddr = NULL;
1751 }
1752 if (dma->tag != NULL) {
1753 bus_dma_tag_destroy(dma->tag);
1754 dma->tag = NULL;
1755 }
1756}
1757
1758static int
1759iwn_alloc_sched(struct iwn_softc *sc)
1760{
1761 /* TX scheduler rings must be aligned on a 1KB boundary. */
1762 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1763 sc->schedsz, 1024);
1764}
1765
1766static void
1767iwn_free_sched(struct iwn_softc *sc)
1768{
1769 iwn_dma_contig_free(&sc->sched_dma);
1770}
1771
1772static int
1773iwn_alloc_kw(struct iwn_softc *sc)
1774{
1775 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1776 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1777}
1778
1779static void
1780iwn_free_kw(struct iwn_softc *sc)
1781{
1782 iwn_dma_contig_free(&sc->kw_dma);
1783}
1784
1785static int
1786iwn_alloc_ict(struct iwn_softc *sc)
1787{
1788 /* ICT table must be aligned on a 4KB boundary. */
1789 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1790 IWN_ICT_SIZE, 4096);
1791}
1792
1793static void
1794iwn_free_ict(struct iwn_softc *sc)
1795{
1796 iwn_dma_contig_free(&sc->ict_dma);
1797}
1798
1799static int
1800iwn_alloc_fwmem(struct iwn_softc *sc)
1801{
1802 /* Must be aligned on a 16-byte boundary. */
1803 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1804}
1805
1806static void
1807iwn_free_fwmem(struct iwn_softc *sc)
1808{
1809 iwn_dma_contig_free(&sc->fw_dma);
1810}
1811
1812static int
1813iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1814{
1815 bus_size_t size;
1816 int i, error;
1817
1818 ring->cur = 0;
1819
1820 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1821
1822 /* Allocate RX descriptors (256-byte aligned). */
1823 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1824 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1825 size, 256);
1826 if (error != 0) {
1827 device_printf(sc->sc_dev,
1828 "%s: could not allocate RX ring DMA memory, error %d\n",
1829 __func__, error);
1830 goto fail;
1831 }
1832
1833 /* Allocate RX status area (16-byte aligned). */
1834 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1835 sizeof (struct iwn_rx_status), 16);
1836 if (error != 0) {
1837 device_printf(sc->sc_dev,
1838 "%s: could not allocate RX status DMA memory, error %d\n",
1839 __func__, error);
1840 goto fail;
1841 }
1842
1843 /* Create RX buffer DMA tag. */
1844 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1845 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1846 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1847 &ring->data_dmat);
1848 if (error != 0) {
1849 device_printf(sc->sc_dev,
1850 "%s: could not create RX buf DMA tag, error %d\n",
1851 __func__, error);
1852 goto fail;
1853 }
1854
1855 /*
1856 * Allocate and map RX buffers.
1857 */
1858 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1859 struct iwn_rx_data *data = &ring->data[i];
1860 bus_addr_t paddr;
1861
1862 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1863 if (error != 0) {
1864 device_printf(sc->sc_dev,
1865 "%s: could not create RX buf DMA map, error %d\n",
1866 __func__, error);
1867 goto fail;
1868 }
1869
1870 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1871 IWN_RBUF_SIZE);
1872 if (data->m == NULL) {
1873 device_printf(sc->sc_dev,
1874 "%s: could not allocate RX mbuf\n", __func__);
1875 error = ENOBUFS;
1876 goto fail;
1877 }
1878
1879 error = bus_dmamap_load(ring->data_dmat, data->map,
1880 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1881 &paddr, BUS_DMA_NOWAIT);
1882 if (error != 0 && error != EFBIG) {
1883 device_printf(sc->sc_dev,
1884 "%s: can't map mbuf, error %d\n", __func__,
1885 error);
1886 goto fail;
1887 }
1888
1889 /* Set physical address of RX buffer (256-byte aligned). */
1890 ring->desc[i] = htole32(paddr >> 8);
1891 }
1892
1893 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1894 BUS_DMASYNC_PREWRITE);
1895
1896 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
1897
1898 return 0;
1899
1900fail: iwn_free_rx_ring(sc, ring);
1901
1902 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
1903
1904 return error;
1905}
1906
1907static void
1908iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1909{
1910 int ntries;
1911
1912 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
1913
1914 if (iwn_nic_lock(sc) == 0) {
1915 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1916 for (ntries = 0; ntries < 1000; ntries++) {
1917 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1918 IWN_FH_RX_STATUS_IDLE)
1919 break;
1920 DELAY(10);
1921 }
1922 iwn_nic_unlock(sc);
1923 }
1924 ring->cur = 0;
1925 sc->last_rx_valid = 0;
1926}
1927
1928static void
1929iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1930{
1931 int i;
1932
1933 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
1934
1935 iwn_dma_contig_free(&ring->desc_dma);
1936 iwn_dma_contig_free(&ring->stat_dma);
1937
1938 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1939 struct iwn_rx_data *data = &ring->data[i];
1940
1941 if (data->m != NULL) {
1942 bus_dmamap_sync(ring->data_dmat, data->map,
1943 BUS_DMASYNC_POSTREAD);
1944 bus_dmamap_unload(ring->data_dmat, data->map);
1945 m_freem(data->m);
1946 data->m = NULL;
1947 }
1948 if (data->map != NULL)
1949 bus_dmamap_destroy(ring->data_dmat, data->map);
1950 }
1951 if (ring->data_dmat != NULL) {
1952 bus_dma_tag_destroy(ring->data_dmat);
1953 ring->data_dmat = NULL;
1954 }
1955}
1956
1957static int
1958iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1959{
1960 bus_addr_t paddr;
1961 bus_size_t size;
1962 int i, error;
1963
1964 ring->qid = qid;
1965 ring->queued = 0;
1966 ring->cur = 0;
1967
1968 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1969
1970 /* Allocate TX descriptors (256-byte aligned). */
1971 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1972 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1973 size, 256);
1974 if (error != 0) {
1975 device_printf(sc->sc_dev,
1976 "%s: could not allocate TX ring DMA memory, error %d\n",
1977 __func__, error);
1978 goto fail;
1979 }
1980
1981 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1982 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1983 size, 4);
1984 if (error != 0) {
1985 device_printf(sc->sc_dev,
1986 "%s: could not allocate TX cmd DMA memory, error %d\n",
1987 __func__, error);
1988 goto fail;
1989 }
1990
1991 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1992 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1993 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1994 &ring->data_dmat);
1995 if (error != 0) {
1996 device_printf(sc->sc_dev,
1997 "%s: could not create TX buf DMA tag, error %d\n",
1998 __func__, error);
1999 goto fail;
2000 }
2001
2002 paddr = ring->cmd_dma.paddr;
2003 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2004 struct iwn_tx_data *data = &ring->data[i];
2005
2006 data->cmd_paddr = paddr;
2007 data->scratch_paddr = paddr + 12;
2008 paddr += sizeof (struct iwn_tx_cmd);
2009
2010 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2011 if (error != 0) {
2012 device_printf(sc->sc_dev,
2013 "%s: could not create TX buf DMA map, error %d\n",
2014 __func__, error);
2015 goto fail;
2016 }
2017 }
2018
2019 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2020
2021 return 0;
2022
2023fail: iwn_free_tx_ring(sc, ring);
2024 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2025 return error;
2026}
2027
2028static void
2029iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2030{
2031 int i;
2032
2033 DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);
2034
2035 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2036 struct iwn_tx_data *data = &ring->data[i];
2037
2038 if (data->m != NULL) {
2039 bus_dmamap_sync(ring->data_dmat, data->map,
2040 BUS_DMASYNC_POSTWRITE);
2041 bus_dmamap_unload(ring->data_dmat, data->map);
2042 m_freem(data->m);
2043 data->m = NULL;
2044 }
2045 if (data->ni != NULL) {
2046 ieee80211_free_node(data->ni);
2047 data->ni = NULL;
2048 }
2049 }
2050 /* Clear TX descriptors. */
2051 memset(ring->desc, 0, ring->desc_dma.size);
2052 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2053 BUS_DMASYNC_PREWRITE);
2054 sc->qfullmsk &= ~(1 << ring->qid);
2055 ring->queued = 0;
2056 ring->cur = 0;
2057}
2058
2059static void
2060iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2061{
2062 int i;
2063
2064 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
2065
2066 iwn_dma_contig_free(&ring->desc_dma);
2067 iwn_dma_contig_free(&ring->cmd_dma);
2068
2069 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2070 struct iwn_tx_data *data = &ring->data[i];
2071
2072 if (data->m != NULL) {
2073 bus_dmamap_sync(ring->data_dmat, data->map,
2074 BUS_DMASYNC_POSTWRITE);
2075 bus_dmamap_unload(ring->data_dmat, data->map);
2076 m_freem(data->m);
2077 }
2078 if (data->map != NULL)
2079 bus_dmamap_destroy(ring->data_dmat, data->map);
2080 }
2081 if (ring->data_dmat != NULL) {
2082 bus_dma_tag_destroy(ring->data_dmat);
2083 ring->data_dmat = NULL;
2084 }
2085}
2086
2087static void
2088iwn5000_ict_reset(struct iwn_softc *sc)
2089{
2090 /* Disable interrupts. */
2091 IWN_WRITE(sc, IWN_INT_MASK, 0);
2092
2093 /* Reset ICT table. */
2094 memset(sc->ict, 0, IWN_ICT_SIZE);
2095 sc->ict_cur = 0;
2096
2097 /* Set physical address of ICT table (4KB aligned). */
2098 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
2099 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
2100 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
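	/* NB: the table address is programmed in 4 KB units, hence the >> 12. */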
2101
2102 /* Enable periodic RX interrupt. */
2103 sc->int_mask |= IWN_INT_RX_PERIODIC;
2104 /* Switch to ICT interrupt mode in driver. */
2105 sc->sc_flags |= IWN_FLAG_USE_ICT;
2106
2107 /* Re-enable interrupts. */
2108 IWN_WRITE(sc, IWN_INT, 0xffffffff);
2109 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2110}
2111
2112static int
2113iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
2114{
2115 struct iwn_ops *ops = &sc->ops;
2116 uint16_t val;
2117 int error;
2118
2119 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2120
2121 /* Check whether adapter has an EEPROM or an OTPROM. */
2122 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
2123 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
2124 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
2125 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
2126 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
2127
2128 /* Adapter has to be powered on for EEPROM access to work. */
2129 if ((error = iwn_apm_init(sc)) != 0) {
2130 device_printf(sc->sc_dev,
2131 "%s: could not power ON adapter, error %d\n", __func__,
2132 error);
2133 return error;
2134 }
2135
2136 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
2137 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
2138 return EIO;
2139 }
2140 if ((error = iwn_eeprom_lock(sc)) != 0) {
2141 device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
2142 __func__, error);
2143 return error;
2144 }
2145 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
2146 if ((error = iwn_init_otprom(sc)) != 0) {
2147 device_printf(sc->sc_dev,
2148 "%s: could not initialize OTPROM, error %d\n",
2149 __func__, error);
2150 return error;
2151 }
2152 }
2153
2154 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
2155 DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
2156 /* Check if HT support is bonded out. */
2157 if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
2158 sc->sc_flags |= IWN_FLAG_HAS_11N;
2159
2160 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
2161 sc->rfcfg = le16toh(val);
2162 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
2163 /* Read Tx/Rx chains from ROM unless it's known to be broken. */
2164 if (sc->txchainmask == 0)
2165 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
2166 if (sc->rxchainmask == 0)
2167 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
2168
2169 /* Read MAC address. */
2170 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
2171
2172 /* Read adapter-specific information from EEPROM. */
2173 ops->read_eeprom(sc);
2174
2175 iwn_apm_stop(sc); /* Power OFF adapter. */
2176
2177 iwn_eeprom_unlock(sc);
2178
2179 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2180
2181 return 0;
2182}
2183
2184static void
2185iwn4965_read_eeprom(struct iwn_softc *sc)
2186{
2187 uint32_t addr;
2188 uint16_t val;
2189 int i;
2190
2191 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2192
2193 /* Read regulatory domain (4 ASCII characters). */
2194 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
2195
2196 /* Read the list of authorized channels (20MHz ones only). */
2197 for (i = 0; i < IWN_NBANDS - 1; i++) {
2198 addr = iwn4965_regulatory_bands[i];
2199 iwn_read_eeprom_channels(sc, i, addr);
2200 }
2201
2202 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
2203 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
2204 sc->maxpwr2GHz = val & 0xff;
2205 sc->maxpwr5GHz = val >> 8;
2206 /* Check that EEPROM values are within valid range. */
2207 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
2208 sc->maxpwr5GHz = 38;
2209 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
2210 sc->maxpwr2GHz = 38;
2211 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
2212 sc->maxpwr2GHz, sc->maxpwr5GHz);
2213
2214 /* Read samples for each TX power group. */
2215 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
2216 sizeof sc->bands);
2217
2218 /* Read voltage at which samples were taken. */
2219 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
2220 sc->eeprom_voltage = (int16_t)le16toh(val);
2221 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
2222 sc->eeprom_voltage);
2223
2224#ifdef IWN_DEBUG
2225 /* Print samples. */
2226 if (sc->sc_debug & IWN_DEBUG_ANY) {
2227 for (i = 0; i < IWN_NBANDS - 1; i++)
2228 iwn4965_print_power_group(sc, i);
2229 }
2230#endif
2231
2232 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2233}
2234
2235#ifdef IWN_DEBUG
2236static void
2237iwn4965_print_power_group(struct iwn_softc *sc, int i)
2238{
2239 struct iwn4965_eeprom_band *band = &sc->bands[i];
2240 struct iwn4965_eeprom_chan_samples *chans = band->chans;
2241 int j, c;
2242
2243 printf("===band %d===\n", i);
2244 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
2245 printf("chan1 num=%d\n", chans[0].num);
2246 for (c = 0; c < 2; c++) {
2247 for (j = 0; j < IWN_NSAMPLES; j++) {
2248 printf("chain %d, sample %d: temp=%d gain=%d "
2249 "power=%d pa_det=%d\n", c, j,
2250 chans[0].samples[c][j].temp,
2251 chans[0].samples[c][j].gain,
2252 chans[0].samples[c][j].power,
2253 chans[0].samples[c][j].pa_det);
2254 }
2255 }
2256 printf("chan2 num=%d\n", chans[1].num);
2257 for (c = 0; c < 2; c++) {
2258 for (j = 0; j < IWN_NSAMPLES; j++) {
2259 printf("chain %d, sample %d: temp=%d gain=%d "
2260 "power=%d pa_det=%d\n", c, j,
2261 chans[1].samples[c][j].temp,
2262 chans[1].samples[c][j].gain,
2263 chans[1].samples[c][j].power,
2264 chans[1].samples[c][j].pa_det);
2265 }
2266 }
2267}
2268#endif
2269
2270static void
2271iwn5000_read_eeprom(struct iwn_softc *sc)
2272{
2273 struct iwn5000_eeprom_calib_hdr hdr;
2274 int32_t volt;
2275 uint32_t base, addr;
2276 uint16_t val;
2277 int i;
2278
2279 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2280
2281 /* Read regulatory domain (4 ASCII characters). */
2282 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2283 base = le16toh(val);
2284 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
2285 sc->eeprom_domain, 4);
2286
2287 /* Read the list of authorized channels (20MHz ones only). */
2288 for (i = 0; i < IWN_NBANDS - 1; i++) {
2289 addr = base + sc->base_params->regulatory_bands[i];
2290 iwn_read_eeprom_channels(sc, i, addr);
2291 }
2292
2293 /* Read enhanced TX power information for 6000 Series. */
2294 if (sc->base_params->enhanced_TX_power)
2295 iwn_read_eeprom_enhinfo(sc);
2296
2297 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
2298 base = le16toh(val);
2299 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
2300 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2301 "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
2302 hdr.version, hdr.pa_type, le16toh(hdr.volt));
2303 sc->calib_ver = hdr.version;
2304
2305 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
2306 sc->eeprom_voltage = le16toh(hdr.volt);
2307 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2308 sc->eeprom_temp_high=le16toh(val);
2309 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2310 sc->eeprom_temp = le16toh(val);
2311 }
2312
2313 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
2314 /* Compute temperature offset. */
2315 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2316 sc->eeprom_temp = le16toh(val);
2317 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2318 volt = le16toh(val);
2319 sc->temp_off = sc->eeprom_temp - (volt / -5);
2320 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
2321 sc->eeprom_temp, volt, sc->temp_off);
2322 } else {
2323 /* Read crystal calibration. */
2324 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
2325 &sc->eeprom_crystal, sizeof (uint32_t));
2326 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
2327 le32toh(sc->eeprom_crystal));
2328 }
2329
2330 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2331
2332}
2333
2334/*
2335 * Translate EEPROM flags to net80211.
2336 */
2337static uint32_t
2338iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
2339{
2340 uint32_t nflags;
2341
2342 nflags = 0;
2343 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
2344 nflags |= IEEE80211_CHAN_PASSIVE;
2345 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
2346 nflags |= IEEE80211_CHAN_NOADHOC;
2347 if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
2348 nflags |= IEEE80211_CHAN_DFS;
2349 /* XXX apparently IBSS may still be marked */
2350 nflags |= IEEE80211_CHAN_NOADHOC;
2351 }
2352
2353 return nflags;
2354}
2355
2356static void
2357iwn_read_eeprom_band(struct iwn_softc *sc, int n)
2358{
2359 struct ieee80211com *ic = &sc->sc_ic;
2360 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
2361 const struct iwn_chan_band *band = &iwn_bands[n];
2362 struct ieee80211_channel *c;
2363 uint8_t chan;
2364 int i, nflags;
2365
2366 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2367
2368 for (i = 0; i < band->nchan; i++) {
2369 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2370 DPRINTF(sc, IWN_DEBUG_RESET,
2371 "skip chan %d flags 0x%x maxpwr %d\n",
2372 band->chan[i], channels[i].flags,
2373 channels[i].maxpwr);
2374 continue;
2375 }
2376 chan = band->chan[i];
2377 nflags = iwn_eeprom_channel_flags(&channels[i]);
2378
2379 c = &ic->ic_channels[ic->ic_nchans++];
2380 c->ic_ieee = chan;
2381 c->ic_maxregpower = channels[i].maxpwr;
2382 c->ic_maxpower = 2*c->ic_maxregpower;
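		/* NB: ic_maxregpower is in dBm; ic_maxpower is in half-dBm units. */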
2383
2384 if (n == 0) { /* 2GHz band */
2385 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
2386			/* G implies B is supported */
2387 c->ic_flags = IEEE80211_CHAN_B | nflags;
2388 c = &ic->ic_channels[ic->ic_nchans++];
2389 c[0] = c[-1];
2390 c->ic_flags = IEEE80211_CHAN_G | nflags;
2391 } else { /* 5GHz band */
2392 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
2393 c->ic_flags = IEEE80211_CHAN_A | nflags;
2394 }
2395
2396 /* Save maximum allowed TX power for this channel. */
2397 sc->maxpwr[chan] = channels[i].maxpwr;
2398
2399 DPRINTF(sc, IWN_DEBUG_RESET,
2400 "add chan %d flags 0x%x maxpwr %d\n", chan,
2401 channels[i].flags, channels[i].maxpwr);
2402
2403 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
2404 /* add HT20, HT40 added separately */
2405 c = &ic->ic_channels[ic->ic_nchans++];
2406 c[0] = c[-1];
2407 c->ic_flags |= IEEE80211_CHAN_HT20;
2408 }
2409 }
2410
2411 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2412
2413}
2414
2415static void
2416iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
2417{
2418 struct ieee80211com *ic = &sc->sc_ic;
2419 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
2420 const struct iwn_chan_band *band = &iwn_bands[n];
2421 struct ieee80211_channel *c, *cent, *extc;
2422 uint8_t chan;
2423 int i, nflags;
2424
2425 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);
2426
2427 if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
2428 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
2429 return;
2430 }
2431
2432 for (i = 0; i < band->nchan; i++) {
2433 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2434 DPRINTF(sc, IWN_DEBUG_RESET,
2435 "skip chan %d flags 0x%x maxpwr %d\n",
2436 band->chan[i], channels[i].flags,
2437 channels[i].maxpwr);
2438 continue;
2439 }
2440 chan = band->chan[i];
2441 nflags = iwn_eeprom_channel_flags(&channels[i]);
2442
2443 /*
2444 * Each entry defines an HT40 channel pair; find the
2445 * center channel, then the extension channel above.
2446 */
2447 cent = ieee80211_find_channel_byieee(ic, chan,
2448 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2449 if (cent == NULL) { /* XXX shouldn't happen */
2450 device_printf(sc->sc_dev,
2451 "%s: no entry for channel %d\n", __func__, chan);
2452 continue;
2453 }
2454 extc = ieee80211_find_channel(ic, cent->ic_freq+20,
2455 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2456 if (extc == NULL) {
2457 DPRINTF(sc, IWN_DEBUG_RESET,
2458 "%s: skip chan %d, extension channel not found\n",
2459 __func__, chan);
2460 continue;
2461 }
2462
2463 DPRINTF(sc, IWN_DEBUG_RESET,
2464 "add ht40 chan %d flags 0x%x maxpwr %d\n",
2465 chan, channels[i].flags, channels[i].maxpwr);
2466
2467 c = &ic->ic_channels[ic->ic_nchans++];
2468 c[0] = cent[0];
2469 c->ic_extieee = extc->ic_ieee;
2470 c->ic_flags &= ~IEEE80211_CHAN_HT;
2471 c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
2472 c = &ic->ic_channels[ic->ic_nchans++];
2473 c[0] = extc[0];
2474 c->ic_extieee = cent->ic_ieee;
2475 c->ic_flags &= ~IEEE80211_CHAN_HT;
2476 c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
2477 }
2478
2479 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2480
2481}
2482
2483static void
2484iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
2485{
2486 struct ieee80211com *ic = &sc->sc_ic;
2487
2488 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
2489 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
2490
2491 if (n < 5)
2492 iwn_read_eeprom_band(sc, n);
2493 else
2494 iwn_read_eeprom_ht40(sc, n);
2495 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
2496}
2497
2498static struct iwn_eeprom_chan *
2499iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
2500{
2501 int band, chan, i, j;
2502
2503 if (IEEE80211_IS_CHAN_HT40(c)) {
2504 band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
2505 if (IEEE80211_IS_CHAN_HT40D(c))
2506 chan = c->ic_extieee;
2507 else
2508 chan = c->ic_ieee;
2509 for (i = 0; i < iwn_bands[band].nchan; i++) {
2510 if (iwn_bands[band].chan[i] == chan)
2511 return &sc->eeprom_channels[band][i];
2512 }
2513 } else {
2514 for (j = 0; j < 5; j++) {
2515 for (i = 0; i < iwn_bands[j].nchan; i++) {
2516 if (iwn_bands[j].chan[i] == c->ic_ieee)
2517 return &sc->eeprom_channels[j][i];
2518 }
2519 }
2520 }
2521 return NULL;
2522}
2523
2524/*
2525 * Enforce flags read from EEPROM.
2526 */
2527static int
2528iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
2529 int nchan, struct ieee80211_channel chans[])
2530{
2531 struct iwn_softc *sc = ic->ic_softc;
2532 int i;
2533
2534 for (i = 0; i < nchan; i++) {
2535 struct ieee80211_channel *c = &chans[i];
2536 struct iwn_eeprom_chan *channel;
2537
2538 channel = iwn_find_eeprom_channel(sc, c);
2539 if (channel == NULL) {
2540 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
2541 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2542 return EINVAL;
2543 }
2544 c->ic_flags |= iwn_eeprom_channel_flags(channel);
2545 }
2546
2547 return 0;
2548}
2549
2550static void
2551iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2552{
2553 struct iwn_eeprom_enhinfo enhinfo[35];
2554 struct ieee80211com *ic = &sc->sc_ic;
2555 struct ieee80211_channel *c;
2556 uint16_t val, base;
2557 int8_t maxpwr;
2558 uint8_t flags;
2559 int i, j;
2560
2561 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2562
2563 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2564 base = le16toh(val);
2565 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2566 enhinfo, sizeof enhinfo);
2567
2568 for (i = 0; i < nitems(enhinfo); i++) {
2569 flags = enhinfo[i].flags;
2570 if (!(flags & IWN_ENHINFO_VALID))
2571 continue; /* Skip invalid entries. */
2572
2573 maxpwr = 0;
2574 if (sc->txchainmask & IWN_ANT_A)
2575 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2576 if (sc->txchainmask & IWN_ANT_B)
2577 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2578 if (sc->txchainmask & IWN_ANT_C)
2579 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2580 if (sc->ntxchains == 2)
2581 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2582 else if (sc->ntxchains == 3)
2583 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2584
2585 for (j = 0; j < ic->ic_nchans; j++) {
2586 c = &ic->ic_channels[j];
2587 if ((flags & IWN_ENHINFO_5GHZ)) {
2588 if (!IEEE80211_IS_CHAN_A(c))
2589 continue;
2590 } else if ((flags & IWN_ENHINFO_OFDM)) {
2591 if (!IEEE80211_IS_CHAN_G(c))
2592 continue;
2593 } else if (!IEEE80211_IS_CHAN_B(c))
2594 continue;
2595 if ((flags & IWN_ENHINFO_HT40)) {
2596 if (!IEEE80211_IS_CHAN_HT40(c))
2597 continue;
2598 } else {
2599 if (IEEE80211_IS_CHAN_HT40(c))
2600 continue;
2601 }
2602 if (enhinfo[i].chan != 0 &&
2603 enhinfo[i].chan != c->ic_ieee)
2604 continue;
2605
2606 DPRINTF(sc, IWN_DEBUG_RESET,
2607 "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2608 c->ic_flags, maxpwr / 2);
2609 c->ic_maxregpower = maxpwr / 2;
2610 c->ic_maxpower = maxpwr;
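			/*
			 * NB: the enhanced-info limits appear to be in
			 * half-dBm, matching ic_maxpower; halve them for
			 * the regulatory limit in dBm.
			 */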
2611 }
2612 }
2613
2614 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2615
2616}
2617
2618static struct ieee80211_node *
2619iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2620{
2621 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
2622}
2623
2624static __inline int
2625rate2plcp(int rate)
2626{
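	/*
	 * NB: rates are in 500 kbps units.  CCK rates map to the PLCP
	 * rate field in 100 kbps units (e.g. 2 -> 10 for 1 Mbps), while
	 * OFDM rates map to the standard 802.11a PLCP signal codes.
	 */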
2627 switch (rate & 0xff) {
2628 case 12: return 0xd;
2629 case 18: return 0xf;
2630 case 24: return 0x5;
2631 case 36: return 0x7;
2632 case 48: return 0x9;
2633 case 72: return 0xb;
2634 case 96: return 0x1;
2635 case 108: return 0x3;
2636 case 2: return 10;
2637 case 4: return 20;
2638 case 11: return 55;
2639 case 22: return 110;
2640 }
2641 return 0;
2642}
2643
2644static int
2645iwn_get_1stream_tx_antmask(struct iwn_softc *sc)
2646{
2647
2648 return IWN_LSB(sc->txchainmask);
2649}
2650
2651static int
2652iwn_get_2stream_tx_antmask(struct iwn_softc *sc)
2653{
2654 int tx;
2655
2656 /*
2657 * The '2 stream' setup is a bit .. odd.
2658 *
2659 * For NICs that support only 1 antenna, default to IWN_ANT_AB or
2660	 * the firmware panics (e.g. Intel 5100).
2661 *
2662 * For NICs that support two antennas, we use ANT_AB.
2663 *
2664 * For NICs that support three antennas, we use the two that
2665	 * weren't the default one.
2666 *
2667 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
2668 * this to only one antenna.
2669 */
2670
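	/*
	 * Worked example: with all three antennas set in txchainmask
	 * (A|B|C), IWN_LSB() presumably isolates the lowest set bit
	 * (antenna A), so "the other antennas" below become B+C.  With
	 * a single antenna the mask collapses to zero and is forced to
	 * IWN_ANT_AB.
	 */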
2671 /* Default - transmit on the other antennas */
2672 tx = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));
2673
2674	/* Now, if it's zero, set it to IWN_ANT_AB so as not to panic the firmware */
2675 if (tx == 0)
2676 tx = IWN_ANT_AB;
2677
2678 /*
2679 * If the NIC is a two-stream TX NIC, configure the TX mask to
2680 * the default chainmask
2681 */
2682 else if (sc->ntxchains == 2)
2683 tx = sc->txchainmask;
2684
2685 return (tx);
2686}
2687
2688
2689
2690/*
2691 * Calculate the required PLCP value from the given rate,
2692 * to the given node.
2693 *
2694 * This will take the node configuration (e.g. 11n, rate table
2695 * setup, etc.) into consideration.
2696 */
2697static uint32_t
2698iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
2699 uint8_t rate)
2700{
2701#define RV(v) ((v) & IEEE80211_RATE_VAL)
2702 struct ieee80211com *ic = ni->ni_ic;
2703 uint32_t plcp = 0;
2704 int ridx;
2705
2706 /*
2707 * If it's an MCS rate, let's set the plcp correctly
2708 * and set the relevant flags based on the node config.
2709 */
2710 if (rate & IEEE80211_RATE_MCS) {
2711 /*
2712 * Set the initial PLCP value to be between 0->31 for
2713 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
2714 * flag.
2715 */
2716 plcp = RV(rate) | IWN_RFLAG_MCS;
2717
2718 /*
2719 * XXX the following should only occur if both
2720 * the local configuration _and_ the remote node
2721 * advertise these capabilities. Thus this code
2722 * may need fixing!
2723 */
2724
2725 /*
2726 * Set the channel width and guard interval.
2727 */
2728 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2729 plcp |= IWN_RFLAG_HT40;
2730 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2731 plcp |= IWN_RFLAG_SGI;
2732 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
2733 plcp |= IWN_RFLAG_SGI;
2734 }
2735
2736 /*
2737 * Ensure the selected rate matches the link quality
2738 * table entries being used.
2739 */
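		/*
		 * NB: with the IEEE80211_RATE_MCS bit (0x80) set, 0x87 is
		 * MCS 7 (last single-stream rate) and 0x8f is MCS 15 (last
		 * two-stream rate), so the checks below pick the 1-, 2- or
		 * 3-stream antenna mask respectively.
		 */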
2740 if (rate > 0x8f)
2741 plcp |= IWN_RFLAG_ANT(sc->txchainmask);
2742 else if (rate > 0x87)
2743 plcp |= IWN_RFLAG_ANT(iwn_get_2stream_tx_antmask(sc));
2744 else
2745 plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
2746 } else {
2747 /*
2748 * Set the initial PLCP - fine for both
2749 * OFDM and CCK rates.
2750 */
2751 plcp = rate2plcp(rate);
2752
2753 /* Set CCK flag if it's CCK */
2754
2755 /* XXX It would be nice to have a method
2756 * to map the ridx -> phy table entry
2757 * so we could just query that, rather than
2758 * this hack to check against IWN_RIDX_OFDM6.
2759 */
2760 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
2761 rate & IEEE80211_RATE_VAL);
2762 if (ridx < IWN_RIDX_OFDM6 &&
2763 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2764 plcp |= IWN_RFLAG_CCK;
2765
2766 /* Set antenna configuration */
2767 /* XXX TODO: is this the right antenna to use for legacy? */
2768 plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
2769 }
2770
2771 DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
2772 __func__,
2773 rate,
2774 plcp);
2775
2776 return (htole32(plcp));
2777#undef RV
2778}
2779
2780static void
2781iwn_newassoc(struct ieee80211_node *ni, int isnew)
2782{
2783 /* Doesn't do anything at the moment */
2784}
2785
2786static int
2787iwn_media_change(struct ifnet *ifp)
2788{
2789 int error;
2790
2791 error = ieee80211_media_change(ifp);
2792 /* NB: only the fixed rate can change and that doesn't need a reset */
2793 return (error == ENETRESET ? 0 : error);
2794}
2795
2796static int
2797iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2798{
2799 struct iwn_vap *ivp = IWN_VAP(vap);
2800 struct ieee80211com *ic = vap->iv_ic;
2801 struct iwn_softc *sc = ic->ic_softc;
2802 int error = 0;
2803
2804 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2805
2806 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2807 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2808
2809 IEEE80211_UNLOCK(ic);
2810 IWN_LOCK(sc);
2811 callout_stop(&sc->calib_to);
2812
2813 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
2814
2815 switch (nstate) {
2816 case IEEE80211_S_ASSOC:
2817 if (vap->iv_state != IEEE80211_S_RUN)
2818 break;
2819 /* FALLTHROUGH */
2820 case IEEE80211_S_AUTH:
2821 if (vap->iv_state == IEEE80211_S_AUTH)
2822 break;
2823
2824 /*
2825 * !AUTH -> AUTH transition requires state reset to handle
2826 * reassociations correctly.
2827 */
2828 sc->rxon->associd = 0;
2829 sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
2830 sc->calib.state = IWN_CALIB_STATE_INIT;
2831
2832 /* Wait until we hear a beacon before we transmit */
2833 sc->sc_beacon_wait = 1;
2834
2835 if ((error = iwn_auth(sc, vap)) != 0) {
2836 device_printf(sc->sc_dev,
2837 "%s: could not move to auth state\n", __func__);
2838 }
2839 break;
2840
2841 case IEEE80211_S_RUN:
2842 /*
2843 * RUN -> RUN transition; Just restart the timers.
2844 */
2845 if (vap->iv_state == IEEE80211_S_RUN) {
2846 sc->calib_cnt = 0;
2847 break;
2848 }
2849
2850 /* Wait until we hear a beacon before we transmit */
2851 sc->sc_beacon_wait = 1;
2852
2853 /*
2854 * !RUN -> RUN requires setting the association id
2855 * which is done with a firmware cmd. We also defer
2856 * starting the timers until that work is done.
2857 */
2858 if ((error = iwn_run(sc, vap)) != 0) {
2859 device_printf(sc->sc_dev,
2860 "%s: could not move to run state\n", __func__);
2861 }
2862 break;
2863
2864 case IEEE80211_S_INIT:
2865 sc->calib.state = IWN_CALIB_STATE_INIT;
2866 /*
2867 * Purge the xmit queue so we don't have old frames
2868 * during a new association attempt.
2869 */
2870 sc->sc_beacon_wait = 0;
2871 iwn_xmit_queue_drain(sc);
2872 break;
2873
2874 default:
2875 break;
2876 }
2877 IWN_UNLOCK(sc);
2878 IEEE80211_LOCK(ic);
2879 if (error != 0){
2880 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2881 return error;
2882 }
2883
2884 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2885
2886 return ivp->iv_newstate(vap, nstate, arg);
2887}
2888
2889static void
2890iwn_calib_timeout(void *arg)
2891{
2892 struct iwn_softc *sc = arg;
2893
2894 IWN_LOCK_ASSERT(sc);
2895
2896 /* Force automatic TX power calibration every 60 secs. */
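	/* NB: 120 passes of the 500 ms callout below == 60 seconds. */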
2897 if (++sc->calib_cnt >= 120) {
2898 uint32_t flags = 0;
2899
2900 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2901 "sending request for statistics");
2902 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2903 sizeof flags, 1);
2904 sc->calib_cnt = 0;
2905 }
2906 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2907 sc);
2908}
2909
2910/*
2911 * Process an RX_PHY firmware notification. This is usually immediately
2912 * followed by an MPDU_RX_DONE notification.
2913 */
2914static void
2915iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2916 struct iwn_rx_data *data)
2917{
2918 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2919
2920 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2921 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2922
2923 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
2924 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2925 sc->last_rx_valid = 1;
2926}
2927
2928/*
2929 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2930 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2931 */
2932static void
2933iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2934 struct iwn_rx_data *data)
2935{
2936 struct iwn_ops *ops = &sc->ops;
2937 struct ieee80211com *ic = &sc->sc_ic;
2938 struct iwn_rx_ring *ring = &sc->rxq;
2939 struct ieee80211_frame *wh;
2940 struct ieee80211_node *ni;
2941 struct mbuf *m, *m1;
2942 struct iwn_rx_stat *stat;
2943 caddr_t head;
2944 bus_addr_t paddr;
2945 uint32_t flags;
2946 int error, len, rssi, nf;
2947
2948 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2949
2950 if (desc->type == IWN_MPDU_RX_DONE) {
2951 /* Check for prior RX_PHY notification. */
2952 if (!sc->last_rx_valid) {
2953 DPRINTF(sc, IWN_DEBUG_ANY,
2954 "%s: missing RX_PHY\n", __func__);
2955 return;
2956 }
2957 stat = &sc->last_rx_stat;
2958 } else
2959 stat = (struct iwn_rx_stat *)(desc + 1);
2960
2961 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2962
2963 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2964 device_printf(sc->sc_dev,
2965 "%s: invalid RX statistic header, len %d\n", __func__,
2966 stat->cfg_phy_len);
2967 return;
2968 }
2969 if (desc->type == IWN_MPDU_RX_DONE) {
2970 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2971 head = (caddr_t)(mpdu + 1);
2972 len = le16toh(mpdu->len);
2973 } else {
2974 head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2975 len = le16toh(stat->len);
2976 }
2977
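	/* The 32-bit RX status word immediately follows the frame data. */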
2978 flags = le32toh(*(uint32_t *)(head + len));
2979
2980 /* Discard frames with a bad FCS early. */
2981 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2982 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
2983 __func__, flags);
2984 counter_u64_add(ic->ic_ierrors, 1);
2985 return;
2986 }
2987 /* Discard frames that are too short. */
2988 if (len < sizeof (struct ieee80211_frame_ack)) {
2989 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2990 __func__, len);
2991 counter_u64_add(ic->ic_ierrors, 1);
2992 return;
2993 }
2994
2995 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
2996 if (m1 == NULL) {
2997 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2998 __func__);
2999 counter_u64_add(ic->ic_ierrors, 1);
3000 return;
3001 }
3002 bus_dmamap_unload(ring->data_dmat, data->map);
3003
3004 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
3005 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3006 if (error != 0 && error != EFBIG) {
3007 device_printf(sc->sc_dev,
3008 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
3009 m_freem(m1);
3010
3011 /* Try to reload the old mbuf. */
3012 error = bus_dmamap_load(ring->data_dmat, data->map,
3013 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
3014 &paddr, BUS_DMA_NOWAIT);
3015 if (error != 0 && error != EFBIG) {
3016 panic("%s: could not load old RX mbuf", __func__);
3017 }
3018 /* Physical address may have changed. */
3019 ring->desc[ring->cur] = htole32(paddr >> 8);
3020 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
3021 BUS_DMASYNC_PREWRITE);
3022 counter_u64_add(ic->ic_ierrors, 1);
3023 return;
3024 }
3025
3026 m = data->m;
3027 data->m = m1;
3028 /* Update RX descriptor. */
3029 ring->desc[ring->cur] = htole32(paddr >> 8);
3030 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3031 BUS_DMASYNC_PREWRITE);
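	/*
	 * XXX the RX descriptor presumably holds the buffer DMA address
	 * in 256-byte units, hence the >> 8 above.
	 */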
3032
3033 /* Finalize mbuf. */
3034 m->m_data = head;
3035 m->m_pkthdr.len = m->m_len = len;
3036
3037 /* Grab a reference to the source node. */
3038 wh = mtod(m, struct ieee80211_frame *);
3039 if (len >= sizeof(struct ieee80211_frame_min))
3040 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3041 else
3042 ni = NULL;
3043 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
3044 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
3045
3046 rssi = ops->get_rssi(sc, stat);
3047
3048 if (ieee80211_radiotap_active(ic)) {
3049 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
3050
3051 tap->wr_flags = 0;
3052 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
3053 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3054 tap->wr_dbm_antsignal = (int8_t)rssi;
3055 tap->wr_dbm_antnoise = (int8_t)nf;
3056 tap->wr_tsft = stat->tstamp;
3057 switch (stat->rate) {
3058 /* CCK rates. */
3059 case 10: tap->wr_rate = 2; break;
3060 case 20: tap->wr_rate = 4; break;
3061 case 55: tap->wr_rate = 11; break;
3062 case 110: tap->wr_rate = 22; break;
3063 /* OFDM rates. */
3064 case 0xd: tap->wr_rate = 12; break;
3065 case 0xf: tap->wr_rate = 18; break;
3066 case 0x5: tap->wr_rate = 24; break;
3067 case 0x7: tap->wr_rate = 36; break;
3068 case 0x9: tap->wr_rate = 48; break;
3069 case 0xb: tap->wr_rate = 72; break;
3070 case 0x1: tap->wr_rate = 96; break;
3071 case 0x3: tap->wr_rate = 108; break;
3072 /* Unknown rate: should not happen. */
3073 default: tap->wr_rate = 0;
3074 }
3075 }
3076
3077 /*
3078 * If it's a beacon and we're waiting, then do the
3079 * wakeup. This should unblock raw_xmit/start.
3080 */
3081 if (sc->sc_beacon_wait) {
3082 uint8_t type, subtype;
3083 /* NB: Re-assign wh */
3084 wh = mtod(m, struct ieee80211_frame *);
3085 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3086 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3087 /*
3088 * This assumes at this point we've received our own
3089 * beacon.
3090 */
3091 DPRINTF(sc, IWN_DEBUG_TRACE,
3092 "%s: beacon_wait, type=%d, subtype=%d\n",
3093 __func__, type, subtype);
3094 if (type == IEEE80211_FC0_TYPE_MGT &&
3095 subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
3096 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
3097 "%s: waking things up\n", __func__);
3098 /* queue taskqueue to transmit! */
3099 taskqueue_enqueue(sc->sc_tq, &sc->sc_xmit_task);
3100 }
3101 }
3102
3103 IWN_UNLOCK(sc);
3104
3105 /* Send the frame to the 802.11 layer. */
3106 if (ni != NULL) {
3107 if (ni->ni_flags & IEEE80211_NODE_HT)
3108 m->m_flags |= M_AMPDU;
3109 (void)ieee80211_input(ni, m, rssi - nf, nf);
3110 /* Node is no longer needed. */
3111 ieee80211_free_node(ni);
3112 } else
3113 (void)ieee80211_input_all(ic, m, rssi - nf, nf);
3114
3115 IWN_LOCK(sc);
3116
3117 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3118
3119}
3120
3121/* Process an incoming Compressed BlockAck. */
3122static void
3123iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3124 struct iwn_rx_data *data)
3125{
3126 struct iwn_ops *ops = &sc->ops;
3127 struct iwn_node *wn;
3128 struct ieee80211_node *ni;
3129 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
3130 struct iwn_tx_ring *txq;
3131 struct iwn_tx_data *txdata;
3132 struct ieee80211_tx_ampdu *tap;
3133 struct mbuf *m;
3134 uint64_t bitmap;
3135 uint16_t ssn;
3136 uint8_t tid;
3137 int ackfailcnt = 0, i, lastidx, qid, *res, shift;
3138 int tx_ok = 0, tx_err = 0;
3139
3140 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s begin\n", __func__);
3141
3142 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3143
3144 qid = le16toh(ba->qid);
3145 txq = &sc->txq[ba->qid];
3146 tap = sc->qid2tap[ba->qid];
3147 tid = tap->txa_tid;
3148 wn = (void *)tap->txa_ni;
3149
3150 res = NULL;
3151 ssn = 0;
3152 if (!IEEE80211_AMPDU_RUNNING(tap)) {
3153 res = tap->txa_private;
3154 ssn = tap->txa_start & 0xfff;
3155 }
3156
3157 for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
3158 txdata = &txq->data[txq->read];
3159
3160 /* Unmap and free mbuf. */
3161 bus_dmamap_sync(txq->data_dmat, txdata->map,
3162 BUS_DMASYNC_POSTWRITE);
3163 bus_dmamap_unload(txq->data_dmat, txdata->map);
3164 m = txdata->m, txdata->m = NULL;
3165 ni = txdata->ni, txdata->ni = NULL;
3166
3167 KASSERT(ni != NULL, ("no node"));
3168 KASSERT(m != NULL, ("no mbuf"));
3169
3170 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
3171 ieee80211_tx_complete(ni, m, 1);
3172
3173 txq->queued--;
3174 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
3175 }
3176
3177 if (txq->queued == 0 && res != NULL) {
3178 iwn_nic_lock(sc);
3179 ops->ampdu_tx_stop(sc, qid, tid, ssn);
3180 iwn_nic_unlock(sc);
3181 sc->qid2tap[qid] = NULL;
3182 free(res, M_DEVBUF);
3183 return;
3184 }
3185
3186 if (wn->agg[tid].bitmap == 0)
3187 return;
3188
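	/*
	 * XXX the compressed BA bitmap covers a 64-frame window; "shift"
	 * realigns our saved aggregation start index with the window
	 * start reported in the BA, modulo the 256-entry sequence space
	 * (hence the += 0x100 below).
	 */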
3189 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
3190 if (shift < 0)
3191 shift += 0x100;
3192
3193 if (wn->agg[tid].nframes > (64 - shift))
3194 return;
3195
3196 /*
3197 * Walk the bitmap and calculate how many successful and failed
3198 * attempts are made.
3199 *
3200 * Yes, the rate control code doesn't know these are A-MPDU
3201 * subframes and that it's okay to fail some of these.
3202 */
3203 ni = tap->txa_ni;
3204 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
3205 for (i = 0; bitmap; i++) {
3206 if ((bitmap & 1) == 0) {
3207 tx_err ++;
3208 ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
3209 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
3210 } else {
3211 tx_ok ++;
3212 ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
3213 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
3214 }
3215 bitmap >>= 1;
3216 }
3217
3218 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
3219 "->%s: end; %d ok; %d err\n",__func__, tx_ok, tx_err);
3220
3221}
3222
3223/*
3224 * Process a CALIBRATION_RESULT notification sent by the initialization
3225 * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
3226 */
3227static void
3228iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3229 struct iwn_rx_data *data)
3230{
3231 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
3232 int len, idx = -1;
3233
3234 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3235
3236 /* Runtime firmware should not send such a notification. */
3237 if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
3238		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after calib done\n",
3239 __func__);
3240 return;
3241 }
3242 len = (le32toh(desc->len) & 0x3fff) - 4;
3243 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3244
3245 switch (calib->code) {
3246 case IWN5000_PHY_CALIB_DC:
3247 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC)
3248 idx = 0;
3249 break;
3250 case IWN5000_PHY_CALIB_LO:
3251 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO)
3252 idx = 1;
3253 break;
3254 case IWN5000_PHY_CALIB_TX_IQ:
3255 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ)
3256 idx = 2;
3257 break;
3258 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
3259 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC)
3260 idx = 3;
3261 break;
3262 case IWN5000_PHY_CALIB_BASE_BAND:
3263 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND)
3264 idx = 4;
3265 break;
3266 }
3267 if (idx == -1) /* Ignore other results. */
3268 return;
3269
3270 /* Save calibration result. */
3271 if (sc->calibcmd[idx].buf != NULL)
3272 free(sc->calibcmd[idx].buf, M_DEVBUF);
3273 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
3274 if (sc->calibcmd[idx].buf == NULL) {
3275 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
3276 "not enough memory for calibration result %d\n",
3277 calib->code);
3278 return;
3279 }
3280 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
3281 "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len);
3282 sc->calibcmd[idx].len = len;
3283 memcpy(sc->calibcmd[idx].buf, calib, len);
3284}
3285
3286static void
3287iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib,
3288 struct iwn_stats *stats, int len)
3289{
3290 struct iwn_stats_bt *stats_bt;
3291 struct iwn_stats *lstats;
3292
3293 /*
3294	 * First, check whether the length is the bluetooth one or the normal one.
3295	 *
3296	 * If it's normal, just copy it and bail out.
3297 * Otherwise we have to convert things.
3298 */
3299
3300 if (len == sizeof(struct iwn_stats) + 4) {
3301 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
3302 sc->last_stat_valid = 1;
3303 return;
3304 }
3305
3306 /*
3307	 * If it's not the bluetooth size, log it and then just copy.
3308 */
3309 if (len != sizeof(struct iwn_stats_bt) + 4) {
3310 DPRINTF(sc, IWN_DEBUG_STATS,
3311 "%s: size of rx statistics (%d) not an expected size!\n",
3312 __func__,
3313 len);
3314 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
3315 sc->last_stat_valid = 1;
3316 return;
3317 }
3318
3319 /*
3320 * Ok. Time to copy.
3321 */
3322 stats_bt = (struct iwn_stats_bt *) stats;
3323 lstats = &sc->last_stat;
3324
3325 /* flags */
3326 lstats->flags = stats_bt->flags;
3327 /* rx_bt */
3328 memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm,
3329 sizeof(struct iwn_rx_phy_stats));
3330 memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck,
3331 sizeof(struct iwn_rx_phy_stats));
3332 memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common,
3333 sizeof(struct iwn_rx_general_stats));
3334 memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht,
3335 sizeof(struct iwn_rx_ht_phy_stats));
3336 /* tx */
3337 memcpy(&lstats->tx, &stats_bt->tx,
3338 sizeof(struct iwn_tx_stats));
3339 /* general */
3340 memcpy(&lstats->general, &stats_bt->general,
3341 sizeof(struct iwn_general_stats));
3342
3343 /* XXX TODO: Squirrel away the extra bluetooth stats somewhere */
3344 sc->last_stat_valid = 1;
3345}
3346
3347/*
3348 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
3349 * The latter is sent by the firmware after each received beacon.
3350 */
3351static void
3352iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3353 struct iwn_rx_data *data)
3354{
3355 struct iwn_ops *ops = &sc->ops;
3356 struct ieee80211com *ic = &sc->sc_ic;
3357 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3358 struct iwn_calib_state *calib = &sc->calib;
3359 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
3360 struct iwn_stats *lstats;
3361 int temp;
3362
3363 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3364
3365 /* Ignore statistics received during a scan. */
3366 if (vap->iv_state != IEEE80211_S_RUN ||
3367 (ic->ic_flags & IEEE80211_F_SCAN)){
3368 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
3369 __func__);
3370 return;
3371 }
3372
3373 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3374
3375 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS,
3376 "%s: received statistics, cmd %d, len %d\n",
3377 __func__, desc->type, le16toh(desc->len));
3378 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
3379
3380 /*
3381 * Collect/track general statistics for reporting.
3382 *
3383	 * This takes care of ensuring that the bluetooth-sized message
3384	 * will be correctly converted to the legacy-sized message.
3385 */
3386 iwn_stats_update(sc, calib, stats, le16toh(desc->len));
3387
3388 /*
3389	 * And now, let's grab a pointer to it to use!
3390 */
3391 lstats = &sc->last_stat;
3392
3393 /* Test if temperature has changed. */
3394 if (lstats->general.temp != sc->rawtemp) {
3395 /* Convert "raw" temperature to degC. */
3396 sc->rawtemp = stats->general.temp;
3397 temp = ops->get_temperature(sc);
3398 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
3399 __func__, temp);
3400
3401 /* Update TX power if need be (4965AGN only). */
3402 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3403 iwn4965_power_calibration(sc, temp);
3404 }
3405
3406 if (desc->type != IWN_BEACON_STATISTICS)
3407 return; /* Reply to a statistics request. */
3408
3409 sc->noise = iwn_get_noise(&lstats->rx.general);
3410 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
3411
3412 /* Test that RSSI and noise are present in stats report. */
3413 if (le32toh(lstats->rx.general.flags) != 1) {
3414 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
3415 "received statistics without RSSI");
3416 return;
3417 }
3418
3419 if (calib->state == IWN_CALIB_STATE_ASSOC)
3420 iwn_collect_noise(sc, &lstats->rx.general);
3421 else if (calib->state == IWN_CALIB_STATE_RUN) {
3422 iwn_tune_sensitivity(sc, &lstats->rx);
3423 /*
3424 * XXX TODO: Only run the RX recovery if we're associated!
3425 */
3426 iwn_check_rx_recovery(sc, lstats);
3427 iwn_save_stats_counters(sc, lstats);
3428 }
3429
3430 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3431}
3432
3433/*
3434 * Save the relevant statistic counters for the next calibration
3435 * pass.
3436 */
3437static void
3438iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs)
3439{
3440 struct iwn_calib_state *calib = &sc->calib;
3441
3442 /* Save counters values for next call. */
3443 calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp);
3444 calib->fa_cck = le32toh(rs->rx.cck.fa);
3445 calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp);
3446 calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp);
3447 calib->fa_ofdm = le32toh(rs->rx.ofdm.fa);
3448
3449 /* Last time we received these tick values */
3450 sc->last_calib_ticks = ticks;
3451}
3452
3453/*
3454 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
3455 * and 5000 adapters have different, incompatible TX status formats.
3456 */
3457static void
3458iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3459 struct iwn_rx_data *data)
3460{
3461 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
3462 struct iwn_tx_ring *ring;
3463 int qid;
3464
3465 qid = desc->qid & 0xf;
3466 ring = &sc->txq[qid];
3467
3468 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
3469 "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
3470 __func__, desc->qid, desc->idx,
3471 stat->rtsfailcnt,
3472 stat->ackfailcnt,
3473 stat->btkillcnt,
3474 stat->rate, le16toh(stat->duration),
3475 le32toh(stat->status));
3476
3477 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3478 if (qid >= sc->firstaggqueue) {
3479 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
3480 stat->ackfailcnt, &stat->status);
3481 } else {
3482 iwn_tx_done(sc, desc, stat->ackfailcnt,
3483 le32toh(stat->status) & 0xff);
3484 }
3485}
3486
3487static void
3488iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3489 struct iwn_rx_data *data)
3490{
3491 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
3492 struct iwn_tx_ring *ring;
3493 int qid;
3494
3495 qid = desc->qid & 0xf;
3496 ring = &sc->txq[qid];
3497
3498 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
3499 "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
3500 __func__, desc->qid, desc->idx,
3501 stat->rtsfailcnt,
3502 stat->ackfailcnt,
3503 stat->btkillcnt,
3504 stat->rate, le16toh(stat->duration),
3505 le32toh(stat->status));
3506
3507#ifdef notyet
3508 /* Reset TX scheduler slot. */
3509 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
3510#endif
3511
3512 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3513 if (qid >= sc->firstaggqueue) {
3514 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
3515 stat->ackfailcnt, &stat->status);
3516 } else {
3517 iwn_tx_done(sc, desc, stat->ackfailcnt,
3518 le16toh(stat->status) & 0xff);
3519 }
3520}
3521
3522/*
3523 * Adapter-independent backend for TX_DONE firmware notifications.
3524 */
3525static void
3526iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
3527 uint8_t status)
3528{
3529 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
3530 struct iwn_tx_data *data = &ring->data[desc->idx];
3531 struct mbuf *m;
3532 struct ieee80211_node *ni;
3533 struct ieee80211vap *vap;
3534
3535 KASSERT(data->ni != NULL, ("no node"));
3536
3537 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3538
3539 /* Unmap and free mbuf. */
3540 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
3541 bus_dmamap_unload(ring->data_dmat, data->map);
3542 m = data->m, data->m = NULL;
3543 ni = data->ni, data->ni = NULL;
3544 vap = ni->ni_vap;
3545
3546 /*
3547 * Update rate control statistics for the node.
3548 */
3549 if (status & IWN_TX_FAIL)
3550 ieee80211_ratectl_tx_complete(vap, ni,
3551 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
3552 else
3553 ieee80211_ratectl_tx_complete(vap, ni,
3554 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
3555
3556 /*
3557 * Channels marked for "radar" require traffic to be received
3558 * to unlock before we can transmit. Until traffic is seen
3559 * any attempt to transmit is returned immediately with status
3560	 * set to IWN_TX_FAIL_TX_LOCKED.  Unfortunately this can easily
3561	 * happen on the first authentication after scanning.  To work around
3562	 * this we ignore a failure of this sort in AUTH state so the
3563 * 802.11 layer will fall back to using a timeout to wait for
3564 * the AUTH reply. This allows the firmware time to see
3565 * traffic so a subsequent retry of AUTH succeeds. It's
3566 * unclear why the firmware does not maintain state for
3567 * channels recently visited as this would allow immediate
3568 * use of the channel after a scan (where we see traffic).
3569 */
3570 if (status == IWN_TX_FAIL_TX_LOCKED &&
3571 ni->ni_vap->iv_state == IEEE80211_S_AUTH)
3572 ieee80211_tx_complete(ni, m, 0);
3573 else
3574 ieee80211_tx_complete(ni, m,
3575 (status & IWN_TX_FAIL) != 0);
3576
3577 sc->sc_tx_timer = 0;
3578 if (--ring->queued < IWN_TX_RING_LOMARK) {
3579 sc->qfullmsk &= ~(1 << ring->qid);
3580 if (sc->qfullmsk == 0)
3581 iwn_start_locked(sc);
3582 }
3583
3584 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3585
3586}
3587
3588/*
3589 * Process a "command done" firmware notification. This is where we wakeup
3590 * processes waiting for a synchronous command completion.
3591 */
3592static void
3593iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
3594{
3595 struct iwn_tx_ring *ring;
3596 struct iwn_tx_data *data;
3597 int cmd_queue_num;
3598
3599 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
3600 cmd_queue_num = IWN_PAN_CMD_QUEUE;
3601 else
3602 cmd_queue_num = IWN_CMD_QUEUE_NUM;
3603
3604 if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num)
3605 return; /* Not a command ack. */
3606
3607 ring = &sc->txq[cmd_queue_num];
3608 data = &ring->data[desc->idx];
3609
3610 /* If the command was mapped in an mbuf, free it. */
3611 if (data->m != NULL) {
3612 bus_dmamap_sync(ring->data_dmat, data->map,
3613 BUS_DMASYNC_POSTWRITE);
3614 bus_dmamap_unload(ring->data_dmat, data->map);
3615 m_freem(data->m);
3616 data->m = NULL;
3617 }
3618 wakeup(&ring->desc[desc->idx]);
3619}
3620
3621static void
3622iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
3623 int ackfailcnt, void *stat)
3624{
3625 struct iwn_ops *ops = &sc->ops;
3626 struct iwn_tx_ring *ring = &sc->txq[qid];
3627 struct iwn_tx_data *data;
3628 struct mbuf *m;
3629 struct iwn_node *wn;
3630 struct ieee80211_node *ni;
3631 struct ieee80211_tx_ampdu *tap;
3632 uint64_t bitmap;
3633 uint32_t *status = stat;
3634 uint16_t *aggstatus = stat;
3635 uint16_t ssn;
3636 uint8_t tid;
3637 int bit, i, lastidx, *res, seqno, shift, start;
3638
3639 /* XXX TODO: status is le16 field! Grr */
3640
3641 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3642 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: nframes=%d, status=0x%08x\n",
3643 __func__,
3644 nframes,
3645 *status);
3646
3647 tap = sc->qid2tap[qid];
3648 tid = tap->txa_tid;
3649 wn = (void *)tap->txa_ni;
3650 ni = tap->txa_ni;
3651
3652 /*
3653 * XXX TODO: ACK and RTS failures would be nice here!
3654 */
3655
3656 /*
3657 * A-MPDU single frame status - if we failed to transmit it
3658 * in A-MPDU, then it may be a permanent failure.
3659 *
3660 * XXX TODO: check what the Linux iwlwifi driver does here;
3661 * there's some permanent and temporary failures that may be
3662 * handled differently.
3663 */
3664 if (nframes == 1) {
3665 if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
3666#ifdef NOT_YET
3667 printf("ieee80211_send_bar()\n");
3668#endif
3669 /*
3670 * If we completely fail a transmit, make sure a
3671 * notification is pushed up to the rate control
3672 * layer.
3673 */
3674 ieee80211_ratectl_tx_complete(ni->ni_vap,
3675 ni,
3676 IEEE80211_RATECTL_TX_FAILURE,
3677 &ackfailcnt,
3678 NULL);
3679 } else {
3680 /*
3681 * If nframes=1, then we won't be getting a BA for
3682 * this frame. Ensure that we correctly update the
3683 * rate control code with how many retries were
3684 * needed to send it.
3685 */
3686 ieee80211_ratectl_tx_complete(ni->ni_vap,
3687 ni,
3688 IEEE80211_RATECTL_TX_SUCCESS,
3689 &ackfailcnt,
3690 NULL);
3691 }
3692 }
3693
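	/*
	 * XXX each per-frame aggregation status entry appears to be two
	 * 16-bit words: word 0 carries status bits (entries with 0xc set
	 * are skipped) and word 1 carries the frame index used to rebuild
	 * the 64-bit success bitmap below.
	 */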
3694 bitmap = 0;
3695 start = idx;
3696 for (i = 0; i < nframes; i++) {
3697 if (le16toh(aggstatus[i * 2]) & 0xc)
3698 continue;
3699
3700 idx = le16toh(aggstatus[2*i + 1]) & 0xff;
3701 bit = idx - start;
3702 shift = 0;
3703 if (bit >= 64) {
3704 shift = 0x100 - idx + start;
3705 bit = 0;
3706 start = idx;
3707 } else if (bit <= -64)
3708 bit = 0x100 - start + idx;
3709 else if (bit < 0) {
3710 shift = start - idx;
3711 start = idx;
3712 bit = 0;
3713 }
3714 bitmap = bitmap << shift;
3715 bitmap |= 1ULL << bit;
3716 }
3717 tap = sc->qid2tap[qid];
3718 tid = tap->txa_tid;
3719 wn = (void *)tap->txa_ni;
3720 wn->agg[tid].bitmap = bitmap;
3721 wn->agg[tid].startidx = start;
3722 wn->agg[tid].nframes = nframes;
3723
3724 res = NULL;
3725 ssn = 0;
3726 if (!IEEE80211_AMPDU_RUNNING(tap)) {
3727 res = tap->txa_private;
3728 ssn = tap->txa_start & 0xfff;
3729 }
3730
3731 /* This is going nframes DWORDS into the descriptor? */
3732 seqno = le32toh(*(status + nframes)) & 0xfff;
3733 for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
3734 data = &ring->data[ring->read];
3735
3736 /* Unmap and free mbuf. */
3737 bus_dmamap_sync(ring->data_dmat, data->map,
3738 BUS_DMASYNC_POSTWRITE);
3739 bus_dmamap_unload(ring->data_dmat, data->map);
3740 m = data->m, data->m = NULL;
3741 ni = data->ni, data->ni = NULL;
3742
3743 KASSERT(ni != NULL, ("no node"));
3744 KASSERT(m != NULL, ("no mbuf"));
3745 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
3746 ieee80211_tx_complete(ni, m, 1);
3747
3748 ring->queued--;
3749 ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
3750 }
3751
3752 if (ring->queued == 0 && res != NULL) {
3753 iwn_nic_lock(sc);
3754 ops->ampdu_tx_stop(sc, qid, tid, ssn);
3755 iwn_nic_unlock(sc);
3756 sc->qid2tap[qid] = NULL;
3757 free(res, M_DEVBUF);
3758 return;
3759 }
3760
3761 sc->sc_tx_timer = 0;
3762 if (ring->queued < IWN_TX_RING_LOMARK) {
3763 sc->qfullmsk &= ~(1 << ring->qid);
3764 if (sc->qfullmsk == 0)
3765 iwn_start_locked(sc);
3766 }
3767
3768 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3769
3770}
3771
3772/*
3773 * Process an INT_FH_RX or INT_SW_RX interrupt.
3774 */
3775static void
3776iwn_notif_intr(struct iwn_softc *sc)
3777{
3778 struct iwn_ops *ops = &sc->ops;
3779 struct ieee80211com *ic = &sc->sc_ic;
3780 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3781 uint16_t hw;
3782
3783 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
3784 BUS_DMASYNC_POSTREAD);
3785
3786 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
3787 while (sc->rxq.cur != hw) {
3788 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
3789 struct iwn_rx_desc *desc;
3790
3791 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3792 BUS_DMASYNC_POSTREAD);
3793 desc = mtod(data->m, struct iwn_rx_desc *);
3794
3795 DPRINTF(sc, IWN_DEBUG_RECV,
3796 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
3797 __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags,
3798 desc->type, iwn_intr_str(desc->type),
3799 le16toh(desc->len));
3800
3801 if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */
3802 iwn_cmd_done(sc, desc);
3803
3804 switch (desc->type) {
3805 case IWN_RX_PHY:
3806 iwn_rx_phy(sc, desc, data);
3807 break;
3808
3809 case IWN_RX_DONE: /* 4965AGN only. */
3810 case IWN_MPDU_RX_DONE:
3811 /* An 802.11 frame has been received. */
3812 iwn_rx_done(sc, desc, data);
3813 break;
3814
3815 case IWN_RX_COMPRESSED_BA:
3816 /* A Compressed BlockAck has been received. */
3817 iwn_rx_compressed_ba(sc, desc, data);
3818 break;
3819
3820 case IWN_TX_DONE:
3821 /* An 802.11 frame has been transmitted. */
3822 ops->tx_done(sc, desc, data);
3823 break;
3824
3825 case IWN_RX_STATISTICS:
3826 case IWN_BEACON_STATISTICS:
3827 iwn_rx_statistics(sc, desc, data);
3828 break;
3829
3830 case IWN_BEACON_MISSED:
3831 {
3832 struct iwn_beacon_missed *miss =
3833 (struct iwn_beacon_missed *)(desc + 1);
3834 int misses;
3835
3836 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3837 BUS_DMASYNC_POSTREAD);
3838 misses = le32toh(miss->consecutive);
3839
3840 DPRINTF(sc, IWN_DEBUG_STATE,
3841 "%s: beacons missed %d/%d\n", __func__,
3842 misses, le32toh(miss->total));
3843 /*
3844 * If more than 5 consecutive beacons are missed,
3845 * reinitialize the sensitivity state machine.
3846 */
3847 if (vap->iv_state == IEEE80211_S_RUN &&
3848 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
3849 if (misses > 5)
3850 (void)iwn_init_sensitivity(sc);
3851 if (misses >= vap->iv_bmissthreshold) {
3852 IWN_UNLOCK(sc);
3853 ieee80211_beacon_miss(ic);
3854 IWN_LOCK(sc);
3855 }
3856 }
3857 break;
3858 }
3859 case IWN_UC_READY:
3860 {
3861 struct iwn_ucode_info *uc =
3862 (struct iwn_ucode_info *)(desc + 1);
3863
3864 /* The microcontroller is ready. */
3865 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3866 BUS_DMASYNC_POSTREAD);
3867 DPRINTF(sc, IWN_DEBUG_RESET,
3868 "microcode alive notification version=%d.%d "
3869 "subtype=%x alive=%x\n", uc->major, uc->minor,
3870 uc->subtype, le32toh(uc->valid));
3871
3872 if (le32toh(uc->valid) != 1) {
3873 device_printf(sc->sc_dev,
3874				    "microcontroller initialization failed\n");
3875 break;
3876 }
3877 if (uc->subtype == IWN_UCODE_INIT) {
3878 /* Save microcontroller report. */
3879 memcpy(&sc->ucode_info, uc, sizeof (*uc));
3880 }
3881 /* Save the address of the error log in SRAM. */
3882 sc->errptr = le32toh(uc->errptr);
3883 break;
3884 }
3885 case IWN_STATE_CHANGED:
3886 {
3887 /*
3888			 * State change allows hardware switch changes to be
3889			 * noted.  However, we handle this in iwn_intr as we
3890			 * get both the enable/disable interrupts.
3891 */
3892 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3893 BUS_DMASYNC_POSTREAD);
3894#ifdef IWN_DEBUG
3895 uint32_t *status = (uint32_t *)(desc + 1);
3896 DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE,
3897 "state changed to %x\n",
3898 le32toh(*status));
3899#endif
3900 break;
3901 }
3902 case IWN_START_SCAN:
3903 {
3904 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3905 BUS_DMASYNC_POSTREAD);
3906#ifdef IWN_DEBUG
3907 struct iwn_start_scan *scan =
3908 (struct iwn_start_scan *)(desc + 1);
3909 DPRINTF(sc, IWN_DEBUG_ANY,
3910 "%s: scanning channel %d status %x\n",
3911 __func__, scan->chan, le32toh(scan->status));
3912#endif
3913 break;
3914 }
3915 case IWN_STOP_SCAN:
3916 {
3917 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3918 BUS_DMASYNC_POSTREAD);
3919#ifdef IWN_DEBUG
3920 struct iwn_stop_scan *scan =
3921 (struct iwn_stop_scan *)(desc + 1);
3922 DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN,
3923 "scan finished nchan=%d status=%d chan=%d\n",
3924 scan->nchan, scan->status, scan->chan);
3925#endif
3926 sc->sc_is_scanning = 0;
3927 IWN_UNLOCK(sc);
3928 ieee80211_scan_next(vap);
3929 IWN_LOCK(sc);
3930 break;
3931 }
3932 case IWN5000_CALIBRATION_RESULT:
3933 iwn5000_rx_calib_results(sc, desc, data);
3934 break;
3935
3936 case IWN5000_CALIBRATION_DONE:
3937 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3938 wakeup(sc);
3939 break;
3940 }
3941
3942 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3943 }
3944
3945 /* Tell the firmware what we have processed. */
3946 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
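	/*
	 * The write pointer handed back to the RX DMA engine is rounded
	 * down to a multiple of 8 (the "& ~7" below); the hardware appears
	 * to require this alignment.
	 */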
3947 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3948}
3949
3950/*
3951 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3952 * from power-down sleep mode.
3953 */
3954static void
3955iwn_wakeup_intr(struct iwn_softc *sc)
3956{
3957 int qid;
3958
3959 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3960 __func__);
3961
3962 /* Wakeup RX and TX rings. */
3963 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3964 for (qid = 0; qid < sc->ntxqs; qid++) {
3965 struct iwn_tx_ring *ring = &sc->txq[qid];
3966 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3967 }
3968}
3969
3970static void
3971iwn_rftoggle_intr(struct iwn_softc *sc)
3972{
3973 struct ieee80211com *ic = &sc->sc_ic;
3974 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
3975
3976 IWN_LOCK_ASSERT(sc);
3977
3978 device_printf(sc->sc_dev, "RF switch: radio %s\n",
3979 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3980 if (tmp & IWN_GP_CNTRL_RFKILL)
3981 ieee80211_runtask(ic, &sc->sc_radioon_task);
3982 else
3983 ieee80211_runtask(ic, &sc->sc_radiooff_task);
3984}
3985
3986/*
3987 * Dump the error log of the firmware when a firmware panic occurs. Although
3988 * we can't debug the firmware because it is neither open source nor free, it
3989 * can help us to identify certain classes of problems.
3990 */
3991static void
3992iwn_fatal_intr(struct iwn_softc *sc)
3993{
3994 struct iwn_fw_dump dump;
3995 int i;
3996
3997 IWN_LOCK_ASSERT(sc);
3998
3999 /* Force a complete recalibration on next init. */
4000 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
4001
4002 /* Check that the error log address is valid. */
4003 if (sc->errptr < IWN_FW_DATA_BASE ||
4004 sc->errptr + sizeof (dump) >
4005 IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
4006 printf("%s: bad firmware error log address 0x%08x\n", __func__,
4007 sc->errptr);
4008 return;
4009 }
4010 if (iwn_nic_lock(sc) != 0) {
4011 printf("%s: could not read firmware error log\n", __func__);
4012 return;
4013 }
4014 /* Read firmware error log from SRAM. */
4015 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
4016 sizeof (dump) / sizeof (uint32_t));
4017 iwn_nic_unlock(sc);
4018
4019 if (dump.valid == 0) {
4020 printf("%s: firmware error log is empty\n", __func__);
4021 return;
4022 }
4023 printf("firmware error log:\n");
4024 printf(" error type = \"%s\" (0x%08X)\n",
4025 (dump.id < nitems(iwn_fw_errmsg)) ?
4026 iwn_fw_errmsg[dump.id] : "UNKNOWN",
4027 dump.id);
4028 printf(" program counter = 0x%08X\n", dump.pc);
4029 printf(" source line = 0x%08X\n", dump.src_line);
4030 printf(" error data = 0x%08X%08X\n",
4031 dump.error_data[0], dump.error_data[1]);
4032 printf(" branch link = 0x%08X%08X\n",
4033 dump.branch_link[0], dump.branch_link[1]);
4034 printf(" interrupt link = 0x%08X%08X\n",
4035 dump.interrupt_link[0], dump.interrupt_link[1]);
4036 printf(" time = %u\n", dump.time[0]);
4037
4038 /* Dump driver status (TX and RX rings) while we're here. */
4039 printf("driver status:\n");
4040 for (i = 0; i < sc->ntxqs; i++) {
4041 struct iwn_tx_ring *ring = &sc->txq[i];
4042 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
4043 i, ring->qid, ring->cur, ring->queued);
4044 }
4045 printf(" rx ring: cur=%d\n", sc->rxq.cur);
4046}
4047
4048static void
4049iwn_intr(void *arg)
4050{
4051 struct iwn_softc *sc = arg;
4052 uint32_t r1, r2, tmp;
4053
4054 IWN_LOCK(sc);
4055
4056 /* Disable interrupts. */
4057 IWN_WRITE(sc, IWN_INT_MASK, 0);
4058
4059 /* Read interrupts from ICT (fast) or from registers (slow). */
4060 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
4061 tmp = 0;
4062 while (sc->ict[sc->ict_cur] != 0) {
4063 tmp |= sc->ict[sc->ict_cur];
4064 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
4065 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
4066 }
4067 tmp = le32toh(tmp);
4068 if (tmp == 0xffffffff) /* Shouldn't happen. */
4069 tmp = 0;
4070 else if (tmp & 0xc0000) /* Workaround a HW bug. */
4071 tmp |= 0x8000;
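		/*
		 * Each ICT entry is a compressed copy of IWN_INT: the low
		 * byte holds bits 0-7 and the high byte holds bits 24-31,
		 * so expand it back into register layout here.
		 */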
4072 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
4073 r2 = 0; /* Unused. */
4074 } else {
4075 r1 = IWN_READ(sc, IWN_INT);
4076 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
4077 IWN_UNLOCK(sc);
4078 return; /* Hardware gone! */
4079 }
4080 r2 = IWN_READ(sc, IWN_FH_INT);
4081 }
4082
4083 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n",
4084 r1, r2);
4085
4086 if (r1 == 0 && r2 == 0)
4087 goto done; /* Interrupt not for us. */
4088
4089 /* Acknowledge interrupts. */
4090 IWN_WRITE(sc, IWN_INT, r1);
4091 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
4092 IWN_WRITE(sc, IWN_FH_INT, r2);
4093
4094 if (r1 & IWN_INT_RF_TOGGLED) {
4095 iwn_rftoggle_intr(sc);
4096 goto done;
4097 }
4098 if (r1 & IWN_INT_CT_REACHED) {
4099 device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
4100 __func__);
4101 }
4102 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
4103 device_printf(sc->sc_dev, "%s: fatal firmware error\n",
4104 __func__);
4105#ifdef IWN_DEBUG
4106 iwn_debug_register(sc);
4107#endif
4108 /* Dump firmware error log and stop. */
4109 iwn_fatal_intr(sc);
4110
4111 taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task);
4112 goto done;
4113 }
4114 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
4115 (r2 & IWN_FH_INT_RX)) {
4116 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
4117 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
4118 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
4119 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
4120 IWN_INT_PERIODIC_DIS);
4121 iwn_notif_intr(sc);
4122 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
4123 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
4124 IWN_INT_PERIODIC_ENA);
4125 }
4126 } else
4127 iwn_notif_intr(sc);
4128 }
4129
4130 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
4131 if (sc->sc_flags & IWN_FLAG_USE_ICT)
4132 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
4133 wakeup(sc); /* FH DMA transfer completed. */
4134 }
4135
4136 if (r1 & IWN_INT_ALIVE)
4137 wakeup(sc); /* Firmware is alive. */
4138
4139 if (r1 & IWN_INT_WAKEUP)
4140 iwn_wakeup_intr(sc);
4141
4142done:
4143 /* Re-enable interrupts. */
4144 if (sc->sc_flags & IWN_FLAG_RUNNING)
4145 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
4146
4147 IWN_UNLOCK(sc);
4148}
4149
4150/*
4151 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
4152 * 5000 adapters use a slightly different format).
4153 */
4154static void
4155iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
4156 uint16_t len)
4157{
4158 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
4159
4160 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4161
4162 *w = htole16(len + 8);
4163 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4164 BUS_DMASYNC_PREWRITE);
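	/*
	 * The first IWN_SCHED_WINSZ byte-count entries are mirrored past
	 * the end of the ring (at idx + IWN_TX_RING_COUNT), presumably so
	 * the scheduler can read a contiguous window across the ring wrap.
	 */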
4165 if (idx < IWN_SCHED_WINSZ) {
4166 *(w + IWN_TX_RING_COUNT) = *w;
4167 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4168 BUS_DMASYNC_PREWRITE);
4169 }
4170}
4171
4172static void
4173iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
4174 uint16_t len)
4175{
4176 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
4177
4178 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4179
4180 *w = htole16(id << 12 | (len + 8));
4181 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4182 BUS_DMASYNC_PREWRITE);
4183 if (idx < IWN_SCHED_WINSZ) {
4184 *(w + IWN_TX_RING_COUNT) = *w;
4185 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4186 BUS_DMASYNC_PREWRITE);
4187 }
4188}
4189
4190#ifdef notyet
4191static void
4192iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
4193{
4194 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
4195
4196 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4197
4198 *w = (*w & htole16(0xf000)) | htole16(1);
4199 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4200 BUS_DMASYNC_PREWRITE);
4201 if (idx < IWN_SCHED_WINSZ) {
4202 *(w + IWN_TX_RING_COUNT) = *w;
4203 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4204 BUS_DMASYNC_PREWRITE);
4205 }
4206}
4207#endif
4208
4209/*
4210 * Check whether OFDM 11g protection will be enabled for the given rate.
4211 *
4212 * The original driver code only enabled protection for OFDM rates.
4213 * It didn't check to see whether it was operating in 11a or 11bg mode.
4214 */
4215static int
4216iwn_check_rate_needs_protection(struct iwn_softc *sc,
4217 struct ieee80211vap *vap, uint8_t rate)
4218{
4219 struct ieee80211com *ic = vap->iv_ic;
4220
4221 /*
4222 * Not in 2GHz mode? Then there's no need to enable OFDM
4223 * 11bg protection.
4224 */
4225 if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
4226 return (0);
4227 }
4228
4229 /*
4230 * 11bg protection not enabled? Then don't use it.
4231 */
4232 if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
4233 return (0);
4234
4235 /*
4236 * If it's an 11n rate - no protection.
4237 * We'll do it via a specific 11n check.
4238 */
4239 if (rate & IEEE80211_RATE_MCS) {
4240 return (0);
4241 }
4242
4243 /*
4244 * Do a rate table lookup. If the PHY is CCK,
4245 * don't do protection.
4246 */
4247 if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
4248 return (0);
4249
4250 /*
4251 * Yup, enable protection.
4252 */
4253 return (1);
4254}
4255
4256/*
4257 * Return a value between 0 and IWN_MAX_TX_RETRIES - 1: the index of the
4258 * link quality table entry that corresponds to the given rate.
4259 */
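/*
 * For example, with a legacy rate set of { 1, 2, 5.5, 11 } and a TX rate
 * of 5.5, the second-highest entry matches and index 1 is returned.
 */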
4260static int
4261iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
4262 uint8_t rate)
4263{
4264 struct ieee80211_rateset *rs;
4265 int is_11n;
4266 int nr;
4267 int i;
4268 uint8_t cmp_rate;
4269
4270 /*
4271 * Figure out if we're using 11n or not here.
4272 */
4273 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
4274 is_11n = 1;
4275 else
4276 is_11n = 0;
4277
4278 /*
4279 * Use the correct rate table.
4280 */
4281 if (is_11n) {
4282 rs = (struct ieee80211_rateset *) &ni->ni_htrates;
4283 nr = ni->ni_htrates.rs_nrates;
4284 } else {
4285 rs = &ni->ni_rates;
4286 nr = rs->rs_nrates;
4287 }
4288
4289 /*
4290 * Find the relevant link quality entry in the table.
4291 */
4292 for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
4293 /*
4294 * The link quality table index starts at 0 == highest
4295 * rate, so we walk the rate table backwards.
4296 */
4297 cmp_rate = rs->rs_rates[(nr - 1) - i];
4298 if (rate & IEEE80211_RATE_MCS)
4299 cmp_rate |= IEEE80211_RATE_MCS;
4300
4301#if 0
4302 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
4303 __func__,
4304 i,
4305 nr,
4306 rate,
4307 cmp_rate);
4308#endif
4309
4310 if (cmp_rate == rate)
4311 return (i);
4312 }
4313
4314 /* Failed? Start at the end */
4315 return (IWN_MAX_TX_RETRIES - 1);
4316}
4317
4318static int
4319iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
4320{
4321 struct iwn_ops *ops = &sc->ops;
4322 const struct ieee80211_txparam *tp;
4323 struct ieee80211vap *vap = ni->ni_vap;
4324 struct ieee80211com *ic = ni->ni_ic;
4325 struct iwn_node *wn = (void *)ni;
4326 struct iwn_tx_ring *ring;
4327 struct iwn_tx_desc *desc;
4328 struct iwn_tx_data *data;
4329 struct iwn_tx_cmd *cmd;
4330 struct iwn_cmd_data *tx;
4331 struct ieee80211_frame *wh;
4332 struct ieee80211_key *k = NULL;
4333 struct mbuf *m1;
4334 uint32_t flags;
4335 uint16_t qos;
4336 u_int hdrlen;
4337 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
4338 uint8_t tid, type;
4339 int ac, i, totlen, error, pad, nsegs = 0, rate;
4340
4341 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4342
4343 IWN_LOCK_ASSERT(sc);
4344
4345 wh = mtod(m, struct ieee80211_frame *);
4346 hdrlen = ieee80211_anyhdrsize(wh);
4347 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4348
4349 /* Select EDCA Access Category and TX ring for this frame. */
4350 if (IEEE80211_QOS_HAS_SEQ(wh)) {
4351 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
4352 tid = qos & IEEE80211_QOS_TID;
4353 } else {
4354 qos = 0;
4355 tid = 0;
4356 }
4357 ac = M_WME_GETAC(m);
4358 if (m->m_flags & M_AMPDU_MPDU) {
4359 uint16_t seqno;
4360 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
4361
4362 if (!IEEE80211_AMPDU_RUNNING(tap)) {
4363 m_freem(m);
4364 return EINVAL;
4365 }
4366
4367 /*
4368 * Queue this frame to the hardware ring that we've
4369 * negotiated AMPDU TX on.
4370 *
4371 * Note that the sequence number must match the TX slot
4372 * being used!
4373 */
4374 ac = *(int *)tap->txa_private;
4375 seqno = ni->ni_txseqs[tid];
4376 *(uint16_t *)wh->i_seq =
4377 htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
4378 ring = &sc->txq[ac];
4379 if ((seqno % 256) != ring->cur) {
4380 device_printf(sc->sc_dev,
4381 "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
4382 __func__,
4383 m,
4384 seqno,
4385 seqno % 256,
4386 ring->cur);
4387 }
4388 ni->ni_txseqs[tid]++;
4389 }
4390 ring = &sc->txq[ac];
4391 desc = &ring->desc[ring->cur];
4392 data = &ring->data[ring->cur];
4393
4394 /* Choose a TX rate index. */
4395 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
4396 if (type == IEEE80211_FC0_TYPE_MGT)
4397 rate = tp->mgmtrate;
4398 else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4399 rate = tp->mcastrate;
4400 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
4401 rate = tp->ucastrate;
4402 else if (m->m_flags & M_EAPOL)
4403 rate = tp->mgmtrate;
4404 else {
4405 /* XXX pass pktlen */
4406 (void) ieee80211_ratectl_rate(ni, NULL, 0);
4407 rate = ni->ni_txrate;
4408 }
4409
4410 /* Encrypt the frame if need be. */
4411 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4412 /* Retrieve key for TX. */
4413 k = ieee80211_crypto_encap(ni, m);
4414 if (k == NULL) {
4415 m_freem(m);
4416 return ENOBUFS;
4417 }
4418 /* 802.11 header may have moved. */
4419 wh = mtod(m, struct ieee80211_frame *);
4420 }
4421 totlen = m->m_pkthdr.len;
4422
4423 if (ieee80211_radiotap_active_vap(vap)) {
4424 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
4425
4426 tap->wt_flags = 0;
4427 tap->wt_rate = rate;
4428 if (k != NULL)
4429 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4430
4431 ieee80211_radiotap_tx(vap, m);
4432 }
4433
4434 /* Prepare TX firmware command. */
4435 cmd = &ring->cmd[ring->cur];
4436 cmd->code = IWN_CMD_TX_DATA;
4437 cmd->flags = 0;
4438 cmd->qid = ring->qid;
4439 cmd->idx = ring->cur;
4440
4441 tx = (struct iwn_cmd_data *)cmd->data;
4442 /* NB: No need to clear tx, all fields are reinitialized here. */
4443 tx->scratch = 0; /* clear "scratch" area */
4444
4445 flags = 0;
4446 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4447 /* Unicast frame, check if an ACK is expected. */
4448 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
4449 IEEE80211_QOS_ACKPOLICY_NOACK)
4450 flags |= IWN_TX_NEED_ACK;
4451 }
4452 if ((wh->i_fc[0] &
4453 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
4454 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
4455 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
4456
4457 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
4458 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
4459
4460 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
4461 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4462 /* NB: Group frames are sent using CCK in 802.11b/g. */
4463 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
4464 flags |= IWN_TX_NEED_RTS;
4465 } else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
4466 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
4467 flags |= IWN_TX_NEED_CTS;
4468 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
4469 flags |= IWN_TX_NEED_RTS;
4470 } else if ((rate & IEEE80211_RATE_MCS) &&
4471 (ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) {
4472 flags |= IWN_TX_NEED_RTS;
4473 }
4474
4475 /* XXX HT protection? */
4476
4477 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
4478 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4479 /* 5000 autoselects RTS/CTS or CTS-to-self. */
4480 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
4481 flags |= IWN_TX_NEED_PROTECTION;
4482 } else
4483 flags |= IWN_TX_FULL_TXOP;
4484 }
4485 }
4486
4487 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4488 type != IEEE80211_FC0_TYPE_DATA)
4489 tx->id = sc->broadcast_id;
4490 else
4491 tx->id = wn->id;
4492
4493 if (type == IEEE80211_FC0_TYPE_MGT) {
4494 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4495
4496 /* Tell HW to set timestamp in probe responses. */
4497 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4498 flags |= IWN_TX_INSERT_TSTAMP;
4499 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4500 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4501 tx->timeout = htole16(3);
4502 else
4503 tx->timeout = htole16(2);
4504 } else
4505 tx->timeout = htole16(0);
4506
4507 if (hdrlen & 3) {
4508 /* First segment length must be a multiple of 4. */
4509 flags |= IWN_TX_NEED_PADDING;
4510 pad = 4 - (hdrlen & 3);
4511 } else
4512 pad = 0;
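	/* e.g. a 26-byte QoS header yields pad = 2; a 24-byte header needs none. */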
4513
4514 tx->len = htole16(totlen);
4515 tx->tid = tid;
4516 tx->rts_ntries = 60;
4517 tx->data_ntries = 15;
4518 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4519 tx->rate = iwn_rate_to_plcp(sc, ni, rate);
4520 if (tx->id == sc->broadcast_id) {
4521 /* Group or management frame. */
4522 tx->linkq = 0;
4523 } else {
4524 tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
4525 flags |= IWN_TX_LINKQ; /* enable MRR */
4526 }
4527
4528 /* Set physical address of "scratch area". */
4529 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
4530 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
4531
4532 /* Copy 802.11 header in TX command. */
4533 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
4534
4535 /* Trim 802.11 header. */
4536 m_adj(m, hdrlen);
4537 tx->security = 0;
4538 tx->flags = htole32(flags);
4539
4540 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
4541 &nsegs, BUS_DMA_NOWAIT);
4542 if (error != 0) {
4543 if (error != EFBIG) {
4544 device_printf(sc->sc_dev,
4545 "%s: can't map mbuf (error %d)\n", __func__, error);
4546 m_freem(m);
4547 return error;
4548 }
4549 /* Too many DMA segments, linearize mbuf. */
4550 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
4551 if (m1 == NULL) {
4552 device_printf(sc->sc_dev,
4553 "%s: could not defrag mbuf\n", __func__);
4554 m_freem(m);
4555 return ENOBUFS;
4556 }
4557 m = m1;
4558
4559 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4560 segs, &nsegs, BUS_DMA_NOWAIT);
4561 if (error != 0) {
4562 device_printf(sc->sc_dev,
4563 "%s: can't map mbuf (error %d)\n", __func__, error);
4564 m_freem(m);
4565 return error;
4566 }
4567 }
4568
4569 data->m = m;
4570 data->ni = ni;
4571
4572 DPRINTF(sc, IWN_DEBUG_XMIT,
4573 "%s: qid %d idx %d len %d nsegs %d flags 0x%08x rate 0x%04x plcp 0x%08x\n",
4574 __func__,
4575 ring->qid,
4576 ring->cur,
4577 m->m_pkthdr.len,
4578 nsegs,
4579 flags,
4580 rate,
4581 tx->rate);
4582
4583 /* Fill TX descriptor. */
4584 desc->nsegs = 1;
4585 if (m->m_len != 0)
4586 desc->nsegs += nsegs;
4587 /* First DMA segment is used by the TX command. */
4588 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4589 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
4590 (4 + sizeof (*tx) + hdrlen + pad) << 4);
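	/*
	 * Each segment entry packs the upper bits of the DMA address in its
	 * low nibble and the byte count in the bits above, hence the "<< 4"
	 * applied to every length field.
	 */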
4591 /* Other DMA segments are for data payload. */
4592 seg = &segs[0];
4593 for (i = 1; i <= nsegs; i++) {
4594 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4595 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
4596 seg->ds_len << 4);
4597 seg++;
4598 }
4599
4600 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4601 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4602 BUS_DMASYNC_PREWRITE);
4603 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4604 BUS_DMASYNC_PREWRITE);
4605
4606 /* Update TX scheduler. */
4607 if (ring->qid >= sc->firstaggqueue)
4608 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4609
4610 /* Kick TX ring. */
4611 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4612 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4613
4614 /* Mark TX ring as full if we reach a certain threshold. */
4615 if (++ring->queued > IWN_TX_RING_HIMARK)
4616 sc->qfullmsk |= 1 << ring->qid;
4617
4618 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4619
4620 return 0;
4621}
4622
4623static int
4624iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
4625 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
4626{
4627 struct iwn_ops *ops = &sc->ops;
4628 struct ieee80211vap *vap = ni->ni_vap;
4629 struct iwn_tx_cmd *cmd;
4630 struct iwn_cmd_data *tx;
4631 struct ieee80211_frame *wh;
4632 struct iwn_tx_ring *ring;
4633 struct iwn_tx_desc *desc;
4634 struct iwn_tx_data *data;
4635 struct mbuf *m1;
4636 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
4637 uint32_t flags;
4638 u_int hdrlen;
4639 int ac, totlen, error, pad, nsegs = 0, i, rate;
4640 uint8_t type;
4641
4642 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4643
4644 IWN_LOCK_ASSERT(sc);
4645
4646 wh = mtod(m, struct ieee80211_frame *);
4647 hdrlen = ieee80211_anyhdrsize(wh);
4648 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4649
4650 ac = params->ibp_pri & 3;
4651
4652 ring = &sc->txq[ac];
4653 desc = &ring->desc[ring->cur];
4654 data = &ring->data[ring->cur];
4655
4656 /* Choose a TX rate. */
4657 rate = params->ibp_rate0;
4658 totlen = m->m_pkthdr.len;
4659
4660 /* Prepare TX firmware command. */
4661 cmd = &ring->cmd[ring->cur];
4662 cmd->code = IWN_CMD_TX_DATA;
4663 cmd->flags = 0;
4664 cmd->qid = ring->qid;
4665 cmd->idx = ring->cur;
4666
4667 tx = (struct iwn_cmd_data *)cmd->data;
4668 /* NB: No need to clear tx, all fields are reinitialized here. */
4669 tx->scratch = 0; /* clear "scratch" area */
4670
4671 flags = 0;
4672 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
4673 flags |= IWN_TX_NEED_ACK;
4674 if (params->ibp_flags & IEEE80211_BPF_RTS) {
4675 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4676 /* 5000 autoselects RTS/CTS or CTS-to-self. */
4677 flags &= ~IWN_TX_NEED_RTS;
4678 flags |= IWN_TX_NEED_PROTECTION;
4679 } else
4680 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
4681 }
4682 if (params->ibp_flags & IEEE80211_BPF_CTS) {
4683 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4684 /* 5000 autoselects RTS/CTS or CTS-to-self. */
4685 flags &= ~IWN_TX_NEED_CTS;
4686 flags |= IWN_TX_NEED_PROTECTION;
4687 } else
4688 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
4689 }
4690 if (type == IEEE80211_FC0_TYPE_MGT) {
4691 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4692
4693 /* Tell HW to set timestamp in probe responses. */
4694 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4695 flags |= IWN_TX_INSERT_TSTAMP;
4696
4697 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4698 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4699 tx->timeout = htole16(3);
4700 else
4701 tx->timeout = htole16(2);
4702 } else
4703 tx->timeout = htole16(0);
4704
4705 if (hdrlen & 3) {
4706 /* First segment length must be a multiple of 4. */
4707 flags |= IWN_TX_NEED_PADDING;
4708 pad = 4 - (hdrlen & 3);
4709 } else
4710 pad = 0;
4711
4712 if (ieee80211_radiotap_active_vap(vap)) {
4713 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
4714
4715 tap->wt_flags = 0;
4716 tap->wt_rate = rate;
4717
4718 ieee80211_radiotap_tx(vap, m);
4719 }
4720
4721 tx->len = htole16(totlen);
4722 tx->tid = 0;
4723 tx->id = sc->broadcast_id;
4724 tx->rts_ntries = params->ibp_try1;
4725 tx->data_ntries = params->ibp_try0;
4726 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4727 tx->rate = iwn_rate_to_plcp(sc, ni, rate);
4728
4729 /* Group or management frame. */
4730 tx->linkq = 0;
4731
4732 /* Set physical address of "scratch area". */
4733 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
4734 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
4735
4736 /* Copy 802.11 header in TX command. */
4737 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
4738
4739 /* Trim 802.11 header. */
4740 m_adj(m, hdrlen);
4741 tx->security = 0;
4742 tx->flags = htole32(flags);
4743
4744 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
4745 &nsegs, BUS_DMA_NOWAIT);
4746 if (error != 0) {
4747 if (error != EFBIG) {
4748 device_printf(sc->sc_dev,
4749 "%s: can't map mbuf (error %d)\n", __func__, error);
4750 m_freem(m);
4751 return error;
4752 }
4753 /* Too many DMA segments, linearize mbuf. */
4754 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
4755 if (m1 == NULL) {
4756 device_printf(sc->sc_dev,
4757 "%s: could not defrag mbuf\n", __func__);
4758 m_freem(m);
4759 return ENOBUFS;
4760 }
4761 m = m1;
4762
4763 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4764 segs, &nsegs, BUS_DMA_NOWAIT);
4765 if (error != 0) {
4766 device_printf(sc->sc_dev,
4767 "%s: can't map mbuf (error %d)\n", __func__, error);
4768 m_freem(m);
4769 return error;
4770 }
4771 }
4772
4773 data->m = m;
4774 data->ni = ni;
4775
4776 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
4777 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
4778
4779 /* Fill TX descriptor. */
4780 desc->nsegs = 1;
4781 if (m->m_len != 0)
4782 desc->nsegs += nsegs;
4783 /* First DMA segment is used by the TX command. */
4784 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4785 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
4786 (4 + sizeof (*tx) + hdrlen + pad) << 4);
4787 /* Other DMA segments are for data payload. */
4788 seg = &segs[0];
4789 for (i = 1; i <= nsegs; i++) {
4790 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4791 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
4792 seg->ds_len << 4);
4793 seg++;
4794 }
4795
4796 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4797 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4798 BUS_DMASYNC_PREWRITE);
4799 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4800 BUS_DMASYNC_PREWRITE);
4801
4802 /* Update TX scheduler. */
4803 if (ring->qid >= sc->firstaggqueue)
4804 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4805
4806 /* Kick TX ring. */
4807 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4808 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4809
4810 /* Mark TX ring as full if we reach a certain threshold. */
4811 if (++ring->queued > IWN_TX_RING_HIMARK)
4812 sc->qfullmsk |= 1 << ring->qid;
4813
4814 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4815
4816 return 0;
4817}
4818
4819static void
4820iwn_xmit_task(void *arg0, int pending)
4821{
4822 struct iwn_softc *sc = arg0;
4823 struct ieee80211_node *ni;
4824 struct mbuf *m;
4825 int error;
4826 struct ieee80211_bpf_params p;
4827 int have_p;
4828
4829 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);
4830
4831 IWN_LOCK(sc);
4832 /*
4833 * Dequeue frames and attempt to transmit them,
4834 * then clear the beacon-wait flag when we're done.
4835 */
4836 while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
4837 have_p = 0;
4838 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4839
4840 /* Get xmit params if appropriate */
4841 if (ieee80211_get_xmit_params(m, &p) == 0)
4842 have_p = 1;
4843
4844 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: m=%p, have_p=%d\n",
4845 __func__, m, have_p);
4846
4847 /* If we have xmit params, use them */
4848 if (have_p)
4849 error = iwn_tx_data_raw(sc, m, ni, &p);
4850 else
4851 error = iwn_tx_data(sc, m, ni);
4852
4853 if (error != 0) {
4854 if_inc_counter(ni->ni_vap->iv_ifp,
4855 IFCOUNTER_OERRORS, 1);
4856 ieee80211_free_node(ni);
4857 }
4858 }
4859
4860 sc->sc_beacon_wait = 0;
4861 IWN_UNLOCK(sc);
4862}
4863
4864static int
4865iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4866 const struct ieee80211_bpf_params *params)
4867{
4868 struct ieee80211com *ic = ni->ni_ic;
4869 struct iwn_softc *sc = ic->ic_softc;
4870 int error = 0;
4871
4872 DPRINTF(sc, IWN_DEBUG_XMIT | IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4873
4874 if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) {
4875 ieee80211_free_node(ni);
4876 m_freem(m);
4877 return ENETDOWN;
4878 }
4879
4880 /* XXX? net80211 doesn't set this on xmit'ed raw frames? */
4881 m->m_pkthdr.rcvif = (void *) ni;
4882
4883 IWN_LOCK(sc);
4884
4885 /* queue frame if we have to */
4886 if (sc->sc_beacon_wait) {
4887 if (iwn_xmit_queue_enqueue(sc, m) != 0) {
4888 m_freem(m);
4889 if_inc_counter(ni->ni_vap->iv_ifp,
4890 IFCOUNTER_OERRORS, 1);
4891 ieee80211_free_node(ni);
4892 IWN_UNLOCK(sc);
4893 return (ENOBUFS);
4894 }
4895 /* Queued, so just return OK */
4896 IWN_UNLOCK(sc);
4897 return (0);
4898 }
4899
4900 if (params == NULL) {
4901 /*
4902 * Legacy path; interpret frame contents to decide
4903 * precisely how to send the frame.
4904 */
4905 error = iwn_tx_data(sc, m, ni);
4906 } else {
4907 /*
4908 * Caller supplied explicit parameters to use in
4909 * sending the frame.
4910 */
4911 error = iwn_tx_data_raw(sc, m, ni, params);
4912 }
4913 if (error != 0) {
4914 /* NB: m is reclaimed on tx failure */
4915 ieee80211_free_node(ni);
4916 } else
4917 sc->sc_tx_timer = 5;
4918
4919 IWN_UNLOCK(sc);
4920
4921 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end\n",__func__);
4922
4923 return error;
4924}
4925
4926static int
4927iwn_transmit(struct ieee80211com *ic, struct mbuf *m)
4928{
4929 struct iwn_softc *sc;
4930 int error;
4931
4932 sc = ic->ic_softc;
4933
4934 IWN_LOCK(sc);
4935 if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) {
4936 IWN_UNLOCK(sc);
4937 return (ENXIO);
4938 }
4939 error = mbufq_enqueue(&sc->sc_snd, m);
4940 if (error) {
4941 IWN_UNLOCK(sc);
4942 return (error);
4943 }
4944 iwn_start_locked(sc);
4945 IWN_UNLOCK(sc);
4946 return (0);
4947}
4948
4949static void
4950iwn_start_locked(struct iwn_softc *sc)
4951{
4952 struct ieee80211_node *ni;
4953 struct mbuf *m;
4954
4955 IWN_LOCK_ASSERT(sc);
4956
4957 /*
4958 * If we're waiting for a beacon, we can just return here
4959 * and wait for the taskqueue to be kicked.
4960 */
4961 if (sc->sc_beacon_wait) {
4962 return;
4963 }
4964
4965 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);
4966 while (sc->qfullmsk == 0 &&
4967 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4968 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4969 if (iwn_tx_data(sc, m, ni) != 0) {
4970 if_inc_counter(ni->ni_vap->iv_ifp,
4971 IFCOUNTER_OERRORS, 1);
4972 ieee80211_free_node(ni);
4973 } else
4974 sc->sc_tx_timer = 5;
4975 }
4976 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: done\n", __func__);
4977}
4978
4979static void
4980iwn_watchdog(void *arg)
4981{
4982 struct iwn_softc *sc = arg;
4983 struct ieee80211com *ic = &sc->sc_ic;
4984
4985 IWN_LOCK_ASSERT(sc);
4986
4987 KASSERT(sc->sc_flags & IWN_FLAG_RUNNING, ("not running"));
4988
4989 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4990
4991 if (sc->sc_tx_timer > 0) {
4992 if (--sc->sc_tx_timer == 0) {
4993 ic_printf(ic, "device timeout\n");
4994 ieee80211_runtask(ic, &sc->sc_reinit_task);
4995 return;
4996 }
4997 }
4998 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
4999}
5000
5001static int
1448 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
1449 IWN_LOCK_DESTROY(sc);
1450 return 0;
1451}
1452
1453static int
1454iwn_shutdown(device_t dev)
1455{
1456 struct iwn_softc *sc = device_get_softc(dev);
1457
1458 iwn_stop(sc);
1459 return 0;
1460}
1461
1462static int
1463iwn_suspend(device_t dev)
1464{
1465 struct iwn_softc *sc = device_get_softc(dev);
1466
1467 ieee80211_suspend_all(&sc->sc_ic);
1468 return 0;
1469}
1470
1471static int
1472iwn_resume(device_t dev)
1473{
1474 struct iwn_softc *sc = device_get_softc(dev);
1475
1476 /* Clear device-specific "PCI retry timeout" register (41h). */
1477 pci_write_config(dev, 0x41, 0, 1);
1478
1479 ieee80211_resume_all(&sc->sc_ic);
1480 return 0;
1481}
1482
1483static int
1484iwn_nic_lock(struct iwn_softc *sc)
1485{
1486 int ntries;
1487
1488 /* Request exclusive access to NIC. */
1489 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1490
1491 /* Spin until we actually get the lock. */
1492 for (ntries = 0; ntries < 1000; ntries++) {
1493 if ((IWN_READ(sc, IWN_GP_CNTRL) &
1494 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
1495 IWN_GP_CNTRL_MAC_ACCESS_ENA)
1496 return 0;
1497 DELAY(10);
1498 }
1499 return ETIMEDOUT;
1500}
1501
1502static __inline void
1503iwn_nic_unlock(struct iwn_softc *sc)
1504{
1505 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1506}
1507
1508static __inline uint32_t
1509iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1510{
1511 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1512 IWN_BARRIER_READ_WRITE(sc);
1513 return IWN_READ(sc, IWN_PRPH_RDATA);
1514}
1515
1516static __inline void
1517iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1518{
1519 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1520 IWN_BARRIER_WRITE(sc);
1521 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1522}
1523
1524static __inline void
1525iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1526{
1527 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1528}
1529
1530static __inline void
1531iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1532{
1533 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1534}
1535
1536static __inline void
1537iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1538 const uint32_t *data, int count)
1539{
1540 for (; count > 0; count--, data++, addr += 4)
1541 iwn_prph_write(sc, addr, *data);
1542}
1543
1544static __inline uint32_t
1545iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1546{
1547 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1548 IWN_BARRIER_READ_WRITE(sc);
1549 return IWN_READ(sc, IWN_MEM_RDATA);
1550}
1551
1552static __inline void
1553iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1554{
1555 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1556 IWN_BARRIER_WRITE(sc);
1557 IWN_WRITE(sc, IWN_MEM_WDATA, data);
1558}
1559
1560static __inline void
1561iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1562{
1563 uint32_t tmp;
1564
1565 tmp = iwn_mem_read(sc, addr & ~3);
1566 if (addr & 3)
1567 tmp = (tmp & 0x0000ffff) | data << 16;
1568 else
1569 tmp = (tmp & 0xffff0000) | data;
1570 iwn_mem_write(sc, addr & ~3, tmp);
1571}
1572
1573static __inline void
1574iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1575 int count)
1576{
1577 for (; count > 0; count--, addr += 4)
1578 *data++ = iwn_mem_read(sc, addr);
1579}
1580
1581static __inline void
1582iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1583 int count)
1584{
1585 for (; count > 0; count--, addr += 4)
1586 iwn_mem_write(sc, addr, val);
1587}
1588
1589static int
1590iwn_eeprom_lock(struct iwn_softc *sc)
1591{
1592 int i, ntries;
1593
1594 for (i = 0; i < 100; i++) {
1595 /* Request exclusive access to EEPROM. */
1596 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1597 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1598
1599 /* Spin until we actually get the lock. */
1600 for (ntries = 0; ntries < 100; ntries++) {
1601 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1602 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1603 return 0;
1604 DELAY(10);
1605 }
1606 }
1607 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
1608 return ETIMEDOUT;
1609}
1610
1611static __inline void
1612iwn_eeprom_unlock(struct iwn_softc *sc)
1613{
1614 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1615}
1616
1617/*
1618 * Initialize access by host to One Time Programmable ROM.
1619 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1620 */
1621static int
1622iwn_init_otprom(struct iwn_softc *sc)
1623{
1624 uint16_t prev, base, next;
1625 int count, error;
1626
1627 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1628
1629 /* Wait for clock stabilization before accessing prph. */
1630 if ((error = iwn_clock_wait(sc)) != 0)
1631 return error;
1632
1633 if ((error = iwn_nic_lock(sc)) != 0)
1634 return error;
1635 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1636 DELAY(5);
1637 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1638 iwn_nic_unlock(sc);
1639
1640 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1641 if (sc->base_params->shadow_ram_support) {
1642 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1643 IWN_RESET_LINK_PWR_MGMT_DIS);
1644 }
1645 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1646 /* Clear ECC status. */
1647 IWN_SETBITS(sc, IWN_OTP_GP,
1648 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1649
1650 /*
1651 * Find the next-to-last block (it contains the EEPROM image)
1652 * on HW without OTP shadow RAM.
1653 */
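	/* Each OTP block starts with a 16-bit link to the next block; a zero link ends the list. */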
1654 if (! sc->base_params->shadow_ram_support) {
1655 /* Switch to absolute addressing mode. */
1656 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1657 base = prev = 0;
1658 for (count = 0; count < sc->base_params->max_ll_items;
1659 count++) {
1660 error = iwn_read_prom_data(sc, base, &next, 2);
1661 if (error != 0)
1662 return error;
1663 if (next == 0) /* End of linked-list. */
1664 break;
1665 prev = base;
1666 base = le16toh(next);
1667 }
1668 if (count == 0 || count == sc->base_params->max_ll_items)
1669 return EIO;
1670 /* Skip "next" word. */
1671 sc->prom_base = prev + 1;
1672 }
1673
1674 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1675
1676 return 0;
1677}
1678
1679static int
1680iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1681{
1682 uint8_t *out = data;
1683 uint32_t val, tmp;
1684 int ntries;
1685
1686 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1687
1688 addr += sc->prom_base;
1689 for (; count > 0; count -= 2, addr++) {
1690 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1691 for (ntries = 0; ntries < 10; ntries++) {
1692 val = IWN_READ(sc, IWN_EEPROM);
1693 if (val & IWN_EEPROM_READ_VALID)
1694 break;
1695 DELAY(5);
1696 }
1697 if (ntries == 10) {
1698 device_printf(sc->sc_dev,
1699 "timeout reading ROM at 0x%x\n", addr);
1700 return ETIMEDOUT;
1701 }
1702 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1703 /* OTPROM, check for ECC errors. */
1704 tmp = IWN_READ(sc, IWN_OTP_GP);
1705 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1706 device_printf(sc->sc_dev,
1707 "OTPROM ECC error at 0x%x\n", addr);
1708 return EIO;
1709 }
1710 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1711 /* Correctable ECC error, clear bit. */
1712 IWN_SETBITS(sc, IWN_OTP_GP,
1713 IWN_OTP_GP_ECC_CORR_STTS);
1714 }
1715 }
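			/* The 16-bit ROM word is returned in the upper half of the EEPROM register. */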
1716 *out++ = val >> 16;
1717 if (count > 1)
1718 *out++ = val >> 24;
1719 }
1720
1721 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
1722
1723 return 0;
1724}
1725
1726static void
1727iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1728{
1729 if (error != 0)
1730 return;
1731 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1732 *(bus_addr_t *)arg = segs[0].ds_addr;
1733}
1734
1735static int
1736iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1737 void **kvap, bus_size_t size, bus_size_t alignment)
1738{
1739 int error;
1740
1741 dma->tag = NULL;
1742 dma->size = size;
1743
1744 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1745 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1746 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1747 if (error != 0)
1748 goto fail;
1749
1750 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1751 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1752 if (error != 0)
1753 goto fail;
1754
1755 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1756 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1757 if (error != 0)
1758 goto fail;
1759
1760 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1761
1762 if (kvap != NULL)
1763 *kvap = dma->vaddr;
1764
1765 return 0;
1766
1767fail: iwn_dma_contig_free(dma);
1768 return error;
1769}
1770
1771static void
1772iwn_dma_contig_free(struct iwn_dma_info *dma)
1773{
1774 if (dma->vaddr != NULL) {
1775 bus_dmamap_sync(dma->tag, dma->map,
1776 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1777 bus_dmamap_unload(dma->tag, dma->map);
1778 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1779 dma->vaddr = NULL;
1780 }
1781 if (dma->tag != NULL) {
1782 bus_dma_tag_destroy(dma->tag);
1783 dma->tag = NULL;
1784 }
1785}
1786
1787static int
1788iwn_alloc_sched(struct iwn_softc *sc)
1789{
1790 /* TX scheduler rings must be aligned on a 1KB boundary. */
1791 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1792 sc->schedsz, 1024);
1793}
1794
1795static void
1796iwn_free_sched(struct iwn_softc *sc)
1797{
1798 iwn_dma_contig_free(&sc->sched_dma);
1799}
1800
1801static int
1802iwn_alloc_kw(struct iwn_softc *sc)
1803{
1804 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1805 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1806}
1807
1808static void
1809iwn_free_kw(struct iwn_softc *sc)
1810{
1811 iwn_dma_contig_free(&sc->kw_dma);
1812}
1813
1814static int
1815iwn_alloc_ict(struct iwn_softc *sc)
1816{
1817 /* ICT table must be aligned on a 4KB boundary. */
1818 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1819 IWN_ICT_SIZE, 4096);
1820}
1821
1822static void
1823iwn_free_ict(struct iwn_softc *sc)
1824{
1825 iwn_dma_contig_free(&sc->ict_dma);
1826}
1827
1828static int
1829iwn_alloc_fwmem(struct iwn_softc *sc)
1830{
1831 /* Must be aligned on a 16-byte boundary. */
1832 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1833}
1834
1835static void
1836iwn_free_fwmem(struct iwn_softc *sc)
1837{
1838 iwn_dma_contig_free(&sc->fw_dma);
1839}
1840
1841static int
1842iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1843{
1844 bus_size_t size;
1845 int i, error;
1846
1847 ring->cur = 0;
1848
1849 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1850
1851 /* Allocate RX descriptors (256-byte aligned). */
1852 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1853 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1854 size, 256);
1855 if (error != 0) {
1856 device_printf(sc->sc_dev,
1857 "%s: could not allocate RX ring DMA memory, error %d\n",
1858 __func__, error);
1859 goto fail;
1860 }
1861
1862 /* Allocate RX status area (16-byte aligned). */
1863 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1864 sizeof (struct iwn_rx_status), 16);
1865 if (error != 0) {
1866 device_printf(sc->sc_dev,
1867 "%s: could not allocate RX status DMA memory, error %d\n",
1868 __func__, error);
1869 goto fail;
1870 }
1871
1872 /* Create RX buffer DMA tag. */
1873 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1874 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1875 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1876 &ring->data_dmat);
1877 if (error != 0) {
1878 device_printf(sc->sc_dev,
1879 "%s: could not create RX buf DMA tag, error %d\n",
1880 __func__, error);
1881 goto fail;
1882 }
1883
1884 /*
1885 * Allocate and map RX buffers.
1886 */
1887 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1888 struct iwn_rx_data *data = &ring->data[i];
1889 bus_addr_t paddr;
1890
1891 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1892 if (error != 0) {
1893 device_printf(sc->sc_dev,
1894 "%s: could not create RX buf DMA map, error %d\n",
1895 __func__, error);
1896 goto fail;
1897 }
1898
1899 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1900 IWN_RBUF_SIZE);
1901 if (data->m == NULL) {
1902 device_printf(sc->sc_dev,
1903 "%s: could not allocate RX mbuf\n", __func__);
1904 error = ENOBUFS;
1905 goto fail;
1906 }
1907
1908 error = bus_dmamap_load(ring->data_dmat, data->map,
1909 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1910 &paddr, BUS_DMA_NOWAIT);
1911 if (error != 0 && error != EFBIG) {
1912 device_printf(sc->sc_dev,
1913 "%s: can't map mbuf, error %d\n", __func__,
1914 error);
1915 goto fail;
1916 }
1917
1918 /* Set physical address of RX buffer (256-byte aligned). */
1919 ring->desc[i] = htole32(paddr >> 8);
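		/* i.e. the descriptor holds the buffer DMA address in 256-byte units. */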
1920 }
1921
1922 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1923 BUS_DMASYNC_PREWRITE);
1924
1925 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
1926
1927 return 0;
1928
1929fail: iwn_free_rx_ring(sc, ring);
1930
1931 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
1932
1933 return error;
1934}
1935
1936static void
1937iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1938{
1939 int ntries;
1940
1941 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
1942
1943 if (iwn_nic_lock(sc) == 0) {
1944 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1945 for (ntries = 0; ntries < 1000; ntries++) {
1946 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1947 IWN_FH_RX_STATUS_IDLE)
1948 break;
1949 DELAY(10);
1950 }
1951 iwn_nic_unlock(sc);
1952 }
1953 ring->cur = 0;
1954 sc->last_rx_valid = 0;
1955}
1956
1957static void
1958iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1959{
1960 int i;
1961
1962 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
1963
1964 iwn_dma_contig_free(&ring->desc_dma);
1965 iwn_dma_contig_free(&ring->stat_dma);
1966
1967 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1968 struct iwn_rx_data *data = &ring->data[i];
1969
1970 if (data->m != NULL) {
1971 bus_dmamap_sync(ring->data_dmat, data->map,
1972 BUS_DMASYNC_POSTREAD);
1973 bus_dmamap_unload(ring->data_dmat, data->map);
1974 m_freem(data->m);
1975 data->m = NULL;
1976 }
1977 if (data->map != NULL)
1978 bus_dmamap_destroy(ring->data_dmat, data->map);
1979 }
1980 if (ring->data_dmat != NULL) {
1981 bus_dma_tag_destroy(ring->data_dmat);
1982 ring->data_dmat = NULL;
1983 }
1984}
1985
1986static int
1987iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1988{
1989 bus_addr_t paddr;
1990 bus_size_t size;
1991 int i, error;
1992
1993 ring->qid = qid;
1994 ring->queued = 0;
1995 ring->cur = 0;
1996
1997 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
1998
1999 /* Allocate TX descriptors (256-byte aligned). */
2000 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
2001 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
2002 size, 256);
2003 if (error != 0) {
2004 device_printf(sc->sc_dev,
2005 "%s: could not allocate TX ring DMA memory, error %d\n",
2006 __func__, error);
2007 goto fail;
2008 }
2009
2010 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
2011 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
2012 size, 4);
2013 if (error != 0) {
2014 device_printf(sc->sc_dev,
2015 "%s: could not allocate TX cmd DMA memory, error %d\n",
2016 __func__, error);
2017 goto fail;
2018 }
2019
2020 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
2021 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
2022 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
2023 &ring->data_dmat);
2024 if (error != 0) {
2025 device_printf(sc->sc_dev,
2026 "%s: could not create TX buf DMA tag, error %d\n",
2027 __func__, error);
2028 goto fail;
2029 }
2030
2031 paddr = ring->cmd_dma.paddr;
2032 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2033 struct iwn_tx_data *data = &ring->data[i];
2034
2035 data->cmd_paddr = paddr;
2036 data->scratch_paddr = paddr + 12;
2037 paddr += sizeof (struct iwn_tx_cmd);
2038
2039 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
2040 if (error != 0) {
2041 device_printf(sc->sc_dev,
2042 "%s: could not create TX buf DMA map, error %d\n",
2043 __func__, error);
2044 goto fail;
2045 }
2046 }
2047
2048 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2049
2050 return 0;
2051
2052fail: iwn_free_tx_ring(sc, ring);
2053 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2054 return error;
2055}
2056
2057static void
2058iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2059{
2060 int i;
2061
2062 DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);
2063
2064 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2065 struct iwn_tx_data *data = &ring->data[i];
2066
2067 if (data->m != NULL) {
2068 bus_dmamap_sync(ring->data_dmat, data->map,
2069 BUS_DMASYNC_POSTWRITE);
2070 bus_dmamap_unload(ring->data_dmat, data->map);
2071 m_freem(data->m);
2072 data->m = NULL;
2073 }
2074 if (data->ni != NULL) {
2075 ieee80211_free_node(data->ni);
2076 data->ni = NULL;
2077 }
2078 }
2079 /* Clear TX descriptors. */
2080 memset(ring->desc, 0, ring->desc_dma.size);
2081 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2082 BUS_DMASYNC_PREWRITE);
2083 sc->qfullmsk &= ~(1 << ring->qid);
2084 ring->queued = 0;
2085 ring->cur = 0;
2086}
2087
2088static void
2089iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
2090{
2091 int i;
2092
2093 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
2094
2095 iwn_dma_contig_free(&ring->desc_dma);
2096 iwn_dma_contig_free(&ring->cmd_dma);
2097
2098 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
2099 struct iwn_tx_data *data = &ring->data[i];
2100
2101 if (data->m != NULL) {
2102 bus_dmamap_sync(ring->data_dmat, data->map,
2103 BUS_DMASYNC_POSTWRITE);
2104 bus_dmamap_unload(ring->data_dmat, data->map);
2105 m_freem(data->m);
2106 }
2107 if (data->map != NULL)
2108 bus_dmamap_destroy(ring->data_dmat, data->map);
2109 }
2110 if (ring->data_dmat != NULL) {
2111 bus_dma_tag_destroy(ring->data_dmat);
2112 ring->data_dmat = NULL;
2113 }
2114}
2115
2116static void
2117iwn5000_ict_reset(struct iwn_softc *sc)
2118{
2119 /* Disable interrupts. */
2120 IWN_WRITE(sc, IWN_INT_MASK, 0);
2121
2122 /* Reset ICT table. */
2123 memset(sc->ict, 0, IWN_ICT_SIZE);
2124 sc->ict_cur = 0;
2125
2126 /* Set physical address of ICT table (4KB aligned). */
2127 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
2128 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
2129 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
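	/* The register takes the table address in 4 KB units, hence the ">> 12". */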
2130
2131 /* Enable periodic RX interrupt. */
2132 sc->int_mask |= IWN_INT_RX_PERIODIC;
2133 /* Switch to ICT interrupt mode in driver. */
2134 sc->sc_flags |= IWN_FLAG_USE_ICT;
2135
2136 /* Re-enable interrupts. */
2137 IWN_WRITE(sc, IWN_INT, 0xffffffff);
2138 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2139}
2140
2141static int
2142iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
2143{
2144 struct iwn_ops *ops = &sc->ops;
2145 uint16_t val;
2146 int error;
2147
2148 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2149
2150 /* Check whether adapter has an EEPROM or an OTPROM. */
2151 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
2152 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
2153 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
2154 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
2155 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
2156
2157 /* Adapter has to be powered on for EEPROM access to work. */
2158 if ((error = iwn_apm_init(sc)) != 0) {
2159 device_printf(sc->sc_dev,
2160 "%s: could not power ON adapter, error %d\n", __func__,
2161 error);
2162 return error;
2163 }
2164
2165 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
2166 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
2167 return EIO;
2168 }
2169 if ((error = iwn_eeprom_lock(sc)) != 0) {
2170 device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
2171 __func__, error);
2172 return error;
2173 }
2174 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
2175 if ((error = iwn_init_otprom(sc)) != 0) {
2176 device_printf(sc->sc_dev,
2177 "%s: could not initialize OTPROM, error %d\n",
2178 __func__, error);
2179 return error;
2180 }
2181 }
2182
2183 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
2184 DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
2185 /* Check if HT support is bonded out. */
2186 if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
2187 sc->sc_flags |= IWN_FLAG_HAS_11N;
2188
2189 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
2190 sc->rfcfg = le16toh(val);
2191 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
2192 /* Read Tx/Rx chains from ROM unless it's known to be broken. */
2193 if (sc->txchainmask == 0)
2194 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
2195 if (sc->rxchainmask == 0)
2196 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
2197
2198 /* Read MAC address. */
2199 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
2200
2201 /* Read adapter-specific information from EEPROM. */
2202 ops->read_eeprom(sc);
2203
2204 iwn_apm_stop(sc); /* Power OFF adapter. */
2205
2206 iwn_eeprom_unlock(sc);
2207
2208 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2209
2210 return 0;
2211}
2212
2213static void
2214iwn4965_read_eeprom(struct iwn_softc *sc)
2215{
2216 uint32_t addr;
2217 uint16_t val;
2218 int i;
2219
2220 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2221
2222 /* Read regulatory domain (4 ASCII characters). */
2223 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
2224
2225 /* Read the list of authorized channels (20MHz ones only). */
2226 for (i = 0; i < IWN_NBANDS - 1; i++) {
2227 addr = iwn4965_regulatory_bands[i];
2228 iwn_read_eeprom_channels(sc, i, addr);
2229 }
2230
2231 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
2232 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
2233 sc->maxpwr2GHz = val & 0xff;
2234 sc->maxpwr5GHz = val >> 8;
2235 /* Check that EEPROM values are within valid range. */
2236 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
2237 sc->maxpwr5GHz = 38;
2238 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
2239 sc->maxpwr2GHz = 38;
2240 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
2241 sc->maxpwr2GHz, sc->maxpwr5GHz);
2242
2243 /* Read samples for each TX power group. */
2244 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
2245 sizeof sc->bands);
2246
2247 /* Read voltage at which samples were taken. */
2248 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
2249 sc->eeprom_voltage = (int16_t)le16toh(val);
2250 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
2251 sc->eeprom_voltage);
2252
2253#ifdef IWN_DEBUG
2254 /* Print samples. */
2255 if (sc->sc_debug & IWN_DEBUG_ANY) {
2256 for (i = 0; i < IWN_NBANDS - 1; i++)
2257 iwn4965_print_power_group(sc, i);
2258 }
2259#endif
2260
2261 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2262}
2263
2264#ifdef IWN_DEBUG
2265static void
2266iwn4965_print_power_group(struct iwn_softc *sc, int i)
2267{
2268 struct iwn4965_eeprom_band *band = &sc->bands[i];
2269 struct iwn4965_eeprom_chan_samples *chans = band->chans;
2270 int j, c;
2271
2272 printf("===band %d===\n", i);
2273 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
2274 printf("chan1 num=%d\n", chans[0].num);
2275 for (c = 0; c < 2; c++) {
2276 for (j = 0; j < IWN_NSAMPLES; j++) {
2277 printf("chain %d, sample %d: temp=%d gain=%d "
2278 "power=%d pa_det=%d\n", c, j,
2279 chans[0].samples[c][j].temp,
2280 chans[0].samples[c][j].gain,
2281 chans[0].samples[c][j].power,
2282 chans[0].samples[c][j].pa_det);
2283 }
2284 }
2285 printf("chan2 num=%d\n", chans[1].num);
2286 for (c = 0; c < 2; c++) {
2287 for (j = 0; j < IWN_NSAMPLES; j++) {
2288 printf("chain %d, sample %d: temp=%d gain=%d "
2289 "power=%d pa_det=%d\n", c, j,
2290 chans[1].samples[c][j].temp,
2291 chans[1].samples[c][j].gain,
2292 chans[1].samples[c][j].power,
2293 chans[1].samples[c][j].pa_det);
2294 }
2295 }
2296}
2297#endif
2298
2299static void
2300iwn5000_read_eeprom(struct iwn_softc *sc)
2301{
2302 struct iwn5000_eeprom_calib_hdr hdr;
2303 int32_t volt;
2304 uint32_t base, addr;
2305 uint16_t val;
2306 int i;
2307
2308 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2309
2310 /* Read regulatory domain (4 ASCII characters). */
2311 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2312 base = le16toh(val);
2313 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
2314 sc->eeprom_domain, 4);
2315
2316 /* Read the list of authorized channels (20MHz ones only). */
2317 for (i = 0; i < IWN_NBANDS - 1; i++) {
2318 addr = base + sc->base_params->regulatory_bands[i];
2319 iwn_read_eeprom_channels(sc, i, addr);
2320 }
2321
2322 /* Read enhanced TX power information for 6000 Series. */
2323 if (sc->base_params->enhanced_TX_power)
2324 iwn_read_eeprom_enhinfo(sc);
2325
2326 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
2327 base = le16toh(val);
2328 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
2329 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2330 "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
2331 hdr.version, hdr.pa_type, le16toh(hdr.volt));
2332 sc->calib_ver = hdr.version;
2333
2334 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
2335 sc->eeprom_voltage = le16toh(hdr.volt);
2336 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2337 		sc->eeprom_temp_high = le16toh(val);
2338 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2339 sc->eeprom_temp = le16toh(val);
2340 }
2341
2342 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
2343 /* Compute temperature offset. */
2344 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
2345 sc->eeprom_temp = le16toh(val);
2346 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
2347 volt = le16toh(val);
2348 sc->temp_off = sc->eeprom_temp - (volt / -5);
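		/*
		 * Worked example with purely hypothetical EEPROM values: if
		 * eeprom_temp = 100 and volt = -250, then volt / -5 = 50 and
		 * temp_off = 100 - 50 = 50.
		 */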
2349 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
2350 sc->eeprom_temp, volt, sc->temp_off);
2351 } else {
2352 /* Read crystal calibration. */
2353 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
2354 &sc->eeprom_crystal, sizeof (uint32_t));
2355 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
2356 le32toh(sc->eeprom_crystal));
2357 }
2358
2359 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2360
2361}
2362
2363/*
2364 * Translate EEPROM flags to net80211.
2365 */
2366static uint32_t
2367iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
2368{
2369 uint32_t nflags;
2370
2371 nflags = 0;
2372 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
2373 nflags |= IEEE80211_CHAN_PASSIVE;
2374 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
2375 nflags |= IEEE80211_CHAN_NOADHOC;
2376 if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
2377 nflags |= IEEE80211_CHAN_DFS;
2378 /* XXX apparently IBSS may still be marked */
2379 nflags |= IEEE80211_CHAN_NOADHOC;
2380 }
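	/*
	 * Example of the mapping above: an entry with ACTIVE set, IBSS clear
	 * and RADAR set maps to IEEE80211_CHAN_NOADHOC | IEEE80211_CHAN_DFS.
	 */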
2381
2382 return nflags;
2383}
2384
2385static void
2386iwn_read_eeprom_band(struct iwn_softc *sc, int n)
2387{
2388 struct ieee80211com *ic = &sc->sc_ic;
2389 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
2390 const struct iwn_chan_band *band = &iwn_bands[n];
2391 struct ieee80211_channel *c;
2392 uint8_t chan;
2393 int i, nflags;
2394
2395 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2396
2397 for (i = 0; i < band->nchan; i++) {
2398 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2399 DPRINTF(sc, IWN_DEBUG_RESET,
2400 "skip chan %d flags 0x%x maxpwr %d\n",
2401 band->chan[i], channels[i].flags,
2402 channels[i].maxpwr);
2403 continue;
2404 }
2405 chan = band->chan[i];
2406 nflags = iwn_eeprom_channel_flags(&channels[i]);
2407
2408 c = &ic->ic_channels[ic->ic_nchans++];
2409 c->ic_ieee = chan;
2410 c->ic_maxregpower = channels[i].maxpwr;
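		/*
		 * net80211 keeps ic_maxpower in half-dBm units, hence the
		 * doubling of the regulatory limit (which is in dBm).
		 */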
2411 c->ic_maxpower = 2*c->ic_maxregpower;
2412
2413 if (n == 0) { /* 2GHz band */
2414 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
2415 			/* G implies that B is also supported. */
2416 c->ic_flags = IEEE80211_CHAN_B | nflags;
2417 c = &ic->ic_channels[ic->ic_nchans++];
2418 c[0] = c[-1];
2419 c->ic_flags = IEEE80211_CHAN_G | nflags;
2420 } else { /* 5GHz band */
2421 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
2422 c->ic_flags = IEEE80211_CHAN_A | nflags;
2423 }
2424
2425 /* Save maximum allowed TX power for this channel. */
2426 sc->maxpwr[chan] = channels[i].maxpwr;
2427
2428 DPRINTF(sc, IWN_DEBUG_RESET,
2429 "add chan %d flags 0x%x maxpwr %d\n", chan,
2430 channels[i].flags, channels[i].maxpwr);
2431
2432 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
2433 /* add HT20, HT40 added separately */
2434 c = &ic->ic_channels[ic->ic_nchans++];
2435 c[0] = c[-1];
2436 c->ic_flags |= IEEE80211_CHAN_HT20;
2437 }
2438 }
2439
2440 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2441
2442}
2443
2444static void
2445iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
2446{
2447 struct ieee80211com *ic = &sc->sc_ic;
2448 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
2449 const struct iwn_chan_band *band = &iwn_bands[n];
2450 struct ieee80211_channel *c, *cent, *extc;
2451 uint8_t chan;
2452 int i, nflags;
2453
2454 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);
2455
2456 if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
2457 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
2458 return;
2459 }
2460
2461 for (i = 0; i < band->nchan; i++) {
2462 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
2463 DPRINTF(sc, IWN_DEBUG_RESET,
2464 "skip chan %d flags 0x%x maxpwr %d\n",
2465 band->chan[i], channels[i].flags,
2466 channels[i].maxpwr);
2467 continue;
2468 }
2469 chan = band->chan[i];
2470 nflags = iwn_eeprom_channel_flags(&channels[i]);
2471
2472 /*
2473 * Each entry defines an HT40 channel pair; find the
2474 * center channel, then the extension channel above.
2475 */
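		/*
		 * For example, a 2 GHz entry for channel 1 (2412 MHz) pairs
		 * with channel 5 (2432 MHz) as its extension channel above.
		 */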
2476 cent = ieee80211_find_channel_byieee(ic, chan,
2477 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2478 if (cent == NULL) { /* XXX shouldn't happen */
2479 device_printf(sc->sc_dev,
2480 "%s: no entry for channel %d\n", __func__, chan);
2481 continue;
2482 }
2483 extc = ieee80211_find_channel(ic, cent->ic_freq+20,
2484 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
2485 if (extc == NULL) {
2486 DPRINTF(sc, IWN_DEBUG_RESET,
2487 "%s: skip chan %d, extension channel not found\n",
2488 __func__, chan);
2489 continue;
2490 }
2491
2492 DPRINTF(sc, IWN_DEBUG_RESET,
2493 "add ht40 chan %d flags 0x%x maxpwr %d\n",
2494 chan, channels[i].flags, channels[i].maxpwr);
2495
2496 c = &ic->ic_channels[ic->ic_nchans++];
2497 c[0] = cent[0];
2498 c->ic_extieee = extc->ic_ieee;
2499 c->ic_flags &= ~IEEE80211_CHAN_HT;
2500 c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
2501 c = &ic->ic_channels[ic->ic_nchans++];
2502 c[0] = extc[0];
2503 c->ic_extieee = cent->ic_ieee;
2504 c->ic_flags &= ~IEEE80211_CHAN_HT;
2505 c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
2506 }
2507
2508 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2509
2510}
2511
2512static void
2513iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
2514{
2515 struct ieee80211com *ic = &sc->sc_ic;
2516
2517 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
2518 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
2519
2520 if (n < 5)
2521 iwn_read_eeprom_band(sc, n);
2522 else
2523 iwn_read_eeprom_ht40(sc, n);
2524 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
2525}
2526
2527static struct iwn_eeprom_chan *
2528iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
2529{
2530 int band, chan, i, j;
2531
2532 if (IEEE80211_IS_CHAN_HT40(c)) {
2533 band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
2534 if (IEEE80211_IS_CHAN_HT40D(c))
2535 chan = c->ic_extieee;
2536 else
2537 chan = c->ic_ieee;
2538 for (i = 0; i < iwn_bands[band].nchan; i++) {
2539 if (iwn_bands[band].chan[i] == chan)
2540 return &sc->eeprom_channels[band][i];
2541 }
2542 } else {
2543 for (j = 0; j < 5; j++) {
2544 for (i = 0; i < iwn_bands[j].nchan; i++) {
2545 if (iwn_bands[j].chan[i] == c->ic_ieee)
2546 return &sc->eeprom_channels[j][i];
2547 }
2548 }
2549 }
2550 return NULL;
2551}
2552
2553/*
2554 * Enforce flags read from EEPROM.
2555 */
2556static int
2557iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
2558 int nchan, struct ieee80211_channel chans[])
2559{
2560 struct iwn_softc *sc = ic->ic_softc;
2561 int i;
2562
2563 for (i = 0; i < nchan; i++) {
2564 struct ieee80211_channel *c = &chans[i];
2565 struct iwn_eeprom_chan *channel;
2566
2567 channel = iwn_find_eeprom_channel(sc, c);
2568 if (channel == NULL) {
2569 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
2570 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2571 return EINVAL;
2572 }
2573 c->ic_flags |= iwn_eeprom_channel_flags(channel);
2574 }
2575
2576 return 0;
2577}
2578
2579static void
2580iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2581{
2582 struct iwn_eeprom_enhinfo enhinfo[35];
2583 struct ieee80211com *ic = &sc->sc_ic;
2584 struct ieee80211_channel *c;
2585 uint16_t val, base;
2586 int8_t maxpwr;
2587 uint8_t flags;
2588 int i, j;
2589
2590 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2591
2592 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2593 base = le16toh(val);
2594 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2595 enhinfo, sizeof enhinfo);
2596
2597 for (i = 0; i < nitems(enhinfo); i++) {
2598 flags = enhinfo[i].flags;
2599 if (!(flags & IWN_ENHINFO_VALID))
2600 continue; /* Skip invalid entries. */
2601
2602 maxpwr = 0;
2603 if (sc->txchainmask & IWN_ANT_A)
2604 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2605 if (sc->txchainmask & IWN_ANT_B)
2606 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2607 if (sc->txchainmask & IWN_ANT_C)
2608 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2609 if (sc->ntxchains == 2)
2610 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2611 else if (sc->ntxchains == 3)
2612 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2613
2614 for (j = 0; j < ic->ic_nchans; j++) {
2615 c = &ic->ic_channels[j];
2616 if ((flags & IWN_ENHINFO_5GHZ)) {
2617 if (!IEEE80211_IS_CHAN_A(c))
2618 continue;
2619 } else if ((flags & IWN_ENHINFO_OFDM)) {
2620 if (!IEEE80211_IS_CHAN_G(c))
2621 continue;
2622 } else if (!IEEE80211_IS_CHAN_B(c))
2623 continue;
2624 if ((flags & IWN_ENHINFO_HT40)) {
2625 if (!IEEE80211_IS_CHAN_HT40(c))
2626 continue;
2627 } else {
2628 if (IEEE80211_IS_CHAN_HT40(c))
2629 continue;
2630 }
2631 if (enhinfo[i].chan != 0 &&
2632 enhinfo[i].chan != c->ic_ieee)
2633 continue;
2634
2635 DPRINTF(sc, IWN_DEBUG_RESET,
2636 "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2637 c->ic_flags, maxpwr / 2);
2638 c->ic_maxregpower = maxpwr / 2;
2639 c->ic_maxpower = maxpwr;
2640 }
2641 }
2642
2643 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
2644
2645}
2646
2647static struct ieee80211_node *
2648iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2649{
2650 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
2651}
2652
2653static __inline int
2654rate2plcp(int rate)
2655{
2656 switch (rate & 0xff) {
2657 case 12: return 0xd;
2658 case 18: return 0xf;
2659 case 24: return 0x5;
2660 case 36: return 0x7;
2661 case 48: return 0x9;
2662 case 72: return 0xb;
2663 case 96: return 0x1;
2664 case 108: return 0x3;
2665 case 2: return 10;
2666 case 4: return 20;
2667 case 11: return 55;
2668 case 22: return 110;
2669 }
2670 return 0;
2671}
2672
2673static int
2674iwn_get_1stream_tx_antmask(struct iwn_softc *sc)
2675{
2676
2677 return IWN_LSB(sc->txchainmask);
2678}
2679
2680static int
2681iwn_get_2stream_tx_antmask(struct iwn_softc *sc)
2682{
2683 int tx;
2684
2685 /*
2686 	 * The '2 stream' setup is a bit odd.
2687 	 *
2688 	 * For NICs that support only one antenna, default to IWN_ANT_AB or
2689 	 * the firmware panics (e.g. the Intel 5100).
2690 	 *
2691 	 * For NICs that support two antennas, we use ANT_AB.
2692 	 *
2693 	 * For NICs that support three antennas, we use the two that
2694 	 * aren't the default one.
2695 *
2696 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
2697 * this to only one antenna.
2698 */
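	/*
	 * Example (assuming IWN_LSB returns the lowest set antenna bit):
	 * txchainmask = ANT_A|ANT_B|ANT_C yields tx = ANT_B|ANT_C, while a
	 * single-antenna txchainmask = ANT_B yields 0 and falls back to
	 * IWN_ANT_AB below.
	 */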
2699
2700 /* Default - transmit on the other antennas */
2701 tx = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));
2702
2703 /* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
2704 if (tx == 0)
2705 tx = IWN_ANT_AB;
2706
2707 /*
2708 * If the NIC is a two-stream TX NIC, configure the TX mask to
2709 * the default chainmask
2710 */
2711 else if (sc->ntxchains == 2)
2712 tx = sc->txchainmask;
2713
2714 return (tx);
2715}
2716
2717
2718
2719/*
2720 * Calculate the required PLCP value from the given rate,
2721 * to the given node.
2722 *
2723 * This will take the node configuration (eg 11n, rate table
2724 * setup, etc) into consideration.
2725 */
2726static uint32_t
2727iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
2728 uint8_t rate)
2729{
2730#define RV(v) ((v) & IEEE80211_RATE_VAL)
2731 struct ieee80211com *ic = ni->ni_ic;
2732 uint32_t plcp = 0;
2733 int ridx;
2734
2735 /*
2736 * If it's an MCS rate, let's set the plcp correctly
2737 * and set the relevant flags based on the node config.
2738 */
2739 if (rate & IEEE80211_RATE_MCS) {
2740 /*
2741 * Set the initial PLCP value to be between 0->31 for
2742 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
2743 * flag.
2744 */
2745 plcp = RV(rate) | IWN_RFLAG_MCS;
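		/*
		 * For example, MCS 7 (rate 0x87) yields plcp = 7 | IWN_RFLAG_MCS
		 * before the width, guard interval and antenna flags are ORed in.
		 */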
2746
2747 /*
2748 * XXX the following should only occur if both
2749 * the local configuration _and_ the remote node
2750 * advertise these capabilities. Thus this code
2751 * may need fixing!
2752 */
2753
2754 /*
2755 * Set the channel width and guard interval.
2756 */
2757 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2758 plcp |= IWN_RFLAG_HT40;
2759 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2760 plcp |= IWN_RFLAG_SGI;
2761 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
2762 plcp |= IWN_RFLAG_SGI;
2763 }
2764
2765 /*
2766 * Ensure the selected rate matches the link quality
2767 * table entries being used.
2768 */
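		/*
		 * 0x87 is MCS 7 (the last single-stream rate) and 0x8f is
		 * MCS 15 (the last two-stream rate), so three-stream MCS rates
		 * get the full chainmask, two-stream rates the two-antenna
		 * mask and single-stream rates a single antenna.
		 */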
2769 if (rate > 0x8f)
2770 plcp |= IWN_RFLAG_ANT(sc->txchainmask);
2771 else if (rate > 0x87)
2772 plcp |= IWN_RFLAG_ANT(iwn_get_2stream_tx_antmask(sc));
2773 else
2774 plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
2775 } else {
2776 /*
2777 * Set the initial PLCP - fine for both
2778 * OFDM and CCK rates.
2779 */
2780 plcp = rate2plcp(rate);
2781
2782 /* Set CCK flag if it's CCK */
2783
2784 /* XXX It would be nice to have a method
2785 * to map the ridx -> phy table entry
2786 * so we could just query that, rather than
2787 * this hack to check against IWN_RIDX_OFDM6.
2788 */
2789 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
2790 rate & IEEE80211_RATE_VAL);
2791 if (ridx < IWN_RIDX_OFDM6 &&
2792 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2793 plcp |= IWN_RFLAG_CCK;
2794
2795 /* Set antenna configuration */
2796 /* XXX TODO: is this the right antenna to use for legacy? */
2797 plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
2798 }
2799
2800 DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
2801 __func__,
2802 rate,
2803 plcp);
2804
2805 return (htole32(plcp));
2806#undef RV
2807}
2808
2809static void
2810iwn_newassoc(struct ieee80211_node *ni, int isnew)
2811{
2812 /* Doesn't do anything at the moment */
2813}
2814
2815static int
2816iwn_media_change(struct ifnet *ifp)
2817{
2818 int error;
2819
2820 error = ieee80211_media_change(ifp);
2821 /* NB: only the fixed rate can change and that doesn't need a reset */
2822 return (error == ENETRESET ? 0 : error);
2823}
2824
2825static int
2826iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2827{
2828 struct iwn_vap *ivp = IWN_VAP(vap);
2829 struct ieee80211com *ic = vap->iv_ic;
2830 struct iwn_softc *sc = ic->ic_softc;
2831 int error = 0;
2832
2833 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2834
2835 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2836 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2837
2838 IEEE80211_UNLOCK(ic);
2839 IWN_LOCK(sc);
2840 callout_stop(&sc->calib_to);
2841
2842 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
2843
2844 switch (nstate) {
2845 case IEEE80211_S_ASSOC:
2846 if (vap->iv_state != IEEE80211_S_RUN)
2847 break;
2848 /* FALLTHROUGH */
2849 case IEEE80211_S_AUTH:
2850 if (vap->iv_state == IEEE80211_S_AUTH)
2851 break;
2852
2853 /*
2854 * !AUTH -> AUTH transition requires state reset to handle
2855 * reassociations correctly.
2856 */
2857 sc->rxon->associd = 0;
2858 sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
2859 sc->calib.state = IWN_CALIB_STATE_INIT;
2860
2861 /* Wait until we hear a beacon before we transmit */
2862 sc->sc_beacon_wait = 1;
2863
2864 if ((error = iwn_auth(sc, vap)) != 0) {
2865 device_printf(sc->sc_dev,
2866 "%s: could not move to auth state\n", __func__);
2867 }
2868 break;
2869
2870 case IEEE80211_S_RUN:
2871 /*
2872 		 * RUN -> RUN transition: just restart the timers.
2873 */
2874 if (vap->iv_state == IEEE80211_S_RUN) {
2875 sc->calib_cnt = 0;
2876 break;
2877 }
2878
2879 /* Wait until we hear a beacon before we transmit */
2880 sc->sc_beacon_wait = 1;
2881
2882 /*
2883 * !RUN -> RUN requires setting the association id
2884 * which is done with a firmware cmd. We also defer
2885 * starting the timers until that work is done.
2886 */
2887 if ((error = iwn_run(sc, vap)) != 0) {
2888 device_printf(sc->sc_dev,
2889 "%s: could not move to run state\n", __func__);
2890 }
2891 break;
2892
2893 case IEEE80211_S_INIT:
2894 sc->calib.state = IWN_CALIB_STATE_INIT;
2895 /*
2896 * Purge the xmit queue so we don't have old frames
2897 * during a new association attempt.
2898 */
2899 sc->sc_beacon_wait = 0;
2900 iwn_xmit_queue_drain(sc);
2901 break;
2902
2903 default:
2904 break;
2905 }
2906 IWN_UNLOCK(sc);
2907 IEEE80211_LOCK(ic);
2908 if (error != 0){
2909 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
2910 return error;
2911 }
2912
2913 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
2914
2915 return ivp->iv_newstate(vap, nstate, arg);
2916}
2917
2918static void
2919iwn_calib_timeout(void *arg)
2920{
2921 struct iwn_softc *sc = arg;
2922
2923 IWN_LOCK_ASSERT(sc);
2924
2925 /* Force automatic TX power calibration every 60 secs. */
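	/* (The callout below fires every 500 ms, so 120 ticks == 60 s.) */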
2926 if (++sc->calib_cnt >= 120) {
2927 uint32_t flags = 0;
2928
2929 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2930 "sending request for statistics");
2931 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2932 sizeof flags, 1);
2933 sc->calib_cnt = 0;
2934 }
2935 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2936 sc);
2937}
2938
2939/*
2940 * Process an RX_PHY firmware notification. This is usually immediately
2941 * followed by an MPDU_RX_DONE notification.
2942 */
2943static void
2944iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2945 struct iwn_rx_data *data)
2946{
2947 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2948
2949 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2950 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2951
2952 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
2953 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2954 sc->last_rx_valid = 1;
2955}
2956
2957/*
2958 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2959 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2960 */
2961static void
2962iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2963 struct iwn_rx_data *data)
2964{
2965 struct iwn_ops *ops = &sc->ops;
2966 struct ieee80211com *ic = &sc->sc_ic;
2967 struct iwn_rx_ring *ring = &sc->rxq;
2968 struct ieee80211_frame *wh;
2969 struct ieee80211_node *ni;
2970 struct mbuf *m, *m1;
2971 struct iwn_rx_stat *stat;
2972 caddr_t head;
2973 bus_addr_t paddr;
2974 uint32_t flags;
2975 int error, len, rssi, nf;
2976
2977 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
2978
2979 if (desc->type == IWN_MPDU_RX_DONE) {
2980 /* Check for prior RX_PHY notification. */
2981 if (!sc->last_rx_valid) {
2982 DPRINTF(sc, IWN_DEBUG_ANY,
2983 "%s: missing RX_PHY\n", __func__);
2984 return;
2985 }
2986 stat = &sc->last_rx_stat;
2987 } else
2988 stat = (struct iwn_rx_stat *)(desc + 1);
2989
2990 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2991
2992 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2993 device_printf(sc->sc_dev,
2994 "%s: invalid RX statistic header, len %d\n", __func__,
2995 stat->cfg_phy_len);
2996 return;
2997 }
2998 if (desc->type == IWN_MPDU_RX_DONE) {
2999 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
3000 head = (caddr_t)(mpdu + 1);
3001 len = le16toh(mpdu->len);
3002 } else {
3003 head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
3004 len = le16toh(stat->len);
3005 }
3006
3007 flags = le32toh(*(uint32_t *)(head + len));
3008
3009 /* Discard frames with a bad FCS early. */
3010 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
3011 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
3012 __func__, flags);
3013 counter_u64_add(ic->ic_ierrors, 1);
3014 return;
3015 }
3016 /* Discard frames that are too short. */
3017 if (len < sizeof (struct ieee80211_frame_ack)) {
3018 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
3019 __func__, len);
3020 counter_u64_add(ic->ic_ierrors, 1);
3021 return;
3022 }
3023
3024 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
3025 if (m1 == NULL) {
3026 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
3027 __func__);
3028 counter_u64_add(ic->ic_ierrors, 1);
3029 return;
3030 }
3031 bus_dmamap_unload(ring->data_dmat, data->map);
3032
3033 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
3034 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3035 if (error != 0 && error != EFBIG) {
3036 device_printf(sc->sc_dev,
3037 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
3038 m_freem(m1);
3039
3040 /* Try to reload the old mbuf. */
3041 error = bus_dmamap_load(ring->data_dmat, data->map,
3042 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
3043 &paddr, BUS_DMA_NOWAIT);
3044 if (error != 0 && error != EFBIG) {
3045 panic("%s: could not load old RX mbuf", __func__);
3046 }
3047 /* Physical address may have changed. */
3048 ring->desc[ring->cur] = htole32(paddr >> 8);
3049 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
3050 BUS_DMASYNC_PREWRITE);
3051 counter_u64_add(ic->ic_ierrors, 1);
3052 return;
3053 }
3054
3055 m = data->m;
3056 data->m = m1;
3057 /* Update RX descriptor. */
3058 ring->desc[ring->cur] = htole32(paddr >> 8);
3059 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3060 BUS_DMASYNC_PREWRITE);
3061
3062 /* Finalize mbuf. */
3063 m->m_data = head;
3064 m->m_pkthdr.len = m->m_len = len;
3065
3066 /* Grab a reference to the source node. */
3067 wh = mtod(m, struct ieee80211_frame *);
3068 if (len >= sizeof(struct ieee80211_frame_min))
3069 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3070 else
3071 ni = NULL;
3072 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
3073 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
3074
3075 rssi = ops->get_rssi(sc, stat);
3076
3077 if (ieee80211_radiotap_active(ic)) {
3078 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
3079
3080 tap->wr_flags = 0;
3081 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
3082 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3083 tap->wr_dbm_antsignal = (int8_t)rssi;
3084 tap->wr_dbm_antnoise = (int8_t)nf;
3085 tap->wr_tsft = stat->tstamp;
3086 switch (stat->rate) {
3087 /* CCK rates. */
3088 case 10: tap->wr_rate = 2; break;
3089 case 20: tap->wr_rate = 4; break;
3090 case 55: tap->wr_rate = 11; break;
3091 case 110: tap->wr_rate = 22; break;
3092 /* OFDM rates. */
3093 case 0xd: tap->wr_rate = 12; break;
3094 case 0xf: tap->wr_rate = 18; break;
3095 case 0x5: tap->wr_rate = 24; break;
3096 case 0x7: tap->wr_rate = 36; break;
3097 case 0x9: tap->wr_rate = 48; break;
3098 case 0xb: tap->wr_rate = 72; break;
3099 case 0x1: tap->wr_rate = 96; break;
3100 case 0x3: tap->wr_rate = 108; break;
3101 /* Unknown rate: should not happen. */
3102 default: tap->wr_rate = 0;
3103 }
3104 }
3105
3106 /*
3107 * If it's a beacon and we're waiting, then do the
3108 * wakeup. This should unblock raw_xmit/start.
3109 */
3110 if (sc->sc_beacon_wait) {
3111 uint8_t type, subtype;
3112 /* NB: Re-assign wh */
3113 wh = mtod(m, struct ieee80211_frame *);
3114 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3115 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3116 /*
3117 * This assumes at this point we've received our own
3118 * beacon.
3119 */
3120 DPRINTF(sc, IWN_DEBUG_TRACE,
3121 "%s: beacon_wait, type=%d, subtype=%d\n",
3122 __func__, type, subtype);
3123 if (type == IEEE80211_FC0_TYPE_MGT &&
3124 subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
3125 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
3126 "%s: waking things up\n", __func__);
3127 /* queue taskqueue to transmit! */
3128 taskqueue_enqueue(sc->sc_tq, &sc->sc_xmit_task);
3129 }
3130 }
3131
3132 IWN_UNLOCK(sc);
3133
3134 /* Send the frame to the 802.11 layer. */
3135 if (ni != NULL) {
3136 if (ni->ni_flags & IEEE80211_NODE_HT)
3137 m->m_flags |= M_AMPDU;
3138 (void)ieee80211_input(ni, m, rssi - nf, nf);
3139 /* Node is no longer needed. */
3140 ieee80211_free_node(ni);
3141 } else
3142 (void)ieee80211_input_all(ic, m, rssi - nf, nf);
3143
3144 IWN_LOCK(sc);
3145
3146 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3147
3148}
3149
3150/* Process an incoming Compressed BlockAck. */
3151static void
3152iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3153 struct iwn_rx_data *data)
3154{
3155 struct iwn_ops *ops = &sc->ops;
3156 struct iwn_node *wn;
3157 struct ieee80211_node *ni;
3158 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
3159 struct iwn_tx_ring *txq;
3160 struct iwn_tx_data *txdata;
3161 struct ieee80211_tx_ampdu *tap;
3162 struct mbuf *m;
3163 uint64_t bitmap;
3164 uint16_t ssn;
3165 uint8_t tid;
3166 int ackfailcnt = 0, i, lastidx, qid, *res, shift;
3167 int tx_ok = 0, tx_err = 0;
3168
3169 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s begin\n", __func__);
3170
3171 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3172
3173 qid = le16toh(ba->qid);
3174 txq = &sc->txq[ba->qid];
3175 tap = sc->qid2tap[ba->qid];
3176 tid = tap->txa_tid;
3177 wn = (void *)tap->txa_ni;
3178
3179 res = NULL;
3180 ssn = 0;
3181 if (!IEEE80211_AMPDU_RUNNING(tap)) {
3182 res = tap->txa_private;
3183 ssn = tap->txa_start & 0xfff;
3184 }
3185
3186 for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
3187 txdata = &txq->data[txq->read];
3188
3189 /* Unmap and free mbuf. */
3190 bus_dmamap_sync(txq->data_dmat, txdata->map,
3191 BUS_DMASYNC_POSTWRITE);
3192 bus_dmamap_unload(txq->data_dmat, txdata->map);
3193 m = txdata->m, txdata->m = NULL;
3194 ni = txdata->ni, txdata->ni = NULL;
3195
3196 KASSERT(ni != NULL, ("no node"));
3197 KASSERT(m != NULL, ("no mbuf"));
3198
3199 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
3200 ieee80211_tx_complete(ni, m, 1);
3201
3202 txq->queued--;
3203 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
3204 }
3205
3206 if (txq->queued == 0 && res != NULL) {
3207 iwn_nic_lock(sc);
3208 ops->ampdu_tx_stop(sc, qid, tid, ssn);
3209 iwn_nic_unlock(sc);
3210 sc->qid2tap[qid] = NULL;
3211 free(res, M_DEVBUF);
3212 return;
3213 }
3214
3215 if (wn->agg[tid].bitmap == 0)
3216 return;
3217
3218 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
3219 if (shift < 0)
3220 shift += 0x100;
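	/* Frame indices are sequence numbers modulo 256, hence the wrap. */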
3221
3222 if (wn->agg[tid].nframes > (64 - shift))
3223 return;
3224
3225 /*
3226 * Walk the bitmap and calculate how many successful and failed
3227 * attempts are made.
3228 *
3229 * Yes, the rate control code doesn't know these are A-MPDU
3230 * subframes and that it's okay to fail some of these.
3231 */
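	/*
	 * Example: after masking, a bitmap of 0x0b (binary 1011) reports
	 * subframes 0, 1 and 3 as acknowledged and subframe 2 as failed.
	 */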
3232 ni = tap->txa_ni;
3233 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
3234 for (i = 0; bitmap; i++) {
3235 if ((bitmap & 1) == 0) {
3236 tx_err ++;
3237 ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
3238 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
3239 } else {
3240 tx_ok ++;
3241 ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
3242 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
3243 }
3244 bitmap >>= 1;
3245 }
3246
3247 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
3248 "->%s: end; %d ok; %d err\n",__func__, tx_ok, tx_err);
3249
3250}
3251
3252/*
3253 * Process a CALIBRATION_RESULT notification sent by the initialization
3254  * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
3255 */
3256static void
3257iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3258 struct iwn_rx_data *data)
3259{
3260 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
3261 int len, idx = -1;
3262
3263 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3264
3265 /* Runtime firmware should not send such a notification. */
3266 if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
3267 		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after calib done\n",
3268 __func__);
3269 return;
3270 }
3271 len = (le32toh(desc->len) & 0x3fff) - 4;
3272 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3273
3274 switch (calib->code) {
3275 case IWN5000_PHY_CALIB_DC:
3276 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC)
3277 idx = 0;
3278 break;
3279 case IWN5000_PHY_CALIB_LO:
3280 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO)
3281 idx = 1;
3282 break;
3283 case IWN5000_PHY_CALIB_TX_IQ:
3284 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ)
3285 idx = 2;
3286 break;
3287 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
3288 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC)
3289 idx = 3;
3290 break;
3291 case IWN5000_PHY_CALIB_BASE_BAND:
3292 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND)
3293 idx = 4;
3294 break;
3295 }
3296 if (idx == -1) /* Ignore other results. */
3297 return;
3298
3299 /* Save calibration result. */
3300 if (sc->calibcmd[idx].buf != NULL)
3301 free(sc->calibcmd[idx].buf, M_DEVBUF);
3302 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
3303 if (sc->calibcmd[idx].buf == NULL) {
3304 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
3305 "not enough memory for calibration result %d\n",
3306 calib->code);
3307 return;
3308 }
3309 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
3310 "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len);
3311 sc->calibcmd[idx].len = len;
3312 memcpy(sc->calibcmd[idx].buf, calib, len);
3313}
3314
3315static void
3316iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib,
3317 struct iwn_stats *stats, int len)
3318{
3319 struct iwn_stats_bt *stats_bt;
3320 struct iwn_stats *lstats;
3321
3322 /*
3323 	 * First, check whether the length is the bluetooth format or the normal one.
3324 *
3325 	 * If it's the normal format, just copy it and return.
3326 * Otherwise we have to convert things.
3327 */
3328
3329 if (len == sizeof(struct iwn_stats) + 4) {
3330 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
3331 sc->last_stat_valid = 1;
3332 return;
3333 }
3334
3335 /*
3336 	 * If it's not the bluetooth size either, log the mismatch and just copy.
3337 */
3338 if (len != sizeof(struct iwn_stats_bt) + 4) {
3339 DPRINTF(sc, IWN_DEBUG_STATS,
3340 "%s: size of rx statistics (%d) not an expected size!\n",
3341 __func__,
3342 len);
3343 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
3344 sc->last_stat_valid = 1;
3345 return;
3346 }
3347
3348 /*
3349 * Ok. Time to copy.
3350 */
3351 stats_bt = (struct iwn_stats_bt *) stats;
3352 lstats = &sc->last_stat;
3353
3354 /* flags */
3355 lstats->flags = stats_bt->flags;
3356 /* rx_bt */
3357 memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm,
3358 sizeof(struct iwn_rx_phy_stats));
3359 memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck,
3360 sizeof(struct iwn_rx_phy_stats));
3361 memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common,
3362 sizeof(struct iwn_rx_general_stats));
3363 memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht,
3364 sizeof(struct iwn_rx_ht_phy_stats));
3365 /* tx */
3366 memcpy(&lstats->tx, &stats_bt->tx,
3367 sizeof(struct iwn_tx_stats));
3368 /* general */
3369 memcpy(&lstats->general, &stats_bt->general,
3370 sizeof(struct iwn_general_stats));
3371
3372 /* XXX TODO: Squirrel away the extra bluetooth stats somewhere */
3373 sc->last_stat_valid = 1;
3374}
3375
3376/*
3377 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
3378 * The latter is sent by the firmware after each received beacon.
3379 */
3380static void
3381iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3382 struct iwn_rx_data *data)
3383{
3384 struct iwn_ops *ops = &sc->ops;
3385 struct ieee80211com *ic = &sc->sc_ic;
3386 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3387 struct iwn_calib_state *calib = &sc->calib;
3388 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
3389 struct iwn_stats *lstats;
3390 int temp;
3391
3392 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3393
3394 /* Ignore statistics received during a scan. */
3395 if (vap->iv_state != IEEE80211_S_RUN ||
3396 (ic->ic_flags & IEEE80211_F_SCAN)){
3397 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
3398 __func__);
3399 return;
3400 }
3401
3402 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3403
3404 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS,
3405 "%s: received statistics, cmd %d, len %d\n",
3406 __func__, desc->type, le16toh(desc->len));
3407 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
3408
3409 /*
3410 * Collect/track general statistics for reporting.
3411 *
3412 	 * This ensures that the bluetooth-sized message is correctly
3413 	 * converted to the legacy-sized message.
3414 */
3415 iwn_stats_update(sc, calib, stats, le16toh(desc->len));
3416
3417 /*
3418 	 * Now grab a pointer to the converted statistics for use below.
3419 */
3420 lstats = &sc->last_stat;
3421
3422 /* Test if temperature has changed. */
3423 if (lstats->general.temp != sc->rawtemp) {
3424 /* Convert "raw" temperature to degC. */
3425 sc->rawtemp = stats->general.temp;
3426 temp = ops->get_temperature(sc);
3427 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
3428 __func__, temp);
3429
3430 /* Update TX power if need be (4965AGN only). */
3431 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3432 iwn4965_power_calibration(sc, temp);
3433 }
3434
3435 if (desc->type != IWN_BEACON_STATISTICS)
3436 return; /* Reply to a statistics request. */
3437
3438 sc->noise = iwn_get_noise(&lstats->rx.general);
3439 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
3440
3441 /* Test that RSSI and noise are present in stats report. */
3442 if (le32toh(lstats->rx.general.flags) != 1) {
3443 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
3444 "received statistics without RSSI");
3445 return;
3446 }
3447
3448 if (calib->state == IWN_CALIB_STATE_ASSOC)
3449 iwn_collect_noise(sc, &lstats->rx.general);
3450 else if (calib->state == IWN_CALIB_STATE_RUN) {
3451 iwn_tune_sensitivity(sc, &lstats->rx);
3452 /*
3453 * XXX TODO: Only run the RX recovery if we're associated!
3454 */
3455 iwn_check_rx_recovery(sc, lstats);
3456 iwn_save_stats_counters(sc, lstats);
3457 }
3458
3459 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3460}
3461
3462/*
3463 * Save the relevant statistic counters for the next calibration
3464 * pass.
3465 */
3466static void
3467iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs)
3468{
3469 struct iwn_calib_state *calib = &sc->calib;
3470
3471 /* Save counters values for next call. */
3472 calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp);
3473 calib->fa_cck = le32toh(rs->rx.cck.fa);
3474 calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp);
3475 calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp);
3476 calib->fa_ofdm = le32toh(rs->rx.ofdm.fa);
3477
3478 /* Last time we received these tick values */
3479 sc->last_calib_ticks = ticks;
3480}
3481
3482/*
3483 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
3484  * and 5000 adapters have different, incompatible TX status formats.
3485 */
3486static void
3487iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3488 struct iwn_rx_data *data)
3489{
3490 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
3491 struct iwn_tx_ring *ring;
3492 int qid;
3493
3494 qid = desc->qid & 0xf;
3495 ring = &sc->txq[qid];
3496
3497 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
3498 "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
3499 __func__, desc->qid, desc->idx,
3500 stat->rtsfailcnt,
3501 stat->ackfailcnt,
3502 stat->btkillcnt,
3503 stat->rate, le16toh(stat->duration),
3504 le32toh(stat->status));
3505
3506 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3507 if (qid >= sc->firstaggqueue) {
3508 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
3509 stat->ackfailcnt, &stat->status);
3510 } else {
3511 iwn_tx_done(sc, desc, stat->ackfailcnt,
3512 le32toh(stat->status) & 0xff);
3513 }
3514}
3515
3516static void
3517iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
3518 struct iwn_rx_data *data)
3519{
3520 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
3521 struct iwn_tx_ring *ring;
3522 int qid;
3523
3524 qid = desc->qid & 0xf;
3525 ring = &sc->txq[qid];
3526
3527 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
3528 "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
3529 __func__, desc->qid, desc->idx,
3530 stat->rtsfailcnt,
3531 stat->ackfailcnt,
3532 stat->btkillcnt,
3533 stat->rate, le16toh(stat->duration),
3534 le32toh(stat->status));
3535
3536#ifdef notyet
3537 /* Reset TX scheduler slot. */
3538 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
3539#endif
3540
3541 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3542 if (qid >= sc->firstaggqueue) {
3543 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
3544 stat->ackfailcnt, &stat->status);
3545 } else {
3546 iwn_tx_done(sc, desc, stat->ackfailcnt,
3547 le16toh(stat->status) & 0xff);
3548 }
3549}
3550
3551/*
3552 * Adapter-independent backend for TX_DONE firmware notifications.
3553 */
3554static void
3555iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
3556 uint8_t status)
3557{
3558 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
3559 struct iwn_tx_data *data = &ring->data[desc->idx];
3560 struct mbuf *m;
3561 struct ieee80211_node *ni;
3562 struct ieee80211vap *vap;
3563
3564 KASSERT(data->ni != NULL, ("no node"));
3565
3566 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3567
3568 /* Unmap and free mbuf. */
3569 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
3570 bus_dmamap_unload(ring->data_dmat, data->map);
3571 m = data->m, data->m = NULL;
3572 ni = data->ni, data->ni = NULL;
3573 vap = ni->ni_vap;
3574
3575 /*
3576 * Update rate control statistics for the node.
3577 */
3578 if (status & IWN_TX_FAIL)
3579 ieee80211_ratectl_tx_complete(vap, ni,
3580 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
3581 else
3582 ieee80211_ratectl_tx_complete(vap, ni,
3583 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
3584
3585 /*
3586 * Channels marked for "radar" require traffic to be received
3587 * to unlock before we can transmit. Until traffic is seen
3588 * any attempt to transmit is returned immediately with status
3589 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily
3590 * happen on first authenticate after scanning. To workaround
3591 * this we ignore a failure of this sort in AUTH state so the
3592 * 802.11 layer will fall back to using a timeout to wait for
3593 * the AUTH reply. This allows the firmware time to see
3594 * traffic so a subsequent retry of AUTH succeeds. It's
3595 * unclear why the firmware does not maintain state for
3596 * channels recently visited as this would allow immediate
3597 * use of the channel after a scan (where we see traffic).
3598 */
3599 if (status == IWN_TX_FAIL_TX_LOCKED &&
3600 ni->ni_vap->iv_state == IEEE80211_S_AUTH)
3601 ieee80211_tx_complete(ni, m, 0);
3602 else
3603 ieee80211_tx_complete(ni, m,
3604 (status & IWN_TX_FAIL) != 0);
3605
3606 sc->sc_tx_timer = 0;
3607 if (--ring->queued < IWN_TX_RING_LOMARK) {
3608 sc->qfullmsk &= ~(1 << ring->qid);
3609 if (sc->qfullmsk == 0)
3610 iwn_start_locked(sc);
3611 }
3612
3613 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3614
3615}
3616
3617/*
3618  * Process a "command done" firmware notification. This is where we wake up
3619 * processes waiting for a synchronous command completion.
3620 */
3621static void
3622iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
3623{
3624 struct iwn_tx_ring *ring;
3625 struct iwn_tx_data *data;
3626 int cmd_queue_num;
3627
3628 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
3629 cmd_queue_num = IWN_PAN_CMD_QUEUE;
3630 else
3631 cmd_queue_num = IWN_CMD_QUEUE_NUM;
3632
3633 if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num)
3634 return; /* Not a command ack. */
3635
3636 ring = &sc->txq[cmd_queue_num];
3637 data = &ring->data[desc->idx];
3638
3639 /* If the command was mapped in an mbuf, free it. */
3640 if (data->m != NULL) {
3641 bus_dmamap_sync(ring->data_dmat, data->map,
3642 BUS_DMASYNC_POSTWRITE);
3643 bus_dmamap_unload(ring->data_dmat, data->map);
3644 m_freem(data->m);
3645 data->m = NULL;
3646 }
3647 wakeup(&ring->desc[desc->idx]);
3648}
3649
3650static void
3651iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
3652 int ackfailcnt, void *stat)
3653{
3654 struct iwn_ops *ops = &sc->ops;
3655 struct iwn_tx_ring *ring = &sc->txq[qid];
3656 struct iwn_tx_data *data;
3657 struct mbuf *m;
3658 struct iwn_node *wn;
3659 struct ieee80211_node *ni;
3660 struct ieee80211_tx_ampdu *tap;
3661 uint64_t bitmap;
3662 uint32_t *status = stat;
3663 uint16_t *aggstatus = stat;
3664 uint16_t ssn;
3665 uint8_t tid;
3666 int bit, i, lastidx, *res, seqno, shift, start;
3667
3668 /* XXX TODO: status is le16 field! Grr */
3669
3670 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
3671 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: nframes=%d, status=0x%08x\n",
3672 __func__,
3673 nframes,
3674 *status);
3675
3676 tap = sc->qid2tap[qid];
3677 tid = tap->txa_tid;
3678 wn = (void *)tap->txa_ni;
3679 ni = tap->txa_ni;
3680
3681 /*
3682 * XXX TODO: ACK and RTS failures would be nice here!
3683 */
3684
3685 /*
3686 * A-MPDU single frame status - if we failed to transmit it
3687 * in A-MPDU, then it may be a permanent failure.
3688 *
3689 * XXX TODO: check what the Linux iwlwifi driver does here;
3690 * there's some permanent and temporary failures that may be
3691 * handled differently.
3692 */
3693 if (nframes == 1) {
3694 if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
3695#ifdef NOT_YET
3696 printf("ieee80211_send_bar()\n");
3697#endif
3698 /*
3699 * If we completely fail a transmit, make sure a
3700 * notification is pushed up to the rate control
3701 * layer.
3702 */
3703 ieee80211_ratectl_tx_complete(ni->ni_vap,
3704 ni,
3705 IEEE80211_RATECTL_TX_FAILURE,
3706 &ackfailcnt,
3707 NULL);
3708 } else {
3709 /*
3710 * If nframes=1, then we won't be getting a BA for
3711 * this frame. Ensure that we correctly update the
3712 * rate control code with how many retries were
3713 * needed to send it.
3714 */
3715 ieee80211_ratectl_tx_complete(ni->ni_vap,
3716 ni,
3717 IEEE80211_RATECTL_TX_SUCCESS,
3718 &ackfailcnt,
3719 NULL);
3720 }
3721 }
3722
3723 bitmap = 0;
3724 start = idx;
3725 for (i = 0; i < nframes; i++) {
3726 if (le16toh(aggstatus[i * 2]) & 0xc)
3727 continue;
3728
3729 idx = le16toh(aggstatus[2*i + 1]) & 0xff;
3730 bit = idx - start;
3731 shift = 0;
3732 if (bit >= 64) {
3733 shift = 0x100 - idx + start;
3734 bit = 0;
3735 start = idx;
3736 } else if (bit <= -64)
3737 bit = 0x100 - start + idx;
3738 else if (bit < 0) {
3739 shift = start - idx;
3740 start = idx;
3741 bit = 0;
3742 }
3743 bitmap = bitmap << shift;
3744 bitmap |= 1ULL << bit;
3745 }
3746 tap = sc->qid2tap[qid];
3747 tid = tap->txa_tid;
3748 wn = (void *)tap->txa_ni;
3749 wn->agg[tid].bitmap = bitmap;
3750 wn->agg[tid].startidx = start;
3751 wn->agg[tid].nframes = nframes;
3752
3753 res = NULL;
3754 ssn = 0;
3755 if (!IEEE80211_AMPDU_RUNNING(tap)) {
3756 res = tap->txa_private;
3757 ssn = tap->txa_start & 0xfff;
3758 }
3759
3760 /* This is going nframes DWORDS into the descriptor? */
3761 seqno = le32toh(*(status + nframes)) & 0xfff;
3762 for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
3763 data = &ring->data[ring->read];
3764
3765 /* Unmap and free mbuf. */
3766 bus_dmamap_sync(ring->data_dmat, data->map,
3767 BUS_DMASYNC_POSTWRITE);
3768 bus_dmamap_unload(ring->data_dmat, data->map);
3769 m = data->m, data->m = NULL;
3770 ni = data->ni, data->ni = NULL;
3771
3772 KASSERT(ni != NULL, ("no node"));
3773 KASSERT(m != NULL, ("no mbuf"));
3774 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
3775 ieee80211_tx_complete(ni, m, 1);
3776
3777 ring->queued--;
3778 ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
3779 }
3780
3781 if (ring->queued == 0 && res != NULL) {
3782 iwn_nic_lock(sc);
3783 ops->ampdu_tx_stop(sc, qid, tid, ssn);
3784 iwn_nic_unlock(sc);
3785 sc->qid2tap[qid] = NULL;
3786 free(res, M_DEVBUF);
3787 return;
3788 }
3789
3790 sc->sc_tx_timer = 0;
3791 if (ring->queued < IWN_TX_RING_LOMARK) {
3792 sc->qfullmsk &= ~(1 << ring->qid);
3793 if (sc->qfullmsk == 0)
3794 iwn_start_locked(sc);
3795 }
3796
3797 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
3798
3799}
3800
3801/*
3802 * Process an INT_FH_RX or INT_SW_RX interrupt.
3803 */
3804static void
3805iwn_notif_intr(struct iwn_softc *sc)
3806{
3807 struct iwn_ops *ops = &sc->ops;
3808 struct ieee80211com *ic = &sc->sc_ic;
3809 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3810 uint16_t hw;
3811
3812 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
3813 BUS_DMASYNC_POSTREAD);
3814
3815 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
3816 while (sc->rxq.cur != hw) {
3817 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
3818 struct iwn_rx_desc *desc;
3819
3820 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3821 BUS_DMASYNC_POSTREAD);
3822 desc = mtod(data->m, struct iwn_rx_desc *);
3823
3824 DPRINTF(sc, IWN_DEBUG_RECV,
3825 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
3826 __func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags,
3827 desc->type, iwn_intr_str(desc->type),
3828 le16toh(desc->len));
3829
3830 if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */
3831 iwn_cmd_done(sc, desc);
3832
3833 switch (desc->type) {
3834 case IWN_RX_PHY:
3835 iwn_rx_phy(sc, desc, data);
3836 break;
3837
3838 case IWN_RX_DONE: /* 4965AGN only. */
3839 case IWN_MPDU_RX_DONE:
3840 /* An 802.11 frame has been received. */
3841 iwn_rx_done(sc, desc, data);
3842 break;
3843
3844 case IWN_RX_COMPRESSED_BA:
3845 /* A Compressed BlockAck has been received. */
3846 iwn_rx_compressed_ba(sc, desc, data);
3847 break;
3848
3849 case IWN_TX_DONE:
3850 /* An 802.11 frame has been transmitted. */
3851 ops->tx_done(sc, desc, data);
3852 break;
3853
3854 case IWN_RX_STATISTICS:
3855 case IWN_BEACON_STATISTICS:
3856 iwn_rx_statistics(sc, desc, data);
3857 break;
3858
3859 case IWN_BEACON_MISSED:
3860 {
3861 struct iwn_beacon_missed *miss =
3862 (struct iwn_beacon_missed *)(desc + 1);
3863 int misses;
3864
3865 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3866 BUS_DMASYNC_POSTREAD);
3867 misses = le32toh(miss->consecutive);
3868
3869 DPRINTF(sc, IWN_DEBUG_STATE,
3870 "%s: beacons missed %d/%d\n", __func__,
3871 misses, le32toh(miss->total));
3872 /*
3873 * If more than 5 consecutive beacons are missed,
3874 * reinitialize the sensitivity state machine.
3875 */
3876 if (vap->iv_state == IEEE80211_S_RUN &&
3877 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
3878 if (misses > 5)
3879 (void)iwn_init_sensitivity(sc);
3880 if (misses >= vap->iv_bmissthreshold) {
3881 IWN_UNLOCK(sc);
3882 ieee80211_beacon_miss(ic);
3883 IWN_LOCK(sc);
3884 }
3885 }
3886 break;
3887 }
3888 case IWN_UC_READY:
3889 {
3890 struct iwn_ucode_info *uc =
3891 (struct iwn_ucode_info *)(desc + 1);
3892
3893 /* The microcontroller is ready. */
3894 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3895 BUS_DMASYNC_POSTREAD);
3896 DPRINTF(sc, IWN_DEBUG_RESET,
3897 "microcode alive notification version=%d.%d "
3898 "subtype=%x alive=%x\n", uc->major, uc->minor,
3899 uc->subtype, le32toh(uc->valid));
3900
3901 if (le32toh(uc->valid) != 1) {
3902 device_printf(sc->sc_dev,
3903 "microcontroller initialization failed");
3904 break;
3905 }
3906 if (uc->subtype == IWN_UCODE_INIT) {
3907 /* Save microcontroller report. */
3908 memcpy(&sc->ucode_info, uc, sizeof (*uc));
3909 }
3910 /* Save the address of the error log in SRAM. */
3911 sc->errptr = le32toh(uc->errptr);
3912 break;
3913 }
3914 case IWN_STATE_CHANGED:
3915 {
3916 /*
3917 * State change allows hardware switch change to be
3918 * noted. However, we handle this in iwn_intr as we
3919 			 * get both the enable/disable intr.
3920 */
3921 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3922 BUS_DMASYNC_POSTREAD);
3923#ifdef IWN_DEBUG
3924 uint32_t *status = (uint32_t *)(desc + 1);
3925 DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE,
3926 "state changed to %x\n",
3927 le32toh(*status));
3928#endif
3929 break;
3930 }
3931 case IWN_START_SCAN:
3932 {
3933 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3934 BUS_DMASYNC_POSTREAD);
3935#ifdef IWN_DEBUG
3936 struct iwn_start_scan *scan =
3937 (struct iwn_start_scan *)(desc + 1);
3938 DPRINTF(sc, IWN_DEBUG_ANY,
3939 "%s: scanning channel %d status %x\n",
3940 __func__, scan->chan, le32toh(scan->status));
3941#endif
3942 break;
3943 }
3944 case IWN_STOP_SCAN:
3945 {
3946 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3947 BUS_DMASYNC_POSTREAD);
3948#ifdef IWN_DEBUG
3949 struct iwn_stop_scan *scan =
3950 (struct iwn_stop_scan *)(desc + 1);
3951 DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN,
3952 "scan finished nchan=%d status=%d chan=%d\n",
3953 scan->nchan, scan->status, scan->chan);
3954#endif
3955 sc->sc_is_scanning = 0;
3956 IWN_UNLOCK(sc);
3957 ieee80211_scan_next(vap);
3958 IWN_LOCK(sc);
3959 break;
3960 }
3961 case IWN5000_CALIBRATION_RESULT:
3962 iwn5000_rx_calib_results(sc, desc, data);
3963 break;
3964
3965 case IWN5000_CALIBRATION_DONE:
3966 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3967 wakeup(sc);
3968 break;
3969 }
3970
3971 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3972 }
3973
3974 /* Tell the firmware what we have processed. */
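	/* (The & ~7 keeps the write pointer a multiple of 8, which the hardware expects.) */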
3975 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3976 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3977}
3978
3979/*
3980 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3981 * from power-down sleep mode.
3982 */
3983static void
3984iwn_wakeup_intr(struct iwn_softc *sc)
3985{
3986 int qid;
3987
3988 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3989 __func__);
3990
3991 /* Wakeup RX and TX rings. */
3992 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3993 for (qid = 0; qid < sc->ntxqs; qid++) {
3994 struct iwn_tx_ring *ring = &sc->txq[qid];
3995 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3996 }
3997}
3998
3999static void
4000iwn_rftoggle_intr(struct iwn_softc *sc)
4001{
4002 struct ieee80211com *ic = &sc->sc_ic;
4003 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
4004
4005 IWN_LOCK_ASSERT(sc);
4006
4007 device_printf(sc->sc_dev, "RF switch: radio %s\n",
4008 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
4009 if (tmp & IWN_GP_CNTRL_RFKILL)
4010 ieee80211_runtask(ic, &sc->sc_radioon_task);
4011 else
4012 ieee80211_runtask(ic, &sc->sc_radiooff_task);
4013}
4014
4015/*
4016 * Dump the error log of the firmware when a firmware panic occurs. Although
4017 * we can't debug the firmware because it is neither open source nor free, it
4018 * can help us to identify certain classes of problems.
4019 */
4020static void
4021iwn_fatal_intr(struct iwn_softc *sc)
4022{
4023 struct iwn_fw_dump dump;
4024 int i;
4025
4026 IWN_LOCK_ASSERT(sc);
4027
4028 /* Force a complete recalibration on next init. */
4029 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
4030
4031 /* Check that the error log address is valid. */
4032 if (sc->errptr < IWN_FW_DATA_BASE ||
4033 sc->errptr + sizeof (dump) >
4034 IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
4035 printf("%s: bad firmware error log address 0x%08x\n", __func__,
4036 sc->errptr);
4037 return;
4038 }
4039 if (iwn_nic_lock(sc) != 0) {
4040 printf("%s: could not read firmware error log\n", __func__);
4041 return;
4042 }
4043 /* Read firmware error log from SRAM. */
4044 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
4045 sizeof (dump) / sizeof (uint32_t));
4046 iwn_nic_unlock(sc);
4047
4048 if (dump.valid == 0) {
4049 printf("%s: firmware error log is empty\n", __func__);
4050 return;
4051 }
4052 printf("firmware error log:\n");
4053 printf(" error type = \"%s\" (0x%08X)\n",
4054 (dump.id < nitems(iwn_fw_errmsg)) ?
4055 iwn_fw_errmsg[dump.id] : "UNKNOWN",
4056 dump.id);
4057 printf(" program counter = 0x%08X\n", dump.pc);
4058 printf(" source line = 0x%08X\n", dump.src_line);
4059 printf(" error data = 0x%08X%08X\n",
4060 dump.error_data[0], dump.error_data[1]);
4061 printf(" branch link = 0x%08X%08X\n",
4062 dump.branch_link[0], dump.branch_link[1]);
4063 printf(" interrupt link = 0x%08X%08X\n",
4064 dump.interrupt_link[0], dump.interrupt_link[1]);
4065 printf(" time = %u\n", dump.time[0]);
4066
4067 /* Dump driver status (TX and RX rings) while we're here. */
4068 printf("driver status:\n");
4069 for (i = 0; i < sc->ntxqs; i++) {
4070 struct iwn_tx_ring *ring = &sc->txq[i];
4071 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
4072 i, ring->qid, ring->cur, ring->queued);
4073 }
4074 printf(" rx ring: cur=%d\n", sc->rxq.cur);
4075}
4076
4077static void
4078iwn_intr(void *arg)
4079{
4080 struct iwn_softc *sc = arg;
4081 uint32_t r1, r2, tmp;
4082
4083 IWN_LOCK(sc);
4084
4085 /* Disable interrupts. */
4086 IWN_WRITE(sc, IWN_INT_MASK, 0);
4087
4088 /* Read interrupts from ICT (fast) or from registers (slow). */
4089 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
4090 tmp = 0;
4091 while (sc->ict[sc->ict_cur] != 0) {
4092 tmp |= sc->ict[sc->ict_cur];
4093 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
4094 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
4095 }
4096 tmp = le32toh(tmp);
4097 if (tmp == 0xffffffff) /* Shouldn't happen. */
4098 tmp = 0;
3099 		else if (tmp & 0xc0000)	/* Work around a HW bug. */
4100 tmp |= 0x8000;
4101 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
4102 r2 = 0; /* Unused. */
4103 } else {
4104 r1 = IWN_READ(sc, IWN_INT);
4105 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
4106 IWN_UNLOCK(sc);
4107 return; /* Hardware gone! */
4108 }
4109 r2 = IWN_READ(sc, IWN_FH_INT);
4110 }
4111
4112 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n"
4113 , r1, r2);
4114
4115 if (r1 == 0 && r2 == 0)
4116 goto done; /* Interrupt not for us. */
4117
4118 /* Acknowledge interrupts. */
4119 IWN_WRITE(sc, IWN_INT, r1);
4120 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
4121 IWN_WRITE(sc, IWN_FH_INT, r2);
4122
4123 if (r1 & IWN_INT_RF_TOGGLED) {
4124 iwn_rftoggle_intr(sc);
4125 goto done;
4126 }
4127 if (r1 & IWN_INT_CT_REACHED) {
4128 device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
4129 __func__);
4130 }
4131 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
4132 device_printf(sc->sc_dev, "%s: fatal firmware error\n",
4133 __func__);
4134#ifdef IWN_DEBUG
4135 iwn_debug_register(sc);
4136#endif
4137 /* Dump firmware error log and stop. */
4138 iwn_fatal_intr(sc);
4139
4140 taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task);
4141 goto done;
4142 }
4143 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
4144 (r2 & IWN_FH_INT_RX)) {
4145 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
4146 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
4147 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
4148 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
4149 IWN_INT_PERIODIC_DIS);
4150 iwn_notif_intr(sc);
4151 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
4152 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
4153 IWN_INT_PERIODIC_ENA);
4154 }
4155 } else
4156 iwn_notif_intr(sc);
4157 }
4158
4159 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
4160 if (sc->sc_flags & IWN_FLAG_USE_ICT)
4161 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
4162 wakeup(sc); /* FH DMA transfer completed. */
4163 }
4164
4165 if (r1 & IWN_INT_ALIVE)
4166 wakeup(sc); /* Firmware is alive. */
4167
4168 if (r1 & IWN_INT_WAKEUP)
4169 iwn_wakeup_intr(sc);
4170
4171done:
4172 /* Re-enable interrupts. */
4173 if (sc->sc_flags & IWN_FLAG_RUNNING)
4174 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
4175
4176 IWN_UNLOCK(sc);
4177}
4178
4179/*
4180 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
4181 * 5000 adapters use a slightly different format).
4182 */
4183static void
4184iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
4185 uint16_t len)
4186{
4187 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
4188
4189 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4190
4191 *w = htole16(len + 8);
4192 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4193 BUS_DMASYNC_PREWRITE);
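	/*
	 * The first IWN_SCHED_WINSZ entries are mirrored past the end of the
	 * ring, presumably so the scheduler can read a full window without
	 * having to wrap.
	 */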
4194 if (idx < IWN_SCHED_WINSZ) {
4195 *(w + IWN_TX_RING_COUNT) = *w;
4196 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4197 BUS_DMASYNC_PREWRITE);
4198 }
4199}
4200
4201static void
4202iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
4203 uint16_t len)
4204{
4205 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
4206
4207 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4208
4209 *w = htole16(id << 12 | (len + 8));
4210 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4211 BUS_DMASYNC_PREWRITE);
4212 if (idx < IWN_SCHED_WINSZ) {
4213 *(w + IWN_TX_RING_COUNT) = *w;
4214 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4215 BUS_DMASYNC_PREWRITE);
4216 }
4217}
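/*
 * NB: the two update_sched() variants above differ only in the byte-count
 * entry format: the 4965 stores just the frame length plus the 8-byte
 * overhead, while the 5000 series also packs the station id into the top
 * four bits (id << 12).  Both mirror entries with idx below IWN_SCHED_WINSZ
 * past the end of the ring, presumably so the scheduler sees a contiguous
 * window across the wrap-around.
 */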
4218
4219#ifdef notyet
4220static void
4221iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
4222{
4223 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
4224
4225 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
4226
4227 *w = (*w & htole16(0xf000)) | htole16(1);
4228 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4229 BUS_DMASYNC_PREWRITE);
4230 if (idx < IWN_SCHED_WINSZ) {
4231 *(w + IWN_TX_RING_COUNT) = *w;
4232 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
4233 BUS_DMASYNC_PREWRITE);
4234 }
4235}
4236#endif
4237
4238/*
4239 * Check whether OFDM 11g protection will be enabled for the given rate.
4240 *
4241 * The original driver code only enabled protection for OFDM rates.
4242 * It didn't check to see whether it was operating in 11a or 11bg mode.
4243 */
4244static int
4245iwn_check_rate_needs_protection(struct iwn_softc *sc,
4246 struct ieee80211vap *vap, uint8_t rate)
4247{
4248 struct ieee80211com *ic = vap->iv_ic;
4249
4250 /*
4251 * Not in 2GHz mode? Then there's no need to enable OFDM
4252 * 11bg protection.
4253 */
4254 if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
4255 return (0);
4256 }
4257
4258 /*
4259 * 11bg protection not enabled? Then don't use it.
4260 */
4261 if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
4262 return (0);
4263
4264 /*
4265 * If it's an 11n rate - no protection.
4266 * We'll do it via a specific 11n check.
4267 */
4268 if (rate & IEEE80211_RATE_MCS) {
4269 return (0);
4270 }
4271
4272 /*
4273 * Do a rate table lookup. If the PHY is CCK,
4274 * don't do protection.
4275 */
4276 if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
4277 return (0);
4278
4279 /*
4280 * Yup, enable protection.
4281 */
4282 return (1);
4283}
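/*
 * Example: with IEEE80211_F_USEPROT set on a 2GHz channel, a legacy OFDM
 * rate (say 24 Mb/s) returns 1, whereas a CCK rate (say 11 Mb/s), any MCS
 * rate, or any rate on a non-2GHz channel returns 0.
 */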
4284
4285/*
4286 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into
4287 * the link quality table that reflects this particular entry.
4288 */
4289static int
4290iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
4291 uint8_t rate)
4292{
4293 struct ieee80211_rateset *rs;
4294 int is_11n;
4295 int nr;
4296 int i;
4297 uint8_t cmp_rate;
4298
4299 /*
4300 * Figure out if we're using 11n or not here.
4301 */
4302 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
4303 is_11n = 1;
4304 else
4305 is_11n = 0;
4306
4307 /*
4308 * Use the correct rate table.
4309 */
4310 if (is_11n) {
4311 rs = (struct ieee80211_rateset *) &ni->ni_htrates;
4312 nr = ni->ni_htrates.rs_nrates;
4313 } else {
4314 rs = &ni->ni_rates;
4315 nr = rs->rs_nrates;
4316 }
4317
4318 /*
4319 * Find the relevant link quality entry in the table.
4320 */
4321 for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
4322 /*
4323 * The link quality table index starts at 0 == highest
4324 * rate, so we walk the rate table backwards.
4325 */
4326 cmp_rate = rs->rs_rates[(nr - 1) - i];
4327 if (rate & IEEE80211_RATE_MCS)
4328 cmp_rate |= IEEE80211_RATE_MCS;
4329
4330#if 0
4331 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
4332 __func__,
4333 i,
4334 nr,
4335 rate,
4336 cmp_rate);
4337#endif
4338
4339 if (cmp_rate == rate)
4340 return (i);
4341 }
4342
4343 /* Failed? Start at the end */
4344 return (IWN_MAX_TX_RETRIES - 1);
4345}
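/*
 * For example, for a legacy node advertising 8 rates the highest entry in
 * rs_rates maps to offset 0, the next lower one to offset 1, and so on;
 * a rate that is not found at all ends up in the last retry slot
 * (IWN_MAX_TX_RETRIES - 1).
 */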
4346
4347static int
4348iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
4349{
4350 struct iwn_ops *ops = &sc->ops;
4351 const struct ieee80211_txparam *tp;
4352 struct ieee80211vap *vap = ni->ni_vap;
4353 struct ieee80211com *ic = ni->ni_ic;
4354 struct iwn_node *wn = (void *)ni;
4355 struct iwn_tx_ring *ring;
4356 struct iwn_tx_desc *desc;
4357 struct iwn_tx_data *data;
4358 struct iwn_tx_cmd *cmd;
4359 struct iwn_cmd_data *tx;
4360 struct ieee80211_frame *wh;
4361 struct ieee80211_key *k = NULL;
4362 struct mbuf *m1;
4363 uint32_t flags;
4364 uint16_t qos;
4365 u_int hdrlen;
4366 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
4367 uint8_t tid, type;
4368 int ac, i, totlen, error, pad, nsegs = 0, rate;
4369
4370 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4371
4372 IWN_LOCK_ASSERT(sc);
4373
4374 wh = mtod(m, struct ieee80211_frame *);
4375 hdrlen = ieee80211_anyhdrsize(wh);
4376 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4377
4378 /* Select EDCA Access Category and TX ring for this frame. */
4379 if (IEEE80211_QOS_HAS_SEQ(wh)) {
4380 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
4381 tid = qos & IEEE80211_QOS_TID;
4382 } else {
4383 qos = 0;
4384 tid = 0;
4385 }
4386 ac = M_WME_GETAC(m);
4387 if (m->m_flags & M_AMPDU_MPDU) {
4388 uint16_t seqno;
4389 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
4390
4391 if (!IEEE80211_AMPDU_RUNNING(tap)) {
4392 m_freem(m);
4393 return EINVAL;
4394 }
4395
4396 /*
4397 * Queue this frame to the hardware ring that we've
4398 * negotiated AMPDU TX on.
4399 *
4400 * Note that the sequence number must match the TX slot
4401 * being used!
4402 */
4403 ac = *(int *)tap->txa_private;
4404 seqno = ni->ni_txseqs[tid];
4405 *(uint16_t *)wh->i_seq =
4406 htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
4407 ring = &sc->txq[ac];
4408 if ((seqno % 256) != ring->cur) {
4409 device_printf(sc->sc_dev,
4410 "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
4411 __func__,
4412 m,
4413 seqno,
4414 seqno % 256,
4415 ring->cur);
4416 }
4417 ni->ni_txseqs[tid]++;
4418 }
4419 ring = &sc->txq[ac];
4420 desc = &ring->desc[ring->cur];
4421 data = &ring->data[ring->cur];
4422
4423 /* Choose a TX rate index. */
4424 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
4425 if (type == IEEE80211_FC0_TYPE_MGT)
4426 rate = tp->mgmtrate;
4427 else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4428 rate = tp->mcastrate;
4429 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
4430 rate = tp->ucastrate;
4431 else if (m->m_flags & M_EAPOL)
4432 rate = tp->mgmtrate;
4433 else {
4434 /* XXX pass pktlen */
4435 (void) ieee80211_ratectl_rate(ni, NULL, 0);
4436 rate = ni->ni_txrate;
4437 }
4438
4439 /* Encrypt the frame if need be. */
4440 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
4441 /* Retrieve key for TX. */
4442 k = ieee80211_crypto_encap(ni, m);
4443 if (k == NULL) {
4444 m_freem(m);
4445 return ENOBUFS;
4446 }
4447 /* 802.11 header may have moved. */
4448 wh = mtod(m, struct ieee80211_frame *);
4449 }
4450 totlen = m->m_pkthdr.len;
4451
4452 if (ieee80211_radiotap_active_vap(vap)) {
4453 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
4454
4455 tap->wt_flags = 0;
4456 tap->wt_rate = rate;
4457 if (k != NULL)
4458 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4459
4460 ieee80211_radiotap_tx(vap, m);
4461 }
4462
4463 /* Prepare TX firmware command. */
4464 cmd = &ring->cmd[ring->cur];
4465 cmd->code = IWN_CMD_TX_DATA;
4466 cmd->flags = 0;
4467 cmd->qid = ring->qid;
4468 cmd->idx = ring->cur;
4469
4470 tx = (struct iwn_cmd_data *)cmd->data;
4471 /* NB: No need to clear tx, all fields are reinitialized here. */
4472 tx->scratch = 0; /* clear "scratch" area */
4473
4474 flags = 0;
4475 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4476 /* Unicast frame, check if an ACK is expected. */
4477 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
4478 IEEE80211_QOS_ACKPOLICY_NOACK)
4479 flags |= IWN_TX_NEED_ACK;
4480 }
4481 if ((wh->i_fc[0] &
4482 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
4483 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
4484 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
4485
4486 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
4487 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
4488
4489 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
4490 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4491 /* NB: Group frames are sent using CCK in 802.11b/g. */
4492 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
4493 flags |= IWN_TX_NEED_RTS;
4494 } else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
4495 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
4496 flags |= IWN_TX_NEED_CTS;
4497 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
4498 flags |= IWN_TX_NEED_RTS;
4499 } else if ((rate & IEEE80211_RATE_MCS) &&
4500 (ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) {
4501 flags |= IWN_TX_NEED_RTS;
4502 }
4503
4504 /* XXX HT protection? */
4505
4506 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
4507 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4508 /* 5000 autoselects RTS/CTS or CTS-to-self. */
4509 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
4510 flags |= IWN_TX_NEED_PROTECTION;
4511 } else
4512 flags |= IWN_TX_FULL_TXOP;
4513 }
4514 }
4515
4516 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4517 type != IEEE80211_FC0_TYPE_DATA)
4518 tx->id = sc->broadcast_id;
4519 else
4520 tx->id = wn->id;
4521
4522 if (type == IEEE80211_FC0_TYPE_MGT) {
4523 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4524
4525 /* Tell HW to set timestamp in probe responses. */
4526 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4527 flags |= IWN_TX_INSERT_TSTAMP;
4528 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4529 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4530 tx->timeout = htole16(3);
4531 else
4532 tx->timeout = htole16(2);
4533 } else
4534 tx->timeout = htole16(0);
4535
4536 if (hdrlen & 3) {
4537 /* First segment length must be a multiple of 4. */
4538 flags |= IWN_TX_NEED_PADDING;
4539 pad = 4 - (hdrlen & 3);
4540 } else
4541 pad = 0;
4542
4543 tx->len = htole16(totlen);
4544 tx->tid = tid;
4545 tx->rts_ntries = 60;
4546 tx->data_ntries = 15;
4547 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4548 tx->rate = iwn_rate_to_plcp(sc, ni, rate);
4549 if (tx->id == sc->broadcast_id) {
4550 /* Group or management frame. */
4551 tx->linkq = 0;
4552 } else {
4553 tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
4554 flags |= IWN_TX_LINKQ; /* enable MRR */
4555 }
4556
4557 /* Set physical address of "scratch area". */
4558 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
4559 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
4560
4561 /* Copy 802.11 header in TX command. */
4562 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
4563
4564 /* Trim 802.11 header. */
4565 m_adj(m, hdrlen);
4566 tx->security = 0;
4567 tx->flags = htole32(flags);
4568
4569 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
4570 &nsegs, BUS_DMA_NOWAIT);
4571 if (error != 0) {
4572 if (error != EFBIG) {
4573 device_printf(sc->sc_dev,
4574 "%s: can't map mbuf (error %d)\n", __func__, error);
4575 m_freem(m);
4576 return error;
4577 }
4578 /* Too many DMA segments, linearize mbuf. */
4579 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
4580 if (m1 == NULL) {
4581 device_printf(sc->sc_dev,
4582 "%s: could not defrag mbuf\n", __func__);
4583 m_freem(m);
4584 return ENOBUFS;
4585 }
4586 m = m1;
4587
4588 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4589 segs, &nsegs, BUS_DMA_NOWAIT);
4590 if (error != 0) {
4591 device_printf(sc->sc_dev,
4592 "%s: can't map mbuf (error %d)\n", __func__, error);
4593 m_freem(m);
4594 return error;
4595 }
4596 }
4597
4598 data->m = m;
4599 data->ni = ni;
4600
4601 DPRINTF(sc, IWN_DEBUG_XMIT,
4602 "%s: qid %d idx %d len %d nsegs %d flags 0x%08x rate 0x%04x plcp 0x%08x\n",
4603 __func__,
4604 ring->qid,
4605 ring->cur,
4606 m->m_pkthdr.len,
4607 nsegs,
4608 flags,
4609 rate,
4610 tx->rate);
4611
4612 /* Fill TX descriptor. */
4613 desc->nsegs = 1;
4614 if (m->m_len != 0)
4615 desc->nsegs += nsegs;
4616 /* First DMA segment is used by the TX command. */
4617 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4618 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
4619 (4 + sizeof (*tx) + hdrlen + pad) << 4);
4620 /* Other DMA segments are for data payload. */
4621 seg = &segs[0];
4622 for (i = 1; i <= nsegs; i++) {
4623 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4624 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
4625 seg->ds_len << 4);
4626 seg++;
4627 }
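	/*
	 * NB: each 16-bit "len" field above ORs the high part of the DMA
	 * address (IWN_HIADDR) into its low bits and stores the segment byte
	 * count shifted left by four; the TX command segment uses the same
	 * encoding.
	 */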
4628
4629 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4630 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4631 BUS_DMASYNC_PREWRITE);
4632 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4633 BUS_DMASYNC_PREWRITE);
4634
4635 /* Update TX scheduler. */
4636 if (ring->qid >= sc->firstaggqueue)
4637 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4638
4639 /* Kick TX ring. */
4640 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4641 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4642
4643 /* Mark TX ring as full if we reach a certain threshold. */
4644 if (++ring->queued > IWN_TX_RING_HIMARK)
4645 sc->qfullmsk |= 1 << ring->qid;
4646
4647 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4648
4649 return 0;
4650}
4651
4652static int
4653iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
4654 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
4655{
4656 struct iwn_ops *ops = &sc->ops;
4657 struct ieee80211vap *vap = ni->ni_vap;
4658 struct iwn_tx_cmd *cmd;
4659 struct iwn_cmd_data *tx;
4660 struct ieee80211_frame *wh;
4661 struct iwn_tx_ring *ring;
4662 struct iwn_tx_desc *desc;
4663 struct iwn_tx_data *data;
4664 struct mbuf *m1;
4665 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
4666 uint32_t flags;
4667 u_int hdrlen;
4668 int ac, totlen, error, pad, nsegs = 0, i, rate;
4669 uint8_t type;
4670
4671 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4672
4673 IWN_LOCK_ASSERT(sc);
4674
4675 wh = mtod(m, struct ieee80211_frame *);
4676 hdrlen = ieee80211_anyhdrsize(wh);
4677 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4678
4679 ac = params->ibp_pri & 3;
4680
4681 ring = &sc->txq[ac];
4682 desc = &ring->desc[ring->cur];
4683 data = &ring->data[ring->cur];
4684
4685 /* Choose a TX rate. */
4686 rate = params->ibp_rate0;
4687 totlen = m->m_pkthdr.len;
4688
4689 /* Prepare TX firmware command. */
4690 cmd = &ring->cmd[ring->cur];
4691 cmd->code = IWN_CMD_TX_DATA;
4692 cmd->flags = 0;
4693 cmd->qid = ring->qid;
4694 cmd->idx = ring->cur;
4695
4696 tx = (struct iwn_cmd_data *)cmd->data;
4697 /* NB: No need to clear tx, all fields are reinitialized here. */
4698 tx->scratch = 0; /* clear "scratch" area */
4699
4700 flags = 0;
4701 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
4702 flags |= IWN_TX_NEED_ACK;
4703 if (params->ibp_flags & IEEE80211_BPF_RTS) {
4704 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4705 /* 5000 autoselects RTS/CTS or CTS-to-self. */
4706 flags &= ~IWN_TX_NEED_RTS;
4707 flags |= IWN_TX_NEED_PROTECTION;
4708 } else
4709 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
4710 }
4711 if (params->ibp_flags & IEEE80211_BPF_CTS) {
4712 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4713 /* 5000 autoselects RTS/CTS or CTS-to-self. */
4714 flags &= ~IWN_TX_NEED_CTS;
4715 flags |= IWN_TX_NEED_PROTECTION;
4716 } else
4717 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
4718 }
4719 if (type == IEEE80211_FC0_TYPE_MGT) {
4720 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4721
4722 /* Tell HW to set timestamp in probe responses. */
4723 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4724 flags |= IWN_TX_INSERT_TSTAMP;
4725
4726 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4727 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4728 tx->timeout = htole16(3);
4729 else
4730 tx->timeout = htole16(2);
4731 } else
4732 tx->timeout = htole16(0);
4733
4734 if (hdrlen & 3) {
4735 /* First segment length must be a multiple of 4. */
4736 flags |= IWN_TX_NEED_PADDING;
4737 pad = 4 - (hdrlen & 3);
4738 } else
4739 pad = 0;
4740
4741 if (ieee80211_radiotap_active_vap(vap)) {
4742 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
4743
4744 tap->wt_flags = 0;
4745 tap->wt_rate = rate;
4746
4747 ieee80211_radiotap_tx(vap, m);
4748 }
4749
4750 tx->len = htole16(totlen);
4751 tx->tid = 0;
4752 tx->id = sc->broadcast_id;
4753 tx->rts_ntries = params->ibp_try1;
4754 tx->data_ntries = params->ibp_try0;
4755 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4756 tx->rate = iwn_rate_to_plcp(sc, ni, rate);
4757
4758 /* Group or management frame. */
4759 tx->linkq = 0;
4760
4761 /* Set physical address of "scratch area". */
4762 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
4763 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
4764
4765 /* Copy 802.11 header in TX command. */
4766 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
4767
4768 /* Trim 802.11 header. */
4769 m_adj(m, hdrlen);
4770 tx->security = 0;
4771 tx->flags = htole32(flags);
4772
4773 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
4774 &nsegs, BUS_DMA_NOWAIT);
4775 if (error != 0) {
4776 if (error != EFBIG) {
4777 device_printf(sc->sc_dev,
4778 "%s: can't map mbuf (error %d)\n", __func__, error);
4779 m_freem(m);
4780 return error;
4781 }
4782 /* Too many DMA segments, linearize mbuf. */
4783 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
4784 if (m1 == NULL) {
4785 device_printf(sc->sc_dev,
4786 "%s: could not defrag mbuf\n", __func__);
4787 m_freem(m);
4788 return ENOBUFS;
4789 }
4790 m = m1;
4791
4792 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
4793 segs, &nsegs, BUS_DMA_NOWAIT);
4794 if (error != 0) {
4795 device_printf(sc->sc_dev,
4796 "%s: can't map mbuf (error %d)\n", __func__, error);
4797 m_freem(m);
4798 return error;
4799 }
4800 }
4801
4802 data->m = m;
4803 data->ni = ni;
4804
4805 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
4806 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
4807
4808 /* Fill TX descriptor. */
4809 desc->nsegs = 1;
4810 if (m->m_len != 0)
4811 desc->nsegs += nsegs;
4812 /* First DMA segment is used by the TX command. */
4813 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
4814 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
4815 (4 + sizeof (*tx) + hdrlen + pad) << 4);
4816 /* Other DMA segments are for data payload. */
4817 seg = &segs[0];
4818 for (i = 1; i <= nsegs; i++) {
4819 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
4820 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
4821 seg->ds_len << 4);
4822 seg++;
4823 }
4824
4825 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
4826 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4827 BUS_DMASYNC_PREWRITE);
4828 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4829 BUS_DMASYNC_PREWRITE);
4830
4831 /* Update TX scheduler. */
4832 if (ring->qid >= sc->firstaggqueue)
4833 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
4834
4835 /* Kick TX ring. */
4836 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4837 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4838
4839 /* Mark TX ring as full if we reach a certain threshold. */
4840 if (++ring->queued > IWN_TX_RING_HIMARK)
4841 sc->qfullmsk |= 1 << ring->qid;
4842
4843 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
4844
4845 return 0;
4846}
4847
4848static void
4849iwn_xmit_task(void *arg0, int pending)
4850{
4851 struct iwn_softc *sc = arg0;
4852 struct ieee80211_node *ni;
4853 struct mbuf *m;
4854 int error;
4855 struct ieee80211_bpf_params p;
4856 int have_p;
4857
4858 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);
4859
4860 IWN_LOCK(sc);
4861 /*
4862 * Dequeue frames, attempt to transmit,
4863 * then disable beaconwait when we're done.
4864 */
4865 while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
4866 have_p = 0;
4867 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4868
4869 /* Get xmit params if appropriate */
4870 if (ieee80211_get_xmit_params(m, &p) == 0)
4871 have_p = 1;
4872
4873 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: m=%p, have_p=%d\n",
4874 __func__, m, have_p);
4875
4876 /* If we have xmit params, use them */
4877 if (have_p)
4878 error = iwn_tx_data_raw(sc, m, ni, &p);
4879 else
4880 error = iwn_tx_data(sc, m, ni);
4881
4882 if (error != 0) {
4883 if_inc_counter(ni->ni_vap->iv_ifp,
4884 IFCOUNTER_OERRORS, 1);
4885 ieee80211_free_node(ni);
4886 }
4887 }
4888
4889 sc->sc_beacon_wait = 0;
4890 IWN_UNLOCK(sc);
4891}
4892
4893static int
4894iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
4895 const struct ieee80211_bpf_params *params)
4896{
4897 struct ieee80211com *ic = ni->ni_ic;
4898 struct iwn_softc *sc = ic->ic_softc;
4899 int error = 0;
4900
4901 DPRINTF(sc, IWN_DEBUG_XMIT | IWN_DEBUG_TRACE, "->%s begin\n", __func__);
4902
4903 if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) {
4904 ieee80211_free_node(ni);
4905 m_freem(m);
4906 return ENETDOWN;
4907 }
4908
4909 /* XXX? net80211 doesn't set this on xmit'ed raw frames? */
4910 m->m_pkthdr.rcvif = (void *) ni;
4911
4912 IWN_LOCK(sc);
4913
4914 /* queue frame if we have to */
4915 if (sc->sc_beacon_wait) {
4916 if (iwn_xmit_queue_enqueue(sc, m) != 0) {
4917 m_freem(m);
4918 if_inc_counter(ni->ni_vap->iv_ifp,
4919 IFCOUNTER_OERRORS, 1);
4920 ieee80211_free_node(ni);
4921 IWN_UNLOCK(sc);
4922 return (ENOBUFS);
4923 }
4924 /* Queued, so just return OK */
4925 IWN_UNLOCK(sc);
4926 return (0);
4927 }
4928
4929 if (params == NULL) {
4930 /*
4931 * Legacy path; interpret frame contents to decide
4932 * precisely how to send the frame.
4933 */
4934 error = iwn_tx_data(sc, m, ni);
4935 } else {
4936 /*
4937 * Caller supplied explicit parameters to use in
4938 * sending the frame.
4939 */
4940 error = iwn_tx_data_raw(sc, m, ni, params);
4941 }
4942 if (error != 0) {
4943 /* NB: m is reclaimed on tx failure */
4944 ieee80211_free_node(ni);
4945 } else
4946 sc->sc_tx_timer = 5;
4947
4948 IWN_UNLOCK(sc);
4949
4950 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end\n",__func__);
4951
4952 return error;
4953}
4954
4955static int
4956iwn_transmit(struct ieee80211com *ic, struct mbuf *m)
4957{
4958 struct iwn_softc *sc;
4959 int error;
4960
4961 sc = ic->ic_softc;
4962
4963 IWN_LOCK(sc);
4964 if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) {
4965 IWN_UNLOCK(sc);
4966 return (ENXIO);
4967 }
4968 error = mbufq_enqueue(&sc->sc_snd, m);
4969 if (error) {
4970 IWN_UNLOCK(sc);
4971 return (error);
4972 }
4973 iwn_start_locked(sc);
4974 IWN_UNLOCK(sc);
4975 return (0);
4976}
4977
4978static void
4979iwn_start_locked(struct iwn_softc *sc)
4980{
4981 struct ieee80211_node *ni;
4982 struct mbuf *m;
4983
4984 IWN_LOCK_ASSERT(sc);
4985
4986 /*
4987 * If we're waiting for a beacon, we can just exit out here
4988 * and wait for the taskqueue to be kicked.
4989 */
4990 if (sc->sc_beacon_wait) {
4991 return;
4992 }
4993
4994 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);
4995 while (sc->qfullmsk == 0 &&
4996 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4997 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4998 if (iwn_tx_data(sc, m, ni) != 0) {
4999 if_inc_counter(ni->ni_vap->iv_ifp,
5000 IFCOUNTER_OERRORS, 1);
5001 ieee80211_free_node(ni);
5002 } else
5003 sc->sc_tx_timer = 5;
5004 }
5005 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: done\n", __func__);
5006}
5007
5008static void
5009iwn_watchdog(void *arg)
5010{
5011 struct iwn_softc *sc = arg;
5012 struct ieee80211com *ic = &sc->sc_ic;
5013
5014 IWN_LOCK_ASSERT(sc);
5015
5016 KASSERT(sc->sc_flags & IWN_FLAG_RUNNING, ("not running"));
5017
5018 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5019
5020 if (sc->sc_tx_timer > 0) {
5021 if (--sc->sc_tx_timer == 0) {
5022 ic_printf(ic, "device timeout\n");
5023 ieee80211_runtask(ic, &sc->sc_reinit_task);
5024 return;
5025 }
5026 }
5027 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
5028}
5029
5001static int
5002iwn_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
5003{
5004	struct ifreq *ifr = data;
5005	struct iwn_softc *sc = ic->ic_softc;
5006	int error = 0;
5007
5008	switch (cmd) {
5009	case SIOCGIWNSTATS:
5010		IWN_LOCK(sc);
5011		/* XXX validate permissions/memory/etc? */
5012		error = copyout(&sc->last_stat, ifr->ifr_data,
5013		    sizeof(struct iwn_stats));
5014		IWN_UNLOCK(sc);
5015		break;
5016	case SIOCZIWNSTATS:
5017		IWN_LOCK(sc);
5018		memset(&sc->last_stat, 0, sizeof(struct iwn_stats));
5019		IWN_UNLOCK(sc);
5020		break;
5021	default:
5022		error = ENOTTY;
5023		break;
5024	}
5025	return (error);
5026}
5027
5030static int
5031iwn_cdev_open(struct cdev *dev, int flags, int type, struct thread *td)
5032{
5033
5034	return (0);
5035}
5036
5037static int
5038iwn_cdev_close(struct cdev *dev, int flags, int type, struct thread *td)
5039{
5040
5041	return (0);
5042}
5043
5044static int
5045iwn_cdev_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
5046    struct thread *td)
5047{
5048	int rc;
5049	struct iwn_softc *sc = dev->si_drv1;
5050	struct iwn_ioctl_data *d;
5051
5052	rc = priv_check(td, PRIV_DRIVER);
5053	if (rc != 0)
5054		return (0);
5055
5056	switch (cmd) {
5057	case SIOCGIWNSTATS:
5058		d = (struct iwn_ioctl_data *) data;
5059		IWN_LOCK(sc);
5060		/* XXX validate permissions/memory/etc? */
5061		rc = copyout(&sc->last_stat, d->dst_addr, sizeof(struct iwn_stats));
5062		IWN_UNLOCK(sc);
5063		break;
5064	case SIOCZIWNSTATS:
5065		IWN_LOCK(sc);
5066		memset(&sc->last_stat, 0, sizeof(struct iwn_stats));
5067		IWN_UNLOCK(sc);
5068		break;
5069	default:
5070		rc = EINVAL;
5071		break;
5072	}
5073	return (rc);
5074}
5075
5076static int
5077iwn_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
5078{
5079
5080 return (ENOTTY);
5081}
5082
5028static void
5029iwn_parent(struct ieee80211com *ic)
5030{
5031 struct iwn_softc *sc = ic->ic_softc;
5032 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5033 int startall = 0, stop = 0;
5034
5035 IWN_LOCK(sc);
5036 if (ic->ic_nrunning > 0) {
5037 if (!(sc->sc_flags & IWN_FLAG_RUNNING)) {
5038 iwn_init_locked(sc);
5039 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
5040 startall = 1;
5041 else
5042 stop = 1;
5043 }
5044 } else if (sc->sc_flags & IWN_FLAG_RUNNING)
5045 iwn_stop_locked(sc);
5046 IWN_UNLOCK(sc);
5047 if (startall)
5048 ieee80211_start_all(ic);
5049 else if (vap != NULL && stop)
5050 ieee80211_stop(vap);
5051}
5052
5053/*
5054 * Send a command to the firmware.
5055 */
5056static int
5057iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
5058{
5059 struct iwn_tx_ring *ring;
5060 struct iwn_tx_desc *desc;
5061 struct iwn_tx_data *data;
5062 struct iwn_tx_cmd *cmd;
5063 struct mbuf *m;
5064 bus_addr_t paddr;
5065 int totlen, error;
5066 int cmd_queue_num;
5067
5068 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5069
5070 if (async == 0)
5071 IWN_LOCK_ASSERT(sc);
5072
5073 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
5074 cmd_queue_num = IWN_PAN_CMD_QUEUE;
5075 else
5076 cmd_queue_num = IWN_CMD_QUEUE_NUM;
5077
5078 ring = &sc->txq[cmd_queue_num];
5079 desc = &ring->desc[ring->cur];
5080 data = &ring->data[ring->cur];
5081 totlen = 4 + size;
5082
5083 if (size > sizeof cmd->data) {
5084 /* Command is too large to fit in a descriptor. */
5085 if (totlen > MCLBYTES)
5086 return EINVAL;
5087 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
5088 if (m == NULL)
5089 return ENOMEM;
5090 cmd = mtod(m, struct iwn_tx_cmd *);
5091 error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
5092 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
5093 if (error != 0) {
5094 m_freem(m);
5095 return error;
5096 }
5097 data->m = m;
5098 } else {
5099 cmd = &ring->cmd[ring->cur];
5100 paddr = data->cmd_paddr;
5101 }
5102
5103 cmd->code = code;
5104 cmd->flags = 0;
5105 cmd->qid = ring->qid;
5106 cmd->idx = ring->cur;
5107 memcpy(cmd->data, buf, size);
5108
5109 desc->nsegs = 1;
5110 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
5111 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
5112
5113 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
5114 __func__, iwn_intr_str(cmd->code), cmd->code,
5115 cmd->flags, cmd->qid, cmd->idx);
5116
5117 if (size > sizeof cmd->data) {
5118 bus_dmamap_sync(ring->data_dmat, data->map,
5119 BUS_DMASYNC_PREWRITE);
5120 } else {
5121 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
5122 BUS_DMASYNC_PREWRITE);
5123 }
5124 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5125 BUS_DMASYNC_PREWRITE);
5126
5127 /* Kick command ring. */
5128 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
5129 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5130
5131 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5132
5133 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
5134}
5135
5136static int
5137iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
5138{
5139 struct iwn4965_node_info hnode;
5140 caddr_t src, dst;
5141
5142 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5143
5144 /*
5145 * We use the node structure for 5000 Series internally (it is
5146 * a superset of the one for 4965AGN). We thus copy the common
5147 * fields before sending the command.
5148 */
5149 src = (caddr_t)node;
5150 dst = (caddr_t)&hnode;
5151 memcpy(dst, src, 48);
5152 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
5153 memcpy(dst + 48, src + 72, 20);
5154 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
5155}
5156
5157static int
5158iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
5159{
5160
5161 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5162
5163 /* Direct mapping. */
5164 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
5165}
5166
5167static int
5168iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
5169{
5170#define RV(v) ((v) & IEEE80211_RATE_VAL)
5171 struct iwn_node *wn = (void *)ni;
5172 struct ieee80211_rateset *rs;
5173 struct iwn_cmd_link_quality linkq;
5174 int i, rate, txrate;
5175 int is_11n;
5176
5177 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5178
5179 memset(&linkq, 0, sizeof linkq);
5180 linkq.id = wn->id;
5181 linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
5182 linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
5183
5184 linkq.ampdu_max = 32; /* XXX negotiated? */
5185 linkq.ampdu_threshold = 3;
5186 linkq.ampdu_limit = htole16(4000); /* 4ms */
5187
5188 DPRINTF(sc, IWN_DEBUG_XMIT,
5189 "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
5190 __func__,
5191 linkq.antmsk_1stream,
5192 linkq.antmsk_2stream,
5193 sc->ntxchains);
5194
5195 /*
5196 * Are we using 11n rates? Ensure the channel is
5197 * 11n _and_ we have some 11n rates, or don't
5198 * try.
5199 */
5200 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
5201 rs = (struct ieee80211_rateset *) &ni->ni_htrates;
5202 is_11n = 1;
5203 } else {
5204 rs = &ni->ni_rates;
5205 is_11n = 0;
5206 }
5207
5208 /* Start at highest available bit-rate. */
5209 /*
5210 * XXX this is all very dirty!
5211 */
5212 if (is_11n)
5213 txrate = ni->ni_htrates.rs_nrates - 1;
5214 else
5215 txrate = rs->rs_nrates - 1;
5216 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
5217 uint32_t plcp;
5218
5219 /*
5220 * XXX TODO: ensure the last two slots are the two lowest
5221 * rate entries, just for now.
5222 */
5223 if (i == 14 || i == 15)
5224 txrate = 0;
5225
5226 if (is_11n)
5227 rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
5228 else
5229 rate = RV(rs->rs_rates[txrate]);
5230
5231 /* Do rate -> PLCP config mapping */
5232 plcp = iwn_rate_to_plcp(sc, ni, rate);
5233 linkq.retry[i] = plcp;
5234 DPRINTF(sc, IWN_DEBUG_XMIT,
5235 "%s: i=%d, txrate=%d, rate=0x%02x, plcp=0x%08x\n",
5236 __func__,
5237 i,
5238 txrate,
5239 rate,
5240 le32toh(plcp));
5241
5242 /*
5243 * The mimo field is an index into the table which
5244 * indicates the first index where it and subsequent entries
5245 * will not be using MIMO.
5246 *
5247 * Since we're filling linkq from 0..15 and we're filling
5248		 * from the highest MCS rates to the lowest rates, if we
5249 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
5250 * the next entry.) That way if the next entry is a non-MIMO
5251 * entry, we're already pointing at it.
5252 */
5253 if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
5254 RV(le32toh(plcp)) > 7)
5255 linkq.mimo = i + 1;
5256
5257 /* Next retry at immediate lower bit-rate. */
5258 if (txrate > 0)
5259 txrate--;
5260 }
5261 /*
5262 * If we reached the end of the list and indeed we hit
5263 * all MIMO rates (eg 5300 doing MCS23-15) then yes,
5264 * set mimo to 15. Setting it to 16 panics the firmware.
5265 */
5266 if (linkq.mimo > 15)
5267 linkq.mimo = 15;
5268
5269 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: mimo = %d\n", __func__, linkq.mimo);
5270
5271 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5272
5273 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
5274#undef RV
5275}
5276
5277/*
5278 * Broadcast node is used to send group-addressed and management frames.
5279 */
5280static int
5281iwn_add_broadcast_node(struct iwn_softc *sc, int async)
5282{
5283 struct iwn_ops *ops = &sc->ops;
5284 struct ieee80211com *ic = &sc->sc_ic;
5285 struct iwn_node_info node;
5286 struct iwn_cmd_link_quality linkq;
5287 uint8_t txant;
5288 int i, error;
5289
5290 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5291
5292 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5293
5294 memset(&node, 0, sizeof node);
5295 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
5296 node.id = sc->broadcast_id;
5297 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
5298 if ((error = ops->add_node(sc, &node, async)) != 0)
5299 return error;
5300
5301 /* Use the first valid TX antenna. */
5302 txant = IWN_LSB(sc->txchainmask);
5303
5304 memset(&linkq, 0, sizeof linkq);
5305 linkq.id = sc->broadcast_id;
5306 linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
5307 linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
5308 linkq.ampdu_max = 64;
5309 linkq.ampdu_threshold = 3;
5310 linkq.ampdu_limit = htole16(4000); /* 4ms */
5311
5312 /* Use lowest mandatory bit-rate. */
5313 /* XXX rate table lookup? */
5314 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
5315 linkq.retry[0] = htole32(0xd);
5316 else
5317 linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
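	/* (0xd is the 6 Mb/s OFDM PLCP value; 10 | IWN_RFLAG_CCK is 1 Mb/s CCK.) */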
5318 linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
5319 /* Use same bit-rate for all TX retries. */
5320 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
5321 linkq.retry[i] = linkq.retry[0];
5322 }
5323
5324 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5325
5326 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
5327}
5328
5329static int
5330iwn_updateedca(struct ieee80211com *ic)
5331{
5332#define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
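/* e.g. IWN_EXP2(4) == 15 and IWN_EXP2(10) == 1023, typical CWmin/CWmax values. */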
5333 struct iwn_softc *sc = ic->ic_softc;
5334 struct iwn_edca_params cmd;
5335 int aci;
5336
5337 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5338
5339 memset(&cmd, 0, sizeof cmd);
5340 cmd.flags = htole32(IWN_EDCA_UPDATE);
5341 for (aci = 0; aci < WME_NUM_AC; aci++) {
5342 const struct wmeParams *ac =
5343 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
5344 cmd.ac[aci].aifsn = ac->wmep_aifsn;
5345 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
5346 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
5347 cmd.ac[aci].txoplimit =
5348 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
5349 }
5350 IEEE80211_UNLOCK(ic);
5351 IWN_LOCK(sc);
5352 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
5353 IWN_UNLOCK(sc);
5354 IEEE80211_LOCK(ic);
5355
5356 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5357
5358 return 0;
5359#undef IWN_EXP2
5360}
5361
5362static void
5363iwn_update_mcast(struct ieee80211com *ic)
5364{
5365 /* Ignore */
5366}
5367
5368static void
5369iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
5370{
5371 struct iwn_cmd_led led;
5372
5373 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5374
5375#if 0
5376 /* XXX don't set LEDs during scan? */
5377 if (sc->sc_is_scanning)
5378 return;
5379#endif
5380
5381 /* Clear microcode LED ownership. */
5382 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
5383
5384 led.which = which;
5385 led.unit = htole32(10000); /* on/off in unit of 100ms */
5386 led.off = off;
5387 led.on = on;
5388 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
5389}
5390
5391/*
5392 * Set the critical temperature at which the firmware will stop the radio
5393 * and notify us.
5394 */
5395static int
5396iwn_set_critical_temp(struct iwn_softc *sc)
5397{
5398 struct iwn_critical_temp crit;
5399 int32_t temp;
5400
5401 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5402
5403 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
5404
5405 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
5406 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
5407 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
5408 temp = IWN_CTOK(110);
5409 else
5410 temp = 110;
5411 memset(&crit, 0, sizeof crit);
5412 crit.tempR = htole32(temp);
5413 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
5414 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
5415}
5416
5417static int
5418iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
5419{
5420 struct iwn_cmd_timing cmd;
5421 uint64_t val, mod;
5422
5423 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5424
5425 memset(&cmd, 0, sizeof cmd);
5426 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
5427 cmd.bintval = htole16(ni->ni_intval);
5428 cmd.lintval = htole16(10);
5429
5430 /* Compute remaining time until next beacon. */
5431 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
5432 mod = le64toh(cmd.tstamp) % val;
5433 cmd.binitval = htole32((uint32_t)(val - mod));
5434
5435 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
5436 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
5437
5438 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
5439}
5440
5441static void
5442iwn4965_power_calibration(struct iwn_softc *sc, int temp)
5443{
5444 struct ieee80211com *ic = &sc->sc_ic;
5445
5446 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5447
5448 /* Adjust TX power if need be (delta >= 3 degC). */
5449 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
5450 __func__, sc->temp, temp);
5451 if (abs(temp - sc->temp) >= 3) {
5452 /* Record temperature of last calibration. */
5453 sc->temp = temp;
5454 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
5455 }
5456}
5457
5458/*
5459 * Set TX power for current channel (each rate has its own power settings).
5460 * This function takes into account the regulatory information from EEPROM,
5461 * the current temperature and the current voltage.
5462 */
5463static int
5464iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
5465 int async)
5466{
5467/* Fixed-point arithmetic division using an n-bit fractional part. */
5468#define fdivround(a, b, n) \
5469 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
5470/* Linear interpolation. */
5471#define interpolate(x, x1, y1, x2, y2, n) \
5472 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
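/*
 * e.g. interpolate(6, 1, 10, 11, 20, 1) = 10 + fdivround(5 * 10, 10, 1) = 15,
 * i.e. the value midway between the two sample points.
 */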
5473
5474 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
5475 struct iwn_ucode_info *uc = &sc->ucode_info;
5476 struct iwn4965_cmd_txpower cmd;
5477 struct iwn4965_eeprom_chan_samples *chans;
5478 const uint8_t *rf_gain, *dsp_gain;
5479 int32_t vdiff, tdiff;
5480 int i, c, grp, maxpwr;
5481 uint8_t chan;
5482
5483 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5484 /* Retrieve current channel from last RXON. */
5485 chan = sc->rxon->chan;
5486 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
5487 chan);
5488
5489 memset(&cmd, 0, sizeof cmd);
5490 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
5491 cmd.chan = chan;
5492
5493 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
5494 maxpwr = sc->maxpwr5GHz;
5495 rf_gain = iwn4965_rf_gain_5ghz;
5496 dsp_gain = iwn4965_dsp_gain_5ghz;
5497 } else {
5498 maxpwr = sc->maxpwr2GHz;
5499 rf_gain = iwn4965_rf_gain_2ghz;
5500 dsp_gain = iwn4965_dsp_gain_2ghz;
5501 }
5502
5503 /* Compute voltage compensation. */
5504 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
5505 if (vdiff > 0)
5506 vdiff *= 2;
5507 if (abs(vdiff) > 2)
5508 vdiff = 0;
5509 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5510 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
5511 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
5512
5513 /* Get channel attenuation group. */
5514 if (chan <= 20) /* 1-20 */
5515 grp = 4;
5516 else if (chan <= 43) /* 34-43 */
5517 grp = 0;
5518 else if (chan <= 70) /* 44-70 */
5519 grp = 1;
5520 else if (chan <= 124) /* 71-124 */
5521 grp = 2;
5522 else /* 125-200 */
5523 grp = 3;
5524 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5525 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
5526
5527 /* Get channel sub-band. */
5528 for (i = 0; i < IWN_NBANDS; i++)
5529 if (sc->bands[i].lo != 0 &&
5530 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
5531 break;
5532 if (i == IWN_NBANDS) /* Can't happen in real-life. */
5533 return EINVAL;
5534 chans = sc->bands[i].chans;
5535 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5536 "%s: chan %d sub-band=%d\n", __func__, chan, i);
5537
5538 for (c = 0; c < 2; c++) {
5539 uint8_t power, gain, temp;
5540 int maxchpwr, pwr, ridx, idx;
5541
5542 power = interpolate(chan,
5543 chans[0].num, chans[0].samples[c][1].power,
5544 chans[1].num, chans[1].samples[c][1].power, 1);
5545 gain = interpolate(chan,
5546 chans[0].num, chans[0].samples[c][1].gain,
5547 chans[1].num, chans[1].samples[c][1].gain, 1);
5548 temp = interpolate(chan,
5549 chans[0].num, chans[0].samples[c][1].temp,
5550 chans[1].num, chans[1].samples[c][1].temp, 1);
5551 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5552 "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
5553 __func__, c, power, gain, temp);
5554
5555 /* Compute temperature compensation. */
5556 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
5557 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5558 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
5559 __func__, tdiff, sc->temp, temp);
5560
5561 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
5562 /* Convert dBm to half-dBm. */
5563 maxchpwr = sc->maxpwr[chan] * 2;
5564 if ((ridx / 8) & 1)
5565 maxchpwr -= 6; /* MIMO 2T: -3dB */
5566
5567 pwr = maxpwr;
5568
5569 /* Adjust TX power based on rate. */
5570 if ((ridx % 8) == 5)
5571 pwr -= 15; /* OFDM48: -7.5dB */
5572 else if ((ridx % 8) == 6)
5573 pwr -= 17; /* OFDM54: -8.5dB */
5574 else if ((ridx % 8) == 7)
5575 pwr -= 20; /* OFDM60: -10dB */
5576 else
5577 pwr -= 10; /* Others: -5dB */
5578
5579 /* Do not exceed channel max TX power. */
5580 if (pwr > maxchpwr)
5581 pwr = maxchpwr;
5582
5583 idx = gain - (pwr - power) - tdiff - vdiff;
5584 if ((ridx / 8) & 1) /* MIMO */
5585 idx += (int32_t)le32toh(uc->atten[grp][c]);
5586
5587 if (cmd.band == 0)
5588 idx += 9; /* 5GHz */
5589 if (ridx == IWN_RIDX_MAX)
5590 idx += 5; /* CCK */
5591
5592 /* Make sure idx stays in a valid range. */
5593 if (idx < 0)
5594 idx = 0;
5595 else if (idx > IWN4965_MAX_PWR_INDEX)
5596 idx = IWN4965_MAX_PWR_INDEX;
5597
5598 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5599 "%s: Tx chain %d, rate idx %d: power=%d\n",
5600 __func__, c, ridx, idx);
5601 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
5602 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
5603 }
5604 }
5605
5606 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5607 "%s: set tx power for chan %d\n", __func__, chan);
5608 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
5609
5610#undef interpolate
5611#undef fdivround
5612}
5613
5614static int
5615iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
5616 int async)
5617{
5618 struct iwn5000_cmd_txpower cmd;
5619 int cmdid;
5620
5621 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5622
5623 /*
5624 * TX power calibration is handled automatically by the firmware
5625 * for 5000 Series.
5626 */
5627 memset(&cmd, 0, sizeof cmd);
5628 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
5629 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
5630 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
5631 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
5632 "%s: setting TX power; rev=%d\n",
5633 __func__,
5634 IWN_UCODE_API(sc->ucode_rev));
5635 if (IWN_UCODE_API(sc->ucode_rev) == 1)
5636 cmdid = IWN_CMD_TXPOWER_DBM_V1;
5637 else
5638 cmdid = IWN_CMD_TXPOWER_DBM;
5639 return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async);
5640}
5641
5642/*
5643 * Retrieve the maximum RSSI (in dBm) among receivers.
5644 */
5645static int
5646iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
5647{
5648 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
5649 uint8_t mask, agc;
5650 int rssi;
5651
5652 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5653
5654 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
5655 agc = (le16toh(phy->agc) >> 7) & 0x7f;
5656
5657 rssi = 0;
5658 if (mask & IWN_ANT_A)
5659 rssi = MAX(rssi, phy->rssi[0]);
5660 if (mask & IWN_ANT_B)
5661 rssi = MAX(rssi, phy->rssi[2]);
5662 if (mask & IWN_ANT_C)
5663 rssi = MAX(rssi, phy->rssi[4]);
5664
5665 DPRINTF(sc, IWN_DEBUG_RECV,
5666 "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
5667 mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
5668 rssi - agc - IWN_RSSI_TO_DBM);
5669 return rssi - agc - IWN_RSSI_TO_DBM;
5670}
5671
5672static int
5673iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
5674{
5675 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
5676 uint8_t agc;
5677 int rssi;
5678
5679 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5680
5681 agc = (le32toh(phy->agc) >> 9) & 0x7f;
5682
5683 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
5684 le16toh(phy->rssi[1]) & 0xff);
5685 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
5686
5687 DPRINTF(sc, IWN_DEBUG_RECV,
5688 "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
5689 phy->rssi[0], phy->rssi[1], phy->rssi[2],
5690 rssi - agc - IWN_RSSI_TO_DBM);
5691 return rssi - agc - IWN_RSSI_TO_DBM;
5692}
5693
5694/*
5695 * Retrieve the average noise (in dBm) among receivers.
5696 */
5697static int
5698iwn_get_noise(const struct iwn_rx_general_stats *stats)
5699{
5700 int i, total, nbant, noise;
5701
5702 total = nbant = 0;
5703 for (i = 0; i < 3; i++) {
5704 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
5705 continue;
5706 total += noise;
5707 nbant++;
5708 }
5709 /* There should be at least one antenna but check anyway. */
5710 return (nbant == 0) ? -127 : (total / nbant) - 107;
5711}
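/*
 * e.g. noise readings of 40 and 42 on two antennas (the third reporting 0)
 * yield ((40 + 42) / 2) - 107 = -66 dBm; if no antenna reports a non-zero
 * value the -127 dBm floor is returned.
 */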
5712
5713/*
5714 * Compute temperature (in degC) from last received statistics.
5715 */
5716static int
5717iwn4965_get_temperature(struct iwn_softc *sc)
5718{
5719 struct iwn_ucode_info *uc = &sc->ucode_info;
5720 int32_t r1, r2, r3, r4, temp;
5721
5722 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5723
5724 r1 = le32toh(uc->temp[0].chan20MHz);
5725 r2 = le32toh(uc->temp[1].chan20MHz);
5726 r3 = le32toh(uc->temp[2].chan20MHz);
5727 r4 = le32toh(sc->rawtemp);
5728
5729 if (r1 == r3) /* Prevents division by 0 (should not happen). */
5730 return 0;
5731
5732 /* Sign-extend 23-bit R4 value to 32-bit. */
5733 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
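	/* NB: the mask-and-subtract above maps 0xffffff to -1 and leaves 0x7fffff at +8388607. */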
5734 /* Compute temperature in Kelvin. */
5735 temp = (259 * (r4 - r2)) / (r3 - r1);
5736 temp = (temp * 97) / 100 + 8;
5737
5738 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
5739 IWN_KTOC(temp));
5740 return IWN_KTOC(temp);
5741}
5742
5743static int
5744iwn5000_get_temperature(struct iwn_softc *sc)
5745{
5746 int32_t temp;
5747
5748 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5749
5750 /*
5751 * Temperature is not used by the driver for 5000 Series because
5752 * TX power calibration is handled by firmware.
5753 */
5754 temp = le32toh(sc->rawtemp);
5755 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
5756 temp = (temp / -5) + sc->temp_off;
5757 temp = IWN_KTOC(temp);
5758 }
5759 return temp;
5760}
5761
5762/*
5763 * Initialize sensitivity calibration state machine.
5764 */
5765static int
5766iwn_init_sensitivity(struct iwn_softc *sc)
5767{
5768 struct iwn_ops *ops = &sc->ops;
5769 struct iwn_calib_state *calib = &sc->calib;
5770 uint32_t flags;
5771 int error;
5772
5773 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5774
5775 /* Reset calibration state machine. */
5776 memset(calib, 0, sizeof (*calib));
5777 calib->state = IWN_CALIB_STATE_INIT;
5778 calib->cck_state = IWN_CCK_STATE_HIFA;
5779 /* Set initial correlation values. */
5780 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
5781 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
5782 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
5783 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
5784 calib->cck_x4 = 125;
5785 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
5786 calib->energy_cck = sc->limits->energy_cck;
5787
5788 /* Write initial sensitivity. */
5789 if ((error = iwn_send_sensitivity(sc)) != 0)
5790 return error;
5791
5792 /* Write initial gains. */
5793 if ((error = ops->init_gains(sc)) != 0)
5794 return error;
5795
5796 /* Request statistics at each beacon interval. */
5797 flags = 0;
5798 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
5799 __func__);
5800 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
5801}
5802
5803/*
5804 * Collect noise and RSSI statistics for the first 20 beacons received
5805 * after association and use them to determine connected antennas and
5806 * to set differential gains.
5807 */
5808static void
5809iwn_collect_noise(struct iwn_softc *sc,
5810 const struct iwn_rx_general_stats *stats)
5811{
5812 struct iwn_ops *ops = &sc->ops;
5813 struct iwn_calib_state *calib = &sc->calib;
5814 struct ieee80211com *ic = &sc->sc_ic;
5815 uint32_t val;
5816 int i;
5817
5818 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5819
5820 /* Accumulate RSSI and noise for all 3 antennas. */
5821 for (i = 0; i < 3; i++) {
5822 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
5823 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
5824 }
5825 /* NB: We update differential gains only once after 20 beacons. */
5826 if (++calib->nbeacons < 20)
5827 return;
5828
5829 /* Determine highest average RSSI. */
5830 val = MAX(calib->rssi[0], calib->rssi[1]);
5831 val = MAX(calib->rssi[2], val);
5832
5833 /* Determine which antennas are connected. */
5834 sc->chainmask = sc->rxchainmask;
5835 for (i = 0; i < 3; i++)
5836 if (val - calib->rssi[i] > 15 * 20)
5837 sc->chainmask &= ~(1 << i);
5838 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
5839 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
5840 __func__, sc->rxchainmask, sc->chainmask);
5841
5842 /* If none of the TX antennas are connected, keep at least one. */
5843 if ((sc->chainmask & sc->txchainmask) == 0)
5844 sc->chainmask |= IWN_LSB(sc->txchainmask);
5845
5846 (void)ops->set_gains(sc);
5847 calib->state = IWN_CALIB_STATE_RUN;
5848
5849#ifdef notyet
5850 /* XXX Disable RX chains with no antennas connected. */
5851 sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
5852 if (sc->sc_is_scanning)
5853 device_printf(sc->sc_dev,
5854 "%s: is_scanning set, before RXON\n",
5855 __func__);
5856 (void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
5857#endif
5858
5859 /* Enable power-saving mode if requested by user. */
5860 if (ic->ic_flags & IEEE80211_F_PMGTON)
5861 (void)iwn_set_pslevel(sc, 0, 3, 1);
5862
5863 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5864
5865}
5866
5867static int
5868iwn4965_init_gains(struct iwn_softc *sc)
5869{
5870 struct iwn_phy_calib_gain cmd;
5871
5872 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5873
5874 memset(&cmd, 0, sizeof cmd);
5875 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5876 /* Differential gains initially set to 0 for all 3 antennas. */
5877 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5878 "%s: setting initial differential gains\n", __func__);
5879 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5880}
5881
5882static int
5883iwn5000_init_gains(struct iwn_softc *sc)
5884{
5885 struct iwn_phy_calib cmd;
5886
5887 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5888
5889 memset(&cmd, 0, sizeof cmd);
5890 cmd.code = sc->reset_noise_gain;
5891 cmd.ngroups = 1;
5892 cmd.isvalid = 1;
5893 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5894 "%s: setting initial differential gains\n", __func__);
5895 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5896}
5897
5898static int
5899iwn4965_set_gains(struct iwn_softc *sc)
5900{
5901 struct iwn_calib_state *calib = &sc->calib;
5902 struct iwn_phy_calib_gain cmd;
5903 int i, delta, noise;
5904
5905 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5906
5907 /* Get minimal noise among connected antennas. */
5908 noise = INT_MAX; /* NB: There's at least one antenna. */
5909 for (i = 0; i < 3; i++)
5910 if (sc->chainmask & (1 << i))
5911 noise = MIN(calib->noise[i], noise);
5912
5913 memset(&cmd, 0, sizeof cmd);
5914 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5915 /* Set differential gains for connected antennas. */
5916 for (i = 0; i < 3; i++) {
5917 if (sc->chainmask & (1 << i)) {
5918 /* Compute attenuation (in unit of 1.5dB). */
5919 delta = (noise - (int32_t)calib->noise[i]) / 30;
5920 /* NB: delta <= 0 */
5921 /* Limit to [-4.5dB,0]. */
5922 cmd.gain[i] = MIN(abs(delta), 3);
5923 if (delta < 0)
5924 cmd.gain[i] |= 1 << 2; /* sign bit */
5925 }
5926 }
5927 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5928 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
5929 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
5930 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5931}
5932
5933static int
5934iwn5000_set_gains(struct iwn_softc *sc)
5935{
5936 struct iwn_calib_state *calib = &sc->calib;
5937 struct iwn_phy_calib_gain cmd;
5938 int i, ant, div, delta;
5939
5940 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5941
5942	/* Noise was accumulated over 20 beacons; parts other than the 6050 need an extra 1.5 factor. */
5943 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
5944
5945 memset(&cmd, 0, sizeof cmd);
5946 cmd.code = sc->noise_gain;
5947 cmd.ngroups = 1;
5948 cmd.isvalid = 1;
5949 /* Get first available RX antenna as referential. */
5950 ant = IWN_LSB(sc->rxchainmask);
5951 /* Set differential gains for other antennas. */
5952 for (i = ant + 1; i < 3; i++) {
5953 if (sc->chainmask & (1 << i)) {
5954 /* The delta is relative to antenna "ant". */
5955 delta = ((int32_t)calib->noise[ant] -
5956 (int32_t)calib->noise[i]) / div;
5957 /* Limit to [-4.5dB,+4.5dB]. */
5958 cmd.gain[i - 1] = MIN(abs(delta), 3);
5959 if (delta < 0)
5960 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
5961 }
5962 }
5963 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
5964 "setting differential gains Ant B/C: %x/%x (%x)\n",
5965 cmd.gain[0], cmd.gain[1], sc->chainmask);
5966 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5967}
5968
5969/*
5970 * Tune RF RX sensitivity based on the number of false alarms detected
5971 * during the last beacon period.
5972 */
5973static void
5974iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
5975{
5976#define inc(val, inc, max) \
5977 if ((val) < (max)) { \
5978 if ((val) < (max) - (inc)) \
5979 (val) += (inc); \
5980 else \
5981 (val) = (max); \
5982 needs_update = 1; \
5983 }
5984#define dec(val, dec, min) \
5985 if ((val) > (min)) { \
5986 if ((val) > (min) + (dec)) \
5987 (val) -= (dec); \
5988 else \
5989 (val) = (min); \
5990 needs_update = 1; \
5991 }
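/*
 * Both helpers step the value by the given amount toward the limit, clamp it
 * at that limit, and set needs_update so that a new sensitivity command is
 * sent at the end of this function.
 */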
5992
5993 const struct iwn_sensitivity_limits *limits = sc->limits;
5994 struct iwn_calib_state *calib = &sc->calib;
5995 uint32_t val, rxena, fa;
5996 uint32_t energy[3], energy_min;
5997 uint8_t noise[3], noise_ref;
5998 int i, needs_update = 0;
5999
6000 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6001
6002 /* Check that we've been enabled long enough. */
6003 if ((rxena = le32toh(stats->general.load)) == 0){
6004 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
6005 return;
6006 }
6007
6008 /* Compute number of false alarms since last call for OFDM. */
6009 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
6010 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
6011 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
6012
6013 if (fa > 50 * rxena) {
6014 /* High false alarm count, decrease sensitivity. */
6015 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6016 "%s: OFDM high false alarm count: %u\n", __func__, fa);
6017 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
6018 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
6019 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
6020 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
6021
6022 } else if (fa < 5 * rxena) {
6023 /* Low false alarm count, increase sensitivity. */
6024 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6025 "%s: OFDM low false alarm count: %u\n", __func__, fa);
6026 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
6027 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
6028 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
6029 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
6030 }
6031
6032 /* Compute maximum noise among 3 receivers. */
6033 for (i = 0; i < 3; i++)
6034 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
6035 val = MAX(noise[0], noise[1]);
6036 val = MAX(noise[2], val);
6037 /* Insert it into our samples table. */
6038 calib->noise_samples[calib->cur_noise_sample] = val;
6039 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
6040
6041 /* Compute maximum noise among last 20 samples. */
6042 noise_ref = calib->noise_samples[0];
6043 for (i = 1; i < 20; i++)
6044 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
6045
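	/*
	 * The per-chain energy readings appear to use an inverted scale
	 * (a lower raw value means a stronger signal), which is why MIN
	 * picks the maximum energy below and MAX picks the minimum over
	 * the sample window.
	 */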
6046 /* Compute maximum energy among 3 receivers. */
6047 for (i = 0; i < 3; i++)
6048 energy[i] = le32toh(stats->general.energy[i]);
6049 val = MIN(energy[0], energy[1]);
6050 val = MIN(energy[2], val);
6051 /* Insert it into our samples table. */
6052 calib->energy_samples[calib->cur_energy_sample] = val;
6053 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
6054
6055 /* Compute minimum energy among last 10 samples. */
6056 energy_min = calib->energy_samples[0];
6057 for (i = 1; i < 10; i++)
6058 energy_min = MAX(energy_min, calib->energy_samples[i]);
6059 energy_min += 6;
6060
6061 /* Compute number of false alarms since last call for CCK. */
6062 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
6063 fa += le32toh(stats->cck.fa) - calib->fa_cck;
6064 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
6065
6066 if (fa > 50 * rxena) {
6067 /* High false alarm count, decrease sensitivity. */
6068 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6069 "%s: CCK high false alarm count: %u\n", __func__, fa);
6070 calib->cck_state = IWN_CCK_STATE_HIFA;
6071 calib->low_fa = 0;
6072
6073 if (calib->cck_x4 > 160) {
6074 calib->noise_ref = noise_ref;
6075 if (calib->energy_cck > 2)
6076 dec(calib->energy_cck, 2, energy_min);
6077 }
6078 if (calib->cck_x4 < 160) {
6079 calib->cck_x4 = 161;
6080 needs_update = 1;
6081 } else
6082 inc(calib->cck_x4, 3, limits->max_cck_x4);
6083
6084 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
6085
6086 } else if (fa < 5 * rxena) {
6087 /* Low false alarm count, increase sensitivity. */
6088 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6089 "%s: CCK low false alarm count: %u\n", __func__, fa);
6090 calib->cck_state = IWN_CCK_STATE_LOFA;
6091 calib->low_fa++;
6092
6093 if (calib->cck_state != IWN_CCK_STATE_INIT &&
6094 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
6095 calib->low_fa > 100)) {
6096 inc(calib->energy_cck, 2, limits->min_energy_cck);
6097 dec(calib->cck_x4, 3, limits->min_cck_x4);
6098 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
6099 }
6100 } else {
6101		/* Not worth increasing or decreasing sensitivity. */
6102 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6103 "%s: CCK normal false alarm count: %u\n", __func__, fa);
6104 calib->low_fa = 0;
6105 calib->noise_ref = noise_ref;
6106
6107 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
6108 /* Previous interval had many false alarms. */
6109 dec(calib->energy_cck, 8, energy_min);
6110 }
6111 calib->cck_state = IWN_CCK_STATE_INIT;
6112 }
6113
6114 if (needs_update)
6115 (void)iwn_send_sensitivity(sc);
6116
6117 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6118
6119#undef dec
6120#undef inc
6121}
6122
6123static int
6124iwn_send_sensitivity(struct iwn_softc *sc)
6125{
6126 struct iwn_calib_state *calib = &sc->calib;
6127 struct iwn_enhanced_sensitivity_cmd cmd;
6128 int len;
6129
6130 memset(&cmd, 0, sizeof cmd);
6131 len = sizeof (struct iwn_sensitivity_cmd);
6132 cmd.which = IWN_SENSITIVITY_WORKTBL;
6133 /* OFDM modulation. */
6134 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
6135 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
6136 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
6137 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
6138 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
6139 cmd.energy_ofdm_th = htole16(62);
6140 /* CCK modulation. */
6141 cmd.corr_cck_x4 = htole16(calib->cck_x4);
6142 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
6143 cmd.energy_cck = htole16(calib->energy_cck);
6144 /* Barker modulation: use default values. */
6145 cmd.corr_barker = htole16(190);
6146 cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc);
6147
6148 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6149 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
6150 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
6151 calib->ofdm_mrc_x4, calib->cck_x4,
6152 calib->cck_mrc_x4, calib->energy_cck);
6153
6154 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
6155 goto send;
6156 /* Enhanced sensitivity settings. */
6157 len = sizeof (struct iwn_enhanced_sensitivity_cmd);
6158 cmd.ofdm_det_slope_mrc = htole16(668);
6159 cmd.ofdm_det_icept_mrc = htole16(4);
6160 cmd.ofdm_det_slope = htole16(486);
6161 cmd.ofdm_det_icept = htole16(37);
6162 cmd.cck_det_slope_mrc = htole16(853);
6163 cmd.cck_det_icept_mrc = htole16(4);
6164 cmd.cck_det_slope = htole16(476);
6165 cmd.cck_det_icept = htole16(99);
6166send:
6167 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
6168}
6169
6170/*
6171 * Look at the increase of PLCP errors over time; if it exceeds
6172 * a programmed threshold then trigger an RF retune.
6173 */
6174static void
6175iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs)
6176{
6177 int32_t delta_ofdm, delta_ht, delta_cck;
6178 struct iwn_calib_state *calib = &sc->calib;
6179 int delta_ticks, cur_ticks;
6180 int delta_msec;
6181 int thresh;
6182
6183 /*
6184 * Calculate the difference between the current and
6185 * previous statistics.
6186 */
6187 delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck;
6188 delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm;
6189 delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht;
6190
6191 /*
6192 * Calculate the delta in time between successive statistics
6193 * messages. Yes, it can roll over; so we make sure that
6194 * this doesn't happen.
6195 *
6196 * XXX go figure out what to do about rollover
6197 * XXX go figure out what to do if ticks rolls over to -ve instead!
6198 * XXX go stab signed integer overflow undefined-ness in the face.
6199 */
6200 cur_ticks = ticks;
6201 delta_ticks = cur_ticks - sc->last_calib_ticks;
6202
6203 /*
6204 * If any are negative, then the firmware likely reset; so just
6205 * bail. We'll pick this up next time.
6206 */
6207 if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0)
6208 return;
6209
6210 /*
6211 * delta_ticks is in ticks; we need to convert it up to milliseconds
6212 * so we can do some useful math with it.
6213 */
6214 delta_msec = ticks_to_msecs(delta_ticks);
6215
6216 /*
6217 * Calculate what our threshold is given the current delta_msec.
6218 */
6219 thresh = sc->base_params->plcp_err_threshold * delta_msec;
6220
6221 DPRINTF(sc, IWN_DEBUG_STATE,
6222 "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n",
6223 __func__,
6224 delta_msec,
6225 delta_cck,
6226 delta_ofdm,
6227 delta_ht,
6228	    (delta_cck + delta_ofdm + delta_ht),
6229 thresh);
6230
6231 /*
6232 * If we need a retune, then schedule a single channel scan
6233 * to a channel that isn't the currently active one!
6234 *
6235 * The math from linux iwlwifi:
6236 *
6237 * if ((delta * 100 / msecs) > threshold)
6238 */
6239 if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) {
6240 DPRINTF(sc, IWN_DEBUG_ANY,
6241 "%s: PLCP error threshold raw (%d) comparison (%d) "
6242 "over limit (%d); retune!\n",
6243 __func__,
6244 (delta_cck + delta_ofdm + delta_ht),
6245 (delta_cck + delta_ofdm + delta_ht) * 100,
6246 thresh);
6247 }
6248}
6249
6250/*
6251 * Set STA mode power saving level (between 0 and 5).
6252 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
6253 */
6254static int
6255iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
6256{
6257 struct iwn_pmgt_cmd cmd;
6258 const struct iwn_pmgt *pmgt;
6259 uint32_t max, skip_dtim;
6260 uint32_t reg;
6261 int i;
6262
6263 DPRINTF(sc, IWN_DEBUG_PWRSAVE,
6264 "%s: dtim=%d, level=%d, async=%d\n",
6265 __func__,
6266 dtim,
6267 level,
6268 async);
6269
6270 /* Select which PS parameters to use. */
6271 if (dtim <= 2)
6272 pmgt = &iwn_pmgt[0][level];
6273 else if (dtim <= 10)
6274 pmgt = &iwn_pmgt[1][level];
6275 else
6276 pmgt = &iwn_pmgt[2][level];
6277
6278 memset(&cmd, 0, sizeof cmd);
6279 if (level != 0) /* not CAM */
6280 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
6281 if (level == 5)
6282 cmd.flags |= htole16(IWN_PS_FAST_PD);
6283 /* Retrieve PCIe Active State Power Management (ASPM). */
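	/*
	 * Offset 0x10 from the PCIe capability is the Link Control
	 * register; bit 0 is the L0s entry enable.
	 */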
6284 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
6285 if (!(reg & 0x1)) /* L0s Entry disabled. */
6286 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
6287 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
6288 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
6289
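	/*
	 * When sleeping over DTIM periods, each listen interval below is
	 * clamped to a multiple of the DTIM period: an intval[4] of -1
	 * seems to mean "no explicit limit", so dtim * (skip_dtim + 1) is
	 * used instead; otherwise max is rounded down to a multiple of dtim.
	 */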
6290 if (dtim == 0) {
6291 dtim = 1;
6292 skip_dtim = 0;
6293 } else
6294 skip_dtim = pmgt->skip_dtim;
6295 if (skip_dtim != 0) {
6296 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
6297 max = pmgt->intval[4];
6298 if (max == (uint32_t)-1)
6299 max = dtim * (skip_dtim + 1);
6300 else if (max > dtim)
6301 max = (max / dtim) * dtim;
6302 } else
6303 max = dtim;
6304 for (i = 0; i < 5; i++)
6305 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
6306
6307 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
6308 level);
6309 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
6310}
6311
6312static int
6313iwn_send_btcoex(struct iwn_softc *sc)
6314{
6315 struct iwn_bluetooth cmd;
6316
6317 memset(&cmd, 0, sizeof cmd);
6318 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
6319 cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
6320 cmd.max_kill = IWN_BT_MAX_KILL_DEF;
6321 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
6322 __func__);
6323 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
6324}
6325
6326static int
6327iwn_send_advanced_btcoex(struct iwn_softc *sc)
6328{
6329 static const uint32_t btcoex_3wire[12] = {
6330 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
6331 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
6332 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
6333 };
6334 struct iwn6000_btcoex_config btconfig;
6335 struct iwn2000_btcoex_config btconfig2k;
6336 struct iwn_btcoex_priotable btprio;
6337 struct iwn_btcoex_prot btprot;
6338 int error, i;
6339 uint8_t flags;
6340
6341 memset(&btconfig, 0, sizeof btconfig);
6342 memset(&btconfig2k, 0, sizeof btconfig2k);
6343
6344 flags = IWN_BT_FLAG_COEX6000_MODE_3W <<
6345	    IWN_BT_FLAG_COEX6000_MODE_SHIFT; /* as done in Linux kernel 3.2 */
6346
6347 if (sc->base_params->bt_sco_disable)
6348 flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE;
6349 else
6350 flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE;
6351
6352 flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION;
6353
6354	/* The default flags value works out to 145, matching the old value. */
6355
6356 /*
6357	 * The flags value has to be reviewed. Values must change if we
6358	 * wish to disable it.
6359 */
6360 if (sc->base_params->bt_session_2) {
6361 btconfig2k.flags = flags;
6362 btconfig2k.max_kill = 5;
6363 btconfig2k.bt3_t7_timer = 1;
6364 btconfig2k.kill_ack = htole32(0xffff0000);
6365 btconfig2k.kill_cts = htole32(0xffff0000);
6366 btconfig2k.sample_time = 2;
6367 btconfig2k.bt3_t2_timer = 0xc;
6368
6369 for (i = 0; i < 12; i++)
6370 btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]);
6371 btconfig2k.valid = htole16(0xff);
6372 btconfig2k.prio_boost = htole32(0xf0);
6373 DPRINTF(sc, IWN_DEBUG_RESET,
6374 "%s: configuring advanced bluetooth coexistence"
6375 " session 2, flags : 0x%x\n",
6376 __func__,
6377 flags);
6378 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k,
6379 sizeof(btconfig2k), 1);
6380 } else {
6381 btconfig.flags = flags;
6382 btconfig.max_kill = 5;
6383 btconfig.bt3_t7_timer = 1;
6384 btconfig.kill_ack = htole32(0xffff0000);
6385 btconfig.kill_cts = htole32(0xffff0000);
6386 btconfig.sample_time = 2;
6387 btconfig.bt3_t2_timer = 0xc;
6388
6389 for (i = 0; i < 12; i++)
6390 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
6391 btconfig.valid = htole16(0xff);
6392 btconfig.prio_boost = 0xf0;
6393 DPRINTF(sc, IWN_DEBUG_RESET,
6394 "%s: configuring advanced bluetooth coexistence,"
6395 " flags : 0x%x\n",
6396 __func__,
6397 flags);
6398 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
6399 sizeof(btconfig), 1);
6400 }
6401
6402 if (error != 0)
6403 return error;
6404
6405 memset(&btprio, 0, sizeof btprio);
6406 btprio.calib_init1 = 0x6;
6407 btprio.calib_init2 = 0x7;
6408 btprio.calib_periodic_low1 = 0x2;
6409 btprio.calib_periodic_low2 = 0x3;
6410 btprio.calib_periodic_high1 = 0x4;
6411 btprio.calib_periodic_high2 = 0x5;
6412 btprio.dtim = 0x6;
6413 btprio.scan52 = 0x8;
6414 btprio.scan24 = 0xa;
6415 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
6416 1);
6417 if (error != 0)
6418 return error;
6419
6420 /* Force BT state machine change. */
6421 memset(&btprot, 0, sizeof btprot);
6422 btprot.open = 1;
6423 btprot.type = 1;
6424 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
6425 if (error != 0)
6426 return error;
6427 btprot.open = 0;
6428 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
6429}
6430
6431static int
6432iwn5000_runtime_calib(struct iwn_softc *sc)
6433{
6434 struct iwn5000_calib_config cmd;
6435
6436 memset(&cmd, 0, sizeof cmd);
6437 cmd.ucode.once.enable = 0xffffffff;
6438 cmd.ucode.once.start = IWN5000_CALIB_DC;
6439 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6440 "%s: configuring runtime calibration\n", __func__);
6441 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
6442}
6443
6444static uint32_t
6445iwn_get_rxon_ht_flags(struct iwn_softc *sc, struct ieee80211_channel *c)
6446{
6447 struct ieee80211com *ic = &sc->sc_ic;
6448 uint32_t htflags = 0;
6449
6450 if (! IEEE80211_IS_CHAN_HT(c))
6451 return (0);
6452
6453 htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
6454
6455 if (IEEE80211_IS_CHAN_HT40(c)) {
6456 switch (ic->ic_curhtprotmode) {
6457 case IEEE80211_HTINFO_OPMODE_HT20PR:
6458 htflags |= IWN_RXON_HT_MODEPURE40;
6459 break;
6460 default:
6461 htflags |= IWN_RXON_HT_MODEMIXED;
6462 break;
6463 }
6464 }
6465 if (IEEE80211_IS_CHAN_HT40D(c))
6466 htflags |= IWN_RXON_HT_HT40MINUS;
6467
6468 return (htflags);
6469}
6470
6471static int
6472iwn_config(struct iwn_softc *sc)
6473{
6474 struct iwn_ops *ops = &sc->ops;
6475 struct ieee80211com *ic = &sc->sc_ic;
6476 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6477 const uint8_t *macaddr;
6478 uint32_t txmask;
6479 uint16_t rxchain;
6480 int error;
6481
6482 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6483
6484 if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
6485 && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
6486		device_printf(sc->sc_dev, "%s: temp_offset and temp_offsetv2 are"
6487		    " mutually exclusive. Review the NIC config file. Conf"
6488 " : 0x%08x Flags : 0x%08x \n", __func__,
6489 sc->base_params->calib_need,
6490 (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
6491 IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
6492 return (EINVAL);
6493 }
6494
6495	/* Compute temperature calib if needed. It will be sent by send calib. */
6496 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
6497 error = iwn5000_temp_offset_calib(sc);
6498 if (error != 0) {
6499 device_printf(sc->sc_dev,
6500 "%s: could not set temperature offset\n", __func__);
6501 return (error);
6502 }
6503 } else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
6504 error = iwn5000_temp_offset_calibv2(sc);
6505 if (error != 0) {
6506 device_printf(sc->sc_dev,
6507 "%s: could not compute temperature offset v2\n",
6508 __func__);
6509 return (error);
6510 }
6511 }
6512
6513 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6514 /* Configure runtime DC calibration. */
6515 error = iwn5000_runtime_calib(sc);
6516 if (error != 0) {
6517 device_printf(sc->sc_dev,
6518 "%s: could not configure runtime calibration\n",
6519 __func__);
6520 return error;
6521 }
6522 }
6523
6524 /* Configure valid TX chains for >=5000 Series. */
6525 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6526 IWN_UCODE_API(sc->ucode_rev) > 1) {
6527 txmask = htole32(sc->txchainmask);
6528 DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
6529 "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
6530 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
6531 sizeof txmask, 0);
6532 if (error != 0) {
6533 device_printf(sc->sc_dev,
6534 "%s: could not configure valid TX chains, "
6535 "error %d\n", __func__, error);
6536 return error;
6537 }
6538 }
6539
6540 /* Configure bluetooth coexistence. */
6541 error = 0;
6542
6543 /* Configure bluetooth coexistence if needed. */
6544 if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
6545 error = iwn_send_advanced_btcoex(sc);
6546 if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
6547 error = iwn_send_btcoex(sc);
6548
6549 if (error != 0) {
6550 device_printf(sc->sc_dev,
6551 "%s: could not configure bluetooth coexistence, error %d\n",
6552 __func__, error);
6553 return error;
6554 }
6555
6556 /* Set mode, channel, RX filter and enable RX. */
6557 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6558 memset(sc->rxon, 0, sizeof (struct iwn_rxon));
6559 macaddr = vap ? vap->iv_myaddr : ic->ic_macaddr;
6560 IEEE80211_ADDR_COPY(sc->rxon->myaddr, macaddr);
6561 IEEE80211_ADDR_COPY(sc->rxon->wlap, macaddr);
6562 sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
6563 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
6564 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
6565 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
6566 switch (ic->ic_opmode) {
6567 case IEEE80211_M_STA:
6568 sc->rxon->mode = IWN_MODE_STA;
6569 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
6570 break;
6571 case IEEE80211_M_MONITOR:
6572 sc->rxon->mode = IWN_MODE_MONITOR;
6573 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
6574 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
6575 break;
6576 default:
6577		/* Should not get here. */
6578 break;
6579 }
6580 sc->rxon->cck_mask = 0x0f; /* not yet negotiated */
6581 sc->rxon->ofdm_mask = 0xff; /* not yet negotiated */
6582 sc->rxon->ht_single_mask = 0xff;
6583 sc->rxon->ht_dual_mask = 0xff;
6584 sc->rxon->ht_triple_mask = 0xff;
6585 /*
6586 * In active association mode, ensure that
6587 * all the receive chains are enabled.
6588 *
6589 * Since we're not yet doing SMPS, don't allow the
6590 * number of idle RX chains to be less than the active
6591 * number.
6592 */
6593 rxchain =
6594 IWN_RXCHAIN_VALID(sc->rxchainmask) |
6595 IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
6596 IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
6597 sc->rxon->rxchain = htole16(rxchain);
6598 DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
6599 "%s: rxchainmask=0x%x, nrxchains=%d\n",
6600 __func__,
6601 sc->rxchainmask,
6602 sc->nrxchains);
6603
6604 sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));
6605
6606 DPRINTF(sc, IWN_DEBUG_RESET,
6607 "%s: setting configuration; flags=0x%08x\n",
6608 __func__, le32toh(sc->rxon->flags));
6609 if (sc->sc_is_scanning)
6610 device_printf(sc->sc_dev,
6611 "%s: is_scanning set, before RXON\n",
6612 __func__);
6613 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
6614 if (error != 0) {
6615 device_printf(sc->sc_dev, "%s: RXON command failed\n",
6616 __func__);
6617 return error;
6618 }
6619
6620 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
6621 device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
6622 __func__);
6623 return error;
6624 }
6625
6626 /* Configuration has changed, set TX power accordingly. */
6627 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
6628 device_printf(sc->sc_dev, "%s: could not set TX power\n",
6629 __func__);
6630 return error;
6631 }
6632
6633 if ((error = iwn_set_critical_temp(sc)) != 0) {
6634 device_printf(sc->sc_dev,
6635 "%s: could not set critical temperature\n", __func__);
6636 return error;
6637 }
6638
6639 /* Set power saving level to CAM during initialization. */
6640 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
6641 device_printf(sc->sc_dev,
6642 "%s: could not set power saving level\n", __func__);
6643 return error;
6644 }
6645
6646 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6647
6648 return 0;
6649}
6650
6651static uint16_t
6652iwn_get_active_dwell_time(struct iwn_softc *sc,
6653 struct ieee80211_channel *c, uint8_t n_probes)
6654{
6655 /* No channel? Default to 2GHz settings */
6656 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
6657 return (IWN_ACTIVE_DWELL_TIME_2GHZ +
6658 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
6659 }
6660
6661 /* 5GHz dwell time */
6662 return (IWN_ACTIVE_DWELL_TIME_5GHZ +
6663 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
6664}
6665
6666/*
6667 * Limit the total dwell time to 85% of the beacon interval.
6668 *
6669 * Returns the dwell time in milliseconds.
6670 */
6671static uint16_t
6672iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
6673{
6674 struct ieee80211com *ic = &sc->sc_ic;
6675 struct ieee80211vap *vap = NULL;
6676 int bintval = 0;
6677
6678	/* bintval is in TU (1.024 ms) */
6679 if (! TAILQ_EMPTY(&ic->ic_vaps)) {
6680 vap = TAILQ_FIRST(&ic->ic_vaps);
6681 bintval = vap->iv_bss->ni_intval;
6682 }
6683
6684 /*
6685 * If it's non-zero, we should calculate the minimum of
6686 * it and the DWELL_BASE.
6687 *
6688 * XXX Yes, the math should take into account that bintval
6689	 * is 1.024 ms, not 1 ms.
6690 */
6691 if (bintval > 0) {
6692 DPRINTF(sc, IWN_DEBUG_SCAN,
6693 "%s: bintval=%d\n",
6694 __func__,
6695 bintval);
6696 return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
6697 }
6698
6699 /* No association context? Default */
6700 return (IWN_PASSIVE_DWELL_BASE);
6701}
6702
6703static uint16_t
6704iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
6705{
6706 uint16_t passive;
6707
6708 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
6709 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
6710 } else {
6711 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
6712 }
6713
6714 /* Clamp to the beacon interval if we're associated */
6715 return (iwn_limit_dwell(sc, passive));
6716}
6717
6718static int
6719iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap,
6720 struct ieee80211_scan_state *ss, struct ieee80211_channel *c)
6721{
6722 struct ieee80211com *ic = &sc->sc_ic;
6723 struct ieee80211_node *ni = vap->iv_bss;
6724 struct iwn_scan_hdr *hdr;
6725 struct iwn_cmd_data *tx;
6726 struct iwn_scan_essid *essid;
6727 struct iwn_scan_chan *chan;
6728 struct ieee80211_frame *wh;
6729 struct ieee80211_rateset *rs;
6730 uint8_t *buf, *frm;
6731 uint16_t rxchain;
6732 uint8_t txant;
6733 int buflen, error;
6734 int is_active;
6735 uint16_t dwell_active, dwell_passive;
6736 uint32_t extra, scan_service_time;
6737
6738 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6739
6740 /*
6741 * We are absolutely not allowed to send a scan command when another
6742 * scan command is pending.
6743 */
6744 if (sc->sc_is_scanning) {
6745 device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
6746 __func__);
6747 return (EAGAIN);
6748 }
6749
6750 /* Assign the scan channel */
6751 c = ic->ic_curchan;
6752
6753 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6754 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
6755 if (buf == NULL) {
6756 device_printf(sc->sc_dev,
6757 "%s: could not allocate buffer for scan command\n",
6758 __func__);
6759 return ENOMEM;
6760 }
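	/*
	 * The scan command buffer is laid out as: scan header, TX command,
	 * 20 ESSID slots, the probe request frame, then one iwn_scan_chan
	 * entry per channel (a single entry here).
	 */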
6761 hdr = (struct iwn_scan_hdr *)buf;
6762 /*
6763 * Move to the next channel if no frames are received within 10ms
6764 * after sending the probe request.
6765 */
6766 hdr->quiet_time = htole16(10); /* timeout in milliseconds */
6767 hdr->quiet_threshold = htole16(1); /* min # of packets */
6768 /*
6769 * Max needs to be greater than active and passive and quiet!
6770 * It's also in microseconds!
6771 */
6772 hdr->max_svc = htole32(250 * 1024);
6773
6774 /*
6775 * Reset scan: interval=100
6776	 * Normal scan: interval=beacon interval
6777 * suspend_time: 100 (TU)
6778 *
6779 */
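	/*
	 * pause_svc appears to pack the suspend time as
	 * (full beacon intervals << 22) | (remainder in microseconds);
	 * the hardcoded value below is 4 intervals plus 100 TU.
	 */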
6780 extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22;
6781 //scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024);
6782 scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */
6783 hdr->pause_svc = htole32(scan_service_time);
6784
6785 /* Select antennas for scanning. */
6786 rxchain =
6787 IWN_RXCHAIN_VALID(sc->rxchainmask) |
6788 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
6789 IWN_RXCHAIN_DRIVER_FORCE;
6790 if (IEEE80211_IS_CHAN_A(c) &&
6791 sc->hw_type == IWN_HW_REV_TYPE_4965) {
6792 /* Ant A must be avoided in 5GHz because of an HW bug. */
6793 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
6794 } else /* Use all available RX antennas. */
6795 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
6796 hdr->rxchain = htole16(rxchain);
6797 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
6798
6799 tx = (struct iwn_cmd_data *)(hdr + 1);
6800 tx->flags = htole32(IWN_TX_AUTO_SEQ);
6801 tx->id = sc->broadcast_id;
6802 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
6803
6804 if (IEEE80211_IS_CHAN_5GHZ(c)) {
6805 /* Send probe requests at 6Mbps. */
6806 tx->rate = htole32(0xd);
6807 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6808 } else {
6809 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
6810 if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
6811 sc->rxon->associd && sc->rxon->chan > 14)
6812 tx->rate = htole32(0xd);
6813 else {
6814 /* Send probe requests at 1Mbps. */
6815 tx->rate = htole32(10 | IWN_RFLAG_CCK);
6816 }
6817 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6818 }
6819 /* Use the first valid TX antenna. */
6820 txant = IWN_LSB(sc->txchainmask);
6821 tx->rate |= htole32(IWN_RFLAG_ANT(txant));
6822
6823 /*
6824 * Only do active scanning if we're announcing a probe request
6825 * for a given SSID (or more, if we ever add it to the driver.)
6826 */
6827 is_active = 0;
6828
6829 /*
6830 * If we're scanning for a specific SSID, add it to the command.
6831 *
6832 * XXX maybe look at adding support for scanning multiple SSIDs?
6833 */
6834 essid = (struct iwn_scan_essid *)(tx + 1);
6835 if (ss != NULL) {
6836 if (ss->ss_ssid[0].len != 0) {
6837 essid[0].id = IEEE80211_ELEMID_SSID;
6838 essid[0].len = ss->ss_ssid[0].len;
6839 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
6840 }
6841
6842 DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n",
6843 __func__,
6844 ss->ss_ssid[0].len,
6845 ss->ss_ssid[0].len,
6846 ss->ss_ssid[0].ssid);
6847
6848 if (ss->ss_nssid > 0)
6849 is_active = 1;
6850 }
6851
6852 /*
6853 * Build a probe request frame. Most of the following code is a
6854 * copy & paste of what is done in net80211.
6855 */
6856 wh = (struct ieee80211_frame *)(essid + 20);
6857 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6858 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6859 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6860 IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_ifp->if_broadcastaddr);
6861 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(vap->iv_ifp));
6862 IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_ifp->if_broadcastaddr);
6863 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6864 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6865
6866 frm = (uint8_t *)(wh + 1);
6867 frm = ieee80211_add_ssid(frm, NULL, 0);
6868 frm = ieee80211_add_rates(frm, rs);
6869 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6870 frm = ieee80211_add_xrates(frm, rs);
6871 if (ic->ic_htcaps & IEEE80211_HTC_HT)
6872 frm = ieee80211_add_htcap(frm, ni);
6873
6874 /* Set length of probe request. */
6875 tx->len = htole16(frm - (uint8_t *)wh);
6876
6877 /*
6878 * If active scanning is requested but a certain channel is
6879 * marked passive, we can do active scanning if we detect
6880 * transmissions.
6881 *
6882 * There is an issue with some firmware versions that triggers
6883 * a sysassert on a "good CRC threshold" of zero (== disabled),
6884 * on a radar channel even though this means that we should NOT
6885 * send probes.
6886 *
6887 * The "good CRC threshold" is the number of frames that we
6888 * need to receive during our dwell time on a channel before
6889 * sending out probes -- setting this to a huge value will
6890 * mean we never reach it, but at the same time work around
6891 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
6892 * here instead of IWL_GOOD_CRC_TH_DISABLED.
6893 *
6894 * This was fixed in later versions along with some other
6895 * scan changes, and the threshold behaves as a flag in those
6896 * versions.
6897 */
6898
6899 /*
6900 * If we're doing active scanning, set the crc_threshold
6901	 * to a suitable value. This differs for active versus
6902	 * passive scanning depending upon the channel flags; the
6903 * firmware will obey that particular check for us.
6904 */
6905 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
6906 hdr->crc_threshold = is_active ?
6907 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
6908 else
6909 hdr->crc_threshold = is_active ?
6910 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
6911
6912 chan = (struct iwn_scan_chan *)frm;
6913 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
6914 chan->flags = 0;
6915 if (ss->ss_nssid > 0)
6916 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
6917 chan->dsp_gain = 0x6e;
6918
6919 /*
6920 * Set the passive/active flag depending upon the channel mode.
6921 * XXX TODO: take the is_active flag into account as well?
6922 */
6923 if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
6924 chan->flags |= htole32(IWN_CHAN_PASSIVE);
6925 else
6926 chan->flags |= htole32(IWN_CHAN_ACTIVE);
6927
6928 /*
6929 * Calculate the active/passive dwell times.
6930 */
6931
6932 dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid);
6933 dwell_passive = iwn_get_passive_dwell_time(sc, c);
6934
6935 /* Make sure they're valid */
6936 if (dwell_passive <= dwell_active)
6937 dwell_passive = dwell_active + 1;
6938
6939 chan->active = htole16(dwell_active);
6940 chan->passive = htole16(dwell_passive);
6941
6942 if (IEEE80211_IS_CHAN_5GHZ(c))
6943 chan->rf_gain = 0x3b;
6944 else
6945 chan->rf_gain = 0x28;
6946
6947 DPRINTF(sc, IWN_DEBUG_STATE,
6948 "%s: chan %u flags 0x%x rf_gain 0x%x "
6949 "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x "
6950 "isactive=%d numssid=%d\n", __func__,
6951 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
6952 dwell_active, dwell_passive, scan_service_time,
6953 hdr->crc_threshold, is_active, ss->ss_nssid);
6954
6955 hdr->nchan++;
6956 chan++;
6957 buflen = (uint8_t *)chan - buf;
6958 hdr->len = htole16(buflen);
6959
6960 if (sc->sc_is_scanning) {
6961 device_printf(sc->sc_dev,
6962 "%s: called with is_scanning set!\n",
6963 __func__);
6964 }
6965 sc->sc_is_scanning = 1;
6966
6967 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
6968 hdr->nchan);
6969 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
6970 free(buf, M_DEVBUF);
6971
6972 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6973
6974 return error;
6975}
6976
6977static int
6978iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
6979{
6980 struct iwn_ops *ops = &sc->ops;
6981 struct ieee80211com *ic = &sc->sc_ic;
6982 struct ieee80211_node *ni = vap->iv_bss;
6983 int error;
6984
6985 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6986
6987 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6988 /* Update adapter configuration. */
6989 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
6990 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
6991 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
6992 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
6993 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
6994 if (ic->ic_flags & IEEE80211_F_SHSLOT)
6995 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
6996 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
6997 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
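	/*
	 * The rate masks below appear to be bitmaps over the standard rate
	 * tables: ofdm_mask 0x15 selects the mandatory 6/12/24 Mbps rates
	 * and cck_mask 0x03 selects 1/2 Mbps.
	 */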
6998 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
6999 sc->rxon->cck_mask = 0;
7000 sc->rxon->ofdm_mask = 0x15;
7001 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
7002 sc->rxon->cck_mask = 0x03;
7003 sc->rxon->ofdm_mask = 0;
7004 } else {
7005 /* Assume 802.11b/g. */
7006 sc->rxon->cck_mask = 0x03;
7007 sc->rxon->ofdm_mask = 0x15;
7008 }
7009
7010 /* try HT */
7011 sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));
7012
7013 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
7014 sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
7015 sc->rxon->ofdm_mask);
7016 if (sc->sc_is_scanning)
7017 device_printf(sc->sc_dev,
7018 "%s: is_scanning set, before RXON\n",
7019 __func__);
7020 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
7021 if (error != 0) {
7022 device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
7023 __func__, error);
7024 return error;
7025 }
7026
7027 /* Configuration has changed, set TX power accordingly. */
7028 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
7029 device_printf(sc->sc_dev,
7030 "%s: could not set TX power, error %d\n", __func__, error);
7031 return error;
7032 }
7033 /*
7034 * Reconfiguring RXON clears the firmware nodes table so we must
7035 * add the broadcast node again.
7036 */
7037 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
7038 device_printf(sc->sc_dev,
7039 "%s: could not add broadcast node, error %d\n", __func__,
7040 error);
7041 return error;
7042 }
7043
7044 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7045
7046 return 0;
7047}
7048
7049static int
7050iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
7051{
7052 struct iwn_ops *ops = &sc->ops;
7053 struct ieee80211com *ic = &sc->sc_ic;
7054 struct ieee80211_node *ni = vap->iv_bss;
7055 struct iwn_node_info node;
7056 int error;
7057
7058 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7059
7060 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
7061 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7062 /* Link LED blinks while monitoring. */
7063 iwn_set_led(sc, IWN_LED_LINK, 5, 5);
7064 return 0;
7065 }
7066 if ((error = iwn_set_timing(sc, ni)) != 0) {
7067 device_printf(sc->sc_dev,
7068 "%s: could not set timing, error %d\n", __func__, error);
7069 return error;
7070 }
7071
7072 /* Update adapter configuration. */
7073 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
7074 sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
7075 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
7076 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
7077 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
7078 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
7079 if (ic->ic_flags & IEEE80211_F_SHSLOT)
7080 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
7081 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7082 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
7083 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
7084 sc->rxon->cck_mask = 0;
7085 sc->rxon->ofdm_mask = 0x15;
7086 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
7087 sc->rxon->cck_mask = 0x03;
7088 sc->rxon->ofdm_mask = 0;
7089 } else {
7090 /* Assume 802.11b/g. */
7091 sc->rxon->cck_mask = 0x0f;
7092 sc->rxon->ofdm_mask = 0x15;
7093 }
7094 /* try HT */
7095 sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ni->ni_chan));
7096 sc->rxon->filter |= htole32(IWN_FILTER_BSS);
7097 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x, curhtprotmode=%d\n",
7098 sc->rxon->chan, le32toh(sc->rxon->flags), ic->ic_curhtprotmode);
7099 if (sc->sc_is_scanning)
7100 device_printf(sc->sc_dev,
7101 "%s: is_scanning set, before RXON\n",
7102 __func__);
7103 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
7104 if (error != 0) {
7105 device_printf(sc->sc_dev,
7106 "%s: could not update configuration, error %d\n", __func__,
7107 error);
7108 return error;
7109 }
7110
7111 /* Configuration has changed, set TX power accordingly. */
7112 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
7113 device_printf(sc->sc_dev,
7114 "%s: could not set TX power, error %d\n", __func__, error);
7115 return error;
7116 }
7117
7118 /* Fake a join to initialize the TX rate. */
7119 ((struct iwn_node *)ni)->id = IWN_ID_BSS;
7120 iwn_newassoc(ni, 1);
7121
7122 /* Add BSS node. */
7123 memset(&node, 0, sizeof node);
7124 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
7125 node.id = IWN_ID_BSS;
7126 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
7127 switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
7128 case IEEE80211_HTCAP_SMPS_ENA:
7129 node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
7130 break;
7131 case IEEE80211_HTCAP_SMPS_DYNAMIC:
7132 node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
7133 break;
7134 }
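		/*
		 * A size factor of 3 appears to correspond to a 64 kB maximum
		 * A-MPDU and a density of 5 to a 4 us minimum MPDU spacing.
		 */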
7135 node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
7136 IWN_AMDPU_DENSITY(5)); /* 4us */
7137 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
7138 node.htflags |= htole32(IWN_NODE_HT40);
7139 }
7140 DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
7141 error = ops->add_node(sc, &node, 1);
7142 if (error != 0) {
7143 device_printf(sc->sc_dev,
7144 "%s: could not add BSS node, error %d\n", __func__, error);
7145 return error;
7146 }
7147 DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
7148 __func__, node.id);
7149 if ((error = iwn_set_link_quality(sc, ni)) != 0) {
7150 device_printf(sc->sc_dev,
7151 "%s: could not setup link quality for node %d, error %d\n",
7152 __func__, node.id, error);
7153 return error;
7154 }
7155
7156 if ((error = iwn_init_sensitivity(sc)) != 0) {
7157 device_printf(sc->sc_dev,
7158 "%s: could not set sensitivity, error %d\n", __func__,
7159 error);
7160 return error;
7161 }
7162 /* Start periodic calibration timer. */
7163 sc->calib.state = IWN_CALIB_STATE_ASSOC;
7164 sc->calib_cnt = 0;
7165 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
7166 sc);
7167
7168 /* Link LED always on while associated. */
7169 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
7170
7171 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7172
7173 return 0;
7174}
7175
7176/*
7177 * This function is called by upper layer when an ADDBA request is received
7178 * from another STA and before the ADDBA response is sent.
7179 */
7180static int
7181iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
7182 int baparamset, int batimeout, int baseqctl)
7183{
7184#define MS(_v, _f) (((_v) & _f) >> _f##_S)
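/* MS() extracts a bit field: mask with _f, then shift by the matching _f_S. */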
7185 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7186 struct iwn_ops *ops = &sc->ops;
7187 struct iwn_node *wn = (void *)ni;
7188 struct iwn_node_info node;
7189 uint16_t ssn;
7190 uint8_t tid;
7191 int error;
7192
7193 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7194
7195 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
7196 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
7197
7198 memset(&node, 0, sizeof node);
7199 node.id = wn->id;
7200 node.control = IWN_NODE_UPDATE;
7201 node.flags = IWN_FLAG_SET_ADDBA;
7202 node.addba_tid = tid;
7203 node.addba_ssn = htole16(ssn);
7204 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
7205 wn->id, tid, ssn);
7206 error = ops->add_node(sc, &node, 1);
7207 if (error != 0)
7208 return error;
7209 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
7210#undef MS
7211}
7212
7213/*
7214 * This function is called by upper layer on teardown of an HT-immediate
7215 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
7216 */
7217static void
7218iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
7219{
7220 struct ieee80211com *ic = ni->ni_ic;
7221 struct iwn_softc *sc = ic->ic_softc;
7222 struct iwn_ops *ops = &sc->ops;
7223 struct iwn_node *wn = (void *)ni;
7224 struct iwn_node_info node;
7225 uint8_t tid;
7226
7227 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7228
7229 /* XXX: tid as an argument */
7230 for (tid = 0; tid < WME_NUM_TID; tid++) {
7231 if (&ni->ni_rx_ampdu[tid] == rap)
7232 break;
7233 }
7234
7235 memset(&node, 0, sizeof node);
7236 node.id = wn->id;
7237 node.control = IWN_NODE_UPDATE;
7238 node.flags = IWN_FLAG_SET_DELBA;
7239 node.delba_tid = tid;
7240 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
7241 (void)ops->add_node(sc, &node, 1);
7242 sc->sc_ampdu_rx_stop(ni, rap);
7243}
7244
7245static int
7246iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
7247 int dialogtoken, int baparamset, int batimeout)
7248{
7249 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7250 int qid;
7251
7252 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7253
7254 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
7255 if (sc->qid2tap[qid] == NULL)
7256 break;
7257 }
7258 if (qid == sc->ntxqs) {
7259		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: no free aggregation queue\n",
7260 __func__);
7261 return 0;
7262 }
7263 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
7264 if (tap->txa_private == NULL) {
7265 device_printf(sc->sc_dev,
7266 "%s: failed to alloc TX aggregation structure\n", __func__);
7267 return 0;
7268 }
7269 sc->qid2tap[qid] = tap;
7270 *(int *)tap->txa_private = qid;
7271 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
7272 batimeout);
7273}
7274
7275static int
7276iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
7277 int code, int baparamset, int batimeout)
7278{
7279 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7280 int qid = *(int *)tap->txa_private;
7281 uint8_t tid = tap->txa_tid;
7282 int ret;
7283
7284 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7285
7286 if (code == IEEE80211_STATUS_SUCCESS) {
7287 ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
7288 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
7289 if (ret != 1)
7290 return ret;
7291 } else {
7292 sc->qid2tap[qid] = NULL;
7293 free(tap->txa_private, M_DEVBUF);
7294 tap->txa_private = NULL;
7295 }
7296 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
7297}
7298
7299/*
7300 * This function is called by upper layer when an ADDBA response is received
7301 * from another STA.
7302 */
7303static int
7304iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
7305 uint8_t tid)
7306{
7307 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
7308 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7309 struct iwn_ops *ops = &sc->ops;
7310 struct iwn_node *wn = (void *)ni;
7311 struct iwn_node_info node;
7312 int error, qid;
7313
7314 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7315
7316 /* Enable TX for the specified RA/TID. */
7317 wn->disable_tid &= ~(1 << tid);
7318 memset(&node, 0, sizeof node);
7319 node.id = wn->id;
7320 node.control = IWN_NODE_UPDATE;
7321 node.flags = IWN_FLAG_SET_DISABLE_TID;
7322 node.disable_tid = htole16(wn->disable_tid);
7323 error = ops->add_node(sc, &node, 1);
7324 if (error != 0)
7325 return 0;
7326
7327 if ((error = iwn_nic_lock(sc)) != 0)
7328 return 0;
7329 qid = *(int *)tap->txa_private;
7330 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
7331 __func__, wn->id, tid, tap->txa_start, qid);
7332 ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
7333 iwn_nic_unlock(sc);
7334
7335 iwn_set_link_quality(sc, ni);
7336 return 1;
7337}
7338
7339static void
7340iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
7341{
7342 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7343 struct iwn_ops *ops = &sc->ops;
7344 uint8_t tid = tap->txa_tid;
7345 int qid;
7346
7347 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7348
7349 sc->sc_addba_stop(ni, tap);
7350
7351 if (tap->txa_private == NULL)
7352 return;
7353
7354 qid = *(int *)tap->txa_private;
7355 if (sc->txq[qid].queued != 0)
7356 return;
7357 if (iwn_nic_lock(sc) != 0)
7358 return;
7359 ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
7360 iwn_nic_unlock(sc);
7361 sc->qid2tap[qid] = NULL;
7362 free(tap->txa_private, M_DEVBUF);
7363 tap->txa_private = NULL;
7364}
7365
7366static void
7367iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
7368 int qid, uint8_t tid, uint16_t ssn)
7369{
7370 struct iwn_node *wn = (void *)ni;
7371
7372 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7373
7374 /* Stop TX scheduler while we're changing its configuration. */
7375 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7376 IWN4965_TXQ_STATUS_CHGACT);
7377
7378 /* Assign RA/TID translation to the queue. */
7379 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
7380 wn->id << 4 | tid);
7381
7382 /* Enable chain-building mode for the queue. */
7383 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
7384
7385 /* Set starting sequence number from the ADDBA request. */
7386 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
7387 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7388 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
7389
7390 /* Set scheduler window size. */
7391 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
7392 IWN_SCHED_WINSZ);
7393 /* Set scheduler frame limit. */
7394 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
7395 IWN_SCHED_LIMIT << 16);
7396
7397 /* Enable interrupts for the queue. */
7398 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
7399
7400 /* Mark the queue as active. */
7401 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7402 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
7403 iwn_tid2fifo[tid] << 1);
7404}
7405
7406static void
7407iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
7408{
7409 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7410
7411 /* Stop TX scheduler while we're changing its configuration. */
7412 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7413 IWN4965_TXQ_STATUS_CHGACT);
7414
7415 /* Set starting sequence number from the ADDBA request. */
7416 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7417 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
7418
7419 /* Disable interrupts for the queue. */
7420 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
7421
7422 /* Mark the queue as inactive. */
7423 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7424 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
7425}
7426
7427static void
7428iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
7429 int qid, uint8_t tid, uint16_t ssn)
7430{
7431	struct iwn_node *wn = (void *)ni;
7432
7433	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7434
7435 /* Stop TX scheduler while we're changing its configuration. */
7436 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7437 IWN5000_TXQ_STATUS_CHGACT);
7438
7439 /* Assign RA/TID translation to the queue. */
7440 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
7441 wn->id << 4 | tid);
7442
7443 /* Enable chain-building mode for the queue. */
7444 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
7445
7446 /* Enable aggregation for the queue. */
7447 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
7448
7449 /* Set starting sequence number from the ADDBA request. */
7450 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
7451 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7452 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
7453
7454 /* Set scheduler window size and frame limit. */
7455 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
7456 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
7457
7458 /* Enable interrupts for the queue. */
7459 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
7460
7461 /* Mark the queue as active. */
7462 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7463 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
7464}
7465
7466static void
7467iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
7468{
7469 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7470
7471 /* Stop TX scheduler while we're changing its configuration. */
7472 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7473 IWN5000_TXQ_STATUS_CHGACT);
7474
7475 /* Disable aggregation for the queue. */
7476 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
7477
7478 /* Set starting sequence number from the ADDBA request. */
7479 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7480 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
7481
7482 /* Disable interrupts for the queue. */
7483 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
7484
7485 /* Mark the queue as inactive. */
7486 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7487 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
7488}
7489
7490/*
7491 * Query calibration tables from the initialization firmware. We do this
7492 * only once at first boot. Called from a process context.
7493 */
7494static int
7495iwn5000_query_calibration(struct iwn_softc *sc)
7496{
7497 struct iwn5000_calib_config cmd;
7498 int error;
7499
7500 memset(&cmd, 0, sizeof cmd);
7501 cmd.ucode.once.enable = htole32(0xffffffff);
7502 cmd.ucode.once.start = htole32(0xffffffff);
7503 cmd.ucode.once.send = htole32(0xffffffff);
7504 cmd.ucode.flags = htole32(0xffffffff);
7505 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
7506 __func__);
7507 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
7508 if (error != 0)
7509 return error;
7510
7511 /* Wait at most two seconds for calibration to complete. */
7512 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
7513 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
7514 return error;
7515}
7516
7517/*
7518 * Send calibration results to the runtime firmware. These results were
7519 * obtained on first boot from the initialization firmware.
7520 */
7521static int
7522iwn5000_send_calibration(struct iwn_softc *sc)
7523{
7524 int idx, error;
7525
7526 for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
7527 if (!(sc->base_params->calib_need & (1<<idx))) {
7528 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7529			    "No need for calib %d\n",
7530 idx);
7531 continue; /* no need for this calib */
7532 }
7533 if (sc->calibcmd[idx].buf == NULL) {
7534 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7535 "Need calib idx : %d but no available data\n",
7536 idx);
7537 continue;
7538 }
7539
7540 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7541 "send calibration result idx=%d len=%d\n", idx,
7542 sc->calibcmd[idx].len);
7543 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
7544 sc->calibcmd[idx].len, 0);
7545 if (error != 0) {
7546 device_printf(sc->sc_dev,
7547 "%s: could not send calibration result, error %d\n",
7548 __func__, error);
7549 return error;
7550 }
7551 }
7552 return 0;
7553}
7554
7555static int
7556iwn5000_send_wimax_coex(struct iwn_softc *sc)
7557{
7558 struct iwn5000_wimax_coex wimax;
7559
7560#if 0
7561 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
7562 /* Enable WiMAX coexistence for combo adapters. */
7563 wimax.flags =
7564 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
7565 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
7566 IWN_WIMAX_COEX_STA_TABLE_VALID |
7567 IWN_WIMAX_COEX_ENABLE;
7568 memcpy(wimax.events, iwn6050_wimax_events,
7569 sizeof iwn6050_wimax_events);
7570 } else
7571#endif
7572 {
7573 /* Disable WiMAX coexistence. */
7574 wimax.flags = 0;
7575 memset(wimax.events, 0, sizeof wimax.events);
7576 }
7577 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
7578 __func__);
7579 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
7580}
7581
7582static int
7583iwn5000_crystal_calib(struct iwn_softc *sc)
7584{
7585 struct iwn5000_phy_calib_crystal cmd;
7586
7587 memset(&cmd, 0, sizeof cmd);
7588 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
7589 cmd.ngroups = 1;
7590 cmd.isvalid = 1;
7591 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
7592 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
7593 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
7594 cmd.cap_pin[0], cmd.cap_pin[1]);
7595 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7596}
7597
7598static int
7599iwn5000_temp_offset_calib(struct iwn_softc *sc)
7600{
7601 struct iwn5000_phy_calib_temp_offset cmd;
7602
7603 memset(&cmd, 0, sizeof cmd);
7604 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
7605 cmd.ngroups = 1;
7606 cmd.isvalid = 1;
7607 if (sc->eeprom_temp != 0)
7608 cmd.offset = htole16(sc->eeprom_temp);
7609 else
7610 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
7611 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
7612 le16toh(cmd.offset));
7613 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7614}
7615
7616static int
7617iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
7618{
7619 struct iwn5000_phy_calib_temp_offsetv2 cmd;
7620
7621 memset(&cmd, 0, sizeof cmd);
7622 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
7623 cmd.ngroups = 1;
7624 cmd.isvalid = 1;
7625 if (sc->eeprom_temp != 0) {
7626 cmd.offset_low = htole16(sc->eeprom_temp);
7627 cmd.offset_high = htole16(sc->eeprom_temp_high);
7628 } else {
7629 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
7630 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
7631 }
7632 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
7633
7634 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7635 "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
7636 le16toh(cmd.offset_low),
7637 le16toh(cmd.offset_high),
7638 le16toh(cmd.burnt_voltage_ref));
7639
7640 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7641}
7642
7643/*
7644 * This function is called after the runtime firmware notifies us of its
7645 * readiness (called in a process context).
7646 */
7647static int
7648iwn4965_post_alive(struct iwn_softc *sc)
7649{
7650 int error, qid;
7651
7652 if ((error = iwn_nic_lock(sc)) != 0)
7653 return error;
7654
7655 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7656
7657 /* Clear TX scheduler state in SRAM. */
7658 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
7659 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
7660 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
7661
7662 /* Set physical address of TX scheduler rings (1KB aligned). */
7663 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
7664
7665 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
7666
7667 /* Disable chain mode for all our 16 queues. */
7668 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
7669
7670 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
7671 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
7672 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
7673
7674 /* Set scheduler window size. */
7675 iwn_mem_write(sc, sc->sched_base +
7676 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
7677 /* Set scheduler frame limit. */
7678 iwn_mem_write(sc, sc->sched_base +
7679 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
7680 IWN_SCHED_LIMIT << 16);
7681 }
7682
7683 /* Enable interrupts for all our 16 queues. */
7684 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
7685 /* Identify TX FIFO rings (0-7). */
7686 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
7687
7688 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
7689 for (qid = 0; qid < 7; qid++) {
7690 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
7691 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7692 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
7693 }
7694 iwn_nic_unlock(sc);
7695 return 0;
7696}
7697
7698/*
7699 * This function is called after the initialization or runtime firmware
7700 * notifies us of its readiness (called in a process context).
7701 */
7702static int
7703iwn5000_post_alive(struct iwn_softc *sc)
7704{
7705 int error, qid;
7706
7707 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7708
7709 /* Switch to using ICT interrupt mode. */
7710 iwn5000_ict_reset(sc);
7711
7712 if ((error = iwn_nic_lock(sc)) != 0){
7713 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
7714 return error;
7715 }
7716
7717 /* Clear TX scheduler state in SRAM. */
7718 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
7719 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
7720 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
7721
7722 /* Set physical address of TX scheduler rings (1KB aligned). */
7723 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
7724
7725 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
7726
7727 /* Enable chain mode for all queues, except command queue. */
7728 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
7729 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf);
7730 else
7731 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
7732 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
7733
7734 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
7735 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
7736 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
7737
7738 iwn_mem_write(sc, sc->sched_base +
7739 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
7740 /* Set scheduler window size and frame limit. */
7741 iwn_mem_write(sc, sc->sched_base +
7742 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
7743 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
7744 }
7745
7746 /* Enable interrupts for all our 20 queues. */
7747 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
7748 /* Identify TX FIFO rings (0-7). */
7749 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
7750
7751 	/* Mark TX rings as active. */
7752 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) {
7753 		/* 11 TX rings are used when PAN is supported. */
7754 for (qid = 0; qid < 11; qid++) {
7755 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 };
7756 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7757 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
7758 }
7759 } else {
7760 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
7761 for (qid = 0; qid < 7; qid++) {
7762 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
7763 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7764 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
7765 }
7766 }
7767 iwn_nic_unlock(sc);
7768
7769 /* Configure WiMAX coexistence for combo adapters. */
7770 error = iwn5000_send_wimax_coex(sc);
7771 if (error != 0) {
7772 device_printf(sc->sc_dev,
7773 "%s: could not configure WiMAX coexistence, error %d\n",
7774 __func__, error);
7775 return error;
7776 }
7777 if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
7778 /* Perform crystal calibration. */
7779 error = iwn5000_crystal_calib(sc);
7780 if (error != 0) {
7781 device_printf(sc->sc_dev,
7782 "%s: crystal calibration failed, error %d\n",
7783 __func__, error);
7784 return error;
7785 }
7786 }
7787 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
7788 /* Query calibration from the initialization firmware. */
7789 if ((error = iwn5000_query_calibration(sc)) != 0) {
7790 device_printf(sc->sc_dev,
7791 "%s: could not query calibration, error %d\n",
7792 __func__, error);
7793 return error;
7794 }
7795 /*
7796 * We have the calibration results now, reboot with the
7797 * runtime firmware (call ourselves recursively!)
7798 */
7799 iwn_hw_stop(sc);
7800 error = iwn_hw_init(sc);
7801 } else {
7802 /* Send calibration results to runtime firmware. */
7803 error = iwn5000_send_calibration(sc);
7804 }
7805
7806 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7807
7808 return error;
7809}
7810
7811/*
7812 * The firmware boot code is small and is intended to be copied directly into
7813 * the NIC internal memory (no DMA transfer).
7814 */
7815static int
7816iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
7817{
7818 int error, ntries;
7819
7820 size /= sizeof (uint32_t);
7821
7822 if ((error = iwn_nic_lock(sc)) != 0)
7823 return error;
7824
7825 /* Copy microcode image into NIC memory. */
7826 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
7827 (const uint32_t *)ucode, size);
7828
7829 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
7830 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
7831 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
7832
7833 /* Start boot load now. */
7834 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
7835
7836 /* Wait for transfer to complete. */
7837 for (ntries = 0; ntries < 1000; ntries++) {
7838 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
7839 IWN_BSM_WR_CTRL_START))
7840 break;
7841 DELAY(10);
7842 }
7843 if (ntries == 1000) {
7844 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
7845 __func__);
7846 iwn_nic_unlock(sc);
7847 return ETIMEDOUT;
7848 }
7849
7850 /* Enable boot after power up. */
7851 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
7852
7853 iwn_nic_unlock(sc);
7854 return 0;
7855}
7856
7857static int
7858iwn4965_load_firmware(struct iwn_softc *sc)
7859{
7860 struct iwn_fw_info *fw = &sc->fw;
7861 struct iwn_dma_info *dma = &sc->fw_dma;
7862 int error;
7863
7864 /* Copy initialization sections into pre-allocated DMA-safe memory. */
7865 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
7866 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7867 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
7868 fw->init.text, fw->init.textsz);
7869 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7870
7871 /* Tell adapter where to find initialization sections. */
7872 if ((error = iwn_nic_lock(sc)) != 0)
7873 return error;
7874 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
7875 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
7876 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
7877 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
7878 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
7879 iwn_nic_unlock(sc);
7880
7881 /* Load firmware boot code. */
7882 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
7883 if (error != 0) {
7884 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
7885 __func__);
7886 return error;
7887 }
7888 /* Now press "execute". */
7889 IWN_WRITE(sc, IWN_RESET, 0);
7890
7891 /* Wait at most one second for first alive notification. */
7892 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
7893 device_printf(sc->sc_dev,
7894 "%s: timeout waiting for adapter to initialize, error %d\n",
7895 __func__, error);
7896 return error;
7897 }
7898
7899 /* Retrieve current temperature for initial TX power calibration. */
7900 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
7901 sc->temp = iwn4965_get_temperature(sc);
7902
7903 /* Copy runtime sections into pre-allocated DMA-safe memory. */
7904 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
7905 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7906 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
7907 fw->main.text, fw->main.textsz);
7908 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7909
7910 /* Tell adapter where to find runtime sections. */
7911 if ((error = iwn_nic_lock(sc)) != 0)
7912 return error;
7913 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
7914 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
7915 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
7916 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
7917 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
7918 IWN_FW_UPDATED | fw->main.textsz);
7919 iwn_nic_unlock(sc);
7920
7921 return 0;
7922}
7923
7924static int
7925iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
7926 const uint8_t *section, int size)
7927{
7928 struct iwn_dma_info *dma = &sc->fw_dma;
7929 int error;
7930
7931 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7932
7933 /* Copy firmware section into pre-allocated DMA-safe memory. */
7934 memcpy(dma->vaddr, section, size);
7935 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7936
7937 if ((error = iwn_nic_lock(sc)) != 0)
7938 return error;
7939
7940 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
7941 IWN_FH_TX_CONFIG_DMA_PAUSE);
7942
7943 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
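	/*
	 * Buffer descriptor: low 32 address bits in TFBD_CTRL0, high
	 * address bits and byte count in TFBD_CTRL1.
	 */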
7944 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
7945 IWN_LOADDR(dma->paddr));
7946 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
7947 IWN_HIADDR(dma->paddr) << 28 | size);
7948 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
7949 IWN_FH_TXBUF_STATUS_TBNUM(1) |
7950 IWN_FH_TXBUF_STATUS_TBIDX(1) |
7951 IWN_FH_TXBUF_STATUS_TFBD_VALID);
7952
7953 /* Kick Flow Handler to start DMA transfer. */
7954 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
7955 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
7956
7957 iwn_nic_unlock(sc);
7958
7959 /* Wait at most five seconds for FH DMA transfer to complete. */
7960 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
7961}
7962
7963static int
7964iwn5000_load_firmware(struct iwn_softc *sc)
7965{
7966 struct iwn_fw_part *fw;
7967 int error;
7968
7969 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7970
7971 /* Load the initialization firmware on first boot only. */
7972 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
7973 &sc->fw.main : &sc->fw.init;
7974
7975 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
7976 fw->text, fw->textsz);
7977 if (error != 0) {
7978 device_printf(sc->sc_dev,
7979 "%s: could not load firmware %s section, error %d\n",
7980 __func__, ".text", error);
7981 return error;
7982 }
7983 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
7984 fw->data, fw->datasz);
7985 if (error != 0) {
7986 device_printf(sc->sc_dev,
7987 "%s: could not load firmware %s section, error %d\n",
7988 __func__, ".data", error);
7989 return error;
7990 }
7991
7992 /* Now press "execute". */
7993 IWN_WRITE(sc, IWN_RESET, 0);
7994 return 0;
7995}
7996
7997/*
7998 * Extract text and data sections from a legacy firmware image.
7999 */
8000static int
8001iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
8002{
8003 const uint32_t *ptr;
8004 size_t hdrlen = 24;
8005 uint32_t rev;
8006
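	/*
	 * Legacy header layout (32-bit little-endian words): revision,
	 * optional build number (API >= 3), then the sizes of the main
	 * text/data, init text/data and boot text sections, which follow
	 * the header in that order.
	 */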
8007 ptr = (const uint32_t *)fw->data;
8008 rev = le32toh(*ptr++);
8009
8010 sc->ucode_rev = rev;
8011
8012 /* Check firmware API version. */
8013 if (IWN_FW_API(rev) <= 1) {
8014 device_printf(sc->sc_dev,
8015 "%s: bad firmware, need API version >=2\n", __func__);
8016 return EINVAL;
8017 }
8018 if (IWN_FW_API(rev) >= 3) {
8019 /* Skip build number (version 2 header). */
8020 hdrlen += 4;
8021 ptr++;
8022 }
8023 if (fw->size < hdrlen) {
8024 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8025 __func__, fw->size);
8026 return EINVAL;
8027 }
8028 fw->main.textsz = le32toh(*ptr++);
8029 fw->main.datasz = le32toh(*ptr++);
8030 fw->init.textsz = le32toh(*ptr++);
8031 fw->init.datasz = le32toh(*ptr++);
8032 fw->boot.textsz = le32toh(*ptr++);
8033
8034 /* Check that all firmware sections fit. */
8035 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
8036 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
8037 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8038 __func__, fw->size);
8039 return EINVAL;
8040 }
8041
8042 /* Get pointers to firmware sections. */
8043 fw->main.text = (const uint8_t *)ptr;
8044 fw->main.data = fw->main.text + fw->main.textsz;
8045 fw->init.text = fw->main.data + fw->main.datasz;
8046 fw->init.data = fw->init.text + fw->init.textsz;
8047 fw->boot.text = fw->init.data + fw->init.datasz;
8048 return 0;
8049}
8050
8051/*
8052 * Extract text and data sections from a TLV firmware image.
8053 */
8054static int
8055iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
8056 uint16_t alt)
8057{
8058 const struct iwn_fw_tlv_hdr *hdr;
8059 const struct iwn_fw_tlv *tlv;
8060 const uint8_t *ptr, *end;
8061 uint64_t altmask;
8062 uint32_t len, tmp;
8063
8064 if (fw->size < sizeof (*hdr)) {
8065 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8066 __func__, fw->size);
8067 return EINVAL;
8068 }
8069 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
8070 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
8071 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
8072 __func__, le32toh(hdr->signature));
8073 return EINVAL;
8074 }
8075 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
8076 le32toh(hdr->build));
8077 sc->ucode_rev = le32toh(hdr->rev);
8078
8079 /*
8080 * Select the closest supported alternative that is less than
8081 * or equal to the specified one.
8082 */
8083 altmask = le64toh(hdr->altmask);
8084 while (alt > 0 && !(altmask & (1ULL << alt)))
8085 alt--; /* Downgrade. */
8086 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
8087
8088 ptr = (const uint8_t *)(hdr + 1);
8089 end = (const uint8_t *)(fw->data + fw->size);
8090
8091 /* Parse type-length-value fields. */
8092 while (ptr + sizeof (*tlv) <= end) {
8093 tlv = (const struct iwn_fw_tlv *)ptr;
8094 len = le32toh(tlv->len);
8095
8096 ptr += sizeof (*tlv);
8097 if (ptr + len > end) {
8098 device_printf(sc->sc_dev,
8099 "%s: firmware too short: %zu bytes\n", __func__,
8100 fw->size);
8101 return EINVAL;
8102 }
8103 /* Skip other alternatives. */
8104 if (tlv->alt != 0 && tlv->alt != htole16(alt))
8105 goto next;
8106
8107 switch (le16toh(tlv->type)) {
8108 case IWN_FW_TLV_MAIN_TEXT:
8109 fw->main.text = ptr;
8110 fw->main.textsz = len;
8111 break;
8112 case IWN_FW_TLV_MAIN_DATA:
8113 fw->main.data = ptr;
8114 fw->main.datasz = len;
8115 break;
8116 case IWN_FW_TLV_INIT_TEXT:
8117 fw->init.text = ptr;
8118 fw->init.textsz = len;
8119 break;
8120 case IWN_FW_TLV_INIT_DATA:
8121 fw->init.data = ptr;
8122 fw->init.datasz = len;
8123 break;
8124 case IWN_FW_TLV_BOOT_TEXT:
8125 fw->boot.text = ptr;
8126 fw->boot.textsz = len;
8127 break;
8128 case IWN_FW_TLV_ENH_SENS:
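			/* Presence-only TLV; it carries no payload. */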
8129 if (!len)
8130 sc->sc_flags |= IWN_FLAG_ENH_SENS;
8131 break;
8132 case IWN_FW_TLV_PHY_CALIB:
8133 tmp = le32toh(*ptr);
8134 if (tmp < 253) {
8135 sc->reset_noise_gain = tmp;
8136 sc->noise_gain = tmp + 1;
8137 }
8138 break;
8139 case IWN_FW_TLV_PAN:
8140 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
8141 DPRINTF(sc, IWN_DEBUG_RESET,
8142 "PAN Support found: %d\n", 1);
8143 break;
8144 case IWN_FW_TLV_FLAGS:
8145 if (len < sizeof(uint32_t))
8146 break;
8147 if (len % sizeof(uint32_t))
8148 break;
8149 sc->tlv_feature_flags = le32toh(*ptr);
8150 DPRINTF(sc, IWN_DEBUG_RESET,
8151 "%s: feature: 0x%08x\n",
8152 __func__,
8153 sc->tlv_feature_flags);
8154 break;
8155 case IWN_FW_TLV_PBREQ_MAXLEN:
8156 case IWN_FW_TLV_RUNT_EVTLOG_PTR:
8157 case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
8158 case IWN_FW_TLV_RUNT_ERRLOG_PTR:
8159 case IWN_FW_TLV_INIT_EVTLOG_PTR:
8160 case IWN_FW_TLV_INIT_EVTLOG_SIZE:
8161 case IWN_FW_TLV_INIT_ERRLOG_PTR:
8162 case IWN_FW_TLV_WOWLAN_INST:
8163 case IWN_FW_TLV_WOWLAN_DATA:
8164 DPRINTF(sc, IWN_DEBUG_RESET,
8165 "TLV type %d recognized but not handled\n",
8166 le16toh(tlv->type));
8167 break;
8168 default:
8169 DPRINTF(sc, IWN_DEBUG_RESET,
8170 "TLV type %d not handled\n", le16toh(tlv->type));
8171 break;
8172 }
8173 next: /* TLV fields are 32-bit aligned. */
8174 ptr += (len + 3) & ~3;
8175 }
8176 return 0;
8177}
8178
8179static int
8180iwn_read_firmware(struct iwn_softc *sc)
8181{
8182 struct iwn_fw_info *fw = &sc->fw;
8183 int error;
8184
8185 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8186
8187 IWN_UNLOCK(sc);
8188
8189 memset(fw, 0, sizeof (*fw));
8190
8191 /* Read firmware image from filesystem. */
8192 sc->fw_fp = firmware_get(sc->fwname);
8193 if (sc->fw_fp == NULL) {
8194 device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
8195 __func__, sc->fwname);
8196 IWN_LOCK(sc);
8197 return EINVAL;
8198 }
8199 IWN_LOCK(sc);
8200
8201 fw->size = sc->fw_fp->datasize;
8202 fw->data = (const uint8_t *)sc->fw_fp->data;
8203 if (fw->size < sizeof (uint32_t)) {
8204 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8205 __func__, fw->size);
8206 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8207 sc->fw_fp = NULL;
8208 return EINVAL;
8209 }
8210
8211 /* Retrieve text and data sections. */
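	/*
	 * A legacy image starts with its non-zero revision word;
	 * a TLV image starts with a zero marker word.
	 */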
8212 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
8213 error = iwn_read_firmware_leg(sc, fw);
8214 else
8215 error = iwn_read_firmware_tlv(sc, fw, 1);
8216 if (error != 0) {
8217 device_printf(sc->sc_dev,
8218 "%s: could not read firmware sections, error %d\n",
8219 __func__, error);
8220 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8221 sc->fw_fp = NULL;
8222 return error;
8223 }
8224
8225 device_printf(sc->sc_dev, "%s: ucode rev=0x%08x\n", __func__, sc->ucode_rev);
8226
8227 /* Make sure text and data sections fit in hardware memory. */
8228 if (fw->main.textsz > sc->fw_text_maxsz ||
8229 fw->main.datasz > sc->fw_data_maxsz ||
8230 fw->init.textsz > sc->fw_text_maxsz ||
8231 fw->init.datasz > sc->fw_data_maxsz ||
8232 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
8233 (fw->boot.textsz & 3) != 0) {
8234 device_printf(sc->sc_dev, "%s: firmware sections too large\n",
8235 __func__);
8236 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8237 sc->fw_fp = NULL;
8238 return EINVAL;
8239 }
8240
8241 /* We can proceed with loading the firmware. */
8242 return 0;
8243}
8244
8245static int
8246iwn_clock_wait(struct iwn_softc *sc)
8247{
8248 int ntries;
8249
8250 /* Set "initialization complete" bit. */
8251 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
8252
8253 /* Wait for clock stabilization. */
8254 for (ntries = 0; ntries < 2500; ntries++) {
8255 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
8256 return 0;
8257 DELAY(10);
8258 }
8259 device_printf(sc->sc_dev,
8260 "%s: timeout waiting for clock stabilization\n", __func__);
8261 return ETIMEDOUT;
8262}
8263
8264static int
8265iwn_apm_init(struct iwn_softc *sc)
8266{
8267 uint32_t reg;
8268 int error;
8269
8270 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8271
8272 /* Disable L0s exit timer (NMI bug workaround). */
8273 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
8274 /* Don't wait for ICH L0s (ICH bug workaround). */
8275 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
8276
8277 /* Set FH wait threshold to max (HW bug under stress workaround). */
8278 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
8279
8280 /* Enable HAP INTA to move adapter from L1a to L0s. */
8281 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
8282
8283 /* Retrieve PCIe Active State Power Management (ASPM). */
8284 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
8285 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
8286 if (reg & 0x02) /* L1 Entry enabled. */
8287 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
8288 else
8289 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
8290
8291 if (sc->base_params->pll_cfg_val)
8292 IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val);
8293
8294 /* Wait for clock stabilization before accessing prph. */
8295 if ((error = iwn_clock_wait(sc)) != 0)
8296 return error;
8297
8298 if ((error = iwn_nic_lock(sc)) != 0)
8299 return error;
8300 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
8301 /* Enable DMA and BSM (Bootstrap State Machine). */
8302 iwn_prph_write(sc, IWN_APMG_CLK_EN,
8303 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
8304 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
8305 } else {
8306 /* Enable DMA. */
8307 iwn_prph_write(sc, IWN_APMG_CLK_EN,
8308 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
8309 }
8310 DELAY(20);
8311 /* Disable L1-Active. */
8312 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
8313 iwn_nic_unlock(sc);
8314
8315 return 0;
8316}
8317
8318static void
8319iwn_apm_stop_master(struct iwn_softc *sc)
8320{
8321 int ntries;
8322
8323 /* Stop busmaster DMA activity. */
8324 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
8325 for (ntries = 0; ntries < 100; ntries++) {
8326 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
8327 return;
8328 DELAY(10);
8329 }
8330 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
8331}
8332
8333static void
8334iwn_apm_stop(struct iwn_softc *sc)
8335{
8336 iwn_apm_stop_master(sc);
8337
8338 /* Reset the entire device. */
8339 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
8340 DELAY(10);
8341 /* Clear "initialization complete" bit. */
8342 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
8343}
8344
8345static int
8346iwn4965_nic_config(struct iwn_softc *sc)
8347{
8348 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8349
8350 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
8351 /*
8352 * I don't believe this to be correct but this is what the
8353 * vendor driver is doing. Probably the bits should not be
8354 * shifted in IWN_RFCFG_*.
8355 */
8356 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8357 IWN_RFCFG_TYPE(sc->rfcfg) |
8358 IWN_RFCFG_STEP(sc->rfcfg) |
8359 IWN_RFCFG_DASH(sc->rfcfg));
8360 }
8361 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8362 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
8363 return 0;
8364}
8365
8366static int
8367iwn5000_nic_config(struct iwn_softc *sc)
8368{
8369 uint32_t tmp;
8370 int error;
8371
8372 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8373
8374 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
8375 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8376 IWN_RFCFG_TYPE(sc->rfcfg) |
8377 IWN_RFCFG_STEP(sc->rfcfg) |
8378 IWN_RFCFG_DASH(sc->rfcfg));
8379 }
8380 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8381 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
8382
8383 if ((error = iwn_nic_lock(sc)) != 0)
8384 return error;
8385 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
8386
8387 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
8388 /*
8389 * Select first Switching Voltage Regulator (1.32V) to
8390 * solve a stability issue related to noisy DC2DC line
8391 * in the silicon of 1000 Series.
8392 */
8393 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
8394 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
8395 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
8396 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
8397 }
8398 iwn_nic_unlock(sc);
8399
8400 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
8401 /* Use internal power amplifier only. */
8402 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
8403 }
8404 if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) {
8405 /* Indicate that ROM calibration version is >=6. */
8406 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
8407 }
8408 if (sc->base_params->additional_gp_drv_bit)
8409 IWN_SETBITS(sc, IWN_GP_DRIVER,
8410 sc->base_params->additional_gp_drv_bit);
8411 return 0;
8412}
8413
8414/*
8415 * Take NIC ownership over Intel Active Management Technology (AMT).
8416 */
8417static int
8418iwn_hw_prepare(struct iwn_softc *sc)
8419{
8420 int ntries;
8421
8422 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8423
8424 /* Check if hardware is ready. */
8425 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
8426 for (ntries = 0; ntries < 5; ntries++) {
8427 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
8428 IWN_HW_IF_CONFIG_NIC_READY)
8429 return 0;
8430 DELAY(10);
8431 }
8432
8433 /* Hardware not ready, force into ready state. */
8434 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
8435 for (ntries = 0; ntries < 15000; ntries++) {
8436 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
8437 IWN_HW_IF_CONFIG_PREPARE_DONE))
8438 break;
8439 DELAY(10);
8440 }
8441 if (ntries == 15000)
8442 return ETIMEDOUT;
8443
8444 /* Hardware should be ready now. */
8445 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
8446 for (ntries = 0; ntries < 5; ntries++) {
8447 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
8448 IWN_HW_IF_CONFIG_NIC_READY)
8449 return 0;
8450 DELAY(10);
8451 }
8452 return ETIMEDOUT;
8453}
8454
8455static int
8456iwn_hw_init(struct iwn_softc *sc)
8457{
8458 struct iwn_ops *ops = &sc->ops;
8459 int error, chnl, qid;
8460
8461 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
8462
8463 /* Clear pending interrupts. */
8464 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8465
8466 if ((error = iwn_apm_init(sc)) != 0) {
8467 device_printf(sc->sc_dev,
8468 "%s: could not power ON adapter, error %d\n", __func__,
8469 error);
8470 return error;
8471 }
8472
8473 /* Select VMAIN power source. */
8474 if ((error = iwn_nic_lock(sc)) != 0)
8475 return error;
8476 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
8477 iwn_nic_unlock(sc);
8478
8479 /* Perform adapter-specific initialization. */
8480 if ((error = ops->nic_config(sc)) != 0)
8481 return error;
8482
8483 /* Initialize RX ring. */
8484 if ((error = iwn_nic_lock(sc)) != 0)
8485 return error;
8486 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
8487 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
8488 /* Set physical address of RX ring (256-byte aligned). */
8489 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
8490 /* Set physical address of RX status (16-byte aligned). */
8491 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
8492 /* Enable RX. */
8493 IWN_WRITE(sc, IWN_FH_RX_CONFIG,
8494 IWN_FH_RX_CONFIG_ENA |
8495 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
8496 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
8497 IWN_FH_RX_CONFIG_SINGLE_FRAME |
8498 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
8499 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
8500 iwn_nic_unlock(sc);
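	/* Keep the RX write pointer 8-aligned. */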
8501 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
8502
8503 if ((error = iwn_nic_lock(sc)) != 0)
8504 return error;
8505
8506 /* Initialize TX scheduler. */
8507 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
8508
8509 /* Set physical address of "keep warm" page (16-byte aligned). */
8510 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
8511
8512 /* Initialize TX rings. */
8513 for (qid = 0; qid < sc->ntxqs; qid++) {
8514 struct iwn_tx_ring *txq = &sc->txq[qid];
8515
8516 /* Set physical address of TX ring (256-byte aligned). */
8517 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
8518 txq->desc_dma.paddr >> 8);
8519 }
8520 iwn_nic_unlock(sc);
8521
8522 /* Enable DMA channels. */
8523 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
8524 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
8525 IWN_FH_TX_CONFIG_DMA_ENA |
8526 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
8527 }
8528
8529 /* Clear "radio off" and "commands blocked" bits. */
8530 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8531 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
8532
8533 /* Clear pending interrupts. */
8534 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8535 /* Enable interrupt coalescing. */
8536 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
8537 /* Enable interrupts. */
8538 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8539
8540 /* _Really_ make sure "radio off" bit is cleared! */
8541 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8542 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8543
8544 /* Enable shadow registers. */
8545 if (sc->base_params->shadow_reg_enable)
8546 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
8547
8548 if ((error = ops->load_firmware(sc)) != 0) {
8549 device_printf(sc->sc_dev,
8550 "%s: could not load firmware, error %d\n", __func__,
8551 error);
8552 return error;
8553 }
8554 /* Wait at most one second for firmware alive notification. */
8555 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
8556 device_printf(sc->sc_dev,
8557 "%s: timeout waiting for adapter to initialize, error %d\n",
8558 __func__, error);
8559 return error;
8560 }
8561 /* Do post-firmware initialization. */
8562
8563 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
8564
8565 return ops->post_alive(sc);
8566}
8567
8568static void
8569iwn_hw_stop(struct iwn_softc *sc)
8570{
8571 int chnl, qid, ntries;
8572
8573 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8574
8575 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
8576
8577 /* Disable interrupts. */
8578 IWN_WRITE(sc, IWN_INT_MASK, 0);
8579 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8580 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
8581 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
8582
8583 /* Make sure we no longer hold the NIC lock. */
8584 iwn_nic_unlock(sc);
8585
8586 /* Stop TX scheduler. */
8587 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
8588
8589 /* Stop all DMA channels. */
8590 if (iwn_nic_lock(sc) == 0) {
8591 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
8592 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
8593 for (ntries = 0; ntries < 200; ntries++) {
8594 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
8595 IWN_FH_TX_STATUS_IDLE(chnl))
8596 break;
8597 DELAY(10);
8598 }
8599 }
8600 iwn_nic_unlock(sc);
8601 }
8602
8603 /* Stop RX ring. */
8604 iwn_reset_rx_ring(sc, &sc->rxq);
8605
8606 /* Reset all TX rings. */
8607 for (qid = 0; qid < sc->ntxqs; qid++)
8608 iwn_reset_tx_ring(sc, &sc->txq[qid]);
8609
8610 if (iwn_nic_lock(sc) == 0) {
8611 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
8612 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
8613 iwn_nic_unlock(sc);
8614 }
8615 DELAY(5);
8616 /* Power OFF adapter. */
8617 iwn_apm_stop(sc);
8618}
8619
8620static void
8621iwn_radio_on(void *arg0, int pending)
8622{
8623 struct iwn_softc *sc = arg0;
8624 struct ieee80211com *ic = &sc->sc_ic;
8625 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8626
8627 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8628
8629 if (vap != NULL) {
8630 iwn_init(sc);
8631 ieee80211_init(vap);
8632 }
8633}
8634
8635static void
8636iwn_radio_off(void *arg0, int pending)
8637{
8638 struct iwn_softc *sc = arg0;
8639 struct ieee80211com *ic = &sc->sc_ic;
8640 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8641
8642 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8643
8644 iwn_stop(sc);
8645 if (vap != NULL)
8646 ieee80211_stop(vap);
8647
8648 /* Enable interrupts to get RF toggle notification. */
8649 IWN_LOCK(sc);
8650 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8651 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8652 IWN_UNLOCK(sc);
8653}
8654
8655static void
8656iwn_panicked(void *arg0, int pending)
8657{
8658 struct iwn_softc *sc = arg0;
8659 struct ieee80211com *ic = &sc->sc_ic;
8660 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8661 int error;
8662
8663 if (vap == NULL) {
8664 printf("%s: null vap\n", __func__);
8665 return;
8666 }
8667
8668 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
8669 "resetting...\n", __func__, vap->iv_state);
8670
8671 IWN_LOCK(sc);
8672
8673 iwn_stop_locked(sc);
8674 iwn_init_locked(sc);
8675 if (vap->iv_state >= IEEE80211_S_AUTH &&
8676 (error = iwn_auth(sc, vap)) != 0) {
8677 device_printf(sc->sc_dev,
8678 "%s: could not move to auth state\n", __func__);
8679 }
8680 if (vap->iv_state >= IEEE80211_S_RUN &&
8681 (error = iwn_run(sc, vap)) != 0) {
8682 device_printf(sc->sc_dev,
8683 "%s: could not move to run state\n", __func__);
8684 }
8685
8686 /* Only run start once the NIC is in a useful state, like associated */
8687 iwn_start_locked(sc);
8688
8689 IWN_UNLOCK(sc);
8690}
8691
8692static void
8693iwn_init_locked(struct iwn_softc *sc)
8694{
8695 int error;
8696
8697 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
8698
8699 IWN_LOCK_ASSERT(sc);
8700
8701 sc->sc_flags |= IWN_FLAG_RUNNING;
8702
8703 if ((error = iwn_hw_prepare(sc)) != 0) {
8704 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
8705 __func__, error);
8706 goto fail;
8707 }
8708
8709 /* Initialize interrupt mask to default value. */
8710 sc->int_mask = IWN_INT_MASK_DEF;
8711 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
8712
8713 /* Check that the radio is not disabled by hardware switch. */
8714 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
8715 device_printf(sc->sc_dev,
8716 "radio is disabled by hardware switch\n");
8717 /* Enable interrupts to get RF toggle notifications. */
8718 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8719 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8720 return;
8721 }
8722
8723 /* Read firmware images from the filesystem. */
8724 if ((error = iwn_read_firmware(sc)) != 0) {
8725 device_printf(sc->sc_dev,
8726 "%s: could not read firmware, error %d\n", __func__,
8727 error);
8728 goto fail;
8729 }
8730
8731 /* Initialize hardware and upload firmware. */
8732 error = iwn_hw_init(sc);
8733 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8734 sc->fw_fp = NULL;
8735 if (error != 0) {
8736 device_printf(sc->sc_dev,
8737 "%s: could not initialize hardware, error %d\n", __func__,
8738 error);
8739 goto fail;
8740 }
8741
8742 /* Configure adapter now that it is ready. */
8743 if ((error = iwn_config(sc)) != 0) {
8744 device_printf(sc->sc_dev,
8745 "%s: could not configure device, error %d\n", __func__,
8746 error);
8747 goto fail;
8748 }
8749
8750 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
8751
8752 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
8753
8754 return;
8755
8756fail:
8757 sc->sc_flags &= ~IWN_FLAG_RUNNING;
8758 iwn_stop_locked(sc);
8759 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
8760}
8761
8762static void
8763iwn_init(struct iwn_softc *sc)
8764{
8765
8766 IWN_LOCK(sc);
8767 iwn_init_locked(sc);
8768 IWN_UNLOCK(sc);
8769
8770 if (sc->sc_flags & IWN_FLAG_RUNNING)
8771 ieee80211_start_all(&sc->sc_ic);
8772}
8773
8774static void
8775iwn_stop_locked(struct iwn_softc *sc)
8776{
8777
8778 IWN_LOCK_ASSERT(sc);
8779
8780 sc->sc_is_scanning = 0;
8781 sc->sc_tx_timer = 0;
8782 callout_stop(&sc->watchdog_to);
8783 callout_stop(&sc->calib_to);
8784 sc->sc_flags &= ~IWN_FLAG_RUNNING;
8785
8786 /* Power OFF hardware. */
8787 iwn_hw_stop(sc);
8788}
8789
8790static void
8791iwn_stop(struct iwn_softc *sc)
8792{
8793 IWN_LOCK(sc);
8794 iwn_stop_locked(sc);
8795 IWN_UNLOCK(sc);
8796}
8797
8798/*
8799 * Callback from net80211 to start a scan.
8800 */
8801static void
8802iwn_scan_start(struct ieee80211com *ic)
8803{
8804 struct iwn_softc *sc = ic->ic_softc;
8805
8806 IWN_LOCK(sc);
8807 /* make the link LED blink while we're scanning */
8808 iwn_set_led(sc, IWN_LED_LINK, 20, 2);
8809 IWN_UNLOCK(sc);
8810}
8811
8812/*
8813 * Callback from net80211 to terminate a scan.
8814 */
8815static void
8816iwn_scan_end(struct ieee80211com *ic)
8817{
8818 struct iwn_softc *sc = ic->ic_softc;
8819 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8820
8821 IWN_LOCK(sc);
8822 if (vap->iv_state == IEEE80211_S_RUN) {
8823 /* Set link LED to ON status if we are associated */
8824 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
8825 }
8826 IWN_UNLOCK(sc);
8827}
8828
8829/*
8830 * Callback from net80211 to force a channel change.
8831 */
8832static void
8833iwn_set_channel(struct ieee80211com *ic)
8834{
8835 const struct ieee80211_channel *c = ic->ic_curchan;
8836 struct iwn_softc *sc = ic->ic_softc;
8837 int error;
8838
8839 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8840
8841 IWN_LOCK(sc);
8842 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
8843 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
8844 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
8845 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
8846
8847 /*
8848 * Only need to set the channel in Monitor mode. AP scanning and auth
8849 * are already taken care of by their respective firmware commands.
8850 */
8851 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8852 error = iwn_config(sc);
8853 if (error != 0)
8854 device_printf(sc->sc_dev,
8855 "%s: error %d settting channel\n", __func__, error);
8856 }
8857 IWN_UNLOCK(sc);
8858}
8859
8860/*
8861 * Callback from net80211 to start scanning of the current channel.
8862 */
8863static void
8864iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
8865{
8866 struct ieee80211vap *vap = ss->ss_vap;
8867 struct ieee80211com *ic = vap->iv_ic;
8868 struct iwn_softc *sc = ic->ic_softc;
8869 int error;
8870
8871 IWN_LOCK(sc);
8872 error = iwn_scan(sc, vap, ss, ic->ic_curchan);
8873 IWN_UNLOCK(sc);
8874 if (error != 0)
8875 ieee80211_cancel_scan(vap);
8876}
8877
8878/*
8879 * Callback from net80211 to handle the minimum dwell time being met.
8880 * The intent is to terminate the scan but we just let the firmware
8881 * notify us when it's finished as we have no safe way to abort it.
8882 */
8883static void
8884iwn_scan_mindwell(struct ieee80211_scan_state *ss)
8885{
8886 /* NB: don't try to abort scan; wait for firmware to finish */
8887}
8888
8889static void
8890iwn_hw_reset(void *arg0, int pending)
8891{
8892 struct iwn_softc *sc = arg0;
8893 struct ieee80211com *ic = &sc->sc_ic;
8894
8895 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8896
8897 iwn_stop(sc);
8898 iwn_init(sc);
8899 ieee80211_notify_radio(ic, 1);
8900}
8901#ifdef IWN_DEBUG
8902#define IWN_DESC(x) case x: return #x
8903
8904/*
8905 * Translate CSR code to string
8906 */
8907static const char *
iwn_get_csr_string(int csr)
8908{
8909 switch (csr) {
8910 IWN_DESC(IWN_HW_IF_CONFIG);
8911 IWN_DESC(IWN_INT_COALESCING);
8912 IWN_DESC(IWN_INT);
8913 IWN_DESC(IWN_INT_MASK);
8914 IWN_DESC(IWN_FH_INT);
8915 IWN_DESC(IWN_GPIO_IN);
8916 IWN_DESC(IWN_RESET);
8917 IWN_DESC(IWN_GP_CNTRL);
8918 IWN_DESC(IWN_HW_REV);
8919 IWN_DESC(IWN_EEPROM);
8920 IWN_DESC(IWN_EEPROM_GP);
8921 IWN_DESC(IWN_OTP_GP);
8922 IWN_DESC(IWN_GIO);
8923 IWN_DESC(IWN_GP_UCODE);
8924 IWN_DESC(IWN_GP_DRIVER);
8925 IWN_DESC(IWN_UCODE_GP1);
8926 IWN_DESC(IWN_UCODE_GP2);
8927 IWN_DESC(IWN_LED);
8928 IWN_DESC(IWN_DRAM_INT_TBL);
8929 IWN_DESC(IWN_GIO_CHICKEN);
8930 IWN_DESC(IWN_ANA_PLL);
8931 IWN_DESC(IWN_HW_REV_WA);
8932 IWN_DESC(IWN_DBG_HPET_MEM);
8933 default:
8934 return "UNKNOWN CSR";
8935 }
8936}
8937
8938/*
8939  * This function prints the device CSR registers.
8940 */
8941static void
8942iwn_debug_register(struct iwn_softc *sc)
8943{
8944 int i;
8945 static const uint32_t csr_tbl[] = {
8946 IWN_HW_IF_CONFIG,
8947 IWN_INT_COALESCING,
8948 IWN_INT,
8949 IWN_INT_MASK,
8950 IWN_FH_INT,
8951 IWN_GPIO_IN,
8952 IWN_RESET,
8953 IWN_GP_CNTRL,
8954 IWN_HW_REV,
8955 IWN_EEPROM,
8956 IWN_EEPROM_GP,
8957 IWN_OTP_GP,
8958 IWN_GIO,
8959 IWN_GP_UCODE,
8960 IWN_GP_DRIVER,
8961 IWN_UCODE_GP1,
8962 IWN_UCODE_GP2,
8963 IWN_LED,
8964 IWN_DRAM_INT_TBL,
8965 IWN_GIO_CHICKEN,
8966 IWN_ANA_PLL,
8967 IWN_HW_REV_WA,
8968 IWN_DBG_HPET_MEM,
8969 };
8970 DPRINTF(sc, IWN_DEBUG_REGISTER,
8971 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s",
8972 "\n");
8973 for (i = 0; i < nitems(csr_tbl); i++){
8974 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ",
8975 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i]));
8976 if ((i+1) % 3 == 0)
8977 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
8978 }
8979 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
8980}
8981#endif
5083static void
5084iwn_parent(struct ieee80211com *ic)
5085{
5086 struct iwn_softc *sc = ic->ic_softc;
5087 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5088 int startall = 0, stop = 0;
5089
5090 IWN_LOCK(sc);
5091 if (ic->ic_nrunning > 0) {
5092 if (!(sc->sc_flags & IWN_FLAG_RUNNING)) {
5093 iwn_init_locked(sc);
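			/* A set RFKILL bit means the switch has the radio enabled. */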
5094 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
5095 startall = 1;
5096 else
5097 stop = 1;
5098 }
5099 } else if (sc->sc_flags & IWN_FLAG_RUNNING)
5100 iwn_stop_locked(sc);
5101 IWN_UNLOCK(sc);
5102 if (startall)
5103 ieee80211_start_all(ic);
5104 else if (vap != NULL && stop)
5105 ieee80211_stop(vap);
5106}
5107
5108/*
5109 * Send a command to the firmware.
5110 */
5111static int
5112iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
5113{
5114 struct iwn_tx_ring *ring;
5115 struct iwn_tx_desc *desc;
5116 struct iwn_tx_data *data;
5117 struct iwn_tx_cmd *cmd;
5118 struct mbuf *m;
5119 bus_addr_t paddr;
5120 int totlen, error;
5121 int cmd_queue_num;
5122
5123 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5124
5125 if (async == 0)
5126 IWN_LOCK_ASSERT(sc);
5127
5128 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
5129 cmd_queue_num = IWN_PAN_CMD_QUEUE;
5130 else
5131 cmd_queue_num = IWN_CMD_QUEUE_NUM;
5132
5133 ring = &sc->txq[cmd_queue_num];
5134 desc = &ring->desc[ring->cur];
5135 data = &ring->data[ring->cur];
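	/* Total length includes the 4-byte command header (code, flags, qid, idx). */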
5136 totlen = 4 + size;
5137
5138 if (size > sizeof cmd->data) {
5139 /* Command is too large to fit in a descriptor. */
5140 if (totlen > MCLBYTES)
5141 return EINVAL;
5142 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
5143 if (m == NULL)
5144 return ENOMEM;
5145 cmd = mtod(m, struct iwn_tx_cmd *);
5146 error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
5147 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
5148 if (error != 0) {
5149 m_freem(m);
5150 return error;
5151 }
5152 data->m = m;
5153 } else {
5154 cmd = &ring->cmd[ring->cur];
5155 paddr = data->cmd_paddr;
5156 }
5157
5158 cmd->code = code;
5159 cmd->flags = 0;
5160 cmd->qid = ring->qid;
5161 cmd->idx = ring->cur;
5162 memcpy(cmd->data, buf, size);
5163
5164 desc->nsegs = 1;
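	/*
	 * The 16-bit len field carries the high address bits in its low
	 * nibble and the byte count in bits 4-15.
	 */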
5165 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
5166 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
5167
5168 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
5169 __func__, iwn_intr_str(cmd->code), cmd->code,
5170 cmd->flags, cmd->qid, cmd->idx);
5171
5172 if (size > sizeof cmd->data) {
5173 bus_dmamap_sync(ring->data_dmat, data->map,
5174 BUS_DMASYNC_PREWRITE);
5175 } else {
5176 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
5177 BUS_DMASYNC_PREWRITE);
5178 }
5179 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
5180 BUS_DMASYNC_PREWRITE);
5181
5182 /* Kick command ring. */
5183 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
5184 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
5185
5186 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5187
5188 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
5189}
5190
5191static int
5192iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
5193{
5194 struct iwn4965_node_info hnode;
5195 caddr_t src, dst;
5196
5197 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5198
5199 /*
5200 * We use the node structure for 5000 Series internally (it is
5201 * a superset of the one for 4965AGN). We thus copy the common
5202 * fields before sending the command.
5203 */
5204 src = (caddr_t)node;
5205 dst = (caddr_t)&hnode;
5206 memcpy(dst, src, 48);
5207 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
5208 memcpy(dst + 48, src + 72, 20);
5209 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
5210}
5211
5212static int
5213iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
5214{
5215
5216 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5217
5218 /* Direct mapping. */
5219 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
5220}
5221
5222static int
5223iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
5224{
5225#define RV(v) ((v) & IEEE80211_RATE_VAL)
5226 struct iwn_node *wn = (void *)ni;
5227 struct ieee80211_rateset *rs;
5228 struct iwn_cmd_link_quality linkq;
5229 int i, rate, txrate;
5230 int is_11n;
5231
5232 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5233
5234 memset(&linkq, 0, sizeof linkq);
5235 linkq.id = wn->id;
5236 linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
5237 linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
5238
5239 linkq.ampdu_max = 32; /* XXX negotiated? */
5240 linkq.ampdu_threshold = 3;
5241 linkq.ampdu_limit = htole16(4000); /* 4ms */
5242
5243 DPRINTF(sc, IWN_DEBUG_XMIT,
5244 "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
5245 __func__,
5246 linkq.antmsk_1stream,
5247 linkq.antmsk_2stream,
5248 sc->ntxchains);
5249
5250 /*
5251 * Are we using 11n rates? Ensure the channel is
5252 * 11n _and_ we have some 11n rates, or don't
5253 * try.
5254 */
5255 if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
5256 rs = (struct ieee80211_rateset *) &ni->ni_htrates;
5257 is_11n = 1;
5258 } else {
5259 rs = &ni->ni_rates;
5260 is_11n = 0;
5261 }
5262
5263 /* Start at highest available bit-rate. */
5264 /*
5265 * XXX this is all very dirty!
5266 */
5267 if (is_11n)
5268 txrate = ni->ni_htrates.rs_nrates - 1;
5269 else
5270 txrate = rs->rs_nrates - 1;
5271 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
5272 uint32_t plcp;
5273
5274 /*
5275 * XXX TODO: ensure the last two slots are the two lowest
5276 * rate entries, just for now.
5277 */
5278 if (i == 14 || i == 15)
5279 txrate = 0;
5280
5281 if (is_11n)
5282 rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
5283 else
5284 rate = RV(rs->rs_rates[txrate]);
5285
5286 /* Do rate -> PLCP config mapping */
5287 plcp = iwn_rate_to_plcp(sc, ni, rate);
5288 linkq.retry[i] = plcp;
5289 DPRINTF(sc, IWN_DEBUG_XMIT,
5290 "%s: i=%d, txrate=%d, rate=0x%02x, plcp=0x%08x\n",
5291 __func__,
5292 i,
5293 txrate,
5294 rate,
5295 le32toh(plcp));
5296
5297 /*
5298 * The mimo field is an index into the table which
5299 * indicates the first index where it and subsequent entries
5300 * will not be using MIMO.
5301 *
5302 * Since we're filling linkq from 0..15 and we're filling
5303 		 * from the highest MCS rates to the lowest rates, if we
5304 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
5305 * the next entry.) That way if the next entry is a non-MIMO
5306 * entry, we're already pointing at it.
5307 */
5308 if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
5309 RV(le32toh(plcp)) > 7)
5310 linkq.mimo = i + 1;
5311
5312 /* Next retry at immediate lower bit-rate. */
5313 if (txrate > 0)
5314 txrate--;
5315 }
5316 /*
5317 	 * If we reached the end of the list and every entry was a
5318 	 * MIMO rate (e.g. a 5300 using MCS 23 down through MCS 15),
5319 	 * cap mimo at 15.  Setting it to 16 panics the firmware.
5320 */
5321 if (linkq.mimo > 15)
5322 linkq.mimo = 15;
5323
5324 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: mimo = %d\n", __func__, linkq.mimo);
5325
5326 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5327
5328 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
5329#undef RV
5330}
5331
5332/*
5333 * Broadcast node is used to send group-addressed and management frames.
5334 */
5335static int
5336iwn_add_broadcast_node(struct iwn_softc *sc, int async)
5337{
5338 struct iwn_ops *ops = &sc->ops;
5339 struct ieee80211com *ic = &sc->sc_ic;
5340 struct iwn_node_info node;
5341 struct iwn_cmd_link_quality linkq;
5342 uint8_t txant;
5343 int i, error;
5344
5345 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5346
5347 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5348
5349 memset(&node, 0, sizeof node);
5350 IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
5351 node.id = sc->broadcast_id;
5352 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
5353 if ((error = ops->add_node(sc, &node, async)) != 0)
5354 return error;
5355
5356 /* Use the first valid TX antenna. */
5357 txant = IWN_LSB(sc->txchainmask);
5358
5359 memset(&linkq, 0, sizeof linkq);
5360 linkq.id = sc->broadcast_id;
5361 linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
5362 linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
5363 linkq.ampdu_max = 64;
5364 linkq.ampdu_threshold = 3;
5365 linkq.ampdu_limit = htole16(4000); /* 4ms */
5366
5367 /* Use lowest mandatory bit-rate. */
5368 /* XXX rate table lookup? */
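	/* PLCP 0xd is 6 Mb/s OFDM; PLCP 10 is 1 Mb/s CCK. */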
5369 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
5370 linkq.retry[0] = htole32(0xd);
5371 else
5372 linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
5373 linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
5374 /* Use same bit-rate for all TX retries. */
5375 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
5376 linkq.retry[i] = linkq.retry[0];
5377 }
5378
5379 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5380
5381 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
5382}
5383
5384static int
5385iwn_updateedca(struct ieee80211com *ic)
5386{
5387#define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
5388 struct iwn_softc *sc = ic->ic_softc;
5389 struct iwn_edca_params cmd;
5390 int aci;
5391
5392 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5393
5394 memset(&cmd, 0, sizeof cmd);
5395 cmd.flags = htole32(IWN_EDCA_UPDATE);
5396 for (aci = 0; aci < WME_NUM_AC; aci++) {
5397 const struct wmeParams *ac =
5398 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
5399 cmd.ac[aci].aifsn = ac->wmep_aifsn;
5400 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
5401 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
5402 cmd.ac[aci].txoplimit =
5403 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
5404 }
5405 IEEE80211_UNLOCK(ic);
5406 IWN_LOCK(sc);
5407 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
5408 IWN_UNLOCK(sc);
5409 IEEE80211_LOCK(ic);
5410
5411 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5412
5413 return 0;
5414#undef IWN_EXP2
5415}
5416
5417static void
5418iwn_update_mcast(struct ieee80211com *ic)
5419{
5420 /* Ignore */
5421}
5422
5423static void
5424iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
5425{
5426 struct iwn_cmd_led led;
5427
5428 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5429
5430#if 0
5431 /* XXX don't set LEDs during scan? */
5432 if (sc->sc_is_scanning)
5433 return;
5434#endif
5435
5436 /* Clear microcode LED ownership. */
5437 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
5438
5439 led.which = which;
5440 led.unit = htole32(10000); /* on/off in unit of 100ms */
5441 led.off = off;
5442 led.on = on;
5443 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
5444}
5445
5446/*
5447 * Set the critical temperature at which the firmware will stop the radio
5448 * and notify us.
5449 */
5450static int
5451iwn_set_critical_temp(struct iwn_softc *sc)
5452{
5453 struct iwn_critical_temp crit;
5454 int32_t temp;
5455
5456 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5457
5458 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
5459
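	/*
	 * The 5150 expects the threshold in its raw sensor units (the
	 * inverse of the conversion in iwn5000_get_temperature()), the
	 * 4965 expects Kelvin and the other chips expect degrees Celsius.
	 */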
5460 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
5461 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
5462 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
5463 temp = IWN_CTOK(110);
5464 else
5465 temp = 110;
5466 memset(&crit, 0, sizeof crit);
5467 crit.tempR = htole32(temp);
5468 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
5469 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
5470}
5471
5472static int
5473iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
5474{
5475 struct iwn_cmd_timing cmd;
5476 uint64_t val, mod;
5477
5478 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5479
5480 memset(&cmd, 0, sizeof cmd);
5481 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
5482 cmd.bintval = htole16(ni->ni_intval);
5483 cmd.lintval = htole16(10);
5484
5485 /* Compute remaining time until next beacon. */
5486 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
5487 mod = le64toh(cmd.tstamp) % val;
5488 cmd.binitval = htole32((uint32_t)(val - mod));
5489
5490 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
5491 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
5492
5493 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
5494}
5495
5496static void
5497iwn4965_power_calibration(struct iwn_softc *sc, int temp)
5498{
5499 struct ieee80211com *ic = &sc->sc_ic;
5500
5501 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5502
5503 /* Adjust TX power if need be (delta >= 3 degC). */
5504 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
5505 __func__, sc->temp, temp);
5506 if (abs(temp - sc->temp) >= 3) {
5507 /* Record temperature of last calibration. */
5508 sc->temp = temp;
5509 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
5510 }
5511}
5512
5513/*
5514 * Set TX power for current channel (each rate has its own power settings).
5515 * This function takes into account the regulatory information from EEPROM,
5516 * the current temperature and the current voltage.
5517 */
5518static int
5519iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
5520 int async)
5521{
5522/* Fixed-point arithmetic division using a n-bit fractional part. */
5523#define fdivround(a, b, n) \
5524 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
5525/* Linear interpolation. */
5526#define interpolate(x, x1, y1, x2, y2, n) \
5527 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
5528
5529 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
5530 struct iwn_ucode_info *uc = &sc->ucode_info;
5531 struct iwn4965_cmd_txpower cmd;
5532 struct iwn4965_eeprom_chan_samples *chans;
5533 const uint8_t *rf_gain, *dsp_gain;
5534 int32_t vdiff, tdiff;
5535 int i, c, grp, maxpwr;
5536 uint8_t chan;
5537
5538 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
5539 /* Retrieve current channel from last RXON. */
5540 chan = sc->rxon->chan;
5541 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
5542 chan);
5543
5544 memset(&cmd, 0, sizeof cmd);
5545 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
5546 cmd.chan = chan;
5547
5548 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
5549 maxpwr = sc->maxpwr5GHz;
5550 rf_gain = iwn4965_rf_gain_5ghz;
5551 dsp_gain = iwn4965_dsp_gain_5ghz;
5552 } else {
5553 maxpwr = sc->maxpwr2GHz;
5554 rf_gain = iwn4965_rf_gain_2ghz;
5555 dsp_gain = iwn4965_dsp_gain_2ghz;
5556 }
5557
5558 /* Compute voltage compensation. */
5559 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
5560 if (vdiff > 0)
5561 vdiff *= 2;
5562 if (abs(vdiff) > 2)
5563 vdiff = 0;
5564 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5565 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
5566 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
5567
5568 /* Get channel attenuation group. */
5569 if (chan <= 20) /* 1-20 */
5570 grp = 4;
5571 else if (chan <= 43) /* 34-43 */
5572 grp = 0;
5573 else if (chan <= 70) /* 44-70 */
5574 grp = 1;
5575 else if (chan <= 124) /* 71-124 */
5576 grp = 2;
5577 else /* 125-200 */
5578 grp = 3;
5579 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5580 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
5581
5582 /* Get channel sub-band. */
5583 for (i = 0; i < IWN_NBANDS; i++)
5584 if (sc->bands[i].lo != 0 &&
5585 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
5586 break;
5587 if (i == IWN_NBANDS) /* Can't happen in real-life. */
5588 return EINVAL;
5589 chans = sc->bands[i].chans;
5590 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5591 "%s: chan %d sub-band=%d\n", __func__, chan, i);
5592
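	/*
	 * For each of the two TX chains, interpolate the factory
	 * power/gain/temperature samples of the two boundary channels
	 * of this sub-band.
	 */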
5593 for (c = 0; c < 2; c++) {
5594 uint8_t power, gain, temp;
5595 int maxchpwr, pwr, ridx, idx;
5596
5597 power = interpolate(chan,
5598 chans[0].num, chans[0].samples[c][1].power,
5599 chans[1].num, chans[1].samples[c][1].power, 1);
5600 gain = interpolate(chan,
5601 chans[0].num, chans[0].samples[c][1].gain,
5602 chans[1].num, chans[1].samples[c][1].gain, 1);
5603 temp = interpolate(chan,
5604 chans[0].num, chans[0].samples[c][1].temp,
5605 chans[1].num, chans[1].samples[c][1].temp, 1);
5606 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5607 "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
5608 __func__, c, power, gain, temp);
5609
5610 /* Compute temperature compensation. */
5611 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
5612 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5613 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
5614 __func__, tdiff, sc->temp, temp);
5615
5616 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
5617 /* Convert dBm to half-dBm. */
5618 maxchpwr = sc->maxpwr[chan] * 2;
5619 if ((ridx / 8) & 1)
5620 maxchpwr -= 6; /* MIMO 2T: -3dB */
5621
5622 pwr = maxpwr;
5623
5624 /* Adjust TX power based on rate. */
5625 if ((ridx % 8) == 5)
5626 pwr -= 15; /* OFDM48: -7.5dB */
5627 else if ((ridx % 8) == 6)
5628 pwr -= 17; /* OFDM54: -8.5dB */
5629 else if ((ridx % 8) == 7)
5630 pwr -= 20; /* OFDM60: -10dB */
5631 else
5632 pwr -= 10; /* Others: -5dB */
5633
5634 /* Do not exceed channel max TX power. */
5635 if (pwr > maxchpwr)
5636 pwr = maxchpwr;
5637
5638 idx = gain - (pwr - power) - tdiff - vdiff;
5639 if ((ridx / 8) & 1) /* MIMO */
5640 idx += (int32_t)le32toh(uc->atten[grp][c]);
5641
5642 if (cmd.band == 0)
5643 idx += 9; /* 5GHz */
5644 if (ridx == IWN_RIDX_MAX)
5645 idx += 5; /* CCK */
5646
5647 /* Make sure idx stays in a valid range. */
5648 if (idx < 0)
5649 idx = 0;
5650 else if (idx > IWN4965_MAX_PWR_INDEX)
5651 idx = IWN4965_MAX_PWR_INDEX;
5652
5653 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5654 "%s: Tx chain %d, rate idx %d: power=%d\n",
5655 __func__, c, ridx, idx);
5656 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
5657 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
5658 }
5659 }
5660
5661 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
5662 "%s: set tx power for chan %d\n", __func__, chan);
5663 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
5664
5665#undef interpolate
5666#undef fdivround
5667}
5668
5669static int
5670iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
5671 int async)
5672{
5673 struct iwn5000_cmd_txpower cmd;
5674 int cmdid;
5675
5676 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5677
5678 /*
5679 * TX power calibration is handled automatically by the firmware
5680 * for 5000 Series.
5681 */
5682 memset(&cmd, 0, sizeof cmd);
5683 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
5684 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
5685 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
5686 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
5687 "%s: setting TX power; rev=%d\n",
5688 __func__,
5689 IWN_UCODE_API(sc->ucode_rev));
5690 if (IWN_UCODE_API(sc->ucode_rev) == 1)
5691 cmdid = IWN_CMD_TXPOWER_DBM_V1;
5692 else
5693 cmdid = IWN_CMD_TXPOWER_DBM;
5694 return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async);
5695}
5696
5697/*
5698 * Retrieve the maximum RSSI (in dBm) among receivers.
5699 */
5700static int
5701iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
5702{
5703 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
5704 uint8_t mask, agc;
5705 int rssi;
5706
5707 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5708
5709 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
5710 agc = (le16toh(phy->agc) >> 7) & 0x7f;
5711
5712 rssi = 0;
5713 if (mask & IWN_ANT_A)
5714 rssi = MAX(rssi, phy->rssi[0]);
5715 if (mask & IWN_ANT_B)
5716 rssi = MAX(rssi, phy->rssi[2]);
5717 if (mask & IWN_ANT_C)
5718 rssi = MAX(rssi, phy->rssi[4]);
5719
5720 DPRINTF(sc, IWN_DEBUG_RECV,
5721 "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
5722 mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
5723 rssi - agc - IWN_RSSI_TO_DBM);
5724 return rssi - agc - IWN_RSSI_TO_DBM;
5725}
5726
5727static int
5728iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
5729{
5730 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
5731 uint8_t agc;
5732 int rssi;
5733
5734 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5735
5736 agc = (le32toh(phy->agc) >> 9) & 0x7f;
5737
5738 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
5739 le16toh(phy->rssi[1]) & 0xff);
5740 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
5741
5742 DPRINTF(sc, IWN_DEBUG_RECV,
5743 "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
5744 phy->rssi[0], phy->rssi[1], phy->rssi[2],
5745 rssi - agc - IWN_RSSI_TO_DBM);
5746 return rssi - agc - IWN_RSSI_TO_DBM;
5747}
5748
5749/*
5750 * Retrieve the average noise (in dBm) among receivers.
5751 */
5752static int
5753iwn_get_noise(const struct iwn_rx_general_stats *stats)
5754{
5755 int i, total, nbant, noise;
5756
5757 total = nbant = 0;
5758 for (i = 0; i < 3; i++) {
5759 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
5760 continue;
5761 total += noise;
5762 nbant++;
5763 }
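/*
 * Illustrative example (made-up values): if two antennas report raw
 * noise readings of 40 and 44, then total = 84 and nbant = 2, so the
 * average computed below yields 84 / 2 - 107 = -65 dBm.
 */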
5764 /* There should be at least one antenna but check anyway. */
5765 return (nbant == 0) ? -127 : (total / nbant) - 107;
5766}
5767
5768/*
5769 * Compute temperature (in degC) from last received statistics.
5770 */
5771static int
5772iwn4965_get_temperature(struct iwn_softc *sc)
5773{
5774 struct iwn_ucode_info *uc = &sc->ucode_info;
5775 int32_t r1, r2, r3, r4, temp;
5776
5777 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5778
5779 r1 = le32toh(uc->temp[0].chan20MHz);
5780 r2 = le32toh(uc->temp[1].chan20MHz);
5781 r3 = le32toh(uc->temp[2].chan20MHz);
5782 r4 = le32toh(sc->rawtemp);
5783
5784 if (r1 == r3) /* Prevents division by 0 (should not happen). */
5785 return 0;
5786
5787 /* Sign-extend 23-bit R4 value to 32-bit. */
5788 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
5789 /* Compute temperature in Kelvin. */
5790 temp = (259 * (r4 - r2)) / (r3 - r1);
5791 temp = (temp * 97) / 100 + 8;
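/*
 * Illustrative example (raw readings are made up): with r1 = 100,
 * r2 = 500, r3 = 1100 and r4 = 1660, the first step gives
 * 259 * (1660 - 500) / (1100 - 100) = 300 K; the empirical correction
 * then yields 300 * 97 / 100 + 8 = 299 K, i.e. about 26 degC.
 */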
5792
5793 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
5794 IWN_KTOC(temp));
5795 return IWN_KTOC(temp);
5796}
5797
5798static int
5799iwn5000_get_temperature(struct iwn_softc *sc)
5800{
5801 int32_t temp;
5802
5803 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5804
5805 /*
5806 * Temperature is not used by the driver for 5000 Series because
5807 * TX power calibration is handled by firmware.
5808 */
5809 temp = le32toh(sc->rawtemp);
5810 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
5811 temp = (temp / -5) + sc->temp_off;
5812 temp = IWN_KTOC(temp);
5813 }
5814 return temp;
5815}
5816
5817/*
5818 * Initialize sensitivity calibration state machine.
5819 */
5820static int
5821iwn_init_sensitivity(struct iwn_softc *sc)
5822{
5823 struct iwn_ops *ops = &sc->ops;
5824 struct iwn_calib_state *calib = &sc->calib;
5825 uint32_t flags;
5826 int error;
5827
5828 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5829
5830 /* Reset calibration state machine. */
5831 memset(calib, 0, sizeof (*calib));
5832 calib->state = IWN_CALIB_STATE_INIT;
5833 calib->cck_state = IWN_CCK_STATE_HIFA;
5834 /* Set initial correlation values. */
5835 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
5836 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
5837 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
5838 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
5839 calib->cck_x4 = 125;
5840 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
5841 calib->energy_cck = sc->limits->energy_cck;
5842
5843 /* Write initial sensitivity. */
5844 if ((error = iwn_send_sensitivity(sc)) != 0)
5845 return error;
5846
5847 /* Write initial gains. */
5848 if ((error = ops->init_gains(sc)) != 0)
5849 return error;
5850
5851 /* Request statistics at each beacon interval. */
5852 flags = 0;
5853 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
5854 __func__);
5855 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
5856}
5857
5858/*
5859 * Collect noise and RSSI statistics for the first 20 beacons received
5860 * after association and use them to determine connected antennas and
5861 * to set differential gains.
5862 */
5863static void
5864iwn_collect_noise(struct iwn_softc *sc,
5865 const struct iwn_rx_general_stats *stats)
5866{
5867 struct iwn_ops *ops = &sc->ops;
5868 struct iwn_calib_state *calib = &sc->calib;
5869 struct ieee80211com *ic = &sc->sc_ic;
5870 uint32_t val;
5871 int i;
5872
5873 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
5874
5875 /* Accumulate RSSI and noise for all 3 antennas. */
5876 for (i = 0; i < 3; i++) {
5877 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
5878 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
5879 }
5880 /* NB: We update differential gains only once after 20 beacons. */
5881 if (++calib->nbeacons < 20)
5882 return;
5883
5884 /* Determine highest average RSSI. */
5885 val = MAX(calib->rssi[0], calib->rssi[1]);
5886 val = MAX(calib->rssi[2], val);
5887
5888 /* Determine which antennas are connected. */
5889 sc->chainmask = sc->rxchainmask;
5890 for (i = 0; i < 3; i++)
5891 if (val - calib->rssi[i] > 15 * 20)
5892 sc->chainmask &= ~(1 << i);
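/*
 * The 15 * 20 threshold above compares RSSI sums accumulated over
 * 20 beacons, i.e. an antenna is considered disconnected when its
 * average RSSI is more than 15 units below the best antenna.
 */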
5893 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
5894 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
5895 __func__, sc->rxchainmask, sc->chainmask);
5896
5897 /* If none of the TX antennas are connected, keep at least one. */
5898 if ((sc->chainmask & sc->txchainmask) == 0)
5899 sc->chainmask |= IWN_LSB(sc->txchainmask);
5900
5901 (void)ops->set_gains(sc);
5902 calib->state = IWN_CALIB_STATE_RUN;
5903
5904#ifdef notyet
5905 /* XXX Disable RX chains with no antennas connected. */
5906 sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
5907 if (sc->sc_is_scanning)
5908 device_printf(sc->sc_dev,
5909 "%s: is_scanning set, before RXON\n",
5910 __func__);
5911 (void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
5912#endif
5913
5914 /* Enable power-saving mode if requested by user. */
5915 if (ic->ic_flags & IEEE80211_F_PMGTON)
5916 (void)iwn_set_pslevel(sc, 0, 3, 1);
5917
5918 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
5919
5920}
5921
5922static int
5923iwn4965_init_gains(struct iwn_softc *sc)
5924{
5925 struct iwn_phy_calib_gain cmd;
5926
5927 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5928
5929 memset(&cmd, 0, sizeof cmd);
5930 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5931 /* Differential gains initially set to 0 for all 3 antennas. */
5932 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5933 "%s: setting initial differential gains\n", __func__);
5934 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5935}
5936
5937static int
5938iwn5000_init_gains(struct iwn_softc *sc)
5939{
5940 struct iwn_phy_calib cmd;
5941
5942 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5943
5944 memset(&cmd, 0, sizeof cmd);
5945 cmd.code = sc->reset_noise_gain;
5946 cmd.ngroups = 1;
5947 cmd.isvalid = 1;
5948 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5949 "%s: setting initial differential gains\n", __func__);
5950 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5951}
5952
5953static int
5954iwn4965_set_gains(struct iwn_softc *sc)
5955{
5956 struct iwn_calib_state *calib = &sc->calib;
5957 struct iwn_phy_calib_gain cmd;
5958 int i, delta, noise;
5959
5960 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5961
5962 /* Get minimal noise among connected antennas. */
5963 noise = INT_MAX; /* NB: There's at least one antenna. */
5964 for (i = 0; i < 3; i++)
5965 if (sc->chainmask & (1 << i))
5966 noise = MIN(calib->noise[i], noise);
5967
5968 memset(&cmd, 0, sizeof cmd);
5969 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
5970 /* Set differential gains for connected antennas. */
5971 for (i = 0; i < 3; i++) {
5972 if (sc->chainmask & (1 << i)) {
5973 /* Compute attenuation (in unit of 1.5dB). */
5974 delta = (noise - (int32_t)calib->noise[i]) / 30;
5975 /* NB: delta <= 0 */
5976 /* Limit to [-4.5dB,0]. */
5977 cmd.gain[i] = MIN(abs(delta), 3);
5978 if (delta < 0)
5979 cmd.gain[i] |= 1 << 2; /* sign bit */
5980 }
5981 }
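/*
 * Illustrative example (noise figures are made up): if the quietest
 * connected antenna reads N and antenna i reads N + 75, then
 * delta = -75 / 30 = -2, so cmd.gain[i] = 2 | (1 << 2) = 0x6,
 * i.e. a 2 * 1.5 dB = 3 dB attenuation on that antenna.
 */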
5982 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5983 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
5984 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
5985 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
5986}
5987
5988static int
5989iwn5000_set_gains(struct iwn_softc *sc)
5990{
5991 struct iwn_calib_state *calib = &sc->calib;
5992 struct iwn_phy_calib_gain cmd;
5993 int i, ant, div, delta;
5994
5995 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
5996
5997 /* Noise was accumulated over 20 beacons; non-6050 parts need a 1.5 factor. */
5998 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
5999
6000 memset(&cmd, 0, sizeof cmd);
6001 cmd.code = sc->noise_gain;
6002 cmd.ngroups = 1;
6003 cmd.isvalid = 1;
6004 /* Get the first available RX antenna as the reference. */
6005 ant = IWN_LSB(sc->rxchainmask);
6006 /* Set differential gains for other antennas. */
6007 for (i = ant + 1; i < 3; i++) {
6008 if (sc->chainmask & (1 << i)) {
6009 /* The delta is relative to antenna "ant". */
6010 delta = ((int32_t)calib->noise[ant] -
6011 (int32_t)calib->noise[i]) / div;
6012 /* Limit to [-4.5dB,+4.5dB]. */
6013 cmd.gain[i - 1] = MIN(abs(delta), 3);
6014 if (delta < 0)
6015 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
6016 }
6017 }
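/*
 * Illustrative example (made-up sums): with a 60-unit noise gap
 * accumulated over the 20 beacons, non-6050 parts compute
 * delta = 60 / 30 = 2 while the 6050 computes delta = 60 / 20 = 3,
 * both clamped to at most 3 with a separate sign bit.
 */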
6018 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
6019 "setting differential gains Ant B/C: %x/%x (%x)\n",
6020 cmd.gain[0], cmd.gain[1], sc->chainmask);
6021 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
6022}
6023
6024/*
6025 * Tune RF RX sensitivity based on the number of false alarms detected
6026 * during the last beacon period.
6027 */
6028static void
6029iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
6030{
6031#define inc(val, inc, max) \
6032 if ((val) < (max)) { \
6033 if ((val) < (max) - (inc)) \
6034 (val) += (inc); \
6035 else \
6036 (val) = (max); \
6037 needs_update = 1; \
6038 }
6039#define dec(val, dec, min) \
6040 if ((val) > (min)) { \
6041 if ((val) > (min) + (dec)) \
6042 (val) -= (dec); \
6043 else \
6044 (val) = (min); \
6045 needs_update = 1; \
6046 }
6047
6048 const struct iwn_sensitivity_limits *limits = sc->limits;
6049 struct iwn_calib_state *calib = &sc->calib;
6050 uint32_t val, rxena, fa;
6051 uint32_t energy[3], energy_min;
6052 uint8_t noise[3], noise_ref;
6053 int i, needs_update = 0;
6054
6055 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6056
6057 /* Check that we've been enabled long enough. */
6058 if ((rxena = le32toh(stats->general.load)) == 0){
6059 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end, not enabled long enough\n", __func__);
6060 return;
6061 }
6062
6063 /* Compute number of false alarms since last call for OFDM. */
6064 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
6065 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
6066 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
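/*
 * The comparisons below effectively normalize the false-alarm count to
 * the receive-enable time: fa * 200 * 1024 > 50 * rxena means roughly
 * "more than 50 false alarms per 200 TU of RX time", assuming the load
 * statistic is reported in microseconds.
 */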
6067
6068 if (fa > 50 * rxena) {
6069 /* High false alarm count, decrease sensitivity. */
6070 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6071 "%s: OFDM high false alarm count: %u\n", __func__, fa);
6072 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
6073 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
6074 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
6075 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
6076
6077 } else if (fa < 5 * rxena) {
6078 /* Low false alarm count, increase sensitivity. */
6079 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6080 "%s: OFDM low false alarm count: %u\n", __func__, fa);
6081 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
6082 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
6083 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
6084 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
6085 }
6086
6087 /* Compute maximum noise among 3 receivers. */
6088 for (i = 0; i < 3; i++)
6089 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
6090 val = MAX(noise[0], noise[1]);
6091 val = MAX(noise[2], val);
6092 /* Insert it into our samples table. */
6093 calib->noise_samples[calib->cur_noise_sample] = val;
6094 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
6095
6096 /* Compute maximum noise among last 20 samples. */
6097 noise_ref = calib->noise_samples[0];
6098 for (i = 1; i < 20; i++)
6099 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
6100
6101 /* Compute maximum energy among 3 receivers. */
6102 for (i = 0; i < 3; i++)
6103 energy[i] = le32toh(stats->general.energy[i]);
6104 val = MIN(energy[0], energy[1]);
6105 val = MIN(energy[2], val);
6106 /* Insert it into our samples table. */
6107 calib->energy_samples[calib->cur_energy_sample] = val;
6108 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
6109
6110 /* Compute minimum energy among last 10 samples. */
6111 energy_min = calib->energy_samples[0];
6112 for (i = 1; i < 10; i++)
6113 energy_min = MAX(energy_min, calib->energy_samples[i]);
6114 energy_min += 6;
6115
6116 /* Compute number of false alarms since last call for CCK. */
6117 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
6118 fa += le32toh(stats->cck.fa) - calib->fa_cck;
6119 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
6120
6121 if (fa > 50 * rxena) {
6122 /* High false alarm count, decrease sensitivity. */
6123 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6124 "%s: CCK high false alarm count: %u\n", __func__, fa);
6125 calib->cck_state = IWN_CCK_STATE_HIFA;
6126 calib->low_fa = 0;
6127
6128 if (calib->cck_x4 > 160) {
6129 calib->noise_ref = noise_ref;
6130 if (calib->energy_cck > 2)
6131 dec(calib->energy_cck, 2, energy_min);
6132 }
6133 if (calib->cck_x4 < 160) {
6134 calib->cck_x4 = 161;
6135 needs_update = 1;
6136 } else
6137 inc(calib->cck_x4, 3, limits->max_cck_x4);
6138
6139 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
6140
6141 } else if (fa < 5 * rxena) {
6142 /* Low false alarm count, increase sensitivity. */
6143 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6144 "%s: CCK low false alarm count: %u\n", __func__, fa);
6145 calib->cck_state = IWN_CCK_STATE_LOFA;
6146 calib->low_fa++;
6147
6148 if (calib->cck_state != IWN_CCK_STATE_INIT &&
6149 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
6150 calib->low_fa > 100)) {
6151 inc(calib->energy_cck, 2, limits->min_energy_cck);
6152 dec(calib->cck_x4, 3, limits->min_cck_x4);
6153 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
6154 }
6155 } else {
6156 /* Not worth increasing or decreasing sensitivity. */
6157 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6158 "%s: CCK normal false alarm count: %u\n", __func__, fa);
6159 calib->low_fa = 0;
6160 calib->noise_ref = noise_ref;
6161
6162 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
6163 /* Previous interval had many false alarms. */
6164 dec(calib->energy_cck, 8, energy_min);
6165 }
6166 calib->cck_state = IWN_CCK_STATE_INIT;
6167 }
6168
6169 if (needs_update)
6170 (void)iwn_send_sensitivity(sc);
6171
6172 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6173
6174#undef dec
6175#undef inc
6176}
6177
6178static int
6179iwn_send_sensitivity(struct iwn_softc *sc)
6180{
6181 struct iwn_calib_state *calib = &sc->calib;
6182 struct iwn_enhanced_sensitivity_cmd cmd;
6183 int len;
6184
6185 memset(&cmd, 0, sizeof cmd);
6186 len = sizeof (struct iwn_sensitivity_cmd);
6187 cmd.which = IWN_SENSITIVITY_WORKTBL;
6188 /* OFDM modulation. */
6189 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
6190 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
6191 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
6192 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
6193 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
6194 cmd.energy_ofdm_th = htole16(62);
6195 /* CCK modulation. */
6196 cmd.corr_cck_x4 = htole16(calib->cck_x4);
6197 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
6198 cmd.energy_cck = htole16(calib->energy_cck);
6199 /* Barker modulation: use default values. */
6200 cmd.corr_barker = htole16(190);
6201 cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc);
6202
6203 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6204 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
6205 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
6206 calib->ofdm_mrc_x4, calib->cck_x4,
6207 calib->cck_mrc_x4, calib->energy_cck);
6208
6209 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
6210 goto send;
6211 /* Enhanced sensitivity settings. */
6212 len = sizeof (struct iwn_enhanced_sensitivity_cmd);
6213 cmd.ofdm_det_slope_mrc = htole16(668);
6214 cmd.ofdm_det_icept_mrc = htole16(4);
6215 cmd.ofdm_det_slope = htole16(486);
6216 cmd.ofdm_det_icept = htole16(37);
6217 cmd.cck_det_slope_mrc = htole16(853);
6218 cmd.cck_det_icept_mrc = htole16(4);
6219 cmd.cck_det_slope = htole16(476);
6220 cmd.cck_det_icept = htole16(99);
6221send:
6222 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
6223}
6224
6225/*
6226 * Look at the increase of PLCP errors over time; if it exceeds
6227 * a programmed threshold then trigger an RF retune.
6228 */
6229static void
6230iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs)
6231{
6232 int32_t delta_ofdm, delta_ht, delta_cck;
6233 struct iwn_calib_state *calib = &sc->calib;
6234 int delta_ticks, cur_ticks;
6235 int delta_msec;
6236 int thresh;
6237
6238 /*
6239 * Calculate the difference between the current and
6240 * previous statistics.
6241 */
6242 delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck;
6243 delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm;
6244 delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht;
6245
6246 /*
6247 * Calculate the delta in time between successive statistics
6248 * messages. Yes, it can roll over; so we make sure that
6249 * this doesn't happen.
6250 *
6251 * XXX go figure out what to do about rollover
6252 * XXX go figure out what to do if ticks rolls over to -ve instead!
6253 * XXX go stab signed integer overflow undefined-ness in the face.
6254 */
6255 cur_ticks = ticks;
6256 delta_ticks = cur_ticks - sc->last_calib_ticks;
6257
6258 /*
6259 * If any are negative, then the firmware likely reset; so just
6260 * bail. We'll pick this up next time.
6261 */
6262 if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0)
6263 return;
6264
6265 /*
6266 * delta_ticks is in ticks; we need to convert it up to milliseconds
6267 * so we can do some useful math with it.
6268 */
6269 delta_msec = ticks_to_msecs(delta_ticks);
6270
6271 /*
6272 * Calculate what our threshold is given the current delta_msec.
6273 */
6274 thresh = sc->base_params->plcp_err_threshold * delta_msec;
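/*
 * Illustrative example (threshold value is made up): with
 * delta_msec = 500 and a plcp_err_threshold of 100, thresh becomes
 * 50000; 600 combined PLCP errors then give 600 * 100 = 60000 > 50000
 * in the check further below, i.e. an extrapolated rate above
 * 100 errors per 100 ms.
 */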
6275
6276 DPRINTF(sc, IWN_DEBUG_STATE,
6277 "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n",
6278 __func__,
6279 delta_msec,
6280 delta_cck,
6281 delta_ofdm,
6282 delta_ht,
6283 (delta_msec + delta_cck + delta_ofdm + delta_ht),
6284 thresh);
6285
6286 /*
6287 * If we need a retune, then schedule a single channel scan
6288 * to a channel that isn't the currently active one!
6289 *
6290 * The math from linux iwlwifi:
6291 *
6292 * if ((delta * 100 / msecs) > threshold)
6293 */
6294 if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) {
6295 DPRINTF(sc, IWN_DEBUG_ANY,
6296 "%s: PLCP error threshold raw (%d) comparison (%d) "
6297 "over limit (%d); retune!\n",
6298 __func__,
6299 (delta_cck + delta_ofdm + delta_ht),
6300 (delta_cck + delta_ofdm + delta_ht) * 100,
6301 thresh);
6302 }
6303}
6304
6305/*
6306 * Set STA mode power saving level (between 0 and 5).
6307 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
6308 */
6309static int
6310iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
6311{
6312 struct iwn_pmgt_cmd cmd;
6313 const struct iwn_pmgt *pmgt;
6314 uint32_t max, skip_dtim;
6315 uint32_t reg;
6316 int i;
6317
6318 DPRINTF(sc, IWN_DEBUG_PWRSAVE,
6319 "%s: dtim=%d, level=%d, async=%d\n",
6320 __func__,
6321 dtim,
6322 level,
6323 async);
6324
6325 /* Select which PS parameters to use. */
6326 if (dtim <= 2)
6327 pmgt = &iwn_pmgt[0][level];
6328 else if (dtim <= 10)
6329 pmgt = &iwn_pmgt[1][level];
6330 else
6331 pmgt = &iwn_pmgt[2][level];
6332
6333 memset(&cmd, 0, sizeof cmd);
6334 if (level != 0) /* not CAM */
6335 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
6336 if (level == 5)
6337 cmd.flags |= htole16(IWN_PS_FAST_PD);
6338 /* Retrieve PCIe Active State Power Management (ASPM). */
6339 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
6340 if (!(reg & 0x1)) /* L0s Entry disabled. */
6341 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
6342 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
6343 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
6344
6345 if (dtim == 0) {
6346 dtim = 1;
6347 skip_dtim = 0;
6348 } else
6349 skip_dtim = pmgt->skip_dtim;
6350 if (skip_dtim != 0) {
6351 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
6352 max = pmgt->intval[4];
6353 if (max == (uint32_t)-1)
6354 max = dtim * (skip_dtim + 1);
6355 else if (max > dtim)
6356 max = (max / dtim) * dtim;
6357 } else
6358 max = dtim;
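/*
 * Illustrative example (interval values are made up): with dtim = 3 and
 * skip_dtim = 2, a wildcard intval[4] of -1 gives
 * max = 3 * (2 + 1) = 9 beacon intervals, while intval[4] = 10 is
 * rounded down to a multiple of the DTIM period: (10 / 3) * 3 = 9.
 */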
6359 for (i = 0; i < 5; i++)
6360 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
6361
6362 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
6363 level);
6364 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
6365}
6366
6367static int
6368iwn_send_btcoex(struct iwn_softc *sc)
6369{
6370 struct iwn_bluetooth cmd;
6371
6372 memset(&cmd, 0, sizeof cmd);
6373 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
6374 cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
6375 cmd.max_kill = IWN_BT_MAX_KILL_DEF;
6376 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
6377 __func__);
6378 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
6379}
6380
6381static int
6382iwn_send_advanced_btcoex(struct iwn_softc *sc)
6383{
6384 static const uint32_t btcoex_3wire[12] = {
6385 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
6386 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
6387 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
6388 };
6389 struct iwn6000_btcoex_config btconfig;
6390 struct iwn2000_btcoex_config btconfig2k;
6391 struct iwn_btcoex_priotable btprio;
6392 struct iwn_btcoex_prot btprot;
6393 int error, i;
6394 uint8_t flags;
6395
6396 memset(&btconfig, 0, sizeof btconfig);
6397 memset(&btconfig2k, 0, sizeof btconfig2k);
6398
6399 flags = IWN_BT_FLAG_COEX6000_MODE_3W <<
6400 IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2
6401
6402 if (sc->base_params->bt_sco_disable)
6403 flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE;
6404 else
6405 flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE;
6406
6407 flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION;
6408
6409 /* Default flags work out to 145, the same as the old value. */
6410
6411 /*
6412 * The flags value needs to be reviewed; it must change if we
6413 * wish to disable bluetooth coexistence.
6414 */
6415 if (sc->base_params->bt_session_2) {
6416 btconfig2k.flags = flags;
6417 btconfig2k.max_kill = 5;
6418 btconfig2k.bt3_t7_timer = 1;
6419 btconfig2k.kill_ack = htole32(0xffff0000);
6420 btconfig2k.kill_cts = htole32(0xffff0000);
6421 btconfig2k.sample_time = 2;
6422 btconfig2k.bt3_t2_timer = 0xc;
6423
6424 for (i = 0; i < 12; i++)
6425 btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]);
6426 btconfig2k.valid = htole16(0xff);
6427 btconfig2k.prio_boost = htole32(0xf0);
6428 DPRINTF(sc, IWN_DEBUG_RESET,
6429 "%s: configuring advanced bluetooth coexistence"
6430 " session 2, flags : 0x%x\n",
6431 __func__,
6432 flags);
6433 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k,
6434 sizeof(btconfig2k), 1);
6435 } else {
6436 btconfig.flags = flags;
6437 btconfig.max_kill = 5;
6438 btconfig.bt3_t7_timer = 1;
6439 btconfig.kill_ack = htole32(0xffff0000);
6440 btconfig.kill_cts = htole32(0xffff0000);
6441 btconfig.sample_time = 2;
6442 btconfig.bt3_t2_timer = 0xc;
6443
6444 for (i = 0; i < 12; i++)
6445 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
6446 btconfig.valid = htole16(0xff);
6447 btconfig.prio_boost = 0xf0;
6448 DPRINTF(sc, IWN_DEBUG_RESET,
6449 "%s: configuring advanced bluetooth coexistence,"
6450 " flags : 0x%x\n",
6451 __func__,
6452 flags);
6453 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
6454 sizeof(btconfig), 1);
6455 }
6456
6457 if (error != 0)
6458 return error;
6459
6460 memset(&btprio, 0, sizeof btprio);
6461 btprio.calib_init1 = 0x6;
6462 btprio.calib_init2 = 0x7;
6463 btprio.calib_periodic_low1 = 0x2;
6464 btprio.calib_periodic_low2 = 0x3;
6465 btprio.calib_periodic_high1 = 0x4;
6466 btprio.calib_periodic_high2 = 0x5;
6467 btprio.dtim = 0x6;
6468 btprio.scan52 = 0x8;
6469 btprio.scan24 = 0xa;
6470 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
6471 1);
6472 if (error != 0)
6473 return error;
6474
6475 /* Force BT state machine change. */
6476 memset(&btprot, 0, sizeof btprot);
6477 btprot.open = 1;
6478 btprot.type = 1;
6479 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
6480 if (error != 0)
6481 return error;
6482 btprot.open = 0;
6483 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
6484}
6485
6486static int
6487iwn5000_runtime_calib(struct iwn_softc *sc)
6488{
6489 struct iwn5000_calib_config cmd;
6490
6491 memset(&cmd, 0, sizeof cmd);
6492 cmd.ucode.once.enable = 0xffffffff;
6493 cmd.ucode.once.start = IWN5000_CALIB_DC;
6494 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
6495 "%s: configuring runtime calibration\n", __func__);
6496 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
6497}
6498
6499static uint32_t
6500iwn_get_rxon_ht_flags(struct iwn_softc *sc, struct ieee80211_channel *c)
6501{
6502 struct ieee80211com *ic = &sc->sc_ic;
6503 uint32_t htflags = 0;
6504
6505 if (! IEEE80211_IS_CHAN_HT(c))
6506 return (0);
6507
6508 htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
6509
6510 if (IEEE80211_IS_CHAN_HT40(c)) {
6511 switch (ic->ic_curhtprotmode) {
6512 case IEEE80211_HTINFO_OPMODE_HT20PR:
6513 htflags |= IWN_RXON_HT_MODEPURE40;
6514 break;
6515 default:
6516 htflags |= IWN_RXON_HT_MODEMIXED;
6517 break;
6518 }
6519 }
6520 if (IEEE80211_IS_CHAN_HT40D(c))
6521 htflags |= IWN_RXON_HT_HT40MINUS;
6522
6523 return (htflags);
6524}
6525
6526static int
6527iwn_config(struct iwn_softc *sc)
6528{
6529 struct iwn_ops *ops = &sc->ops;
6530 struct ieee80211com *ic = &sc->sc_ic;
6531 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6532 const uint8_t *macaddr;
6533 uint32_t txmask;
6534 uint16_t rxchain;
6535 int error;
6536
6537 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6538
6539 if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
6540 && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
6541 device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are"
6542 " mutually exclusive. Review the NIC config file. Conf"
6543 " : 0x%08x Flags : 0x%08x \n", __func__,
6544 sc->base_params->calib_need,
6545 (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
6546 IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
6547 return (EINVAL);
6548 }
6549
6550 /* Compute the temperature calibration if needed; it is sent later with the calibration data. */
6551 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
6552 error = iwn5000_temp_offset_calib(sc);
6553 if (error != 0) {
6554 device_printf(sc->sc_dev,
6555 "%s: could not set temperature offset\n", __func__);
6556 return (error);
6557 }
6558 } else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
6559 error = iwn5000_temp_offset_calibv2(sc);
6560 if (error != 0) {
6561 device_printf(sc->sc_dev,
6562 "%s: could not compute temperature offset v2\n",
6563 __func__);
6564 return (error);
6565 }
6566 }
6567
6568 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
6569 /* Configure runtime DC calibration. */
6570 error = iwn5000_runtime_calib(sc);
6571 if (error != 0) {
6572 device_printf(sc->sc_dev,
6573 "%s: could not configure runtime calibration\n",
6574 __func__);
6575 return error;
6576 }
6577 }
6578
6579 /* Configure valid TX chains for >=5000 Series. */
6580 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6581 IWN_UCODE_API(sc->ucode_rev) > 1) {
6582 txmask = htole32(sc->txchainmask);
6583 DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
6584 "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
6585 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
6586 sizeof txmask, 0);
6587 if (error != 0) {
6588 device_printf(sc->sc_dev,
6589 "%s: could not configure valid TX chains, "
6590 "error %d\n", __func__, error);
6591 return error;
6592 }
6593 }
6594
6595 /* Configure bluetooth coexistence. */
6596 error = 0;
6597
6598 /* Configure bluetooth coexistence if needed. */
6599 if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
6600 error = iwn_send_advanced_btcoex(sc);
6601 if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
6602 error = iwn_send_btcoex(sc);
6603
6604 if (error != 0) {
6605 device_printf(sc->sc_dev,
6606 "%s: could not configure bluetooth coexistence, error %d\n",
6607 __func__, error);
6608 return error;
6609 }
6610
6611 /* Set mode, channel, RX filter and enable RX. */
6612 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6613 memset(sc->rxon, 0, sizeof (struct iwn_rxon));
6614 macaddr = vap ? vap->iv_myaddr : ic->ic_macaddr;
6615 IEEE80211_ADDR_COPY(sc->rxon->myaddr, macaddr);
6616 IEEE80211_ADDR_COPY(sc->rxon->wlap, macaddr);
6617 sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
6618 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
6619 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
6620 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
6621 switch (ic->ic_opmode) {
6622 case IEEE80211_M_STA:
6623 sc->rxon->mode = IWN_MODE_STA;
6624 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
6625 break;
6626 case IEEE80211_M_MONITOR:
6627 sc->rxon->mode = IWN_MODE_MONITOR;
6628 sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
6629 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
6630 break;
6631 default:
6632 /* Should not get here. */
6633 break;
6634 }
6635 sc->rxon->cck_mask = 0x0f; /* not yet negotiated */
6636 sc->rxon->ofdm_mask = 0xff; /* not yet negotiated */
6637 sc->rxon->ht_single_mask = 0xff;
6638 sc->rxon->ht_dual_mask = 0xff;
6639 sc->rxon->ht_triple_mask = 0xff;
6640 /*
6641 * In active association mode, ensure that
6642 * all the receive chains are enabled.
6643 *
6644 * Since we're not yet doing SMPS, don't allow the
6645 * number of idle RX chains to be less than the active
6646 * number.
6647 */
6648 rxchain =
6649 IWN_RXCHAIN_VALID(sc->rxchainmask) |
6650 IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
6651 IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
6652 sc->rxon->rxchain = htole16(rxchain);
6653 DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
6654 "%s: rxchainmask=0x%x, nrxchains=%d\n",
6655 __func__,
6656 sc->rxchainmask,
6657 sc->nrxchains);
6658
6659 sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));
6660
6661 DPRINTF(sc, IWN_DEBUG_RESET,
6662 "%s: setting configuration; flags=0x%08x\n",
6663 __func__, le32toh(sc->rxon->flags));
6664 if (sc->sc_is_scanning)
6665 device_printf(sc->sc_dev,
6666 "%s: is_scanning set, before RXON\n",
6667 __func__);
6668 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
6669 if (error != 0) {
6670 device_printf(sc->sc_dev, "%s: RXON command failed\n",
6671 __func__);
6672 return error;
6673 }
6674
6675 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
6676 device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
6677 __func__);
6678 return error;
6679 }
6680
6681 /* Configuration has changed, set TX power accordingly. */
6682 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
6683 device_printf(sc->sc_dev, "%s: could not set TX power\n",
6684 __func__);
6685 return error;
6686 }
6687
6688 if ((error = iwn_set_critical_temp(sc)) != 0) {
6689 device_printf(sc->sc_dev,
6690 "%s: could not set critical temperature\n", __func__);
6691 return error;
6692 }
6693
6694 /* Set power saving level to CAM during initialization. */
6695 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
6696 device_printf(sc->sc_dev,
6697 "%s: could not set power saving level\n", __func__);
6698 return error;
6699 }
6700
6701 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
6702
6703 return 0;
6704}
6705
6706static uint16_t
6707iwn_get_active_dwell_time(struct iwn_softc *sc,
6708 struct ieee80211_channel *c, uint8_t n_probes)
6709{
6710 /* No channel? Default to 2GHz settings */
6711 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
6712 return (IWN_ACTIVE_DWELL_TIME_2GHZ +
6713 IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
6714 }
6715
6716 /* 5GHz dwell time */
6717 return (IWN_ACTIVE_DWELL_TIME_5GHZ +
6718 IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
6719}
6720
6721/*
6722 * Limit the total dwell time to 85% of the beacon interval.
6723 *
6724 * Returns the dwell time in milliseconds.
6725 */
6726static uint16_t
6727iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
6728{
6729 struct ieee80211com *ic = &sc->sc_ic;
6730 struct ieee80211vap *vap = NULL;
6731 int bintval = 0;
6732
6733 /* bintval is in TU (1.024mS) */
6734 if (! TAILQ_EMPTY(&ic->ic_vaps)) {
6735 vap = TAILQ_FIRST(&ic->ic_vaps);
6736 bintval = vap->iv_bss->ni_intval;
6737 }
6738
6739 /*
6740 * If it's non-zero, we should calculate the minimum of
6741 * it and the DWELL_BASE.
6742 *
6743 * XXX Yes, the math should take into account that bintval
6744 * is 1.024mS, not 1mS..
6745 */
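/*
 * Illustrative example: with a typical bintval of 100 TU the cap below
 * is (100 * 85) / 100 = 85, so the passive dwell becomes
 * MIN(IWN_PASSIVE_DWELL_BASE, 85).  (As the XXX above notes, the
 * 1.024 ms-per-TU scaling is ignored here.)
 */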
6746 if (bintval > 0) {
6747 DPRINTF(sc, IWN_DEBUG_SCAN,
6748 "%s: bintval=%d\n",
6749 __func__,
6750 bintval);
6751 return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
6752 }
6753
6754 /* No association context? Default */
6755 return (IWN_PASSIVE_DWELL_BASE);
6756}
6757
6758static uint16_t
6759iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
6760{
6761 uint16_t passive;
6762
6763 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
6764 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
6765 } else {
6766 passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
6767 }
6768
6769 /* Clamp to the beacon interval if we're associated */
6770 return (iwn_limit_dwell(sc, passive));
6771}
6772
6773static int
6774iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap,
6775 struct ieee80211_scan_state *ss, struct ieee80211_channel *c)
6776{
6777 struct ieee80211com *ic = &sc->sc_ic;
6778 struct ieee80211_node *ni = vap->iv_bss;
6779 struct iwn_scan_hdr *hdr;
6780 struct iwn_cmd_data *tx;
6781 struct iwn_scan_essid *essid;
6782 struct iwn_scan_chan *chan;
6783 struct ieee80211_frame *wh;
6784 struct ieee80211_rateset *rs;
6785 uint8_t *buf, *frm;
6786 uint16_t rxchain;
6787 uint8_t txant;
6788 int buflen, error;
6789 int is_active;
6790 uint16_t dwell_active, dwell_passive;
6791 uint32_t extra, scan_service_time;
6792
6793 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
6794
6795 /*
6796 * We are absolutely not allowed to send a scan command when another
6797 * scan command is pending.
6798 */
6799 if (sc->sc_is_scanning) {
6800 device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
6801 __func__);
6802 return (EAGAIN);
6803 }
6804
6805 /* Assign the scan channel */
6806 c = ic->ic_curchan;
6807
6808 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
6809 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
6810 if (buf == NULL) {
6811 device_printf(sc->sc_dev,
6812 "%s: could not allocate buffer for scan command\n",
6813 __func__);
6814 return ENOMEM;
6815 }
6816 hdr = (struct iwn_scan_hdr *)buf;
6817 /*
6818 * Move to the next channel if no frames are received within 10ms
6819 * after sending the probe request.
6820 */
6821 hdr->quiet_time = htole16(10); /* timeout in milliseconds */
6822 hdr->quiet_threshold = htole16(1); /* min # of packets */
6823 /*
6824 * Max needs to be greater than active and passive and quiet!
6825 * It's also in microseconds!
6826 */
6827 hdr->max_svc = htole32(250 * 1024);
6828
6829 /*
6830 * Reset scan: interval=100
6831 * Normal scan: interval=beacon interval
6832 * suspend_time: 100 (TU)
6833 *
6834 */
6835 extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22;
6836 //scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024);
6837 scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */
6838 hdr->pause_svc = htole32(scan_service_time);
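/*
 * Per the commented-out formula above, pause_svc packs the suspend time
 * as whole beacon intervals in the upper bits (<< 22) and the
 * remainder, converted from TU to microseconds, in the low 22 bits.
 * The hardcoded value keeps that layout: (4 << 22) | (100 * 1024).
 */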
6839
6840 /* Select antennas for scanning. */
6841 rxchain =
6842 IWN_RXCHAIN_VALID(sc->rxchainmask) |
6843 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
6844 IWN_RXCHAIN_DRIVER_FORCE;
6845 if (IEEE80211_IS_CHAN_A(c) &&
6846 sc->hw_type == IWN_HW_REV_TYPE_4965) {
6847 /* Ant A must be avoided in 5GHz because of an HW bug. */
6848 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
6849 } else /* Use all available RX antennas. */
6850 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
6851 hdr->rxchain = htole16(rxchain);
6852 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
6853
6854 tx = (struct iwn_cmd_data *)(hdr + 1);
6855 tx->flags = htole32(IWN_TX_AUTO_SEQ);
6856 tx->id = sc->broadcast_id;
6857 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
6858
6859 if (IEEE80211_IS_CHAN_5GHZ(c)) {
6860 /* Send probe requests at 6Mbps. */
6861 tx->rate = htole32(0xd);
6862 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6863 } else {
6864 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
6865 if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
6866 sc->rxon->associd && sc->rxon->chan > 14)
6867 tx->rate = htole32(0xd);
6868 else {
6869 /* Send probe requests at 1Mbps. */
6870 tx->rate = htole32(10 | IWN_RFLAG_CCK);
6871 }
6872 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6873 }
6874 /* Use the first valid TX antenna. */
6875 txant = IWN_LSB(sc->txchainmask);
6876 tx->rate |= htole32(IWN_RFLAG_ANT(txant));
6877
6878 /*
6879 * Only do active scanning if we're announcing a probe request
6880 * for a given SSID (or more, if we ever add it to the driver.)
6881 */
6882 is_active = 0;
6883
6884 /*
6885 * If we're scanning for a specific SSID, add it to the command.
6886 *
6887 * XXX maybe look at adding support for scanning multiple SSIDs?
6888 */
6889 essid = (struct iwn_scan_essid *)(tx + 1);
6890 if (ss != NULL) {
6891 if (ss->ss_ssid[0].len != 0) {
6892 essid[0].id = IEEE80211_ELEMID_SSID;
6893 essid[0].len = ss->ss_ssid[0].len;
6894 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
6895 }
6896
6897 DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n",
6898 __func__,
6899 ss->ss_ssid[0].len,
6900 ss->ss_ssid[0].len,
6901 ss->ss_ssid[0].ssid);
6902
6903 if (ss->ss_nssid > 0)
6904 is_active = 1;
6905 }
6906
6907 /*
6908 * Build a probe request frame. Most of the following code is a
6909 * copy & paste of what is done in net80211.
6910 */
6911 wh = (struct ieee80211_frame *)(essid + 20);
6912 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6913 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6914 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6915 IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_ifp->if_broadcastaddr);
6916 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(vap->iv_ifp));
6917 IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_ifp->if_broadcastaddr);
6918 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
6919 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
6920
6921 frm = (uint8_t *)(wh + 1);
6922 frm = ieee80211_add_ssid(frm, NULL, 0);
6923 frm = ieee80211_add_rates(frm, rs);
6924 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6925 frm = ieee80211_add_xrates(frm, rs);
6926 if (ic->ic_htcaps & IEEE80211_HTC_HT)
6927 frm = ieee80211_add_htcap(frm, ni);
6928
6929 /* Set length of probe request. */
6930 tx->len = htole16(frm - (uint8_t *)wh);
6931
6932 /*
6933 * If active scanning is requested but a certain channel is
6934 * marked passive, we can do active scanning if we detect
6935 * transmissions.
6936 *
6937 * There is an issue with some firmware versions that triggers
6938 * a sysassert on a "good CRC threshold" of zero (== disabled),
6939 * on a radar channel even though this means that we should NOT
6940 * send probes.
6941 *
6942 * The "good CRC threshold" is the number of frames that we
6943 * need to receive during our dwell time on a channel before
6944 * sending out probes -- setting this to a huge value will
6945 * mean we never reach it, but at the same time work around
6946 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
6947 * here instead of IWL_GOOD_CRC_TH_DISABLED.
6948 *
6949 * This was fixed in later versions along with some other
6950 * scan changes, and the threshold behaves as a flag in those
6951 * versions.
6952 */
6953
6954 /*
6955 * If we're doing active scanning, set the crc_threshold
6956 * to a suitable value.  This is separate from the active-versus-passive
6957 * decision made from the channel flags; the firmware handles
6958 * that particular check for us.
6959 */
6960 if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
6961 hdr->crc_threshold = is_active ?
6962 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
6963 else
6964 hdr->crc_threshold = is_active ?
6965 IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
6966
6967 chan = (struct iwn_scan_chan *)frm;
6968 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
6969 chan->flags = 0;
6970 if (ss->ss_nssid > 0)
6971 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
6972 chan->dsp_gain = 0x6e;
6973
6974 /*
6975 * Set the passive/active flag depending upon the channel mode.
6976 * XXX TODO: take the is_active flag into account as well?
6977 */
6978 if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
6979 chan->flags |= htole32(IWN_CHAN_PASSIVE);
6980 else
6981 chan->flags |= htole32(IWN_CHAN_ACTIVE);
6982
6983 /*
6984 * Calculate the active/passive dwell times.
6985 */
6986
6987 dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid);
6988 dwell_passive = iwn_get_passive_dwell_time(sc, c);
6989
6990 /* Make sure they're valid */
6991 if (dwell_passive <= dwell_active)
6992 dwell_passive = dwell_active + 1;
6993
6994 chan->active = htole16(dwell_active);
6995 chan->passive = htole16(dwell_passive);
6996
6997 if (IEEE80211_IS_CHAN_5GHZ(c))
6998 chan->rf_gain = 0x3b;
6999 else
7000 chan->rf_gain = 0x28;
7001
7002 DPRINTF(sc, IWN_DEBUG_STATE,
7003 "%s: chan %u flags 0x%x rf_gain 0x%x "
7004 "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x "
7005 "isactive=%d numssid=%d\n", __func__,
7006 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
7007 dwell_active, dwell_passive, scan_service_time,
7008 hdr->crc_threshold, is_active, ss->ss_nssid);
7009
7010 hdr->nchan++;
7011 chan++;
7012 buflen = (uint8_t *)chan - buf;
7013 hdr->len = htole16(buflen);
7014
7015 if (sc->sc_is_scanning) {
7016 device_printf(sc->sc_dev,
7017 "%s: called with is_scanning set!\n",
7018 __func__);
7019 }
7020 sc->sc_is_scanning = 1;
7021
7022 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
7023 hdr->nchan);
7024 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
7025 free(buf, M_DEVBUF);
7026
7027 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7028
7029 return error;
7030}
7031
7032static int
7033iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
7034{
7035 struct iwn_ops *ops = &sc->ops;
7036 struct ieee80211com *ic = &sc->sc_ic;
7037 struct ieee80211_node *ni = vap->iv_bss;
7038 int error;
7039
7040 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7041
7042 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
7043 /* Update adapter configuration. */
7044 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
7045 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
7046 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
7047 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
7048 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
7049 if (ic->ic_flags & IEEE80211_F_SHSLOT)
7050 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
7051 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7052 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
7053 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
7054 sc->rxon->cck_mask = 0;
7055 sc->rxon->ofdm_mask = 0x15;
7056 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
7057 sc->rxon->cck_mask = 0x03;
7058 sc->rxon->ofdm_mask = 0;
7059 } else {
7060 /* Assume 802.11b/g. */
7061 sc->rxon->cck_mask = 0x03;
7062 sc->rxon->ofdm_mask = 0x15;
7063 }
7064
7065 /* try HT */
7066 sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));
7067
7068 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
7069 sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
7070 sc->rxon->ofdm_mask);
7071 if (sc->sc_is_scanning)
7072 device_printf(sc->sc_dev,
7073 "%s: is_scanning set, before RXON\n",
7074 __func__);
7075 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
7076 if (error != 0) {
7077 device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
7078 __func__, error);
7079 return error;
7080 }
7081
7082 /* Configuration has changed, set TX power accordingly. */
7083 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
7084 device_printf(sc->sc_dev,
7085 "%s: could not set TX power, error %d\n", __func__, error);
7086 return error;
7087 }
7088 /*
7089 * Reconfiguring RXON clears the firmware nodes table so we must
7090 * add the broadcast node again.
7091 */
7092 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
7093 device_printf(sc->sc_dev,
7094 "%s: could not add broadcast node, error %d\n", __func__,
7095 error);
7096 return error;
7097 }
7098
7099 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7100
7101 return 0;
7102}
7103
7104static int
7105iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
7106{
7107 struct iwn_ops *ops = &sc->ops;
7108 struct ieee80211com *ic = &sc->sc_ic;
7109 struct ieee80211_node *ni = vap->iv_bss;
7110 struct iwn_node_info node;
7111 int error;
7112
7113 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7114
7115 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
7116 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7117 /* Link LED blinks while monitoring. */
7118 iwn_set_led(sc, IWN_LED_LINK, 5, 5);
7119 return 0;
7120 }
7121 if ((error = iwn_set_timing(sc, ni)) != 0) {
7122 device_printf(sc->sc_dev,
7123 "%s: could not set timing, error %d\n", __func__, error);
7124 return error;
7125 }
7126
7127 /* Update adapter configuration. */
7128 IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
7129 sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
7130 sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
7131 sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
7132 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
7133 sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
7134 if (ic->ic_flags & IEEE80211_F_SHSLOT)
7135 sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
7136 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7137 sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
7138 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
7139 sc->rxon->cck_mask = 0;
7140 sc->rxon->ofdm_mask = 0x15;
7141 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
7142 sc->rxon->cck_mask = 0x03;
7143 sc->rxon->ofdm_mask = 0;
7144 } else {
7145 /* Assume 802.11b/g. */
7146 sc->rxon->cck_mask = 0x0f;
7147 sc->rxon->ofdm_mask = 0x15;
7148 }
7149 /* try HT */
7150 sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ni->ni_chan));
7151 sc->rxon->filter |= htole32(IWN_FILTER_BSS);
7152 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x, curhtprotmode=%d\n",
7153 sc->rxon->chan, le32toh(sc->rxon->flags), ic->ic_curhtprotmode);
7154 if (sc->sc_is_scanning)
7155 device_printf(sc->sc_dev,
7156 "%s: is_scanning set, before RXON\n",
7157 __func__);
7158 error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
7159 if (error != 0) {
7160 device_printf(sc->sc_dev,
7161 "%s: could not update configuration, error %d\n", __func__,
7162 error);
7163 return error;
7164 }
7165
7166 /* Configuration has changed, set TX power accordingly. */
7167 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
7168 device_printf(sc->sc_dev,
7169 "%s: could not set TX power, error %d\n", __func__, error);
7170 return error;
7171 }
7172
7173 /* Fake a join to initialize the TX rate. */
7174 ((struct iwn_node *)ni)->id = IWN_ID_BSS;
7175 iwn_newassoc(ni, 1);
7176
7177 /* Add BSS node. */
7178 memset(&node, 0, sizeof node);
7179 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
7180 node.id = IWN_ID_BSS;
7181 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
7182 switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
7183 case IEEE80211_HTCAP_SMPS_ENA:
7184 node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
7185 break;
7186 case IEEE80211_HTCAP_SMPS_DYNAMIC:
7187 node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
7188 break;
7189 }
7190 node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
7191 IWN_AMDPU_DENSITY(5)); /* 4us */
7192 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
7193 node.htflags |= htole32(IWN_NODE_HT40);
7194 }
7195 DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
7196 error = ops->add_node(sc, &node, 1);
7197 if (error != 0) {
7198 device_printf(sc->sc_dev,
7199 "%s: could not add BSS node, error %d\n", __func__, error);
7200 return error;
7201 }
7202 DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
7203 __func__, node.id);
7204 if ((error = iwn_set_link_quality(sc, ni)) != 0) {
7205 device_printf(sc->sc_dev,
7206 "%s: could not setup link quality for node %d, error %d\n",
7207 __func__, node.id, error);
7208 return error;
7209 }
7210
7211 if ((error = iwn_init_sensitivity(sc)) != 0) {
7212 device_printf(sc->sc_dev,
7213 "%s: could not set sensitivity, error %d\n", __func__,
7214 error);
7215 return error;
7216 }
7217 /* Start periodic calibration timer. */
7218 sc->calib.state = IWN_CALIB_STATE_ASSOC;
7219 sc->calib_cnt = 0;
7220 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
7221 sc);
7222
7223 /* Link LED always on while associated. */
7224 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
7225
7226 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7227
7228 return 0;
7229}
7230
7231/*
7232 * This function is called by upper layer when an ADDBA request is received
7233 * from another STA and before the ADDBA response is sent.
7234 */
7235static int
7236iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
7237 int baparamset, int batimeout, int baseqctl)
7238{
7239#define MS(_v, _f) (((_v) & _f) >> _f##_S)
7240 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7241 struct iwn_ops *ops = &sc->ops;
7242 struct iwn_node *wn = (void *)ni;
7243 struct iwn_node_info node;
7244 uint16_t ssn;
7245 uint8_t tid;
7246 int error;
7247
7248 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7249
7250 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
7251 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
7252
7253 memset(&node, 0, sizeof node);
7254 node.id = wn->id;
7255 node.control = IWN_NODE_UPDATE;
7256 node.flags = IWN_FLAG_SET_ADDBA;
7257 node.addba_tid = tid;
7258 node.addba_ssn = htole16(ssn);
7259 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
7260 wn->id, tid, ssn);
7261 error = ops->add_node(sc, &node, 1);
7262 if (error != 0)
7263 return error;
7264 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
7265#undef MS
7266}
7267
7268/*
7269 * This function is called by the upper layer on teardown of an HT-immediate
7270 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
7271 */
7272static void
7273iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
7274{
7275 struct ieee80211com *ic = ni->ni_ic;
7276 struct iwn_softc *sc = ic->ic_softc;
7277 struct iwn_ops *ops = &sc->ops;
7278 struct iwn_node *wn = (void *)ni;
7279 struct iwn_node_info node;
7280 uint8_t tid;
7281
7282 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7283
7284 /* XXX: tid as an argument */
7285 for (tid = 0; tid < WME_NUM_TID; tid++) {
7286 if (&ni->ni_rx_ampdu[tid] == rap)
7287 break;
7288 }
7289
7290 memset(&node, 0, sizeof node);
7291 node.id = wn->id;
7292 node.control = IWN_NODE_UPDATE;
7293 node.flags = IWN_FLAG_SET_DELBA;
7294 node.delba_tid = tid;
7295 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
7296 (void)ops->add_node(sc, &node, 1);
7297 sc->sc_ampdu_rx_stop(ni, rap);
7298}
7299
7300static int
7301iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
7302 int dialogtoken, int baparamset, int batimeout)
7303{
7304 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7305 int qid;
7306
7307 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7308
7309 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
7310 if (sc->qid2tap[qid] == NULL)
7311 break;
7312 }
7313 if (qid == sc->ntxqs) {
7314 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
7315 __func__);
7316 return 0;
7317 }
7318 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
7319 if (tap->txa_private == NULL) {
7320 device_printf(sc->sc_dev,
7321 "%s: failed to alloc TX aggregation structure\n", __func__);
7322 return 0;
7323 }
7324 sc->qid2tap[qid] = tap;
7325 *(int *)tap->txa_private = qid;
7326 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
7327 batimeout);
7328}
7329
7330static int
7331iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
7332 int code, int baparamset, int batimeout)
7333{
7334 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7335 int qid = *(int *)tap->txa_private;
7336 uint8_t tid = tap->txa_tid;
7337 int ret;
7338
7339 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7340
7341 if (code == IEEE80211_STATUS_SUCCESS) {
7342 ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
7343 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
7344 if (ret != 1)
7345 return ret;
7346 } else {
7347 sc->qid2tap[qid] = NULL;
7348 free(tap->txa_private, M_DEVBUF);
7349 tap->txa_private = NULL;
7350 }
7351 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
7352}
7353
7354/*
7355 * This function is called by upper layer when an ADDBA response is received
7356 * from another STA.
7357 */
7358static int
7359iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
7360 uint8_t tid)
7361{
7362 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
7363 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7364 struct iwn_ops *ops = &sc->ops;
7365 struct iwn_node *wn = (void *)ni;
7366 struct iwn_node_info node;
7367 int error, qid;
7368
7369 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7370
7371 /* Enable TX for the specified RA/TID. */
7372 wn->disable_tid &= ~(1 << tid);
7373 memset(&node, 0, sizeof node);
7374 node.id = wn->id;
7375 node.control = IWN_NODE_UPDATE;
7376 node.flags = IWN_FLAG_SET_DISABLE_TID;
7377 node.disable_tid = htole16(wn->disable_tid);
7378 error = ops->add_node(sc, &node, 1);
7379 if (error != 0)
7380 return 0;
7381
7382 if ((error = iwn_nic_lock(sc)) != 0)
7383 return 0;
7384 qid = *(int *)tap->txa_private;
7385 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
7386 __func__, wn->id, tid, tap->txa_start, qid);
7387 ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
7388 iwn_nic_unlock(sc);
7389
7390 iwn_set_link_quality(sc, ni);
7391 return 1;
7392}
7393
7394static void
7395iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
7396{
7397 struct iwn_softc *sc = ni->ni_ic->ic_softc;
7398 struct iwn_ops *ops = &sc->ops;
7399 uint8_t tid = tap->txa_tid;
7400 int qid;
7401
7402 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7403
7404 sc->sc_addba_stop(ni, tap);
7405
7406 if (tap->txa_private == NULL)
7407 return;
7408
7409 qid = *(int *)tap->txa_private;
7410 if (sc->txq[qid].queued != 0)
7411 return;
7412 if (iwn_nic_lock(sc) != 0)
7413 return;
7414 ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
7415 iwn_nic_unlock(sc);
7416 sc->qid2tap[qid] = NULL;
7417 free(tap->txa_private, M_DEVBUF);
7418 tap->txa_private = NULL;
7419}
7420
7421static void
7422iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
7423 int qid, uint8_t tid, uint16_t ssn)
7424{
7425 struct iwn_node *wn = (void *)ni;
7426
7427 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7428
7429 /* Stop TX scheduler while we're changing its configuration. */
7430 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7431 IWN4965_TXQ_STATUS_CHGACT);
7432
7433 /* Assign RA/TID translation to the queue. */
7434 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
7435 wn->id << 4 | tid);
7436
7437 /* Enable chain-building mode for the queue. */
7438 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
7439
7440 /* Set starting sequence number from the ADDBA request. */
7441 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
7442 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7443 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
7444
7445 /* Set scheduler window size. */
7446 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
7447 IWN_SCHED_WINSZ);
7448 /* Set scheduler frame limit. */
7449 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
7450 IWN_SCHED_LIMIT << 16);
7451
7452 /* Enable interrupts for the queue. */
7453 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
7454
7455 /* Mark the queue as active. */
7456 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7457 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
7458 iwn_tid2fifo[tid] << 1);
7459}
7460
7461static void
7462iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
7463{
7464 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7465
7466 /* Stop TX scheduler while we're changing its configuration. */
7467 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7468 IWN4965_TXQ_STATUS_CHGACT);
7469
7470 /* Set starting sequence number from the ADDBA request. */
7471 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7472 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
7473
7474 /* Disable interrupts for the queue. */
7475 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
7476
7477 /* Mark the queue as inactive. */
7478 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7479 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
7480}
7481
7482static void
7483iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
7484 int qid, uint8_t tid, uint16_t ssn)
7485{
7486	struct iwn_node *wn = (void *)ni;
7487
7488	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7489
7490 /* Stop TX scheduler while we're changing its configuration. */
7491 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7492 IWN5000_TXQ_STATUS_CHGACT);
7493
7494 /* Assign RA/TID translation to the queue. */
7495 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
7496 wn->id << 4 | tid);
7497
7498 /* Enable chain-building mode for the queue. */
7499 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
7500
7501 /* Enable aggregation for the queue. */
7502 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
7503
7504 /* Set starting sequence number from the ADDBA request. */
7505 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
7506 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7507 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
7508
7509 /* Set scheduler window size and frame limit. */
7510 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
7511 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
7512
7513 /* Enable interrupts for the queue. */
7514 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
7515
7516 /* Mark the queue as active. */
7517 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7518 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
7519}
7520
7521static void
7522iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
7523{
7524 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7525
7526 /* Stop TX scheduler while we're changing its configuration. */
7527 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7528 IWN5000_TXQ_STATUS_CHGACT);
7529
7530 /* Disable aggregation for the queue. */
7531 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
7532
7533 /* Set starting sequence number from the ADDBA request. */
7534 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
7535 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
7536
7537 /* Disable interrupts for the queue. */
7538 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
7539
7540 /* Mark the queue as inactive. */
7541 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7542 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
7543}
7544
7545/*
7546 * Query calibration tables from the initialization firmware. We do this
7547 * only once at first boot. Called from a process context.
7548 */
7549static int
7550iwn5000_query_calibration(struct iwn_softc *sc)
7551{
7552 struct iwn5000_calib_config cmd;
7553 int error;
7554
7555 memset(&cmd, 0, sizeof cmd);
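	/* All-ones masks request every calibration the firmware supports. */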
7556 cmd.ucode.once.enable = htole32(0xffffffff);
7557 cmd.ucode.once.start = htole32(0xffffffff);
7558 cmd.ucode.once.send = htole32(0xffffffff);
7559 cmd.ucode.flags = htole32(0xffffffff);
7560 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
7561 __func__);
7562 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
7563 if (error != 0)
7564 return error;
7565
7566 /* Wait at most two seconds for calibration to complete. */
7567 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
7568 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
7569 return error;
7570}
7571
7572/*
7573 * Send calibration results to the runtime firmware. These results were
7574 * obtained on first boot from the initialization firmware.
7575 */
7576static int
7577iwn5000_send_calibration(struct iwn_softc *sc)
7578{
7579 int idx, error;
7580
7581 for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
7582 if (!(sc->base_params->calib_need & (1<<idx))) {
7583 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7584			    "calibration %d not needed\n",
7585 idx);
7586 continue; /* no need for this calib */
7587 }
7588 if (sc->calibcmd[idx].buf == NULL) {
7589 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7590			    "calibration %d needed but no data available\n",
7591 idx);
7592 continue;
7593 }
7594
7595 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7596 "send calibration result idx=%d len=%d\n", idx,
7597 sc->calibcmd[idx].len);
7598 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
7599 sc->calibcmd[idx].len, 0);
7600 if (error != 0) {
7601 device_printf(sc->sc_dev,
7602 "%s: could not send calibration result, error %d\n",
7603 __func__, error);
7604 return error;
7605 }
7606 }
7607 return 0;
7608}
7609
7610static int
7611iwn5000_send_wimax_coex(struct iwn_softc *sc)
7612{
7613 struct iwn5000_wimax_coex wimax;
7614
7615#if 0
7616 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
7617 /* Enable WiMAX coexistence for combo adapters. */
7618 wimax.flags =
7619 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
7620 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
7621 IWN_WIMAX_COEX_STA_TABLE_VALID |
7622 IWN_WIMAX_COEX_ENABLE;
7623 memcpy(wimax.events, iwn6050_wimax_events,
7624 sizeof iwn6050_wimax_events);
7625 } else
7626#endif
7627 {
7628 /* Disable WiMAX coexistence. */
7629 wimax.flags = 0;
7630 memset(wimax.events, 0, sizeof wimax.events);
7631 }
7632 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
7633 __func__);
7634 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
7635}
7636
7637static int
7638iwn5000_crystal_calib(struct iwn_softc *sc)
7639{
7640 struct iwn5000_phy_calib_crystal cmd;
7641
7642 memset(&cmd, 0, sizeof cmd);
7643 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
7644 cmd.ngroups = 1;
7645 cmd.isvalid = 1;
7646 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
7647 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
7648 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
7649 cmd.cap_pin[0], cmd.cap_pin[1]);
7650 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7651}
7652
7653static int
7654iwn5000_temp_offset_calib(struct iwn_softc *sc)
7655{
7656 struct iwn5000_phy_calib_temp_offset cmd;
7657
7658 memset(&cmd, 0, sizeof cmd);
7659 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
7660 cmd.ngroups = 1;
7661 cmd.isvalid = 1;
7662 if (sc->eeprom_temp != 0)
7663 cmd.offset = htole16(sc->eeprom_temp);
7664 else
7665 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
7666 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
7667 le16toh(cmd.offset));
7668 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7669}
7670
7671static int
7672iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
7673{
7674 struct iwn5000_phy_calib_temp_offsetv2 cmd;
7675
7676 memset(&cmd, 0, sizeof cmd);
7677 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
7678 cmd.ngroups = 1;
7679 cmd.isvalid = 1;
7680 if (sc->eeprom_temp != 0) {
7681 cmd.offset_low = htole16(sc->eeprom_temp);
7682 cmd.offset_high = htole16(sc->eeprom_temp_high);
7683 } else {
7684 cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
7685 cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
7686 }
7687 cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
7688
7689 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
7690 "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
7691 le16toh(cmd.offset_low),
7692 le16toh(cmd.offset_high),
7693 le16toh(cmd.burnt_voltage_ref));
7694
7695 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
7696}
7697
7698/*
7699 * This function is called after the runtime firmware notifies us of its
7700 * readiness (called in a process context).
7701 */
7702static int
7703iwn4965_post_alive(struct iwn_softc *sc)
7704{
7705 int error, qid;
7706
7707 if ((error = iwn_nic_lock(sc)) != 0)
7708 return error;
7709
7710 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7711
7712 /* Clear TX scheduler state in SRAM. */
7713 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
7714 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
7715 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
7716
7717 /* Set physical address of TX scheduler rings (1KB aligned). */
7718 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
7719
7720 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
7721
7722 /* Disable chain mode for all our 16 queues. */
7723 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
7724
7725 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
7726 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
7727 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
7728
7729 /* Set scheduler window size. */
7730 iwn_mem_write(sc, sc->sched_base +
7731 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
7732 /* Set scheduler frame limit. */
7733 iwn_mem_write(sc, sc->sched_base +
7734 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
7735 IWN_SCHED_LIMIT << 16);
7736 }
7737
7738 /* Enable interrupts for all our 16 queues. */
7739 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
7740 /* Identify TX FIFO rings (0-7). */
7741 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
7742
7743 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
7744 for (qid = 0; qid < 7; qid++) {
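		/* Per-queue TX FIFO assignment (indexed by qid). */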
7745 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
7746 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
7747 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
7748 }
7749 iwn_nic_unlock(sc);
7750 return 0;
7751}
7752
7753/*
7754 * This function is called after the initialization or runtime firmware
7755 * notifies us of its readiness (called in a process context).
7756 */
7757static int
7758iwn5000_post_alive(struct iwn_softc *sc)
7759{
7760 int error, qid;
7761
7762 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
7763
7764 /* Switch to using ICT interrupt mode. */
7765 iwn5000_ict_reset(sc);
7766
7767 if ((error = iwn_nic_lock(sc)) != 0){
7768 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
7769 return error;
7770 }
7771
7772 /* Clear TX scheduler state in SRAM. */
7773 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
7774 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
7775 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
7776
7777 /* Set physical address of TX scheduler rings (1KB aligned). */
7778 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
7779
7780 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
7781
7782 /* Enable chain mode for all queues, except command queue. */
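	/* The cleared bit in each mask is the command queue for that layout. */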
7783 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
7784 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf);
7785 else
7786 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
7787 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
7788
7789 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
7790 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
7791 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
7792
7793 iwn_mem_write(sc, sc->sched_base +
7794 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
7795 /* Set scheduler window size and frame limit. */
7796 iwn_mem_write(sc, sc->sched_base +
7797 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
7798 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
7799 }
7800
7801 /* Enable interrupts for all our 20 queues. */
7802 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
7803 /* Identify TX FIFO rings (0-7). */
7804 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
7805
7806	/* Mark TX rings as active. */
7807	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) {
7808		/* The PAN-capable layout uses 11 TX rings. */
7809 for (qid = 0; qid < 11; qid++) {
7810 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 };
7811 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7812 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
7813 }
7814 } else {
7815 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
7816 for (qid = 0; qid < 7; qid++) {
7817 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
7818 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
7819 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
7820 }
7821 }
7822 iwn_nic_unlock(sc);
7823
7824 /* Configure WiMAX coexistence for combo adapters. */
7825 error = iwn5000_send_wimax_coex(sc);
7826 if (error != 0) {
7827 device_printf(sc->sc_dev,
7828 "%s: could not configure WiMAX coexistence, error %d\n",
7829 __func__, error);
7830 return error;
7831 }
7832 if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
7833 /* Perform crystal calibration. */
7834 error = iwn5000_crystal_calib(sc);
7835 if (error != 0) {
7836 device_printf(sc->sc_dev,
7837 "%s: crystal calibration failed, error %d\n",
7838 __func__, error);
7839 return error;
7840 }
7841 }
7842 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
7843 /* Query calibration from the initialization firmware. */
7844 if ((error = iwn5000_query_calibration(sc)) != 0) {
7845 device_printf(sc->sc_dev,
7846 "%s: could not query calibration, error %d\n",
7847 __func__, error);
7848 return error;
7849 }
7850 /*
7851 * We have the calibration results now, reboot with the
7852 * runtime firmware (call ourselves recursively!)
7853 */
7854 iwn_hw_stop(sc);
7855 error = iwn_hw_init(sc);
7856 } else {
7857 /* Send calibration results to runtime firmware. */
7858 error = iwn5000_send_calibration(sc);
7859 }
7860
7861 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
7862
7863 return error;
7864}
7865
7866/*
7867 * The firmware boot code is small and is intended to be copied directly into
7868 * the NIC internal memory (no DMA transfer).
7869 */
7870static int
7871iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
7872{
7873 int error, ntries;
7874
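	/* The BSM counts in 32-bit words, so convert the byte count. */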
7875 size /= sizeof (uint32_t);
7876
7877 if ((error = iwn_nic_lock(sc)) != 0)
7878 return error;
7879
7880 /* Copy microcode image into NIC memory. */
7881 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
7882 (const uint32_t *)ucode, size);
7883
7884 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
7885 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
7886 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
7887
7888 /* Start boot load now. */
7889 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
7890
7891 /* Wait for transfer to complete. */
7892 for (ntries = 0; ntries < 1000; ntries++) {
7893 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
7894 IWN_BSM_WR_CTRL_START))
7895 break;
7896 DELAY(10);
7897 }
7898 if (ntries == 1000) {
7899 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
7900 __func__);
7901 iwn_nic_unlock(sc);
7902 return ETIMEDOUT;
7903 }
7904
7905 /* Enable boot after power up. */
7906 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
7907
7908 iwn_nic_unlock(sc);
7909 return 0;
7910}
7911
7912static int
7913iwn4965_load_firmware(struct iwn_softc *sc)
7914{
7915 struct iwn_fw_info *fw = &sc->fw;
7916 struct iwn_dma_info *dma = &sc->fw_dma;
7917 int error;
7918
7919 /* Copy initialization sections into pre-allocated DMA-safe memory. */
7920 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
7921 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7922 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
7923 fw->init.text, fw->init.textsz);
7924 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7925
7926 /* Tell adapter where to find initialization sections. */
7927 if ((error = iwn_nic_lock(sc)) != 0)
7928 return error;
7929 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
7930 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
7931 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
7932 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
7933 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
7934 iwn_nic_unlock(sc);
7935
7936 /* Load firmware boot code. */
7937 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
7938 if (error != 0) {
7939 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
7940 __func__);
7941 return error;
7942 }
7943 /* Now press "execute". */
7944 IWN_WRITE(sc, IWN_RESET, 0);
7945
7946 /* Wait at most one second for first alive notification. */
7947 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
7948 device_printf(sc->sc_dev,
7949 "%s: timeout waiting for adapter to initialize, error %d\n",
7950 __func__, error);
7951 return error;
7952 }
7953
7954 /* Retrieve current temperature for initial TX power calibration. */
7955 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
7956 sc->temp = iwn4965_get_temperature(sc);
7957
7958 /* Copy runtime sections into pre-allocated DMA-safe memory. */
7959 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
7960 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7961 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
7962 fw->main.text, fw->main.textsz);
7963 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7964
7965 /* Tell adapter where to find runtime sections. */
7966 if ((error = iwn_nic_lock(sc)) != 0)
7967 return error;
7968 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
7969 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
7970 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
7971 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
7972 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
7973 IWN_FW_UPDATED | fw->main.textsz);
7974 iwn_nic_unlock(sc);
7975
7976 return 0;
7977}
7978
7979static int
7980iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
7981 const uint8_t *section, int size)
7982{
7983 struct iwn_dma_info *dma = &sc->fw_dma;
7984 int error;
7985
7986 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
7987
7988 /* Copy firmware section into pre-allocated DMA-safe memory. */
7989 memcpy(dma->vaddr, section, size);
7990 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
7991
7992 if ((error = iwn_nic_lock(sc)) != 0)
7993 return error;
7994
7995 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
7996 IWN_FH_TX_CONFIG_DMA_PAUSE);
7997
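	/* Point the service DMA channel at the SRAM destination and source buffer. */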
7998 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
7999 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
8000 IWN_LOADDR(dma->paddr));
8001 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
8002 IWN_HIADDR(dma->paddr) << 28 | size);
8003 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
8004 IWN_FH_TXBUF_STATUS_TBNUM(1) |
8005 IWN_FH_TXBUF_STATUS_TBIDX(1) |
8006 IWN_FH_TXBUF_STATUS_TFBD_VALID);
8007
8008 /* Kick Flow Handler to start DMA transfer. */
8009 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
8010 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
8011
8012 iwn_nic_unlock(sc);
8013
8014 /* Wait at most five seconds for FH DMA transfer to complete. */
8015 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
8016}
8017
8018static int
8019iwn5000_load_firmware(struct iwn_softc *sc)
8020{
8021 struct iwn_fw_part *fw;
8022 int error;
8023
8024 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8025
8026 /* Load the initialization firmware on first boot only. */
8027 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
8028 &sc->fw.main : &sc->fw.init;
8029
8030 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
8031 fw->text, fw->textsz);
8032 if (error != 0) {
8033 device_printf(sc->sc_dev,
8034 "%s: could not load firmware %s section, error %d\n",
8035 __func__, ".text", error);
8036 return error;
8037 }
8038 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
8039 fw->data, fw->datasz);
8040 if (error != 0) {
8041 device_printf(sc->sc_dev,
8042 "%s: could not load firmware %s section, error %d\n",
8043 __func__, ".data", error);
8044 return error;
8045 }
8046
8047 /* Now press "execute". */
8048 IWN_WRITE(sc, IWN_RESET, 0);
8049 return 0;
8050}
8051
8052/*
8053 * Extract text and data sections from a legacy firmware image.
8054 */
8055static int
8056iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
8057{
8058 const uint32_t *ptr;
8059 size_t hdrlen = 24;
8060 uint32_t rev;
8061
8062 ptr = (const uint32_t *)fw->data;
8063 rev = le32toh(*ptr++);
8064
8065 sc->ucode_rev = rev;
8066
8067 /* Check firmware API version. */
8068 if (IWN_FW_API(rev) <= 1) {
8069 device_printf(sc->sc_dev,
8070 "%s: bad firmware, need API version >=2\n", __func__);
8071 return EINVAL;
8072 }
8073 if (IWN_FW_API(rev) >= 3) {
8074 /* Skip build number (version 2 header). */
8075 hdrlen += 4;
8076 ptr++;
8077 }
8078 if (fw->size < hdrlen) {
8079 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8080 __func__, fw->size);
8081 return EINVAL;
8082 }
8083 fw->main.textsz = le32toh(*ptr++);
8084 fw->main.datasz = le32toh(*ptr++);
8085 fw->init.textsz = le32toh(*ptr++);
8086 fw->init.datasz = le32toh(*ptr++);
8087 fw->boot.textsz = le32toh(*ptr++);
8088
8089 /* Check that all firmware sections fit. */
8090 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
8091 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
8092 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8093 __func__, fw->size);
8094 return EINVAL;
8095 }
8096
8097 /* Get pointers to firmware sections. */
8098 fw->main.text = (const uint8_t *)ptr;
8099 fw->main.data = fw->main.text + fw->main.textsz;
8100 fw->init.text = fw->main.data + fw->main.datasz;
8101 fw->init.data = fw->init.text + fw->init.textsz;
8102 fw->boot.text = fw->init.data + fw->init.datasz;
8103 return 0;
8104}
8105
8106/*
8107 * Extract text and data sections from a TLV firmware image.
8108 */
8109static int
8110iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
8111 uint16_t alt)
8112{
8113 const struct iwn_fw_tlv_hdr *hdr;
8114 const struct iwn_fw_tlv *tlv;
8115 const uint8_t *ptr, *end;
8116 uint64_t altmask;
8117 uint32_t len, tmp;
8118
8119 if (fw->size < sizeof (*hdr)) {
8120 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8121 __func__, fw->size);
8122 return EINVAL;
8123 }
8124 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
8125 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
8126 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
8127 __func__, le32toh(hdr->signature));
8128 return EINVAL;
8129 }
8130 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
8131 le32toh(hdr->build));
8132 sc->ucode_rev = le32toh(hdr->rev);
8133
8134 /*
8135 * Select the closest supported alternative that is less than
8136 * or equal to the specified one.
8137 */
8138 altmask = le64toh(hdr->altmask);
8139 while (alt > 0 && !(altmask & (1ULL << alt)))
8140 alt--; /* Downgrade. */
8141 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
8142
8143 ptr = (const uint8_t *)(hdr + 1);
8144 end = (const uint8_t *)(fw->data + fw->size);
8145
8146 /* Parse type-length-value fields. */
8147 while (ptr + sizeof (*tlv) <= end) {
8148 tlv = (const struct iwn_fw_tlv *)ptr;
8149 len = le32toh(tlv->len);
8150
8151 ptr += sizeof (*tlv);
8152 if (ptr + len > end) {
8153 device_printf(sc->sc_dev,
8154 "%s: firmware too short: %zu bytes\n", __func__,
8155 fw->size);
8156 return EINVAL;
8157 }
8158 /* Skip other alternatives. */
8159 if (tlv->alt != 0 && tlv->alt != htole16(alt))
8160 goto next;
8161
8162 switch (le16toh(tlv->type)) {
8163 case IWN_FW_TLV_MAIN_TEXT:
8164 fw->main.text = ptr;
8165 fw->main.textsz = len;
8166 break;
8167 case IWN_FW_TLV_MAIN_DATA:
8168 fw->main.data = ptr;
8169 fw->main.datasz = len;
8170 break;
8171 case IWN_FW_TLV_INIT_TEXT:
8172 fw->init.text = ptr;
8173 fw->init.textsz = len;
8174 break;
8175 case IWN_FW_TLV_INIT_DATA:
8176 fw->init.data = ptr;
8177 fw->init.datasz = len;
8178 break;
8179 case IWN_FW_TLV_BOOT_TEXT:
8180 fw->boot.text = ptr;
8181 fw->boot.textsz = len;
8182 break;
8183 case IWN_FW_TLV_ENH_SENS:
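			/* A zero-length TLV advertises enhanced sensitivity. */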
8184 if (!len)
8185 sc->sc_flags |= IWN_FLAG_ENH_SENS;
8186 break;
8187 case IWN_FW_TLV_PHY_CALIB:
8188			tmp = le32toh(*(const uint32_t *)ptr);
8189 if (tmp < 253) {
8190 sc->reset_noise_gain = tmp;
8191 sc->noise_gain = tmp + 1;
8192 }
8193 break;
8194 case IWN_FW_TLV_PAN:
8195 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
8196 DPRINTF(sc, IWN_DEBUG_RESET,
8197 "PAN Support found: %d\n", 1);
8198 break;
8199 case IWN_FW_TLV_FLAGS:
8200 if (len < sizeof(uint32_t))
8201 break;
8202 if (len % sizeof(uint32_t))
8203 break;
8204			sc->tlv_feature_flags = le32toh(*(const uint32_t *)ptr);
8205 DPRINTF(sc, IWN_DEBUG_RESET,
8206 "%s: feature: 0x%08x\n",
8207 __func__,
8208 sc->tlv_feature_flags);
8209 break;
8210 case IWN_FW_TLV_PBREQ_MAXLEN:
8211 case IWN_FW_TLV_RUNT_EVTLOG_PTR:
8212 case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
8213 case IWN_FW_TLV_RUNT_ERRLOG_PTR:
8214 case IWN_FW_TLV_INIT_EVTLOG_PTR:
8215 case IWN_FW_TLV_INIT_EVTLOG_SIZE:
8216 case IWN_FW_TLV_INIT_ERRLOG_PTR:
8217 case IWN_FW_TLV_WOWLAN_INST:
8218 case IWN_FW_TLV_WOWLAN_DATA:
8219 DPRINTF(sc, IWN_DEBUG_RESET,
8220 "TLV type %d recognized but not handled\n",
8221 le16toh(tlv->type));
8222 break;
8223 default:
8224 DPRINTF(sc, IWN_DEBUG_RESET,
8225 "TLV type %d not handled\n", le16toh(tlv->type));
8226 break;
8227 }
8228 next: /* TLV fields are 32-bit aligned. */
8229 ptr += (len + 3) & ~3;
8230 }
8231 return 0;
8232}
8233
8234static int
8235iwn_read_firmware(struct iwn_softc *sc)
8236{
8237 struct iwn_fw_info *fw = &sc->fw;
8238 int error;
8239
8240 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8241
8242 IWN_UNLOCK(sc);
8243
8244 memset(fw, 0, sizeof (*fw));
8245
8246 /* Read firmware image from filesystem. */
8247 sc->fw_fp = firmware_get(sc->fwname);
8248 if (sc->fw_fp == NULL) {
8249 device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
8250 __func__, sc->fwname);
8251 IWN_LOCK(sc);
8252 return EINVAL;
8253 }
8254 IWN_LOCK(sc);
8255
8256 fw->size = sc->fw_fp->datasize;
8257 fw->data = (const uint8_t *)sc->fw_fp->data;
8258 if (fw->size < sizeof (uint32_t)) {
8259 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
8260 __func__, fw->size);
8261 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8262 sc->fw_fp = NULL;
8263 return EINVAL;
8264 }
8265
8266 /* Retrieve text and data sections. */
8267 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
8268 error = iwn_read_firmware_leg(sc, fw);
8269 else
8270 error = iwn_read_firmware_tlv(sc, fw, 1);
8271 if (error != 0) {
8272 device_printf(sc->sc_dev,
8273 "%s: could not read firmware sections, error %d\n",
8274 __func__, error);
8275 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8276 sc->fw_fp = NULL;
8277 return error;
8278 }
8279
8280 device_printf(sc->sc_dev, "%s: ucode rev=0x%08x\n", __func__, sc->ucode_rev);
8281
8282 /* Make sure text and data sections fit in hardware memory. */
8283 if (fw->main.textsz > sc->fw_text_maxsz ||
8284 fw->main.datasz > sc->fw_data_maxsz ||
8285 fw->init.textsz > sc->fw_text_maxsz ||
8286 fw->init.datasz > sc->fw_data_maxsz ||
8287 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
8288 (fw->boot.textsz & 3) != 0) {
8289 device_printf(sc->sc_dev, "%s: firmware sections too large\n",
8290 __func__);
8291 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8292 sc->fw_fp = NULL;
8293 return EINVAL;
8294 }
8295
8296 /* We can proceed with loading the firmware. */
8297 return 0;
8298}
8299
8300static int
8301iwn_clock_wait(struct iwn_softc *sc)
8302{
8303 int ntries;
8304
8305 /* Set "initialization complete" bit. */
8306 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
8307
8308 /* Wait for clock stabilization. */
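	/* Poll for up to 25 ms (2500 iterations of 10 us). */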
8309 for (ntries = 0; ntries < 2500; ntries++) {
8310 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
8311 return 0;
8312 DELAY(10);
8313 }
8314 device_printf(sc->sc_dev,
8315 "%s: timeout waiting for clock stabilization\n", __func__);
8316 return ETIMEDOUT;
8317}
8318
8319static int
8320iwn_apm_init(struct iwn_softc *sc)
8321{
8322 uint32_t reg;
8323 int error;
8324
8325 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8326
8327 /* Disable L0s exit timer (NMI bug workaround). */
8328 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
8329 /* Don't wait for ICH L0s (ICH bug workaround). */
8330 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
8331
8332 /* Set FH wait threshold to max (HW bug under stress workaround). */
8333 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
8334
8335 /* Enable HAP INTA to move adapter from L1a to L0s. */
8336 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
8337
8338 /* Retrieve PCIe Active State Power Management (ASPM). */
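	/* Offset 0x10 into the PCIe capability is the Link Control register. */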
8339 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
8340 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
8341 if (reg & 0x02) /* L1 Entry enabled. */
8342 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
8343 else
8344 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
8345
8346 if (sc->base_params->pll_cfg_val)
8347 IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val);
8348
8349 /* Wait for clock stabilization before accessing prph. */
8350 if ((error = iwn_clock_wait(sc)) != 0)
8351 return error;
8352
8353 if ((error = iwn_nic_lock(sc)) != 0)
8354 return error;
8355 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
8356 /* Enable DMA and BSM (Bootstrap State Machine). */
8357 iwn_prph_write(sc, IWN_APMG_CLK_EN,
8358 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
8359 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
8360 } else {
8361 /* Enable DMA. */
8362 iwn_prph_write(sc, IWN_APMG_CLK_EN,
8363 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
8364 }
8365 DELAY(20);
8366 /* Disable L1-Active. */
8367 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
8368 iwn_nic_unlock(sc);
8369
8370 return 0;
8371}
8372
8373static void
8374iwn_apm_stop_master(struct iwn_softc *sc)
8375{
8376 int ntries;
8377
8378 /* Stop busmaster DMA activity. */
8379 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
8380 for (ntries = 0; ntries < 100; ntries++) {
8381 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
8382 return;
8383 DELAY(10);
8384 }
8385 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
8386}
8387
8388static void
8389iwn_apm_stop(struct iwn_softc *sc)
8390{
8391 iwn_apm_stop_master(sc);
8392
8393 /* Reset the entire device. */
8394 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
8395 DELAY(10);
8396 /* Clear "initialization complete" bit. */
8397 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
8398}
8399
8400static int
8401iwn4965_nic_config(struct iwn_softc *sc)
8402{
8403 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8404
8405 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
8406 /*
8407 * I don't believe this to be correct but this is what the
8408 * vendor driver is doing. Probably the bits should not be
8409 * shifted in IWN_RFCFG_*.
8410 */
8411 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8412 IWN_RFCFG_TYPE(sc->rfcfg) |
8413 IWN_RFCFG_STEP(sc->rfcfg) |
8414 IWN_RFCFG_DASH(sc->rfcfg));
8415 }
8416 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8417 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
8418 return 0;
8419}
8420
8421static int
8422iwn5000_nic_config(struct iwn_softc *sc)
8423{
8424 uint32_t tmp;
8425 int error;
8426
8427 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8428
8429 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
8430 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8431 IWN_RFCFG_TYPE(sc->rfcfg) |
8432 IWN_RFCFG_STEP(sc->rfcfg) |
8433 IWN_RFCFG_DASH(sc->rfcfg));
8434 }
8435 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
8436 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
8437
8438 if ((error = iwn_nic_lock(sc)) != 0)
8439 return error;
8440 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
8441
8442 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
8443 /*
8444 * Select first Switching Voltage Regulator (1.32V) to
8445 * solve a stability issue related to noisy DC2DC line
8446 * in the silicon of 1000 Series.
8447 */
8448 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
8449 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
8450 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
8451 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
8452 }
8453 iwn_nic_unlock(sc);
8454
8455 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
8456 /* Use internal power amplifier only. */
8457 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
8458 }
8459 if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) {
8460 /* Indicate that ROM calibration version is >=6. */
8461 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
8462 }
8463 if (sc->base_params->additional_gp_drv_bit)
8464 IWN_SETBITS(sc, IWN_GP_DRIVER,
8465 sc->base_params->additional_gp_drv_bit);
8466 return 0;
8467}
8468
8469/*
8470 * Take NIC ownership over Intel Active Management Technology (AMT).
8471 */
8472static int
8473iwn_hw_prepare(struct iwn_softc *sc)
8474{
8475 int ntries;
8476
8477 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8478
8479 /* Check if hardware is ready. */
8480 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
8481 for (ntries = 0; ntries < 5; ntries++) {
8482 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
8483 IWN_HW_IF_CONFIG_NIC_READY)
8484 return 0;
8485 DELAY(10);
8486 }
8487
8488 /* Hardware not ready, force into ready state. */
8489 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
8490 for (ntries = 0; ntries < 15000; ntries++) {
8491 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
8492 IWN_HW_IF_CONFIG_PREPARE_DONE))
8493 break;
8494 DELAY(10);
8495 }
8496 if (ntries == 15000)
8497 return ETIMEDOUT;
8498
8499 /* Hardware should be ready now. */
8500 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
8501 for (ntries = 0; ntries < 5; ntries++) {
8502 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
8503 IWN_HW_IF_CONFIG_NIC_READY)
8504 return 0;
8505 DELAY(10);
8506 }
8507 return ETIMEDOUT;
8508}
8509
8510static int
8511iwn_hw_init(struct iwn_softc *sc)
8512{
8513 struct iwn_ops *ops = &sc->ops;
8514 int error, chnl, qid;
8515
8516 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
8517
8518 /* Clear pending interrupts. */
8519 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8520
8521 if ((error = iwn_apm_init(sc)) != 0) {
8522 device_printf(sc->sc_dev,
8523 "%s: could not power ON adapter, error %d\n", __func__,
8524 error);
8525 return error;
8526 }
8527
8528 /* Select VMAIN power source. */
8529 if ((error = iwn_nic_lock(sc)) != 0)
8530 return error;
8531 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
8532 iwn_nic_unlock(sc);
8533
8534 /* Perform adapter-specific initialization. */
8535 if ((error = ops->nic_config(sc)) != 0)
8536 return error;
8537
8538 /* Initialize RX ring. */
8539 if ((error = iwn_nic_lock(sc)) != 0)
8540 return error;
8541 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
8542 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
8543 /* Set physical address of RX ring (256-byte aligned). */
8544 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
8545 /* Set physical address of RX status (16-byte aligned). */
8546 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
8547 /* Enable RX. */
8548 IWN_WRITE(sc, IWN_FH_RX_CONFIG,
8549 IWN_FH_RX_CONFIG_ENA |
8550 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
8551 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
8552 IWN_FH_RX_CONFIG_SINGLE_FRAME |
8553 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
8554 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
8555 iwn_nic_unlock(sc);
8556 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
8557
8558 if ((error = iwn_nic_lock(sc)) != 0)
8559 return error;
8560
8561 /* Initialize TX scheduler. */
8562 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
8563
8564 /* Set physical address of "keep warm" page (16-byte aligned). */
8565 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
8566
8567 /* Initialize TX rings. */
8568 for (qid = 0; qid < sc->ntxqs; qid++) {
8569 struct iwn_tx_ring *txq = &sc->txq[qid];
8570
8571 /* Set physical address of TX ring (256-byte aligned). */
8572 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
8573 txq->desc_dma.paddr >> 8);
8574 }
8575 iwn_nic_unlock(sc);
8576
8577 /* Enable DMA channels. */
8578 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
8579 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
8580 IWN_FH_TX_CONFIG_DMA_ENA |
8581 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
8582 }
8583
8584 /* Clear "radio off" and "commands blocked" bits. */
8585 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8586 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
8587
8588 /* Clear pending interrupts. */
8589 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8590 /* Enable interrupt coalescing. */
8591 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
8592 /* Enable interrupts. */
8593 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8594
8595 /* _Really_ make sure "radio off" bit is cleared! */
8596 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8597 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
8598
8599 /* Enable shadow registers. */
8600 if (sc->base_params->shadow_reg_enable)
8601 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
8602
8603 if ((error = ops->load_firmware(sc)) != 0) {
8604 device_printf(sc->sc_dev,
8605 "%s: could not load firmware, error %d\n", __func__,
8606 error);
8607 return error;
8608 }
8609 /* Wait at most one second for firmware alive notification. */
8610 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
8611 device_printf(sc->sc_dev,
8612 "%s: timeout waiting for adapter to initialize, error %d\n",
8613 __func__, error);
8614 return error;
8615 }
8616 /* Do post-firmware initialization. */
8617
8618 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
8619
8620 return ops->post_alive(sc);
8621}
8622
8623static void
8624iwn_hw_stop(struct iwn_softc *sc)
8625{
8626 int chnl, qid, ntries;
8627
8628 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8629
8630 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
8631
8632 /* Disable interrupts. */
8633 IWN_WRITE(sc, IWN_INT_MASK, 0);
8634 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8635 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
8636 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
8637
8638 /* Make sure we no longer hold the NIC lock. */
8639 iwn_nic_unlock(sc);
8640
8641 /* Stop TX scheduler. */
8642 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
8643
8644 /* Stop all DMA channels. */
8645 if (iwn_nic_lock(sc) == 0) {
8646 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
8647 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
8648 for (ntries = 0; ntries < 200; ntries++) {
8649 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
8650 IWN_FH_TX_STATUS_IDLE(chnl))
8651 break;
8652 DELAY(10);
8653 }
8654 }
8655 iwn_nic_unlock(sc);
8656 }
8657
8658 /* Stop RX ring. */
8659 iwn_reset_rx_ring(sc, &sc->rxq);
8660
8661 /* Reset all TX rings. */
8662 for (qid = 0; qid < sc->ntxqs; qid++)
8663 iwn_reset_tx_ring(sc, &sc->txq[qid]);
8664
8665 if (iwn_nic_lock(sc) == 0) {
8666 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
8667 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
8668 iwn_nic_unlock(sc);
8669 }
8670 DELAY(5);
8671 /* Power OFF adapter. */
8672 iwn_apm_stop(sc);
8673}
8674
8675static void
8676iwn_radio_on(void *arg0, int pending)
8677{
8678 struct iwn_softc *sc = arg0;
8679 struct ieee80211com *ic = &sc->sc_ic;
8680 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8681
8682 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8683
8684 if (vap != NULL) {
8685 iwn_init(sc);
8686 ieee80211_init(vap);
8687 }
8688}
8689
8690static void
8691iwn_radio_off(void *arg0, int pending)
8692{
8693 struct iwn_softc *sc = arg0;
8694 struct ieee80211com *ic = &sc->sc_ic;
8695 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8696
8697 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8698
8699 iwn_stop(sc);
8700 if (vap != NULL)
8701 ieee80211_stop(vap);
8702
8703 /* Enable interrupts to get RF toggle notification. */
8704 IWN_LOCK(sc);
8705 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8706 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8707 IWN_UNLOCK(sc);
8708}
8709
8710static void
8711iwn_panicked(void *arg0, int pending)
8712{
8713 struct iwn_softc *sc = arg0;
8714 struct ieee80211com *ic = &sc->sc_ic;
8715 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8716 int error;
8717
8718 if (vap == NULL) {
8719 printf("%s: null vap\n", __func__);
8720 return;
8721 }
8722
8723 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
8724 "resetting...\n", __func__, vap->iv_state);
8725
8726 IWN_LOCK(sc);
8727
8728 iwn_stop_locked(sc);
8729 iwn_init_locked(sc);
8730 if (vap->iv_state >= IEEE80211_S_AUTH &&
8731 (error = iwn_auth(sc, vap)) != 0) {
8732 device_printf(sc->sc_dev,
8733 "%s: could not move to auth state\n", __func__);
8734 }
8735 if (vap->iv_state >= IEEE80211_S_RUN &&
8736 (error = iwn_run(sc, vap)) != 0) {
8737 device_printf(sc->sc_dev,
8738 "%s: could not move to run state\n", __func__);
8739 }
8740
8741	/* Only kick TX once the NIC is in a useful state, e.g. associated. */
8742 iwn_start_locked(sc);
8743
8744 IWN_UNLOCK(sc);
8745}
8746
8747static void
8748iwn_init_locked(struct iwn_softc *sc)
8749{
8750 int error;
8751
8752 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
8753
8754 IWN_LOCK_ASSERT(sc);
8755
8756 sc->sc_flags |= IWN_FLAG_RUNNING;
8757
8758 if ((error = iwn_hw_prepare(sc)) != 0) {
8759 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
8760 __func__, error);
8761 goto fail;
8762 }
8763
8764 /* Initialize interrupt mask to default value. */
8765 sc->int_mask = IWN_INT_MASK_DEF;
8766 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
8767
8768 /* Check that the radio is not disabled by hardware switch. */
8769 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
8770 device_printf(sc->sc_dev,
8771 "radio is disabled by hardware switch\n");
8772 /* Enable interrupts to get RF toggle notifications. */
8773 IWN_WRITE(sc, IWN_INT, 0xffffffff);
8774 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
8775 return;
8776 }
8777
8778 /* Read firmware images from the filesystem. */
8779 if ((error = iwn_read_firmware(sc)) != 0) {
8780 device_printf(sc->sc_dev,
8781 "%s: could not read firmware, error %d\n", __func__,
8782 error);
8783 goto fail;
8784 }
8785
8786 /* Initialize hardware and upload firmware. */
8787 error = iwn_hw_init(sc);
8788 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
8789 sc->fw_fp = NULL;
8790 if (error != 0) {
8791 device_printf(sc->sc_dev,
8792 "%s: could not initialize hardware, error %d\n", __func__,
8793 error);
8794 goto fail;
8795 }
8796
8797 /* Configure adapter now that it is ready. */
8798 if ((error = iwn_config(sc)) != 0) {
8799 device_printf(sc->sc_dev,
8800 "%s: could not configure device, error %d\n", __func__,
8801 error);
8802 goto fail;
8803 }
8804
8805 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
8806
8807 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
8808
8809 return;
8810
8811fail:
8812 sc->sc_flags &= ~IWN_FLAG_RUNNING;
8813 iwn_stop_locked(sc);
8814 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
8815}
8816
8817static void
8818iwn_init(struct iwn_softc *sc)
8819{
8820
8821 IWN_LOCK(sc);
8822 iwn_init_locked(sc);
8823 IWN_UNLOCK(sc);
8824
8825 if (sc->sc_flags & IWN_FLAG_RUNNING)
8826 ieee80211_start_all(&sc->sc_ic);
8827}
8828
8829static void
8830iwn_stop_locked(struct iwn_softc *sc)
8831{
8832
8833 IWN_LOCK_ASSERT(sc);
8834
8835 sc->sc_is_scanning = 0;
8836 sc->sc_tx_timer = 0;
8837 callout_stop(&sc->watchdog_to);
8838 callout_stop(&sc->calib_to);
8839 sc->sc_flags &= ~IWN_FLAG_RUNNING;
8840
8841 /* Power OFF hardware. */
8842 iwn_hw_stop(sc);
8843}
8844
8845static void
8846iwn_stop(struct iwn_softc *sc)
8847{
8848 IWN_LOCK(sc);
8849 iwn_stop_locked(sc);
8850 IWN_UNLOCK(sc);
8851}
8852
8853/*
8854 * Callback from net80211 to start a scan.
8855 */
8856static void
8857iwn_scan_start(struct ieee80211com *ic)
8858{
8859 struct iwn_softc *sc = ic->ic_softc;
8860
8861 IWN_LOCK(sc);
8862 /* make the link LED blink while we're scanning */
8863 iwn_set_led(sc, IWN_LED_LINK, 20, 2);
8864 IWN_UNLOCK(sc);
8865}
8866
8867/*
8868 * Callback from net80211 to terminate a scan.
8869 */
8870static void
8871iwn_scan_end(struct ieee80211com *ic)
8872{
8873 struct iwn_softc *sc = ic->ic_softc;
8874 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
8875
8876 IWN_LOCK(sc);
8877 if (vap->iv_state == IEEE80211_S_RUN) {
8878 /* Set link LED to ON status if we are associated */
8879 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
8880 }
8881 IWN_UNLOCK(sc);
8882}
8883
8884/*
8885 * Callback from net80211 to force a channel change.
8886 */
8887static void
8888iwn_set_channel(struct ieee80211com *ic)
8889{
8890 const struct ieee80211_channel *c = ic->ic_curchan;
8891 struct iwn_softc *sc = ic->ic_softc;
8892 int error;
8893
8894 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8895
8896 IWN_LOCK(sc);
8897 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
8898 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
8899 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
8900 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
8901
8902 /*
8903 * Only need to set the channel in Monitor mode. AP scanning and auth
8904 * are already taken care of by their respective firmware commands.
8905 */
8906 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8907 error = iwn_config(sc);
8908 if (error != 0)
8909 device_printf(sc->sc_dev,
8910			    "%s: error %d setting channel\n", __func__, error);
8911 }
8912 IWN_UNLOCK(sc);
8913}
8914
8915/*
8916 * Callback from net80211 to start scanning of the current channel.
8917 */
8918static void
8919iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
8920{
8921 struct ieee80211vap *vap = ss->ss_vap;
8922 struct ieee80211com *ic = vap->iv_ic;
8923 struct iwn_softc *sc = ic->ic_softc;
8924 int error;
8925
8926 IWN_LOCK(sc);
8927 error = iwn_scan(sc, vap, ss, ic->ic_curchan);
8928 IWN_UNLOCK(sc);
8929 if (error != 0)
8930 ieee80211_cancel_scan(vap);
8931}
8932
8933/*
8934 * Callback from net80211 to handle the minimum dwell time being met.
8935 * The intent is to terminate the scan but we just let the firmware
8936 * notify us when it's finished as we have no safe way to abort it.
8937 */
8938static void
8939iwn_scan_mindwell(struct ieee80211_scan_state *ss)
8940{
8941 /* NB: don't try to abort scan; wait for firmware to finish */
8942}
8943
8944static void
8945iwn_hw_reset(void *arg0, int pending)
8946{
8947 struct iwn_softc *sc = arg0;
8948 struct ieee80211com *ic = &sc->sc_ic;
8949
8950 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
8951
8952 iwn_stop(sc);
8953 iwn_init(sc);
8954 ieee80211_notify_radio(ic, 1);
8955}
8956#ifdef IWN_DEBUG
8957#define IWN_DESC(x) case x: return #x
8958
8959/*
8960 * Translate a CSR register offset to its symbolic name.
8961 */
8962static char *iwn_get_csr_string(int csr)
8963{
8964 switch (csr) {
8965 IWN_DESC(IWN_HW_IF_CONFIG);
8966 IWN_DESC(IWN_INT_COALESCING);
8967 IWN_DESC(IWN_INT);
8968 IWN_DESC(IWN_INT_MASK);
8969 IWN_DESC(IWN_FH_INT);
8970 IWN_DESC(IWN_GPIO_IN);
8971 IWN_DESC(IWN_RESET);
8972 IWN_DESC(IWN_GP_CNTRL);
8973 IWN_DESC(IWN_HW_REV);
8974 IWN_DESC(IWN_EEPROM);
8975 IWN_DESC(IWN_EEPROM_GP);
8976 IWN_DESC(IWN_OTP_GP);
8977 IWN_DESC(IWN_GIO);
8978 IWN_DESC(IWN_GP_UCODE);
8979 IWN_DESC(IWN_GP_DRIVER);
8980 IWN_DESC(IWN_UCODE_GP1);
8981 IWN_DESC(IWN_UCODE_GP2);
8982 IWN_DESC(IWN_LED);
8983 IWN_DESC(IWN_DRAM_INT_TBL);
8984 IWN_DESC(IWN_GIO_CHICKEN);
8985 IWN_DESC(IWN_ANA_PLL);
8986 IWN_DESC(IWN_HW_REV_WA);
8987 IWN_DESC(IWN_DBG_HPET_MEM);
8988 default:
8989 return "UNKNOWN CSR";
8990 }
8991}
8992
8993/*
8994 * This function prints the firmware CSR registers.
8995 */
8996static void
8997iwn_debug_register(struct iwn_softc *sc)
8998{
8999 int i;
9000 static const uint32_t csr_tbl[] = {
9001 IWN_HW_IF_CONFIG,
9002 IWN_INT_COALESCING,
9003 IWN_INT,
9004 IWN_INT_MASK,
9005 IWN_FH_INT,
9006 IWN_GPIO_IN,
9007 IWN_RESET,
9008 IWN_GP_CNTRL,
9009 IWN_HW_REV,
9010 IWN_EEPROM,
9011 IWN_EEPROM_GP,
9012 IWN_OTP_GP,
9013 IWN_GIO,
9014 IWN_GP_UCODE,
9015 IWN_GP_DRIVER,
9016 IWN_UCODE_GP1,
9017 IWN_UCODE_GP2,
9018 IWN_LED,
9019 IWN_DRAM_INT_TBL,
9020 IWN_GIO_CHICKEN,
9021 IWN_ANA_PLL,
9022 IWN_HW_REV_WA,
9023 IWN_DBG_HPET_MEM,
9024 };
9025 DPRINTF(sc, IWN_DEBUG_REGISTER,
9026 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s",
9027 "\n");
9028 for (i = 0; i < nitems(csr_tbl); i++){
9029 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ",
9030 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i]));
9031 if ((i+1) % 3 == 0)
9032 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
9033 }
9034 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
9035}
9036#endif
9037
9038