1/*-
2 * Copyright (c) 2007-2009
3 * Damien Bergamini <damien.bergamini@free.fr>
4 * Copyright (c) 2008
5 * Benjamin Close <benjsc@FreeBSD.org>
6 * Copyright (c) 2008 Sam Leffler, Errno Consulting
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/*
22 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
23 * adapters.
24 */
25
26#include <sys/cdefs.h>
27__FBSDID("$FreeBSD: head/sys/dev/iwn/if_iwn.c 252727 2013-07-04 21:16:49Z adrian $");
28
29#include "opt_wlan.h"
30
31#include <sys/param.h>
32#include <sys/sockio.h>
33#include <sys/sysctl.h>
34#include <sys/mbuf.h>
35#include <sys/kernel.h>
36#include <sys/socket.h>
37#include <sys/systm.h>
38#include <sys/malloc.h>
39#include <sys/bus.h>
40#include <sys/rman.h>
41#include <sys/endian.h>
42#include <sys/firmware.h>
43#include <sys/limits.h>
44#include <sys/module.h>
45#include <sys/queue.h>
46#include <sys/taskqueue.h>
47
48#include <machine/bus.h>
49#include <machine/resource.h>
50#include <machine/clock.h>
51
52#include <dev/pci/pcireg.h>
53#include <dev/pci/pcivar.h>
54
55#include <net/bpf.h>
56#include <net/if.h>
57#include <net/if_arp.h>
58#include <net/ethernet.h>
59#include <net/if_dl.h>
60#include <net/if_media.h>
61#include <net/if_types.h>
62
63#include <netinet/in.h>
64#include <netinet/in_systm.h>
65#include <netinet/in_var.h>
66#include <netinet/if_ether.h>
67#include <netinet/ip.h>
68
69#include <net80211/ieee80211_var.h>
70#include <net80211/ieee80211_radiotap.h>
71#include <net80211/ieee80211_regdomain.h>
72#include <net80211/ieee80211_ratectl.h>
73
74#include <dev/iwn/if_iwnreg.h>
75#include <dev/iwn/if_iwnvar.h>
76
77struct iwn_ident {
78 uint16_t vendor;
79 uint16_t device;
80 const char *name;
81};
82
83static const struct iwn_ident iwn_ident_table[] = {
84 { 0x8086, 0x0082, "Intel Centrino Advanced-N 6205" },
85 { 0x8086, 0x0083, "Intel Centrino Wireless-N 1000" },
86 { 0x8086, 0x0084, "Intel Centrino Wireless-N 1000" },
87 { 0x8086, 0x0085, "Intel Centrino Advanced-N 6205" },
88 { 0x8086, 0x0087, "Intel Centrino Advanced-N + WiMAX 6250" },
89 { 0x8086, 0x0089, "Intel Centrino Advanced-N + WiMAX 6250" },
90 { 0x8086, 0x008a, "Intel Centrino Wireless-N 1030" },
91 { 0x8086, 0x008b, "Intel Centrino Wireless-N 1030" },
92 { 0x8086, 0x0090, "Intel Centrino Advanced-N 6230" },
93 { 0x8086, 0x0091, "Intel Centrino Advanced-N 6230" },
94 { 0x8086, 0x0885, "Intel Centrino Wireless-N + WiMAX 6150" },
95 { 0x8086, 0x0886, "Intel Centrino Wireless-N + WiMAX 6150" },
96 { 0x8086, 0x0896, "Intel Centrino Wireless-N 130" },
97 { 0x8086, 0x0897, "Intel Centrino Wireless-N 130" },
98 { 0x8086, 0x08ae, "Intel Centrino Wireless-N 100" },
99 { 0x8086, 0x08af, "Intel Centrino Wireless-N 100" },
100 { 0x8086, 0x4229, "Intel Wireless WiFi Link 4965" },
101 { 0x8086, 0x422b, "Intel Centrino Ultimate-N 6300" },
102 { 0x8086, 0x422c, "Intel Centrino Advanced-N 6200" },
103 { 0x8086, 0x422d, "Intel Wireless WiFi Link 4965" },
104 { 0x8086, 0x4230, "Intel Wireless WiFi Link 4965" },
105 { 0x8086, 0x4232, "Intel WiFi Link 5100" },
106 { 0x8086, 0x4233, "Intel Wireless WiFi Link 4965" },
107 { 0x8086, 0x4235, "Intel Ultimate N WiFi Link 5300" },
108 { 0x8086, 0x4236, "Intel Ultimate N WiFi Link 5300" },
109 { 0x8086, 0x4237, "Intel WiFi Link 5100" },
110 { 0x8086, 0x4238, "Intel Centrino Ultimate-N 6300" },
111 { 0x8086, 0x4239, "Intel Centrino Advanced-N 6200" },
112 { 0x8086, 0x423a, "Intel WiMAX/WiFi Link 5350" },
113 { 0x8086, 0x423b, "Intel WiMAX/WiFi Link 5350" },
114 { 0x8086, 0x423c, "Intel WiMAX/WiFi Link 5150" },
115 { 0x8086, 0x423d, "Intel WiMAX/WiFi Link 5150" },
116 { 0, 0, NULL }
117};
118
119static int iwn_probe(device_t);
120static int iwn_attach(device_t);
121static int iwn4965_attach(struct iwn_softc *, uint16_t);
122static int iwn5000_attach(struct iwn_softc *, uint16_t);
123static void iwn_radiotap_attach(struct iwn_softc *);
124static void iwn_sysctlattach(struct iwn_softc *);
125static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
126 const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
127 const uint8_t [IEEE80211_ADDR_LEN],
128 const uint8_t [IEEE80211_ADDR_LEN]);
129static void iwn_vap_delete(struct ieee80211vap *);
130static int iwn_detach(device_t);
131static int iwn_shutdown(device_t);
132static int iwn_suspend(device_t);
133static int iwn_resume(device_t);
134static int iwn_nic_lock(struct iwn_softc *);
135static int iwn_eeprom_lock(struct iwn_softc *);
136static int iwn_init_otprom(struct iwn_softc *);
137static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
138static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
139static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
140 void **, bus_size_t, bus_size_t);
141static void iwn_dma_contig_free(struct iwn_dma_info *);
142static int iwn_alloc_sched(struct iwn_softc *);
143static void iwn_free_sched(struct iwn_softc *);
144static int iwn_alloc_kw(struct iwn_softc *);
145static void iwn_free_kw(struct iwn_softc *);
146static int iwn_alloc_ict(struct iwn_softc *);
147static void iwn_free_ict(struct iwn_softc *);
148static int iwn_alloc_fwmem(struct iwn_softc *);
149static void iwn_free_fwmem(struct iwn_softc *);
150static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
151static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
152static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
153static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
154 int);
155static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
156static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
157static void iwn5000_ict_reset(struct iwn_softc *);
158static int iwn_read_eeprom(struct iwn_softc *,
159 uint8_t macaddr[IEEE80211_ADDR_LEN]);
160static void iwn4965_read_eeprom(struct iwn_softc *);
161static void iwn4965_print_power_group(struct iwn_softc *, int);
162static void iwn5000_read_eeprom(struct iwn_softc *);
163static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
164static void iwn_read_eeprom_band(struct iwn_softc *, int);
165static void iwn_read_eeprom_ht40(struct iwn_softc *, int);
166static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
167static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
168 struct ieee80211_channel *);
169static int iwn_setregdomain(struct ieee80211com *,
170 struct ieee80211_regdomain *, int,
171 struct ieee80211_channel[]);
172static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
173static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
174 const uint8_t mac[IEEE80211_ADDR_LEN]);
175static void iwn_newassoc(struct ieee80211_node *, int);
176static int iwn_media_change(struct ifnet *);
177static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
178static void iwn_calib_timeout(void *);
179static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
180 struct iwn_rx_data *);
181static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
182 struct iwn_rx_data *);
183static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
184 struct iwn_rx_data *);
185static void iwn5000_rx_calib_results(struct iwn_softc *,
186 struct iwn_rx_desc *, struct iwn_rx_data *);
187static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
188 struct iwn_rx_data *);
189static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
190 struct iwn_rx_data *);
191static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
192 struct iwn_rx_data *);
193static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
194 uint8_t);
195static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *);
196static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
197static void iwn_notif_intr(struct iwn_softc *);
198static void iwn_wakeup_intr(struct iwn_softc *);
199static void iwn_rftoggle_intr(struct iwn_softc *);
200static void iwn_fatal_intr(struct iwn_softc *);
201static void iwn_intr(void *);
202static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
203 uint16_t);
204static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
205 uint16_t);
206#ifdef notyet
207static void iwn5000_reset_sched(struct iwn_softc *, int, int);
208#endif
209static int iwn_tx_data(struct iwn_softc *, struct mbuf *,
210 struct ieee80211_node *);
211static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
212 struct ieee80211_node *,
213 const struct ieee80211_bpf_params *params);
214static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
215 const struct ieee80211_bpf_params *);
216static void iwn_start(struct ifnet *);
217static void iwn_start_locked(struct ifnet *);
218static void iwn_watchdog(void *);
219static int iwn_ioctl(struct ifnet *, u_long, caddr_t);
220static int iwn_cmd(struct iwn_softc *, int, const void *, int, int);
221static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
222 int);
223static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
224 int);
225static int iwn_set_link_quality(struct iwn_softc *,
226 struct ieee80211_node *);
227static int iwn_add_broadcast_node(struct iwn_softc *, int);
228static int iwn_updateedca(struct ieee80211com *);
229static void iwn_update_mcast(struct ifnet *);
230static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
231static int iwn_set_critical_temp(struct iwn_softc *);
232static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
233static void iwn4965_power_calibration(struct iwn_softc *, int);
234static int iwn4965_set_txpower(struct iwn_softc *,
235 struct ieee80211_channel *, int);
236static int iwn5000_set_txpower(struct iwn_softc *,
237 struct ieee80211_channel *, int);
238static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
239static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
240static int iwn_get_noise(const struct iwn_rx_general_stats *);
241static int iwn4965_get_temperature(struct iwn_softc *);
242static int iwn5000_get_temperature(struct iwn_softc *);
243static int iwn_init_sensitivity(struct iwn_softc *);
244static void iwn_collect_noise(struct iwn_softc *,
245 const struct iwn_rx_general_stats *);
246static int iwn4965_init_gains(struct iwn_softc *);
247static int iwn5000_init_gains(struct iwn_softc *);
248static int iwn4965_set_gains(struct iwn_softc *);
249static int iwn5000_set_gains(struct iwn_softc *);
250static void iwn_tune_sensitivity(struct iwn_softc *,
251 const struct iwn_rx_stats *);
252static int iwn_send_sensitivity(struct iwn_softc *);
253static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
254static int iwn_send_btcoex(struct iwn_softc *);
255static int iwn_send_advanced_btcoex(struct iwn_softc *);
256static int iwn5000_runtime_calib(struct iwn_softc *);
257static int iwn_config(struct iwn_softc *);
258static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
259static int iwn_scan(struct iwn_softc *);
260static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
261static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
262static int iwn_ampdu_rx_start(struct ieee80211_node *,
263 struct ieee80211_rx_ampdu *, int, int, int);
264static void iwn_ampdu_rx_stop(struct ieee80211_node *,
265 struct ieee80211_rx_ampdu *);
266static int iwn_addba_request(struct ieee80211_node *,
267 struct ieee80211_tx_ampdu *, int, int, int);
268static int iwn_addba_response(struct ieee80211_node *,
269 struct ieee80211_tx_ampdu *, int, int, int);
270static int iwn_ampdu_tx_start(struct ieee80211com *,
271 struct ieee80211_node *, uint8_t);
272static void iwn_ampdu_tx_stop(struct ieee80211_node *,
273 struct ieee80211_tx_ampdu *);
274static void iwn4965_ampdu_tx_start(struct iwn_softc *,
275 struct ieee80211_node *, int, uint8_t, uint16_t);
276static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
277 uint8_t, uint16_t);
278static void iwn5000_ampdu_tx_start(struct iwn_softc *,
279 struct ieee80211_node *, int, uint8_t, uint16_t);
280static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
281 uint8_t, uint16_t);
282static int iwn5000_query_calibration(struct iwn_softc *);
283static int iwn5000_send_calibration(struct iwn_softc *);
284static int iwn5000_send_wimax_coex(struct iwn_softc *);
285static int iwn5000_crystal_calib(struct iwn_softc *);
286static int iwn5000_temp_offset_calib(struct iwn_softc *);
287static int iwn4965_post_alive(struct iwn_softc *);
288static int iwn5000_post_alive(struct iwn_softc *);
289static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
290 int);
291static int iwn4965_load_firmware(struct iwn_softc *);
292static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
293 const uint8_t *, int);
294static int iwn5000_load_firmware(struct iwn_softc *);
295static int iwn_read_firmware_leg(struct iwn_softc *,
296 struct iwn_fw_info *);
297static int iwn_read_firmware_tlv(struct iwn_softc *,
298 struct iwn_fw_info *, uint16_t);
299static int iwn_read_firmware(struct iwn_softc *);
300static int iwn_clock_wait(struct iwn_softc *);
301static int iwn_apm_init(struct iwn_softc *);
302static void iwn_apm_stop_master(struct iwn_softc *);
303static void iwn_apm_stop(struct iwn_softc *);
304static int iwn4965_nic_config(struct iwn_softc *);
305static int iwn5000_nic_config(struct iwn_softc *);
306static int iwn_hw_prepare(struct iwn_softc *);
307static int iwn_hw_init(struct iwn_softc *);
308static void iwn_hw_stop(struct iwn_softc *);
309static void iwn_radio_on(void *, int);
310static void iwn_radio_off(void *, int);
311static void iwn_init_locked(struct iwn_softc *);
312static void iwn_init(void *);
313static void iwn_stop_locked(struct iwn_softc *);
314static void iwn_stop(struct iwn_softc *);
315static void iwn_scan_start(struct ieee80211com *);
316static void iwn_scan_end(struct ieee80211com *);
317static void iwn_set_channel(struct ieee80211com *);
318static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
319static void iwn_scan_mindwell(struct ieee80211_scan_state *);
320static void iwn_hw_reset(void *, int);
321
322#define IWN_DEBUG
323#ifdef IWN_DEBUG
324enum {
325 IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
326 IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */
327 IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */
328 IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */
329 IWN_DEBUG_RESET = 0x00000010, /* reset processing */
330 IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */
331 IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */
332 IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */
333 IWN_DEBUG_INTR = 0x00000100, /* ISR */
334 IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */
335 IWN_DEBUG_NODE = 0x00000400, /* node management */
336 IWN_DEBUG_LED = 0x00000800, /* led management */
337 IWN_DEBUG_CMD = 0x00001000, /* cmd submission */
338 IWN_DEBUG_TXRATE = 0x00002000, /* TX rate debugging */
339 IWN_DEBUG_PWRSAVE = 0x00004000, /* Power save operations */
340 IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */
341 IWN_DEBUG_ANY = 0xffffffff
342};
343
344#define DPRINTF(sc, m, fmt, ...) do { \
345 if (sc->sc_debug & (m)) \
346 printf(fmt, __VA_ARGS__); \
347} while (0)
348
349static const char *
350iwn_intr_str(uint8_t cmd)
351{
352 switch (cmd) {
353 /* Notifications */
354 case IWN_UC_READY: return "UC_READY";
355 case IWN_ADD_NODE_DONE: return "ADD_NODE_DONE";
356 case IWN_TX_DONE: return "TX_DONE";
357 case IWN_START_SCAN: return "START_SCAN";
358 case IWN_STOP_SCAN: return "STOP_SCAN";
359 case IWN_RX_STATISTICS: return "RX_STATS";
360 case IWN_BEACON_STATISTICS: return "BEACON_STATS";
361 case IWN_STATE_CHANGED: return "STATE_CHANGED";
362 case IWN_BEACON_MISSED: return "BEACON_MISSED";
363 case IWN_RX_PHY: return "RX_PHY";
364 case IWN_MPDU_RX_DONE: return "MPDU_RX_DONE";
365 case IWN_RX_DONE: return "RX_DONE";
366
367 /* Command Notifications */
368 case IWN_CMD_RXON: return "IWN_CMD_RXON";
369 case IWN_CMD_RXON_ASSOC: return "IWN_CMD_RXON_ASSOC";
370 case IWN_CMD_EDCA_PARAMS: return "IWN_CMD_EDCA_PARAMS";
371 case IWN_CMD_TIMING: return "IWN_CMD_TIMING";
372 case IWN_CMD_LINK_QUALITY: return "IWN_CMD_LINK_QUALITY";
373 case IWN_CMD_SET_LED: return "IWN_CMD_SET_LED";
374 case IWN5000_CMD_WIMAX_COEX: return "IWN5000_CMD_WIMAX_COEX";
375 case IWN5000_CMD_CALIB_CONFIG: return "IWN5000_CMD_CALIB_CONFIG";
376 case IWN5000_CMD_CALIB_RESULT: return "IWN5000_CMD_CALIB_RESULT";
377 case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
378 case IWN_CMD_SET_POWER_MODE: return "IWN_CMD_SET_POWER_MODE";
379 case IWN_CMD_SCAN: return "IWN_CMD_SCAN";
380 case IWN_CMD_SCAN_RESULTS: return "IWN_CMD_SCAN_RESULTS";
381 case IWN_CMD_TXPOWER: return "IWN_CMD_TXPOWER";
382 case IWN_CMD_TXPOWER_DBM: return "IWN_CMD_TXPOWER_DBM";
383 case IWN5000_CMD_TX_ANT_CONFIG: return "IWN5000_CMD_TX_ANT_CONFIG";
384 case IWN_CMD_BT_COEX: return "IWN_CMD_BT_COEX";
385 case IWN_CMD_SET_CRITICAL_TEMP: return "IWN_CMD_SET_CRITICAL_TEMP";
386 case IWN_CMD_SET_SENSITIVITY: return "IWN_CMD_SET_SENSITIVITY";
387 case IWN_CMD_PHY_CALIB: return "IWN_CMD_PHY_CALIB";
388 }
389 return "UNKNOWN INTR NOTIF/CMD";
390}
391#else
392#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
393#endif
394
395static device_method_t iwn_methods[] = {
396 /* Device interface */
397 DEVMETHOD(device_probe, iwn_probe),
398 DEVMETHOD(device_attach, iwn_attach),
399 DEVMETHOD(device_detach, iwn_detach),
400 DEVMETHOD(device_shutdown, iwn_shutdown),
401 DEVMETHOD(device_suspend, iwn_suspend),
402 DEVMETHOD(device_resume, iwn_resume),
403 { 0, 0 }
404};
405
406static driver_t iwn_driver = {
407 "iwn",
408 iwn_methods,
409 sizeof(struct iwn_softc)
410};
411static devclass_t iwn_devclass;
412
413DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);
414
415MODULE_VERSION(iwn, 1);
416
417MODULE_DEPEND(iwn, firmware, 1, 1, 1);
418MODULE_DEPEND(iwn, pci, 1, 1, 1);
419MODULE_DEPEND(iwn, wlan, 1, 1, 1);
420
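/*
 * Probe: match the PCI vendor/device ID pair against the table of
 * supported adapters and set the device description on a match.
 */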
421static int
422iwn_probe(device_t dev)
423{
424 const struct iwn_ident *ident;
425
426 for (ident = iwn_ident_table; ident->name != NULL; ident++) {
427 if (pci_get_vendor(dev) == ident->vendor &&
428 pci_get_device(dev) == ident->device) {
429 device_set_desc(dev, ident->name);
430 return 0;
431 }
432 }
433 return ENXIO;
434}
435
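/*
 * Attach: map PCI resources, allocate DMA memory and TX/RX rings,
 * read the EEPROM/OTPROM and register the device with net80211.
 */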
436static int
437iwn_attach(device_t dev)
438{
439 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
440 struct ieee80211com *ic;
441 struct ifnet *ifp;
442 uint32_t reg;
443 int i, error, result;
444 uint8_t macaddr[IEEE80211_ADDR_LEN];
445
446 sc->sc_dev = dev;
447
448 /*
449 * Get the offset of the PCI Express Capability Structure in PCI
450 * Configuration Space.
451 */
452 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
453 if (error != 0) {
454 device_printf(dev, "PCIe capability structure not found!\n");
455 return error;
456 }
457
458 /* Clear device-specific "PCI retry timeout" register (41h). */
459 pci_write_config(dev, 0x41, 0, 1);
460
461 /* Hardware bug workaround. */
462 reg = pci_read_config(dev, PCIR_COMMAND, 1);
463 if (reg & PCIM_CMD_INTxDIS) {
464 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
465 __func__);
466 reg &= ~PCIM_CMD_INTxDIS;
467 pci_write_config(dev, PCIR_COMMAND, reg, 1);
468 }
469
470 /* Enable bus-mastering. */
471 pci_enable_busmaster(dev);
472
473 sc->mem_rid = PCIR_BAR(0);
474 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
475 RF_ACTIVE);
476 if (sc->mem == NULL) {
477 device_printf(dev, "can't map mem space\n");
478 error = ENOMEM;
479 return error;
480 }
481 sc->sc_st = rman_get_bustag(sc->mem);
482 sc->sc_sh = rman_get_bushandle(sc->mem);
483
484 sc->irq_rid = 0;
485 if ((result = pci_msi_count(dev)) == 1 &&
486 pci_alloc_msi(dev, &result) == 0)
487 sc->irq_rid = 1;
488 /* Install interrupt handler. */
489 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
490 RF_ACTIVE | RF_SHAREABLE);
491 if (sc->irq == NULL) {
492 device_printf(dev, "can't map interrupt\n");
493 error = ENOMEM;
494 goto fail;
495 }
496
497 IWN_LOCK_INIT(sc);
498
499 /* Read hardware revision and attach. */
500 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
501 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
502 error = iwn4965_attach(sc, pci_get_device(dev));
503 else
504 error = iwn5000_attach(sc, pci_get_device(dev));
505 if (error != 0) {
506 device_printf(dev, "could not attach device, error %d\n",
507 error);
508 goto fail;
509 }
510
511 if ((error = iwn_hw_prepare(sc)) != 0) {
512 device_printf(dev, "hardware not ready, error %d\n", error);
513 goto fail;
514 }
515
516 /* Allocate DMA memory for firmware transfers. */
517 if ((error = iwn_alloc_fwmem(sc)) != 0) {
518 device_printf(dev,
519 "could not allocate memory for firmware, error %d\n",
520 error);
521 goto fail;
522 }
523
524 /* Allocate "Keep Warm" page. */
525 if ((error = iwn_alloc_kw(sc)) != 0) {
526 device_printf(dev,
527 "could not allocate keep warm page, error %d\n", error);
528 goto fail;
529 }
530
531 /* Allocate ICT table for 5000 Series. */
532 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
533 (error = iwn_alloc_ict(sc)) != 0) {
534 device_printf(dev, "could not allocate ICT table, error %d\n",
535 error);
536 goto fail;
537 }
538
539 /* Allocate TX scheduler "rings". */
540 if ((error = iwn_alloc_sched(sc)) != 0) {
541 device_printf(dev,
542 "could not allocate TX scheduler rings, error %d\n", error);
543 goto fail;
544 }
545
546 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
547 for (i = 0; i < sc->ntxqs; i++) {
548 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
549 device_printf(dev,
550 "could not allocate TX ring %d, error %d\n", i,
551 error);
552 goto fail;
553 }
554 }
555
556 /* Allocate RX ring. */
557 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
558 device_printf(dev, "could not allocate RX ring, error %d\n",
559 error);
560 goto fail;
561 }
562
563 /* Clear pending interrupts. */
564 IWN_WRITE(sc, IWN_INT, 0xffffffff);
565
566 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
567 if (ifp == NULL) {
568		device_printf(dev, "cannot allocate ifnet structure\n");
569 goto fail;
570 }
571
572 ic = ifp->if_l2com;
573 ic->ic_ifp = ifp;
574 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
575 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
576
577 /* Set device capabilities. */
578 ic->ic_caps =
579 IEEE80211_C_STA /* station mode supported */
580 | IEEE80211_C_MONITOR /* monitor mode supported */
581 | IEEE80211_C_BGSCAN /* background scanning */
582 | IEEE80211_C_TXPMGT /* tx power management */
583 | IEEE80211_C_SHSLOT /* short slot time supported */
584 | IEEE80211_C_WPA
585 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
586#if 0
587 | IEEE80211_C_IBSS /* ibss/adhoc mode */
588#endif
589 | IEEE80211_C_WME /* WME */
590 | IEEE80211_C_PMGT /* Station-side power mgmt */
591 ;
592
593 /* Read MAC address, channels, etc from EEPROM. */
594 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
595 device_printf(dev, "could not read EEPROM, error %d\n",
596 error);
597 goto fail;
598 }
599
600 /* Count the number of available chains. */
601 sc->ntxchains =
602 ((sc->txchainmask >> 2) & 1) +
603 ((sc->txchainmask >> 1) & 1) +
604 ((sc->txchainmask >> 0) & 1);
605 sc->nrxchains =
606 ((sc->rxchainmask >> 2) & 1) +
607 ((sc->rxchainmask >> 1) & 1) +
608 ((sc->rxchainmask >> 0) & 1);
609 if (bootverbose) {
610 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
611 sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
612 macaddr, ":");
613 }
614
615 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
616 ic->ic_rxstream = sc->nrxchains;
617 ic->ic_txstream = sc->ntxchains;
618 ic->ic_htcaps =
619 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */
620 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
621 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
622 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
623#ifdef notyet
624 | IEEE80211_HTCAP_GREENFIELD
625#if IWN_RBUF_SIZE == 8192
626 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
627#else
628 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
629#endif
630#endif
631 /* s/w capabilities */
632 | IEEE80211_HTC_HT /* HT operation */
633 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
634#ifdef notyet
635 | IEEE80211_HTC_AMSDU /* tx A-MSDU */
636#endif
637 ;
638 }
639
640 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
641 ifp->if_softc = sc;
642 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
643 ifp->if_init = iwn_init;
644 ifp->if_ioctl = iwn_ioctl;
645 ifp->if_start = iwn_start;
646 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
647 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
648 IFQ_SET_READY(&ifp->if_snd);
649
650 ieee80211_ifattach(ic, macaddr);
651 ic->ic_vap_create = iwn_vap_create;
652 ic->ic_vap_delete = iwn_vap_delete;
653 ic->ic_raw_xmit = iwn_raw_xmit;
654 ic->ic_node_alloc = iwn_node_alloc;
655 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
656 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
657 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
658 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
659 sc->sc_addba_request = ic->ic_addba_request;
660 ic->ic_addba_request = iwn_addba_request;
661 sc->sc_addba_response = ic->ic_addba_response;
662 ic->ic_addba_response = iwn_addba_response;
663 sc->sc_addba_stop = ic->ic_addba_stop;
664 ic->ic_addba_stop = iwn_ampdu_tx_stop;
665 ic->ic_newassoc = iwn_newassoc;
666 ic->ic_wme.wme_update = iwn_updateedca;
667 ic->ic_update_mcast = iwn_update_mcast;
668 ic->ic_scan_start = iwn_scan_start;
669 ic->ic_scan_end = iwn_scan_end;
670 ic->ic_set_channel = iwn_set_channel;
671 ic->ic_scan_curchan = iwn_scan_curchan;
672 ic->ic_scan_mindwell = iwn_scan_mindwell;
673 ic->ic_setregdomain = iwn_setregdomain;
674
675 iwn_radiotap_attach(sc);
676
677 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
678 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
679 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
680 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
681 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
682
683 iwn_sysctlattach(sc);
684
685 /*
686 * Hook our interrupt after all initialization is complete.
687 */
688 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
689 NULL, iwn_intr, sc, &sc->sc_ih);
690 if (error != 0) {
691 device_printf(dev, "can't establish interrupt, error %d\n",
692 error);
693 goto fail;
694 }
695
696 if (bootverbose)
697 ieee80211_announce(ic);
698 return 0;
699fail:
700 iwn_detach(dev);
701 return error;
702}
703
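/*
 * Fill in the ops vector and per-chip constants for 4965AGN adapters.
 */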
704static int
705iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
706{
707 struct iwn_ops *ops = &sc->ops;
708
709 ops->load_firmware = iwn4965_load_firmware;
710 ops->read_eeprom = iwn4965_read_eeprom;
711 ops->post_alive = iwn4965_post_alive;
712 ops->nic_config = iwn4965_nic_config;
713 ops->update_sched = iwn4965_update_sched;
714 ops->get_temperature = iwn4965_get_temperature;
715 ops->get_rssi = iwn4965_get_rssi;
716 ops->set_txpower = iwn4965_set_txpower;
717 ops->init_gains = iwn4965_init_gains;
718 ops->set_gains = iwn4965_set_gains;
719 ops->add_node = iwn4965_add_node;
720 ops->tx_done = iwn4965_tx_done;
721 ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
722 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
723 sc->ntxqs = IWN4965_NTXQUEUES;
724 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
725 sc->ndmachnls = IWN4965_NDMACHNLS;
726 sc->broadcast_id = IWN4965_ID_BROADCAST;
727 sc->rxonsz = IWN4965_RXONSZ;
728 sc->schedsz = IWN4965_SCHEDSZ;
729 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
730 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
731 sc->fwsz = IWN4965_FWSZ;
732 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
733 sc->limits = &iwn4965_sensitivity_limits;
734 sc->fwname = "iwn4965fw";
735 /* Override chains masks, ROM is known to be broken. */
736 sc->txchainmask = IWN_ANT_AB;
737 sc->rxchainmask = IWN_ANT_ABC;
738
739 return 0;
740}
741
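/*
 * Fill in the ops vector and per-chip constants for 5000 Series and
 * newer adapters; firmware image and sensitivity limits are selected
 * by hardware revision type.
 */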
742static int
743iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
744{
745 struct iwn_ops *ops = &sc->ops;
746
747 ops->load_firmware = iwn5000_load_firmware;
748 ops->read_eeprom = iwn5000_read_eeprom;
749 ops->post_alive = iwn5000_post_alive;
750 ops->nic_config = iwn5000_nic_config;
751 ops->update_sched = iwn5000_update_sched;
752 ops->get_temperature = iwn5000_get_temperature;
753 ops->get_rssi = iwn5000_get_rssi;
754 ops->set_txpower = iwn5000_set_txpower;
755 ops->init_gains = iwn5000_init_gains;
756 ops->set_gains = iwn5000_set_gains;
757 ops->add_node = iwn5000_add_node;
758 ops->tx_done = iwn5000_tx_done;
759 ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
760 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
761 sc->ntxqs = IWN5000_NTXQUEUES;
762 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
763 sc->ndmachnls = IWN5000_NDMACHNLS;
764 sc->broadcast_id = IWN5000_ID_BROADCAST;
765 sc->rxonsz = IWN5000_RXONSZ;
766 sc->schedsz = IWN5000_SCHEDSZ;
767 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
768 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
769 sc->fwsz = IWN5000_FWSZ;
770 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
771 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
772 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
773
774 switch (sc->hw_type) {
775 case IWN_HW_REV_TYPE_5100:
776 sc->limits = &iwn5000_sensitivity_limits;
777 sc->fwname = "iwn5000fw";
778 /* Override chains masks, ROM is known to be broken. */
779 sc->txchainmask = IWN_ANT_B;
780 sc->rxchainmask = IWN_ANT_AB;
781 break;
782 case IWN_HW_REV_TYPE_5150:
783 sc->limits = &iwn5150_sensitivity_limits;
784 sc->fwname = "iwn5150fw";
785 break;
786 case IWN_HW_REV_TYPE_5300:
787 case IWN_HW_REV_TYPE_5350:
788 sc->limits = &iwn5000_sensitivity_limits;
789 sc->fwname = "iwn5000fw";
790 break;
791 case IWN_HW_REV_TYPE_1000:
792 sc->limits = &iwn1000_sensitivity_limits;
793 sc->fwname = "iwn1000fw";
794 break;
795 case IWN_HW_REV_TYPE_6000:
796 sc->limits = &iwn6000_sensitivity_limits;
797 sc->fwname = "iwn6000fw";
798 if (pid == 0x422c || pid == 0x4239) {
799 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
800 /* Override chains masks, ROM is known to be broken. */
801 sc->txchainmask = IWN_ANT_BC;
802 sc->rxchainmask = IWN_ANT_BC;
803 }
804 break;
805 case IWN_HW_REV_TYPE_6050:
806 sc->limits = &iwn6000_sensitivity_limits;
807 sc->fwname = "iwn6050fw";
808 /* Override chains masks, ROM is known to be broken. */
809 sc->txchainmask = IWN_ANT_AB;
810 sc->rxchainmask = IWN_ANT_AB;
811 break;
812 case IWN_HW_REV_TYPE_6005:
813 sc->limits = &iwn6000_sensitivity_limits;
814 if (pid != 0x0082 && pid != 0x0085) {
815 sc->fwname = "iwn6000g2bfw";
816 sc->sc_flags |= IWN_FLAG_ADV_BTCOEX;
817 } else
818 sc->fwname = "iwn6000g2afw";
819 break;
820 default:
821 device_printf(sc->sc_dev, "adapter type %d not supported\n",
822 sc->hw_type);
823 return ENOTSUP;
824 }
825 return 0;
826}
827
828/*
829 * Attach the interface to 802.11 radiotap.
830 */
831static void
832iwn_radiotap_attach(struct iwn_softc *sc)
833{
834 struct ifnet *ifp = sc->sc_ifp;
835 struct ieee80211com *ic = ifp->if_l2com;
836
837 ieee80211_radiotap_attach(ic,
838 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
839 IWN_TX_RADIOTAP_PRESENT,
840 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
841 IWN_RX_RADIOTAP_PRESENT);
842}
843
844static void
845iwn_sysctlattach(struct iwn_softc *sc)
846{
847 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
848 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
849
850#ifdef IWN_DEBUG
851 sc->sc_debug = 0;
852 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
853 "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
854#endif
855}
856
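/*
 * Create a net80211 VAP; the driver supports only a single VAP at a
 * time and overrides the newstate method with its own.
 */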
857static struct ieee80211vap *
858iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
859 enum ieee80211_opmode opmode, int flags,
860 const uint8_t bssid[IEEE80211_ADDR_LEN],
861 const uint8_t mac[IEEE80211_ADDR_LEN])
862{
863 struct iwn_vap *ivp;
864 struct ieee80211vap *vap;
865
866 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
867 return NULL;
868 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
869 M_80211_VAP, M_NOWAIT | M_ZERO);
870 if (ivp == NULL)
871 return NULL;
872 vap = &ivp->iv_vap;
873 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
874 vap->iv_bmissthreshold = 10; /* override default */
875 /* Override with driver methods. */
876 ivp->iv_newstate = vap->iv_newstate;
877 vap->iv_newstate = iwn_newstate;
878
879 ieee80211_ratectl_init(vap);
880 /* Complete setup. */
881 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
882 ic->ic_opmode = opmode;
883 return vap;
884}
885
886static void
887iwn_vap_delete(struct ieee80211vap *vap)
888{
889 struct iwn_vap *ivp = IWN_VAP(vap);
890
891 ieee80211_ratectl_deinit(vap);
892 ieee80211_vap_detach(vap);
893 free(ivp, M_80211_VAP);
894}
895
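/*
 * Detach: drain pending tasks, stop the hardware, detach from
 * net80211 and release all interrupt, DMA and bus resources.
 */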
896static int
897iwn_detach(device_t dev)
898{
899 struct iwn_softc *sc = device_get_softc(dev);
900 struct ifnet *ifp = sc->sc_ifp;
901 struct ieee80211com *ic;
902 int qid;
903
904 if (ifp != NULL) {
905 ic = ifp->if_l2com;
906
907 ieee80211_draintask(ic, &sc->sc_reinit_task);
908 ieee80211_draintask(ic, &sc->sc_radioon_task);
909 ieee80211_draintask(ic, &sc->sc_radiooff_task);
910
911 iwn_stop(sc);
912 callout_drain(&sc->watchdog_to);
913 callout_drain(&sc->calib_to);
914 ieee80211_ifdetach(ic);
915 }
916
917 /* Uninstall interrupt handler. */
918 if (sc->irq != NULL) {
919 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
920 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
921 if (sc->irq_rid == 1)
922 pci_release_msi(dev);
923 }
924
925 /* Free DMA resources. */
926 iwn_free_rx_ring(sc, &sc->rxq);
927 for (qid = 0; qid < sc->ntxqs; qid++)
928 iwn_free_tx_ring(sc, &sc->txq[qid]);
929 iwn_free_sched(sc);
930 iwn_free_kw(sc);
931 if (sc->ict != NULL)
932 iwn_free_ict(sc);
933 iwn_free_fwmem(sc);
934
935 if (sc->mem != NULL)
936 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
937
938 if (ifp != NULL)
939 if_free(ifp);
940
941 IWN_LOCK_DESTROY(sc);
942 return 0;
943}
944
945static int
946iwn_shutdown(device_t dev)
947{
948 struct iwn_softc *sc = device_get_softc(dev);
949
950 iwn_stop(sc);
951 return 0;
952}
953
954static int
955iwn_suspend(device_t dev)
956{
957 struct iwn_softc *sc = device_get_softc(dev);
958 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
959
960 ieee80211_suspend_all(ic);
961 return 0;
962}
963
964static int
965iwn_resume(device_t dev)
966{
967 struct iwn_softc *sc = device_get_softc(dev);
968 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
969
970 /* Clear device-specific "PCI retry timeout" register (41h). */
971 pci_write_config(dev, 0x41, 0, 1);
972
973 ieee80211_resume_all(ic);
974 return 0;
975}
976
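/*
 * Request exclusive access to the NIC so that prph/memory registers
 * can be read and written; spins for up to ~10ms and returns
 * ETIMEDOUT if access is never granted.
 */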
977static int
978iwn_nic_lock(struct iwn_softc *sc)
979{
980 int ntries;
981
982 /* Request exclusive access to NIC. */
983 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
984
985 /* Spin until we actually get the lock. */
986 for (ntries = 0; ntries < 1000; ntries++) {
987 if ((IWN_READ(sc, IWN_GP_CNTRL) &
988 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
989 IWN_GP_CNTRL_MAC_ACCESS_ENA)
990 return 0;
991 DELAY(10);
992 }
993 return ETIMEDOUT;
994}
995
996static __inline void
997iwn_nic_unlock(struct iwn_softc *sc)
998{
999 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
1000}
1001
1002static __inline uint32_t
1003iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
1004{
1005 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
1006 IWN_BARRIER_READ_WRITE(sc);
1007 return IWN_READ(sc, IWN_PRPH_RDATA);
1008}
1009
1010static __inline void
1011iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1012{
1013 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
1014 IWN_BARRIER_WRITE(sc);
1015 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
1016}
1017
1018static __inline void
1019iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1020{
1021 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
1022}
1023
1024static __inline void
1025iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
1026{
1027 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
1028}
1029
1030static __inline void
1031iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
1032 const uint32_t *data, int count)
1033{
1034 for (; count > 0; count--, data++, addr += 4)
1035 iwn_prph_write(sc, addr, *data);
1036}
1037
1038static __inline uint32_t
1039iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
1040{
1041 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
1042 IWN_BARRIER_READ_WRITE(sc);
1043 return IWN_READ(sc, IWN_MEM_RDATA);
1044}
1045
1046static __inline void
1047iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
1048{
1049 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
1050 IWN_BARRIER_WRITE(sc);
1051 IWN_WRITE(sc, IWN_MEM_WDATA, data);
1052}
1053
1054static __inline void
1055iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
1056{
1057 uint32_t tmp;
1058
1059 tmp = iwn_mem_read(sc, addr & ~3);
1060 if (addr & 3)
1061 tmp = (tmp & 0x0000ffff) | data << 16;
1062 else
1063 tmp = (tmp & 0xffff0000) | data;
1064 iwn_mem_write(sc, addr & ~3, tmp);
1065}
1066
1067static __inline void
1068iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
1069 int count)
1070{
1071 for (; count > 0; count--, addr += 4)
1072 *data++ = iwn_mem_read(sc, addr);
1073}
1074
1075static __inline void
1076iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1077 int count)
1078{
1079 for (; count > 0; count--, addr += 4)
1080 iwn_mem_write(sc, addr, val);
1081}
1082
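/*
 * Request exclusive access to the EEPROM/OTPROM through the
 * HW_IF_CONFIG register; returns ETIMEDOUT if it is never granted.
 */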
1083static int
1084iwn_eeprom_lock(struct iwn_softc *sc)
1085{
1086 int i, ntries;
1087
1088 for (i = 0; i < 100; i++) {
1089 /* Request exclusive access to EEPROM. */
1090 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1091 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1092
1093 /* Spin until we actually get the lock. */
1094 for (ntries = 0; ntries < 100; ntries++) {
1095 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1096 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1097 return 0;
1098 DELAY(10);
1099 }
1100 }
1101 return ETIMEDOUT;
1102}
1103
1104static __inline void
1105iwn_eeprom_unlock(struct iwn_softc *sc)
1106{
1107 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1108}
1109
1110/*
1111 * Initialize access by host to One Time Programmable ROM.
1112 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1113 */
1114static int
1115iwn_init_otprom(struct iwn_softc *sc)
1116{
1117 uint16_t prev, base, next;
1118 int count, error;
1119
1120 /* Wait for clock stabilization before accessing prph. */
1121 if ((error = iwn_clock_wait(sc)) != 0)
1122 return error;
1123
1124 if ((error = iwn_nic_lock(sc)) != 0)
1125 return error;
1126 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1127 DELAY(5);
1128 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1129 iwn_nic_unlock(sc);
1130
1131 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1132 if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1133 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1134 IWN_RESET_LINK_PWR_MGMT_DIS);
1135 }
1136 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1137 /* Clear ECC status. */
1138 IWN_SETBITS(sc, IWN_OTP_GP,
1139 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1140
1141 /*
1142 * Find the block before last block (contains the EEPROM image)
1143 * for HW without OTP shadow RAM.
1144 */
1145 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1146 /* Switch to absolute addressing mode. */
1147 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1148 base = prev = 0;
1149 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1150 error = iwn_read_prom_data(sc, base, &next, 2);
1151 if (error != 0)
1152 return error;
1153 if (next == 0) /* End of linked-list. */
1154 break;
1155 prev = base;
1156 base = le16toh(next);
1157 }
1158 if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1159 return EIO;
1160 /* Skip "next" word. */
1161 sc->prom_base = prev + 1;
1162 }
1163 return 0;
1164}
1165
1166static int
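/*
 * Read 'count' bytes from the EEPROM/OTPROM starting at word offset
 * 'addr', two bytes at a time, checking for (and clearing) OTPROM
 * ECC errors along the way.
 */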
1167iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1168{
1169 uint8_t *out = data;
1170 uint32_t val, tmp;
1171 int ntries;
1172
1173 addr += sc->prom_base;
1174 for (; count > 0; count -= 2, addr++) {
1175 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1176 for (ntries = 0; ntries < 10; ntries++) {
1177 val = IWN_READ(sc, IWN_EEPROM);
1178 if (val & IWN_EEPROM_READ_VALID)
1179 break;
1180 DELAY(5);
1181 }
1182 if (ntries == 10) {
1183 device_printf(sc->sc_dev,
1184 "timeout reading ROM at 0x%x\n", addr);
1185 return ETIMEDOUT;
1186 }
1187 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1188 /* OTPROM, check for ECC errors. */
1189 tmp = IWN_READ(sc, IWN_OTP_GP);
1190 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1191 device_printf(sc->sc_dev,
1192 "OTPROM ECC error at 0x%x\n", addr);
1193 return EIO;
1194 }
1195 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1196 /* Correctable ECC error, clear bit. */
1197 IWN_SETBITS(sc, IWN_OTP_GP,
1198 IWN_OTP_GP_ECC_CORR_STTS);
1199 }
1200 }
1201 *out++ = val >> 16;
1202 if (count > 1)
1203 *out++ = val >> 24;
1204 }
1205 return 0;
1206}
1207
1208static void
1209iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1210{
1211 if (error != 0)
1212 return;
1213 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1214 *(bus_addr_t *)arg = segs[0].ds_addr;
1215}
1216
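/*
 * Allocate a physically contiguous, coherent DMA buffer of 'size'
 * bytes with the requested alignment; the kernel virtual address is
 * returned through 'kvap' when it is non-NULL.
 */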
1217static int
1218iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1219 void **kvap, bus_size_t size, bus_size_t alignment)
1220{
1221 int error;
1222
1223 dma->tag = NULL;
1224 dma->size = size;
1225
1226 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1227 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1228 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
1229 if (error != 0)
1230 goto fail;
1231
1232 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1233 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
1234 if (error != 0)
1235 goto fail;
1236
1237 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
1238 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
1239 if (error != 0)
1240 goto fail;
1241
1242 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
1243
1244 if (kvap != NULL)
1245 *kvap = dma->vaddr;
1246
1247 return 0;
1248
1249fail: iwn_dma_contig_free(dma);
1250 return error;
1251}
1252
1253static void
1254iwn_dma_contig_free(struct iwn_dma_info *dma)
1255{
1256 if (dma->map != NULL) {
1257 if (dma->vaddr != NULL) {
1258 bus_dmamap_sync(dma->tag, dma->map,
1259 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1260 bus_dmamap_unload(dma->tag, dma->map);
1261 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
1262 dma->vaddr = NULL;
1263 }
1264 bus_dmamap_destroy(dma->tag, dma->map);
1265 dma->map = NULL;
1266 }
1267 if (dma->tag != NULL) {
1268 bus_dma_tag_destroy(dma->tag);
1269 dma->tag = NULL;
1270 }
1271}
1272
1273static int
1274iwn_alloc_sched(struct iwn_softc *sc)
1275{
1276 /* TX scheduler rings must be aligned on a 1KB boundary. */
1277 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
1278 sc->schedsz, 1024);
1279}
1280
1281static void
1282iwn_free_sched(struct iwn_softc *sc)
1283{
1284 iwn_dma_contig_free(&sc->sched_dma);
1285}
1286
1287static int
1288iwn_alloc_kw(struct iwn_softc *sc)
1289{
1290 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1291 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
1292}
1293
1294static void
1295iwn_free_kw(struct iwn_softc *sc)
1296{
1297 iwn_dma_contig_free(&sc->kw_dma);
1298}
1299
1300static int
1301iwn_alloc_ict(struct iwn_softc *sc)
1302{
1303 /* ICT table must be aligned on a 4KB boundary. */
1304 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
1305 IWN_ICT_SIZE, 4096);
1306}
1307
1308static void
1309iwn_free_ict(struct iwn_softc *sc)
1310{
1311 iwn_dma_contig_free(&sc->ict_dma);
1312}
1313
1314static int
1315iwn_alloc_fwmem(struct iwn_softc *sc)
1316{
1317 /* Must be aligned on a 16-byte boundary. */
1318 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
1319}
1320
1321static void
1322iwn_free_fwmem(struct iwn_softc *sc)
1323{
1324 iwn_dma_contig_free(&sc->fw_dma);
1325}
1326
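/*
 * Allocate the RX ring: descriptor array, status area, buffer DMA tag
 * and one receive mbuf (with DMA map) per ring slot.
 */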
1327static int
1328iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1329{
1330 bus_size_t size;
1331 int i, error;
1332
1333 ring->cur = 0;
1334
1335 /* Allocate RX descriptors (256-byte aligned). */
1336 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1337 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1338 size, 256);
1339 if (error != 0) {
1340 device_printf(sc->sc_dev,
1341 "%s: could not allocate RX ring DMA memory, error %d\n",
1342 __func__, error);
1343 goto fail;
1344 }
1345
1346 /* Allocate RX status area (16-byte aligned). */
1347 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
1348 sizeof (struct iwn_rx_status), 16);
1349 if (error != 0) {
1350 device_printf(sc->sc_dev,
1351 "%s: could not allocate RX status DMA memory, error %d\n",
1352 __func__, error);
1353 goto fail;
1354 }
1355
1356 /* Create RX buffer DMA tag. */
1357 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1358 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1359 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
1360 &ring->data_dmat);
1361 if (error != 0) {
1362 device_printf(sc->sc_dev,
1363 "%s: could not create RX buf DMA tag, error %d\n",
1364 __func__, error);
1365 goto fail;
1366 }
1367
1368 /*
1369 * Allocate and map RX buffers.
1370 */
1371 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1372 struct iwn_rx_data *data = &ring->data[i];
1373 bus_addr_t paddr;
1374
1375 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1376 if (error != 0) {
1377 device_printf(sc->sc_dev,
1378 "%s: could not create RX buf DMA map, error %d\n",
1379 __func__, error);
1380 goto fail;
1381 }
1382
1383 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1384 IWN_RBUF_SIZE);
1385 if (data->m == NULL) {
1386 device_printf(sc->sc_dev,
1387 "%s: could not allocate RX mbuf\n", __func__);
1388 error = ENOBUFS;
1389 goto fail;
1390 }
1391
1392 error = bus_dmamap_load(ring->data_dmat, data->map,
1393 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
1394 &paddr, BUS_DMA_NOWAIT);
1395 if (error != 0 && error != EFBIG) {
1396 device_printf(sc->sc_dev,
1397			    "%s: could not map mbuf, error %d\n", __func__,
1398 error);
1399 goto fail;
1400 }
1401
1402 /* Set physical address of RX buffer (256-byte aligned). */
1403 ring->desc[i] = htole32(paddr >> 8);
1404 }
1405
1406 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1407 BUS_DMASYNC_PREWRITE);
1408
1409 return 0;
1410
1411fail: iwn_free_rx_ring(sc, ring);
1412 return error;
1413}
1414
1415static void
1416iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1417{
1418 int ntries;
1419
1420 if (iwn_nic_lock(sc) == 0) {
1421 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1422 for (ntries = 0; ntries < 1000; ntries++) {
1423 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1424 IWN_FH_RX_STATUS_IDLE)
1425 break;
1426 DELAY(10);
1427 }
1428 iwn_nic_unlock(sc);
1429 }
1430 ring->cur = 0;
1431 sc->last_rx_valid = 0;
1432}
1433
1434static void
1435iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1436{
1437 int i;
1438
1439 iwn_dma_contig_free(&ring->desc_dma);
1440 iwn_dma_contig_free(&ring->stat_dma);
1441
1442 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1443 struct iwn_rx_data *data = &ring->data[i];
1444
1445 if (data->m != NULL) {
1446 bus_dmamap_sync(ring->data_dmat, data->map,
1447 BUS_DMASYNC_POSTREAD);
1448 bus_dmamap_unload(ring->data_dmat, data->map);
1449 m_freem(data->m);
1450 data->m = NULL;
1451 }
1452 if (data->map != NULL)
1453 bus_dmamap_destroy(ring->data_dmat, data->map);
1454 }
1455 if (ring->data_dmat != NULL) {
1456 bus_dma_tag_destroy(ring->data_dmat);
1457 ring->data_dmat = NULL;
1458 }
1459}
1460
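/*
 * Allocate a TX ring: descriptor and command arrays plus a DMA map
 * per slot; 'qid' identifies the hardware transmit queue.
 */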
1461static int
1462iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1463{
1464 bus_addr_t paddr;
1465 bus_size_t size;
1466 int i, error;
1467
1468 ring->qid = qid;
1469 ring->queued = 0;
1470 ring->cur = 0;
1471
1472 /* Allocate TX descriptors (256-byte aligned). */
1473 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1474 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
1475 size, 256);
1476 if (error != 0) {
1477 device_printf(sc->sc_dev,
1478 "%s: could not allocate TX ring DMA memory, error %d\n",
1479 __func__, error);
1480 goto fail;
1481 }
1482
1483 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1484 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
1485 size, 4);
1486 if (error != 0) {
1487 device_printf(sc->sc_dev,
1488 "%s: could not allocate TX cmd DMA memory, error %d\n",
1489 __func__, error);
1490 goto fail;
1491 }
1492
1493 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1494 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1495 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
1496 &ring->data_dmat);
1497 if (error != 0) {
1498 device_printf(sc->sc_dev,
1499 "%s: could not create TX buf DMA tag, error %d\n",
1500 __func__, error);
1501 goto fail;
1502 }
1503
1504 paddr = ring->cmd_dma.paddr;
1505 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1506 struct iwn_tx_data *data = &ring->data[i];
1507
1508 data->cmd_paddr = paddr;
1509 data->scratch_paddr = paddr + 12;
1510 paddr += sizeof (struct iwn_tx_cmd);
1511
1512 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1513 if (error != 0) {
1514 device_printf(sc->sc_dev,
1515 "%s: could not create TX buf DMA map, error %d\n",
1516 __func__, error);
1517 goto fail;
1518 }
1519 }
1520 return 0;
1521
1522fail: iwn_free_tx_ring(sc, ring);
1523 return error;
1524}
1525
1526static void
1527iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1528{
1529 int i;
1530
1531 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1532 struct iwn_tx_data *data = &ring->data[i];
1533
1534 if (data->m != NULL) {
1535 bus_dmamap_sync(ring->data_dmat, data->map,
1536 BUS_DMASYNC_POSTWRITE);
1537 bus_dmamap_unload(ring->data_dmat, data->map);
1538 m_freem(data->m);
1539 data->m = NULL;
1540 }
1541 }
1542 /* Clear TX descriptors. */
1543 memset(ring->desc, 0, ring->desc_dma.size);
1544 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1545 BUS_DMASYNC_PREWRITE);
1546 sc->qfullmsk &= ~(1 << ring->qid);
1547 ring->queued = 0;
1548 ring->cur = 0;
1549}
1550
1551static void
1552iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1553{
1554 int i;
1555
1556 iwn_dma_contig_free(&ring->desc_dma);
1557 iwn_dma_contig_free(&ring->cmd_dma);
1558
1559 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1560 struct iwn_tx_data *data = &ring->data[i];
1561
1562 if (data->m != NULL) {
1563 bus_dmamap_sync(ring->data_dmat, data->map,
1564 BUS_DMASYNC_POSTWRITE);
1565 bus_dmamap_unload(ring->data_dmat, data->map);
1566 m_freem(data->m);
1567 }
1568 if (data->map != NULL)
1569 bus_dmamap_destroy(ring->data_dmat, data->map);
1570 }
1571 if (ring->data_dmat != NULL) {
1572 bus_dma_tag_destroy(ring->data_dmat);
1573 ring->data_dmat = NULL;
1574 }
1575}
1576
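/*
 * Reset the ICT (interrupt cause table) used by 5000 Series and newer
 * adapters and switch the driver to ICT interrupt mode.
 */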
1577static void
1578iwn5000_ict_reset(struct iwn_softc *sc)
1579{
1580 /* Disable interrupts. */
1581 IWN_WRITE(sc, IWN_INT_MASK, 0);
1582
1583 /* Reset ICT table. */
1584 memset(sc->ict, 0, IWN_ICT_SIZE);
1585 sc->ict_cur = 0;
1586
1587 /* Set physical address of ICT table (4KB aligned). */
1588 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1589 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1590 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1591
1592 /* Enable periodic RX interrupt. */
1593 sc->int_mask |= IWN_INT_RX_PERIODIC;
1594 /* Switch to ICT interrupt mode in driver. */
1595 sc->sc_flags |= IWN_FLAG_USE_ICT;
1596
1597 /* Re-enable interrupts. */
1598 IWN_WRITE(sc, IWN_INT, 0xffffffff);
1599 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1600}
1601
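/*
 * Read common configuration (SKU capabilities, RF configuration and
 * MAC address) from the EEPROM or OTPROM, then hand off to the
 * chip-specific read_eeprom method.
 */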
1602static int
1603iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1604{
1605 struct iwn_ops *ops = &sc->ops;
1606 uint16_t val;
1607 int error;
1608
1609 /* Check whether adapter has an EEPROM or an OTPROM. */
1610 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1611 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1612 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1613 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1614 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1615
1616 /* Adapter has to be powered on for EEPROM access to work. */
1617 if ((error = iwn_apm_init(sc)) != 0) {
1618 device_printf(sc->sc_dev,
1619 "%s: could not power ON adapter, error %d\n", __func__,
1620 error);
1621 return error;
1622 }
1623
1624 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1625 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1626 return EIO;
1627 }
1628 if ((error = iwn_eeprom_lock(sc)) != 0) {
1629 device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
1630 __func__, error);
1631 return error;
1632 }
1633 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1634 if ((error = iwn_init_otprom(sc)) != 0) {
1635 device_printf(sc->sc_dev,
1636 "%s: could not initialize OTPROM, error %d\n",
1637 __func__, error);
1638 return error;
1639 }
1640 }
1641
1642 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1643 DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
1644 /* Check if HT support is bonded out. */
1645 if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1646 sc->sc_flags |= IWN_FLAG_HAS_11N;
1647
1648 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1649 sc->rfcfg = le16toh(val);
1650 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1651 /* Read Tx/Rx chains from ROM unless it's known to be broken. */
1652 if (sc->txchainmask == 0)
1653 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1654 if (sc->rxchainmask == 0)
1655 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1656
1657 /* Read MAC address. */
1658 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1659
1660 /* Read adapter-specific information from EEPROM. */
1661 ops->read_eeprom(sc);
1662
1663 iwn_apm_stop(sc); /* Power OFF adapter. */
1664
1665 iwn_eeprom_unlock(sc);
1666 return 0;
1667}
1668
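/*
 * Read 4965-specific data from the EEPROM: regulatory domain, channel
 * lists, maximum TX power and per-band power calibration samples.
 */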
1669static void
1670iwn4965_read_eeprom(struct iwn_softc *sc)
1671{
1672 uint32_t addr;
1673 uint16_t val;
1674 int i;
1675
1676 /* Read regulatory domain (4 ASCII characters). */
1677 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1678
1679 /* Read the list of authorized channels (20MHz ones only). */
1680 for (i = 0; i < 7; i++) {
1681 addr = iwn4965_regulatory_bands[i];
1682 iwn_read_eeprom_channels(sc, i, addr);
1683 }
1684
1685 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1686 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1687 sc->maxpwr2GHz = val & 0xff;
1688 sc->maxpwr5GHz = val >> 8;
1689 /* Check that EEPROM values are within valid range. */
1690 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1691 sc->maxpwr5GHz = 38;
1692 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1693 sc->maxpwr2GHz = 38;
1694 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1695 sc->maxpwr2GHz, sc->maxpwr5GHz);
1696
1697 /* Read samples for each TX power group. */
1698 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1699 sizeof sc->bands);
1700
1701 /* Read voltage at which samples were taken. */
1702 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1703 sc->eeprom_voltage = (int16_t)le16toh(val);
1704 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1705 sc->eeprom_voltage);
1706
1707#ifdef IWN_DEBUG
1708 /* Print samples. */
1709 if (sc->sc_debug & IWN_DEBUG_ANY) {
1710 for (i = 0; i < IWN_NBANDS; i++)
1711 iwn4965_print_power_group(sc, i);
1712 }
1713#endif
1714}
1715
1716#ifdef IWN_DEBUG
1717static void
1718iwn4965_print_power_group(struct iwn_softc *sc, int i)
1719{
1720 struct iwn4965_eeprom_band *band = &sc->bands[i];
1721 struct iwn4965_eeprom_chan_samples *chans = band->chans;
1722 int j, c;
1723
1724 printf("===band %d===\n", i);
1725 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1726 printf("chan1 num=%d\n", chans[0].num);
1727 for (c = 0; c < 2; c++) {
1728 for (j = 0; j < IWN_NSAMPLES; j++) {
1729 printf("chain %d, sample %d: temp=%d gain=%d "
1730 "power=%d pa_det=%d\n", c, j,
1731 chans[0].samples[c][j].temp,
1732 chans[0].samples[c][j].gain,
1733 chans[0].samples[c][j].power,
1734 chans[0].samples[c][j].pa_det);
1735 }
1736 }
1737 printf("chan2 num=%d\n", chans[1].num);
1738 for (c = 0; c < 2; c++) {
1739 for (j = 0; j < IWN_NSAMPLES; j++) {
1740 printf("chain %d, sample %d: temp=%d gain=%d "
1741 "power=%d pa_det=%d\n", c, j,
1742 chans[1].samples[c][j].temp,
1743 chans[1].samples[c][j].gain,
1744 chans[1].samples[c][j].power,
1745 chans[1].samples[c][j].pa_det);
1746 }
1747 }
1748}
1749#endif
1750
1751static void
1752iwn5000_read_eeprom(struct iwn_softc *sc)
1753{
1754 struct iwn5000_eeprom_calib_hdr hdr;
1755 int32_t volt;
1756 uint32_t base, addr;
1757 uint16_t val;
1758 int i;
1759
1760 /* Read regulatory domain (4 ASCII characters). */
1761 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1762 base = le16toh(val);
1763 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1764 sc->eeprom_domain, 4);
1765
1766 /* Read the list of authorized channels (20MHz ones only). */
1767 for (i = 0; i < 7; i++) {
1768 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1769 addr = base + iwn6000_regulatory_bands[i];
1770 else
1771 addr = base + iwn5000_regulatory_bands[i];
1772 iwn_read_eeprom_channels(sc, i, addr);
1773 }
1774
1775 /* Read enhanced TX power information for 6000 Series. */
1776 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1777 iwn_read_eeprom_enhinfo(sc);
1778
1779 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1780 base = le16toh(val);
1781 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1782 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1783 "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
1784 hdr.version, hdr.pa_type, le16toh(hdr.volt));
1785 sc->calib_ver = hdr.version;
1786
1787 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1788 /* Compute temperature offset. */
1789 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1790 sc->eeprom_temp = le16toh(val);
1791 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1792 volt = le16toh(val);
1793 sc->temp_off = sc->eeprom_temp - (volt / -5);
1794 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1795 sc->eeprom_temp, volt, sc->temp_off);
1796 } else {
1797 /* Read crystal calibration. */
1798 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1799 &sc->eeprom_crystal, sizeof (uint32_t));
1800 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
1801 le32toh(sc->eeprom_crystal));
1802 }
1803}
1804
1805/*
1806 * Translate EEPROM flags to net80211.
1807 */
1808static uint32_t
1809iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1810{
1811 uint32_t nflags;
1812
1813 nflags = 0;
1814 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1815 nflags |= IEEE80211_CHAN_PASSIVE;
1816 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1817 nflags |= IEEE80211_CHAN_NOADHOC;
1818 if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1819 nflags |= IEEE80211_CHAN_DFS;
1820 /* XXX apparently IBSS may still be marked */
1821 nflags |= IEEE80211_CHAN_NOADHOC;
1822 }
1823
1824 return nflags;
1825}
1826
1827static void
1828iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1829{
1830 struct ifnet *ifp = sc->sc_ifp;
1831 struct ieee80211com *ic = ifp->if_l2com;
1832 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1833 const struct iwn_chan_band *band = &iwn_bands[n];
1834 struct ieee80211_channel *c;
1835 uint8_t chan;
1836 int i, nflags;
1837
1838 for (i = 0; i < band->nchan; i++) {
1839 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1840 DPRINTF(sc, IWN_DEBUG_RESET,
1841 "skip chan %d flags 0x%x maxpwr %d\n",
1842 band->chan[i], channels[i].flags,
1843 channels[i].maxpwr);
1844 continue;
1845 }
1846 chan = band->chan[i];
1847 nflags = iwn_eeprom_channel_flags(&channels[i]);
1848
1849 c = &ic->ic_channels[ic->ic_nchans++];
1850 c->ic_ieee = chan;
1851 c->ic_maxregpower = channels[i].maxpwr;
1852 c->ic_maxpower = 2*c->ic_maxregpower;
1853
1854 if (n == 0) { /* 2GHz band */
1855 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
1856 /* G =>'s B is supported */
1857 c->ic_flags = IEEE80211_CHAN_B | nflags;
1858 c = &ic->ic_channels[ic->ic_nchans++];
1859 c[0] = c[-1];
1860 c->ic_flags = IEEE80211_CHAN_G | nflags;
1861 } else { /* 5GHz band */
1862 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
1863 c->ic_flags = IEEE80211_CHAN_A | nflags;
1864 }
1865
1866 /* Save maximum allowed TX power for this channel. */
1867 sc->maxpwr[chan] = channels[i].maxpwr;
1868
1869 DPRINTF(sc, IWN_DEBUG_RESET,
1870 "add chan %d flags 0x%x maxpwr %d\n", chan,
1871 channels[i].flags, channels[i].maxpwr);
1872
1873 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
1874 /* add HT20, HT40 added separately */
1875 c = &ic->ic_channels[ic->ic_nchans++];
1876 c[0] = c[-1];
1877 c->ic_flags |= IEEE80211_CHAN_HT20;
1878 }
1879 }
1880}
1881
1882static void
1883iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1884{
1885 struct ifnet *ifp = sc->sc_ifp;
1886 struct ieee80211com *ic = ifp->if_l2com;
1887 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1888 const struct iwn_chan_band *band = &iwn_bands[n];
1889 struct ieee80211_channel *c, *cent, *extc;
1890 uint8_t chan;
1891 int i, nflags;
1892
1893 if (!(sc->sc_flags & IWN_FLAG_HAS_11N))
1894 return;
1895
1896 for (i = 0; i < band->nchan; i++) {
1897 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1898 DPRINTF(sc, IWN_DEBUG_RESET,
1899 "skip chan %d flags 0x%x maxpwr %d\n",
1900 band->chan[i], channels[i].flags,
1901 channels[i].maxpwr);
1902 continue;
1903 }
1904 chan = band->chan[i];
1905 nflags = iwn_eeprom_channel_flags(&channels[i]);
1906
1907 /*
1908 * Each entry defines an HT40 channel pair; find the
1909 * center channel, then the extension channel above.
1910 */
1911 cent = ieee80211_find_channel_byieee(ic, chan,
1912 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
1913 if (cent == NULL) { /* XXX shouldn't happen */
1914 device_printf(sc->sc_dev,
1915 "%s: no entry for channel %d\n", __func__, chan);
1916 continue;
1917 }
1918 extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1919 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
1920 if (extc == NULL) {
1921 DPRINTF(sc, IWN_DEBUG_RESET,
1922 "%s: skip chan %d, extension channel not found\n",
1923 __func__, chan);
1924 continue;
1925 }
1926
1927 DPRINTF(sc, IWN_DEBUG_RESET,
1928 "add ht40 chan %d flags 0x%x maxpwr %d\n",
1929 chan, channels[i].flags, channels[i].maxpwr);
1930
1931 c = &ic->ic_channels[ic->ic_nchans++];
1932 c[0] = cent[0];
1933 c->ic_extieee = extc->ic_ieee;
1934 c->ic_flags &= ~IEEE80211_CHAN_HT;
1935 c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
1936 c = &ic->ic_channels[ic->ic_nchans++];
1937 c[0] = extc[0];
1938 c->ic_extieee = cent->ic_ieee;
1939 c->ic_flags &= ~IEEE80211_CHAN_HT;
1940 c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
1941 }
1942}
1943
1944static void
1945iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1946{
1947 struct ifnet *ifp = sc->sc_ifp;
1948 struct ieee80211com *ic = ifp->if_l2com;
1949
1950 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1951 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1952
1953 if (n < 5)
1954 iwn_read_eeprom_band(sc, n);
1955 else
1956 iwn_read_eeprom_ht40(sc, n);
1957 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1958}
1959
1960static struct iwn_eeprom_chan *
1961iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
1962{
1963 int band, chan, i, j;
1964
1965 if (IEEE80211_IS_CHAN_HT40(c)) {
1966 band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
1967 if (IEEE80211_IS_CHAN_HT40D(c))
1968 chan = c->ic_extieee;
1969 else
1970 chan = c->ic_ieee;
1971 for (i = 0; i < iwn_bands[band].nchan; i++) {
1972 if (iwn_bands[band].chan[i] == chan)
1973 return &sc->eeprom_channels[band][i];
1974 }
1975 } else {
1976 for (j = 0; j < 5; j++) {
1977 for (i = 0; i < iwn_bands[j].nchan; i++) {
1978 if (iwn_bands[j].chan[i] == c->ic_ieee)
1979 return &sc->eeprom_channels[j][i];
1980 }
1981 }
1982 }
1983 return NULL;
1984}
1985
1986/*
1987 * Enforce flags read from EEPROM.
1988 */
1989static int
1990iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
1991 int nchan, struct ieee80211_channel chans[])
1992{
1993 struct iwn_softc *sc = ic->ic_ifp->if_softc;
1994 int i;
1995
1996 for (i = 0; i < nchan; i++) {
1997 struct ieee80211_channel *c = &chans[i];
1998 struct iwn_eeprom_chan *channel;
1999
2000 channel = iwn_find_eeprom_channel(sc, c);
2001 if (channel == NULL) {
2002 if_printf(ic->ic_ifp,
2003 "%s: invalid channel %u freq %u/0x%x\n",
2004 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
2005 return EINVAL;
2006 }
2007 c->ic_flags |= iwn_eeprom_channel_flags(channel);
2008 }
2009
2010 return 0;
2011}
2012
2013static void
2014iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
2015{
2016 struct iwn_eeprom_enhinfo enhinfo[35];
2017 struct ifnet *ifp = sc->sc_ifp;
2018 struct ieee80211com *ic = ifp->if_l2com;
2019 struct ieee80211_channel *c;
2020 uint16_t val, base;
2021 int8_t maxpwr;
2022 uint8_t flags;
2023 int i, j;
2024
2025 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
2026 base = le16toh(val);
2027 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
2028 enhinfo, sizeof enhinfo);
2029
2030 for (i = 0; i < nitems(enhinfo); i++) {
2031 flags = enhinfo[i].flags;
2032 if (!(flags & IWN_ENHINFO_VALID))
2033 continue; /* Skip invalid entries. */
2034
2035 maxpwr = 0;
2036 if (sc->txchainmask & IWN_ANT_A)
2037 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
2038 if (sc->txchainmask & IWN_ANT_B)
2039 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
2040 if (sc->txchainmask & IWN_ANT_C)
2041 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
2042 if (sc->ntxchains == 2)
2043 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
2044 else if (sc->ntxchains == 3)
2045 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
2046
2047 for (j = 0; j < ic->ic_nchans; j++) {
2048 c = &ic->ic_channels[j];
2049 if ((flags & IWN_ENHINFO_5GHZ)) {
2050 if (!IEEE80211_IS_CHAN_A(c))
2051 continue;
2052 } else if ((flags & IWN_ENHINFO_OFDM)) {
2053 if (!IEEE80211_IS_CHAN_G(c))
2054 continue;
2055 } else if (!IEEE80211_IS_CHAN_B(c))
2056 continue;
2057 if ((flags & IWN_ENHINFO_HT40)) {
2058 if (!IEEE80211_IS_CHAN_HT40(c))
2059 continue;
2060 } else {
2061 if (IEEE80211_IS_CHAN_HT40(c))
2062 continue;
2063 }
2064 if (enhinfo[i].chan != 0 &&
2065 enhinfo[i].chan != c->ic_ieee)
2066 continue;
2067
2068 DPRINTF(sc, IWN_DEBUG_RESET,
2069 "channel %d(%x), maxpwr %d\n", c->ic_ieee,
2070 c->ic_flags, maxpwr / 2);
2071 c->ic_maxregpower = maxpwr / 2;
2072 c->ic_maxpower = maxpwr;
2073 }
2074 }
2075}
2076
2077static struct ieee80211_node *
2078iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
2079{
2080 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
2081}
2082
2083static __inline int
2084rate2plcp(int rate)
2085{
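 /*
 * Static rate-to-PLCP mapping: the OFDM cases return the 802.11 OFDM
 * SIGNAL field rate codes (e.g. 6 Mb/s -> 0xd, 54 Mb/s -> 0x3), while
 * the CCK cases return the rate in 100 kb/s units.  Rates are passed
 * in the usual net80211 500 kb/s units.
 */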
2086 switch (rate & 0xff) {
2087 case 12: return 0xd;
2088 case 18: return 0xf;
2089 case 24: return 0x5;
2090 case 36: return 0x7;
2091 case 48: return 0x9;
2092 case 72: return 0xb;
2093 case 96: return 0x1;
2094 case 108: return 0x3;
2095 case 2: return 10;
2096 case 4: return 20;
2097 case 11: return 55;
2098 case 22: return 110;
2099 }
2100 return 0;
2101}
2102
2103/*
2104 * Calculate the required PLCP value for the given rate,
2105 * as transmitted to the given node.
2106 *
2107 * This will take the node configuration (e.g. 11n, rate table
2108 * setup, etc) into consideration.
2109 */
2110static uint32_t
2111iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
2112 uint8_t rate)
2113{
2114#define RV(v) ((v) & IEEE80211_RATE_VAL)
2115 struct ieee80211com *ic = ni->ni_ic;
2116 uint8_t txant1, txant2;
2117 uint32_t plcp = 0;
2118 int ridx;
2119
2120 /* Use the first valid TX antenna. */
2121 txant1 = IWN_LSB(sc->txchainmask);
2122 txant2 = IWN_LSB(sc->txchainmask & ~txant1);
2123
2124 /*
2125 * If it's an MCS rate, let's set the plcp correctly
2126 * and set the relevant flags based on the node config.
2127 */
2128 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
2129 /*
2130 * Set the initial PLCP value to be between 0->31 for
2131 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
2132 * flag.
2133 */
2134 plcp = RV(rate) | IWN_RFLAG_MCS;
2135
2136 /*
2137 * XXX the following should only occur if both
2138 * the local configuration _and_ the remote node
2139 * advertise these capabilities. Thus this code
2140 * may need fixing!
2141 */
2142
2143 /*
2144 * Set the channel width and guard interval.
2145 */
2146 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
2147 plcp |= IWN_RFLAG_HT40;
2148 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
2149 plcp |= IWN_RFLAG_SGI;
2150 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
2151 plcp |= IWN_RFLAG_SGI;
2152 }
2153
2154 /*
2155 * If it's a two stream rate, enable TX on both
2156 * antennas.
2157 *
2158 * XXX three stream rates?
2159 */
2160 if (rate > 0x87)
2161 plcp |= IWN_RFLAG_ANT(txant1 | txant2);
2162 else
2163 plcp |= IWN_RFLAG_ANT(txant1);
2164 } else {
2165 /*
2166 * Set the initial PLCP - fine for both
2167 * OFDM and CCK rates.
2168 */
2169 plcp = rate2plcp(rate);
2170
2171 /* Set CCK flag if it's CCK */
2172
2173 /* XXX It would be nice to have a method
2174 * to map the ridx -> phy table entry
2175 * so we could just query that, rather than
2176 * this hack to check against IWN_RIDX_OFDM6.
2177 */
2178 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
2179 rate & IEEE80211_RATE_VAL);
2180 if (ridx < IWN_RIDX_OFDM6 &&
2181 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
2182 plcp |= IWN_RFLAG_CCK;
2183
2184 /* Set antenna configuration */
2185 plcp |= IWN_RFLAG_ANT(txant1);
2186 }
2187
2188 DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
2189 __func__,
2190 rate,
2191 plcp);
2192
2193 return (htole32(plcp));
2194#undef RV
2195}
2196
2197static void
2198iwn_newassoc(struct ieee80211_node *ni, int isnew)
2199{
2200 /* Doesn't do anything at the moment */
2201}
2202
2203static int
2204iwn_media_change(struct ifnet *ifp)
2205{
2206 int error;
2207
2208 error = ieee80211_media_change(ifp);
2209 /* NB: only the fixed rate can change and that doesn't need a reset */
2210 return (error == ENETRESET ? 0 : error);
2211}
2212
2213static int
2214iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
2215{
2216 struct iwn_vap *ivp = IWN_VAP(vap);
2217 struct ieee80211com *ic = vap->iv_ic;
2218 struct iwn_softc *sc = ic->ic_ifp->if_softc;
2219 int error = 0;
2220
2221 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
2222 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
2223
2224 IEEE80211_UNLOCK(ic);
2225 IWN_LOCK(sc);
2226 callout_stop(&sc->calib_to);
2227
2228 switch (nstate) {
2229 case IEEE80211_S_ASSOC:
2230 if (vap->iv_state != IEEE80211_S_RUN)
2231 break;
2232 /* FALLTHROUGH */
2233 case IEEE80211_S_AUTH:
2234 if (vap->iv_state == IEEE80211_S_AUTH)
2235 break;
2236
2237 /*
2238 * !AUTH -> AUTH transition requires state reset to handle
2239 * reassociations correctly.
2240 */
2241 sc->rxon.associd = 0;
2242 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
2243 sc->calib.state = IWN_CALIB_STATE_INIT;
2244
2245 if ((error = iwn_auth(sc, vap)) != 0) {
2246 device_printf(sc->sc_dev,
2247 "%s: could not move to auth state\n", __func__);
2248 }
2249 break;
2250
2251 case IEEE80211_S_RUN:
2252 /*
2253 * RUN -> RUN transition; Just restart the timers.
2254 */
2255 if (vap->iv_state == IEEE80211_S_RUN) {
2256 sc->calib_cnt = 0;
2257 break;
2258 }
2259
2260 /*
2261 * !RUN -> RUN requires setting the association id
2262 * which is done with a firmware cmd. We also defer
2263 * starting the timers until that work is done.
2264 */
2265 if ((error = iwn_run(sc, vap)) != 0) {
2266 device_printf(sc->sc_dev,
2267 "%s: could not move to run state\n", __func__);
2268 }
2269 break;
2270
2271 case IEEE80211_S_INIT:
2272 sc->calib.state = IWN_CALIB_STATE_INIT;
2273 break;
2274
2275 default:
2276 break;
2277 }
2278 IWN_UNLOCK(sc);
2279 IEEE80211_LOCK(ic);
2280 if (error != 0)
2281 return error;
2282 return ivp->iv_newstate(vap, nstate, arg);
2283}
2284
2285static void
2286iwn_calib_timeout(void *arg)
2287{
2288 struct iwn_softc *sc = arg;
2289
2290 IWN_LOCK_ASSERT(sc);
2291
2292 /* Force automatic TX power calibration every 60 secs (120 half-second callouts). */
2293 if (++sc->calib_cnt >= 120) {
2294 uint32_t flags = 0;
2295
2296 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2297 "sending request for statistics");
2298 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2299 sizeof flags, 1);
2300 sc->calib_cnt = 0;
2301 }
2302 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2303 sc);
2304}
2305
2306/*
2307 * Process an RX_PHY firmware notification. This is usually immediately
2308 * followed by an MPDU_RX_DONE notification.
2309 */
2310static void
2311iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2312 struct iwn_rx_data *data)
2313{
2314 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2315
2316 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2317 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2318
2319 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
2320 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2321 sc->last_rx_valid = 1;
2322}
2323
2324/*
2325 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2326 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2327 */
2328static void
2329iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2330 struct iwn_rx_data *data)
2331{
2332 struct iwn_ops *ops = &sc->ops;
2333 struct ifnet *ifp = sc->sc_ifp;
2334 struct ieee80211com *ic = ifp->if_l2com;
2335 struct iwn_rx_ring *ring = &sc->rxq;
2336 struct ieee80211_frame *wh;
2337 struct ieee80211_node *ni;
2338 struct mbuf *m, *m1;
2339 struct iwn_rx_stat *stat;
2340 caddr_t head;
2341 bus_addr_t paddr;
2342 uint32_t flags;
2343 int error, len, rssi, nf;
2344
2345 if (desc->type == IWN_MPDU_RX_DONE) {
2346 /* Check for prior RX_PHY notification. */
2347 if (!sc->last_rx_valid) {
2348 DPRINTF(sc, IWN_DEBUG_ANY,
2349 "%s: missing RX_PHY\n", __func__);
2350 return;
2351 }
2352 stat = &sc->last_rx_stat;
2353 } else
2354 stat = (struct iwn_rx_stat *)(desc + 1);
2355
2356 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2357
2358 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2359 device_printf(sc->sc_dev,
2360 "%s: invalid RX statistic header, len %d\n", __func__,
2361 stat->cfg_phy_len);
2362 return;
2363 }
2364 if (desc->type == IWN_MPDU_RX_DONE) {
2365 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2366 head = (caddr_t)(mpdu + 1);
2367 len = le16toh(mpdu->len);
2368 } else {
2369 head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2370 len = le16toh(stat->len);
2371 }
2372
2373 flags = le32toh(*(uint32_t *)(head + len));
2374
2375 /* Discard frames with a bad FCS early. */
2376 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2377 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
2378 __func__, flags);
2379 ifp->if_ierrors++;
2380 return;
2381 }
2382 /* Discard frames that are too short. */
2383 if (len < sizeof (*wh)) {
2384 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2385 __func__, len);
2386 ifp->if_ierrors++;
2387 return;
2388 }
2389
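 /*
 * Replenish this RX slot with a fresh jumbo cluster before passing
 * the received frame up the stack; if the new cluster cannot be
 * DMA-mapped, the old one is reloaded and the frame is dropped as
 * an input error.
 */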
2390 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
2391 if (m1 == NULL) {
2392 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2393 __func__);
2394 ifp->if_ierrors++;
2395 return;
2396 }
2397 bus_dmamap_unload(ring->data_dmat, data->map);
2398
2399 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
2400 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2401 if (error != 0 && error != EFBIG) {
2402 device_printf(sc->sc_dev,
2403 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2404 m_freem(m1);
2405
2406 /* Try to reload the old mbuf. */
2407 error = bus_dmamap_load(ring->data_dmat, data->map,
2408 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
2409 &paddr, BUS_DMA_NOWAIT);
2410 if (error != 0 && error != EFBIG) {
2411 panic("%s: could not load old RX mbuf", __func__);
2412 }
2413 /* Physical address may have changed. */
2414 ring->desc[ring->cur] = htole32(paddr >> 8);
2415 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
2416 BUS_DMASYNC_PREWRITE);
2417 ifp->if_ierrors++;
2418 return;
2419 }
2420
2421 m = data->m;
2422 data->m = m1;
2423 /* Update RX descriptor. */
2424 ring->desc[ring->cur] = htole32(paddr >> 8);
2425 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2426 BUS_DMASYNC_PREWRITE);
2427
2428 /* Finalize mbuf. */
2429 m->m_pkthdr.rcvif = ifp;
2430 m->m_data = head;
2431 m->m_pkthdr.len = m->m_len = len;
2432
2433 /* Grab a reference to the source node. */
2434 wh = mtod(m, struct ieee80211_frame *);
2435 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2436 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2437 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2438
2439 rssi = ops->get_rssi(sc, stat);
2440
2441 if (ieee80211_radiotap_active(ic)) {
2442 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2443
2444 tap->wr_flags = 0;
2445 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2446 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2447 tap->wr_dbm_antsignal = (int8_t)rssi;
2448 tap->wr_dbm_antnoise = (int8_t)nf;
2449 tap->wr_tsft = stat->tstamp;
2450 switch (stat->rate) {
2451 /* CCK rates. */
2452 case 10: tap->wr_rate = 2; break;
2453 case 20: tap->wr_rate = 4; break;
2454 case 55: tap->wr_rate = 11; break;
2455 case 110: tap->wr_rate = 22; break;
2456 /* OFDM rates. */
2457 case 0xd: tap->wr_rate = 12; break;
2458 case 0xf: tap->wr_rate = 18; break;
2459 case 0x5: tap->wr_rate = 24; break;
2460 case 0x7: tap->wr_rate = 36; break;
2461 case 0x9: tap->wr_rate = 48; break;
2462 case 0xb: tap->wr_rate = 72; break;
2463 case 0x1: tap->wr_rate = 96; break;
2464 case 0x3: tap->wr_rate = 108; break;
2465 /* Unknown rate: should not happen. */
2466 default: tap->wr_rate = 0;
2467 }
2468 }
2469
2470 IWN_UNLOCK(sc);
2471
2472 /* Send the frame to the 802.11 layer. */
2473 if (ni != NULL) {
2474 if (ni->ni_flags & IEEE80211_NODE_HT)
2475 m->m_flags |= M_AMPDU;
2476 (void)ieee80211_input(ni, m, rssi - nf, nf);
2477 /* Node is no longer needed. */
2478 ieee80211_free_node(ni);
2479 } else
2480 (void)ieee80211_input_all(ic, m, rssi - nf, nf);
2481
2482 IWN_LOCK(sc);
2483}
2484
2485/* Process an incoming Compressed BlockAck. */
2486static void
2487iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2488 struct iwn_rx_data *data)
2489{
2490 struct iwn_ops *ops = &sc->ops;
2491 struct ifnet *ifp = sc->sc_ifp;
2492 struct iwn_node *wn;
2493 struct ieee80211_node *ni;
2494 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2495 struct iwn_tx_ring *txq;
2496 struct iwn_tx_data *txdata;
2497 struct ieee80211_tx_ampdu *tap;
2498 struct mbuf *m;
2499 uint64_t bitmap;
2500 uint16_t ssn;
2501 uint8_t tid;
2502 int ackfailcnt = 0, i, lastidx, qid, *res, shift;
2503
2504 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2505
2506 qid = le16toh(ba->qid);
2507 txq = &sc->txq[ba->qid];
2508 tap = sc->qid2tap[ba->qid];
2509 tid = tap->txa_tid;
2510 wn = (void *)tap->txa_ni;
2511
2512 res = NULL;
2513 ssn = 0;
2514 if (!IEEE80211_AMPDU_RUNNING(tap)) {
2515 res = tap->txa_private;
2516 ssn = tap->txa_start & 0xfff;
2517 }
2518
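 /*
 * The BlockAck's starting sequence number (ssn) marks the new start
 * of the transmit window, so every frame queued before it can be
 * completed and its mbuf reclaimed here.
 */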
2519 for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
2520 txdata = &txq->data[txq->read];
2521
2522 /* Unmap and free mbuf. */
2523 bus_dmamap_sync(txq->data_dmat, txdata->map,
2524 BUS_DMASYNC_POSTWRITE);
2525 bus_dmamap_unload(txq->data_dmat, txdata->map);
2526 m = txdata->m, txdata->m = NULL;
2527 ni = txdata->ni, txdata->ni = NULL;
2528
2529 KASSERT(ni != NULL, ("no node"));
2530 KASSERT(m != NULL, ("no mbuf"));
2531
2532 if (m->m_flags & M_TXCB)
2533 ieee80211_process_callback(ni, m, 1);
2534
2535 m_freem(m);
2536 ieee80211_free_node(ni);
2537
2538 txq->queued--;
2539 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
2540 }
2541
2542 if (txq->queued == 0 && res != NULL) {
2543 iwn_nic_lock(sc);
2544 ops->ampdu_tx_stop(sc, qid, tid, ssn);
2545 iwn_nic_unlock(sc);
2546 sc->qid2tap[qid] = NULL;
2547 free(res, M_DEVBUF);
2548 return;
2549 }
2550
2551 if (wn->agg[tid].bitmap == 0)
2552 return;
2553
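 /*
 * Align the saved TX bitmap with the BlockAck bitmap: ba->seq appears
 * to hold an 802.11 sequence-control value (sequence number in bits
 * 4..15), and the difference from the recorded start index gives the
 * shift, modulo the 256-entry sequence window.
 */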
2554 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
2555 if (shift < 0)
2556 shift += 0x100;
2557
2558 if (wn->agg[tid].nframes > (64 - shift))
2559 return;
2560
2561 ni = tap->txa_ni;
2562 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
2563 for (i = 0; bitmap; i++) {
2564 if ((bitmap & 1) == 0) {
2565 ifp->if_oerrors++;
2566 ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2567 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2568 } else {
2569 ifp->if_opackets++;
2570 ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
2571 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2572 }
2573 bitmap >>= 1;
2574 }
2575}
2576
2577/*
2578 * Process a CALIBRATION_RESULT notification sent by the initialization
2579 * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2580 */
2581static void
2582iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2583 struct iwn_rx_data *data)
2584{
2585 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2586 int len, idx = -1;
2587
2588 /* Runtime firmware should not send such a notification. */
2589 if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2590 return;
2591
2592 len = (le32toh(desc->len) & 0x3fff) - 4;
2593 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2594
2595 switch (calib->code) {
2596 case IWN5000_PHY_CALIB_DC:
2597 if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 &&
2598 (sc->hw_type == IWN_HW_REV_TYPE_5150 ||
2599 sc->hw_type >= IWN_HW_REV_TYPE_6000) &&
2600 sc->hw_type != IWN_HW_REV_TYPE_6050)
2601 idx = 0;
2602 break;
2603 case IWN5000_PHY_CALIB_LO:
2604 idx = 1;
2605 break;
2606 case IWN5000_PHY_CALIB_TX_IQ:
2607 idx = 2;
2608 break;
2609 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2610 if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2611 sc->hw_type != IWN_HW_REV_TYPE_5150)
2612 idx = 3;
2613 break;
2614 case IWN5000_PHY_CALIB_BASE_BAND:
2615 idx = 4;
2616 break;
2617 }
2618 if (idx == -1) /* Ignore other results. */
2619 return;
2620
2621 /* Save calibration result. */
2622 if (sc->calibcmd[idx].buf != NULL)
2623 free(sc->calibcmd[idx].buf, M_DEVBUF);
2624 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2625 if (sc->calibcmd[idx].buf == NULL) {
2626 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2627 "not enough memory for calibration result %d\n",
2628 calib->code);
2629 return;
2630 }
2631 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
2632 "saving calibration result code=%d len=%d\n", calib->code, len);
2633 sc->calibcmd[idx].len = len;
2634 memcpy(sc->calibcmd[idx].buf, calib, len);
2635}
2636
2637/*
2638 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2639 * The latter is sent by the firmware after each received beacon.
2640 */
2641static void
2642iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2643 struct iwn_rx_data *data)
2644{
2645 struct iwn_ops *ops = &sc->ops;
2646 struct ifnet *ifp = sc->sc_ifp;
2647 struct ieee80211com *ic = ifp->if_l2com;
2648 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2649 struct iwn_calib_state *calib = &sc->calib;
2650 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2651 int temp;
2652
2653 /* Ignore statistics received during a scan. */
2654 if (vap->iv_state != IEEE80211_S_RUN ||
2655 (ic->ic_flags & IEEE80211_F_SCAN))
2656 return;
2657
2658 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2659
2660 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n",
2661 __func__, desc->type);
2662 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
2663
2664 /* Test if temperature has changed. */
2665 if (stats->general.temp != sc->rawtemp) {
2666 /* Convert "raw" temperature to degC. */
2667 sc->rawtemp = stats->general.temp;
2668 temp = ops->get_temperature(sc);
2669 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2670 __func__, temp);
2671
2672 /* Update TX power if need be (4965AGN only). */
2673 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2674 iwn4965_power_calibration(sc, temp);
2675 }
2676
2677 if (desc->type != IWN_BEACON_STATISTICS)
2678 return; /* Reply to a statistics request. */
2679
2680 sc->noise = iwn_get_noise(&stats->rx.general);
2681 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2682
2683 /* Test that RSSI and noise are present in stats report. */
2684 if (le32toh(stats->rx.general.flags) != 1) {
2685 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2686 "received statistics without RSSI");
2687 return;
2688 }
2689
2690 if (calib->state == IWN_CALIB_STATE_ASSOC)
2691 iwn_collect_noise(sc, &stats->rx.general);
2692 else if (calib->state == IWN_CALIB_STATE_RUN)
2693 iwn_tune_sensitivity(sc, &stats->rx);
2694}
2695
2696/*
2697 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
2698 * and 5000 adapters have different, incompatible TX status formats.
2699 */
2700static void
2701iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2702 struct iwn_rx_data *data)
2703{
2704 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2705 struct iwn_tx_ring *ring;
2706 int qid;
2707
2708 qid = desc->qid & 0xf;
2709 ring = &sc->txq[qid];
2710
2711 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2712 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2713 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2714 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2715 le32toh(stat->status));
2716
2717 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2718 if (qid >= sc->firstaggqueue) {
2719 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2720 &stat->status);
2721 } else {
2722 iwn_tx_done(sc, desc, stat->ackfailcnt,
2723 le32toh(stat->status) & 0xff);
2724 }
2725}
2726
2727static void
2728iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2729 struct iwn_rx_data *data)
2730{
2731 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2732 struct iwn_tx_ring *ring;
2733 int qid;
2734
2735 qid = desc->qid & 0xf;
2736 ring = &sc->txq[qid];
2737
2738 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2739 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2740 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2741 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2742 le32toh(stat->status));
2743
2744#ifdef notyet
2745 /* Reset TX scheduler slot. */
2746 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2747#endif
2748
2749 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2750 if (qid >= sc->firstaggqueue) {
2751 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
2752 &stat->status);
2753 } else {
2754 iwn_tx_done(sc, desc, stat->ackfailcnt,
2755 le16toh(stat->status) & 0xff);
2756 }
2757}
2758
2759/*
2760 * Adapter-independent backend for TX_DONE firmware notifications.
2761 */
2762static void
2763iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2764 uint8_t status)
2765{
2766 struct ifnet *ifp = sc->sc_ifp;
2767 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2768 struct iwn_tx_data *data = &ring->data[desc->idx];
2769 struct mbuf *m;
2770 struct ieee80211_node *ni;
2771 struct ieee80211vap *vap;
2772
2773 KASSERT(data->ni != NULL, ("no node"));
2774
2775 /* Unmap and free mbuf. */
2776 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2777 bus_dmamap_unload(ring->data_dmat, data->map);
2778 m = data->m, data->m = NULL;
2779 ni = data->ni, data->ni = NULL;
2780 vap = ni->ni_vap;
2781
2782 if (m->m_flags & M_TXCB) {
2783 /*
2784 * Channels marked for "radar" require traffic to be received
2785 * to unlock before we can transmit. Until traffic is seen
2786 * any attempt to transmit is returned immediately with status
2787 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily
2788 * happen on first authenticate after scanning. To workaround
2789 * this we ignore a failure of this sort in AUTH state so the
2790 * 802.11 layer will fall back to using a timeout to wait for
2791 * the AUTH reply. This allows the firmware time to see
2792 * traffic so a subsequent retry of AUTH succeeds. It's
2793 * unclear why the firmware does not maintain state for
2794 * channels recently visited as this would allow immediate
2795 * use of the channel after a scan (where we see traffic).
2796 */
2797 if (status == IWN_TX_FAIL_TX_LOCKED &&
2798 ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2799 ieee80211_process_callback(ni, m, 0);
2800 else
2801 ieee80211_process_callback(ni, m,
2802 (status & IWN_TX_FAIL) != 0);
2803 }
2804
2805 /*
2806 * Update rate control statistics for the node.
2807 */
2808 if (status & IWN_TX_FAIL) {
2809 ifp->if_oerrors++;
2810 ieee80211_ratectl_tx_complete(vap, ni,
2811 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2812 } else {
2813 ifp->if_opackets++;
2814 ieee80211_ratectl_tx_complete(vap, ni,
2815 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2816 }
2817 m_freem(m);
2818 ieee80211_free_node(ni);
2819
2820 sc->sc_tx_timer = 0;
2821 if (--ring->queued < IWN_TX_RING_LOMARK) {
2822 sc->qfullmsk &= ~(1 << ring->qid);
2823 if (sc->qfullmsk == 0 &&
2824 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2825 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2826 iwn_start_locked(ifp);
2827 }
2828 }
2829}
2830
2831/*
2832 * Process a "command done" firmware notification. This is where we wake up
2833 * processes waiting for a synchronous command completion.
2834 */
2835static void
2836iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2837{
2838 struct iwn_tx_ring *ring = &sc->txq[4];
2839 struct iwn_tx_data *data;
2840
2841 if ((desc->qid & 0xf) != 4)
2842 return; /* Not a command ack. */
2843
2844 data = &ring->data[desc->idx];
2845
2846 /* If the command was mapped in an mbuf, free it. */
2847 if (data->m != NULL) {
2848 bus_dmamap_sync(ring->data_dmat, data->map,
2849 BUS_DMASYNC_POSTWRITE);
2850 bus_dmamap_unload(ring->data_dmat, data->map);
2851 m_freem(data->m);
2852 data->m = NULL;
2853 }
2854 wakeup(&ring->desc[desc->idx]);
2855}
2856
2857static void
2858iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
2859 void *stat)
2860{
2861 struct iwn_ops *ops = &sc->ops;
2862 struct ifnet *ifp = sc->sc_ifp;
2863 struct iwn_tx_ring *ring = &sc->txq[qid];
2864 struct iwn_tx_data *data;
2865 struct mbuf *m;
2866 struct iwn_node *wn;
2867 struct ieee80211_node *ni;
2868 struct ieee80211_tx_ampdu *tap;
2869 uint64_t bitmap;
2870 uint32_t *status = stat;
2871 uint16_t *aggstatus = stat;
2872 uint16_t ssn;
2873 uint8_t tid;
2874 int bit, i, lastidx, *res, seqno, shift, start;
2875
2876#ifdef NOT_YET
2877 if (nframes == 1) {
2878 if ((*status & 0xff) != 1 && (*status & 0xff) != 2)
2879 printf("ieee80211_send_bar()\n");
2880 }
2881#endif
2882
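 /*
 * Build a bitmap, relative to the starting index, of the frames the
 * firmware reports as transmitted in this aggregate; status entries
 * come as pairs of 16-bit words (status, then sequence/index), and
 * indices wrap modulo 256.  The bitmap is later matched against the
 * compressed BlockAck bitmap in iwn_rx_compressed_ba().
 */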
2883 bitmap = 0;
2884 start = idx;
2885 for (i = 0; i < nframes; i++) {
2886 if (le16toh(aggstatus[i * 2]) & 0xc)
2887 continue;
2888
2889 idx = le16toh(aggstatus[2*i + 1]) & 0xff;
2890 bit = idx - start;
2891 shift = 0;
2892 if (bit >= 64) {
2893 shift = 0x100 - idx + start;
2894 bit = 0;
2895 start = idx;
2896 } else if (bit <= -64)
2897 bit = 0x100 - start + idx;
2898 else if (bit < 0) {
2899 shift = start - idx;
2900 start = idx;
2901 bit = 0;
2902 }
2903 bitmap = bitmap << shift;
2904 bitmap |= 1ULL << bit;
2905 }
2906 tap = sc->qid2tap[qid];
2907 tid = tap->txa_tid;
2908 wn = (void *)tap->txa_ni;
2909 wn->agg[tid].bitmap = bitmap;
2910 wn->agg[tid].startidx = start;
2911 wn->agg[tid].nframes = nframes;
2912
2913 res = NULL;
2914 ssn = 0;
2915 if (!IEEE80211_AMPDU_RUNNING(tap)) {
2916 res = tap->txa_private;
2917 ssn = tap->txa_start & 0xfff;
2918 }
2919
2920 seqno = le32toh(*(status + nframes)) & 0xfff;
2921 for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
2922 data = &ring->data[ring->read];
2923
2924 /* Unmap and free mbuf. */
2925 bus_dmamap_sync(ring->data_dmat, data->map,
2926 BUS_DMASYNC_POSTWRITE);
2927 bus_dmamap_unload(ring->data_dmat, data->map);
2928 m = data->m, data->m = NULL;
2929 ni = data->ni, data->ni = NULL;
2930
2931 KASSERT(ni != NULL, ("no node"));
2932 KASSERT(m != NULL, ("no mbuf"));
2933
2934 if (m->m_flags & M_TXCB)
2935 ieee80211_process_callback(ni, m, 1);
2936
2937 m_freem(m);
2938 ieee80211_free_node(ni);
2939
2940 ring->queued--;
2941 ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
2942 }
2943
2944 if (ring->queued == 0 && res != NULL) {
2945 iwn_nic_lock(sc);
2946 ops->ampdu_tx_stop(sc, qid, tid, ssn);
2947 iwn_nic_unlock(sc);
2948 sc->qid2tap[qid] = NULL;
2949 free(res, M_DEVBUF);
2950 return;
2951 }
2952
2953 sc->sc_tx_timer = 0;
2954 if (ring->queued < IWN_TX_RING_LOMARK) {
2955 sc->qfullmsk &= ~(1 << ring->qid);
2956 if (sc->qfullmsk == 0 &&
2957 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2958 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2959 iwn_start_locked(ifp);
2960 }
2961 }
2962}
2963
2964/*
2965 * Process an INT_FH_RX or INT_SW_RX interrupt.
2966 */
2967static void
2968iwn_notif_intr(struct iwn_softc *sc)
2969{
2970 struct iwn_ops *ops = &sc->ops;
2971 struct ifnet *ifp = sc->sc_ifp;
2972 struct ieee80211com *ic = ifp->if_l2com;
2973 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2974 uint16_t hw;
2975
2976 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2977 BUS_DMASYNC_POSTREAD);
2978
2979 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2980 while (sc->rxq.cur != hw) {
2981 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2982 struct iwn_rx_desc *desc;
2983
2984 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2985 BUS_DMASYNC_POSTREAD);
2986 desc = mtod(data->m, struct iwn_rx_desc *);
2987
2988 DPRINTF(sc, IWN_DEBUG_RECV,
2989 "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2990 __func__, desc->qid & 0xf, desc->idx, desc->flags,
2991 desc->type, iwn_intr_str(desc->type),
2992 le16toh(desc->len));
2993
2994 if (!(desc->qid & 0x80)) /* Reply to a command. */
2995 iwn_cmd_done(sc, desc);
2996
2997 switch (desc->type) {
2998 case IWN_RX_PHY:
2999 iwn_rx_phy(sc, desc, data);
3000 break;
3001
3002 case IWN_RX_DONE: /* 4965AGN only. */
3003 case IWN_MPDU_RX_DONE:
3004 /* An 802.11 frame has been received. */
3005 iwn_rx_done(sc, desc, data);
3006 break;
3007
3008 case IWN_RX_COMPRESSED_BA:
3009 /* A Compressed BlockAck has been received. */
3010 iwn_rx_compressed_ba(sc, desc, data);
3011 break;
3012
3013 case IWN_TX_DONE:
3014 /* An 802.11 frame has been transmitted. */
3015 ops->tx_done(sc, desc, data);
3016 break;
3017
3018 case IWN_RX_STATISTICS:
3019 case IWN_BEACON_STATISTICS:
3020 iwn_rx_statistics(sc, desc, data);
3021 break;
3022
3023 case IWN_BEACON_MISSED:
3024 {
3025 struct iwn_beacon_missed *miss =
3026 (struct iwn_beacon_missed *)(desc + 1);
3027 int misses;
3028
3029 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3030 BUS_DMASYNC_POSTREAD);
3031 misses = le32toh(miss->consecutive);
3032
3033 DPRINTF(sc, IWN_DEBUG_STATE,
3034 "%s: beacons missed %d/%d\n", __func__,
3035 misses, le32toh(miss->total));
3036 /*
3037 * If more than 5 consecutive beacons are missed,
3038 * reinitialize the sensitivity state machine.
3039 */
3040 if (vap->iv_state == IEEE80211_S_RUN &&
3041 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
3042 if (misses > 5)
3043 (void)iwn_init_sensitivity(sc);
3044 if (misses >= vap->iv_bmissthreshold) {
3045 IWN_UNLOCK(sc);
3046 ieee80211_beacon_miss(ic);
3047 IWN_LOCK(sc);
3048 }
3049 }
3050 break;
3051 }
3052 case IWN_UC_READY:
3053 {
3054 struct iwn_ucode_info *uc =
3055 (struct iwn_ucode_info *)(desc + 1);
3056
3057 /* The microcontroller is ready. */
3058 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3059 BUS_DMASYNC_POSTREAD);
3060 DPRINTF(sc, IWN_DEBUG_RESET,
3061 "microcode alive notification version=%d.%d "
3062 "subtype=%x alive=%x\n", uc->major, uc->minor,
3063 uc->subtype, le32toh(uc->valid));
3064
3065 if (le32toh(uc->valid) != 1) {
3066 device_printf(sc->sc_dev,
3067 "microcontroller initialization failed");
3068 break;
3069 }
3070 if (uc->subtype == IWN_UCODE_INIT) {
3071 /* Save microcontroller report. */
3072 memcpy(&sc->ucode_info, uc, sizeof (*uc));
3073 }
3074 /* Save the address of the error log in SRAM. */
3075 sc->errptr = le32toh(uc->errptr);
3076 break;
3077 }
3078 case IWN_STATE_CHANGED:
3079 {
3080 uint32_t *status = (uint32_t *)(desc + 1);
3081
3082 /*
3083 * State change allows hardware switch change to be
3084 * noted. However, we handle this in iwn_intr as we
3085 * get both the enable/disable interrupts.
3086 */
3087 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3088 BUS_DMASYNC_POSTREAD);
3089 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
3090 le32toh(*status));
3091 break;
3092 }
3093 case IWN_START_SCAN:
3094 {
3095 struct iwn_start_scan *scan =
3096 (struct iwn_start_scan *)(desc + 1);
3097
3098 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3099 BUS_DMASYNC_POSTREAD);
3100 DPRINTF(sc, IWN_DEBUG_ANY,
3101 "%s: scanning channel %d status %x\n",
3102 __func__, scan->chan, le32toh(scan->status));
3103 break;
3104 }
3105 case IWN_STOP_SCAN:
3106 {
3107 struct iwn_stop_scan *scan =
3108 (struct iwn_stop_scan *)(desc + 1);
3109
3110 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
3111 BUS_DMASYNC_POSTREAD);
3112 DPRINTF(sc, IWN_DEBUG_STATE,
3113 "scan finished nchan=%d status=%d chan=%d\n",
3114 scan->nchan, scan->status, scan->chan);
3115
3116 IWN_UNLOCK(sc);
3117 ieee80211_scan_next(vap);
3118 IWN_LOCK(sc);
3119 break;
3120 }
3121 case IWN5000_CALIBRATION_RESULT:
3122 iwn5000_rx_calib_results(sc, desc, data);
3123 break;
3124
3125 case IWN5000_CALIBRATION_DONE:
3126 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
3127 wakeup(sc);
3128 break;
3129 }
3130
3131 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
3132 }
3133
3134 /* Tell the firmware what we have processed. */
3135 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
3136 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
3137}
3138
3139/*
3140 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
3141 * from power-down sleep mode.
3142 */
3143static void
3144iwn_wakeup_intr(struct iwn_softc *sc)
3145{
3146 int qid;
3147
3148 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
3149 __func__);
3150
3151 /* Wakeup RX and TX rings. */
3152 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
3153 for (qid = 0; qid < sc->ntxqs; qid++) {
3154 struct iwn_tx_ring *ring = &sc->txq[qid];
3155 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
3156 }
3157}
3158
3159static void
3160iwn_rftoggle_intr(struct iwn_softc *sc)
3161{
3162 struct ifnet *ifp = sc->sc_ifp;
3163 struct ieee80211com *ic = ifp->if_l2com;
3164 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
3165
3166 IWN_LOCK_ASSERT(sc);
3167
3168 device_printf(sc->sc_dev, "RF switch: radio %s\n",
3169 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
3170 if (tmp & IWN_GP_CNTRL_RFKILL)
3171 ieee80211_runtask(ic, &sc->sc_radioon_task);
3172 else
3173 ieee80211_runtask(ic, &sc->sc_radiooff_task);
3174}
3175
3176/*
3177 * Dump the error log of the firmware when a firmware panic occurs. Although
3178 * we can't debug the firmware because it is neither open source nor free, it
3179 * can help us to identify certain classes of problems.
3180 */
3181static void
3182iwn_fatal_intr(struct iwn_softc *sc)
3183{
3184 struct iwn_fw_dump dump;
3185 int i;
3186
3187 IWN_LOCK_ASSERT(sc);
3188
3189 /* Force a complete recalibration on next init. */
3190 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
3191
3192 /* Check that the error log address is valid. */
3193 if (sc->errptr < IWN_FW_DATA_BASE ||
3194 sc->errptr + sizeof (dump) >
3195 IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
3196 printf("%s: bad firmware error log address 0x%08x\n", __func__,
3197 sc->errptr);
3198 return;
3199 }
3200 if (iwn_nic_lock(sc) != 0) {
3201 printf("%s: could not read firmware error log\n", __func__);
3202 return;
3203 }
3204 /* Read firmware error log from SRAM. */
3205 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
3206 sizeof (dump) / sizeof (uint32_t));
3207 iwn_nic_unlock(sc);
3208
3209 if (dump.valid == 0) {
3210 printf("%s: firmware error log is empty\n", __func__);
3211 return;
3212 }
3213 printf("firmware error log:\n");
3214 printf(" error type = \"%s\" (0x%08X)\n",
3215 (dump.id < nitems(iwn_fw_errmsg)) ?
3216 iwn_fw_errmsg[dump.id] : "UNKNOWN",
3217 dump.id);
3218 printf(" program counter = 0x%08X\n", dump.pc);
3219 printf(" source line = 0x%08X\n", dump.src_line);
3220 printf(" error data = 0x%08X%08X\n",
3221 dump.error_data[0], dump.error_data[1]);
3222 printf(" branch link = 0x%08X%08X\n",
3223 dump.branch_link[0], dump.branch_link[1]);
3224 printf(" interrupt link = 0x%08X%08X\n",
3225 dump.interrupt_link[0], dump.interrupt_link[1]);
3226 printf(" time = %u\n", dump.time[0]);
3227
3228 /* Dump driver status (TX and RX rings) while we're here. */
3229 printf("driver status:\n");
3230 for (i = 0; i < sc->ntxqs; i++) {
3231 struct iwn_tx_ring *ring = &sc->txq[i];
3232 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
3233 i, ring->qid, ring->cur, ring->queued);
3234 }
3235 printf(" rx ring: cur=%d\n", sc->rxq.cur);
3236}
3237
3238static void
3239iwn_intr(void *arg)
3240{
3241 struct iwn_softc *sc = arg;
3242 struct ifnet *ifp = sc->sc_ifp;
3243 uint32_t r1, r2, tmp;
3244
3245 IWN_LOCK(sc);
3246
3247 /* Disable interrupts. */
3248 IWN_WRITE(sc, IWN_INT_MASK, 0);
3249
3250 /* Read interrupts from ICT (fast) or from registers (slow). */
3251 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
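 /*
 * Drain the in-memory interrupt cause table (ICT): every non-zero
 * entry holds a compressed copy of the interrupt bits, which are
 * folded back into the layout of the IWN_INT register below.
 */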
3252 tmp = 0;
3253 while (sc->ict[sc->ict_cur] != 0) {
3254 tmp |= sc->ict[sc->ict_cur];
3255 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
3256 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
3257 }
3258 tmp = le32toh(tmp);
3259 if (tmp == 0xffffffff) /* Shouldn't happen. */
3260 tmp = 0;
3261 else if (tmp & 0xc0000) /* Workaround a HW bug. */
3262 tmp |= 0x8000;
3263 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
3264 r2 = 0; /* Unused. */
3265 } else {
3266 r1 = IWN_READ(sc, IWN_INT);
3267 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
3268 return; /* Hardware gone! */
3269 r2 = IWN_READ(sc, IWN_FH_INT);
3270 }
3271
3272 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
3273
3274 if (r1 == 0 && r2 == 0)
3275 goto done; /* Interrupt not for us. */
3276
3277 /* Acknowledge interrupts. */
3278 IWN_WRITE(sc, IWN_INT, r1);
3279 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
3280 IWN_WRITE(sc, IWN_FH_INT, r2);
3281
3282 if (r1 & IWN_INT_RF_TOGGLED) {
3283 iwn_rftoggle_intr(sc);
3284 goto done;
3285 }
3286 if (r1 & IWN_INT_CT_REACHED) {
3287 device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
3288 __func__);
3289 }
3290 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
3291 device_printf(sc->sc_dev, "%s: fatal firmware error\n",
3292 __func__);
3293 /* Dump firmware error log and stop. */
3294 iwn_fatal_intr(sc);
3295 ifp->if_flags &= ~IFF_UP;
3296 iwn_stop_locked(sc);
3297 goto done;
3298 }
3299 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
3300 (r2 & IWN_FH_INT_RX)) {
3301 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
3302 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
3303 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
3304 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3305 IWN_INT_PERIODIC_DIS);
3306 iwn_notif_intr(sc);
3307 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
3308 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
3309 IWN_INT_PERIODIC_ENA);
3310 }
3311 } else
3312 iwn_notif_intr(sc);
3313 }
3314
3315 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
3316 if (sc->sc_flags & IWN_FLAG_USE_ICT)
3317 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
3318 wakeup(sc); /* FH DMA transfer completed. */
3319 }
3320
3321 if (r1 & IWN_INT_ALIVE)
3322 wakeup(sc); /* Firmware is alive. */
3323
3324 if (r1 & IWN_INT_WAKEUP)
3325 iwn_wakeup_intr(sc);
3326
3327done:
3328 /* Re-enable interrupts. */
3329 if (ifp->if_flags & IFF_UP)
3330 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
3331
3332 IWN_UNLOCK(sc);
3333}
3334
3335/*
3336 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
3337 * 5000 adapters use a slightly different format).
3338 */
3339static void
3340iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3341 uint16_t len)
3342{
3343 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
3344
3345 *w = htole16(len + 8);
3346 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3347 BUS_DMASYNC_PREWRITE);
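 /*
 * The byte-count array is laid out with an extra copy of the first
 * IWN_SCHED_WINSZ entries past the end of the ring, presumably so
 * the scheduler can read a full window without wrapping; mirror the
 * update into that copy as well.
 */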
3348 if (idx < IWN_SCHED_WINSZ) {
3349 *(w + IWN_TX_RING_COUNT) = *w;
3350 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3351 BUS_DMASYNC_PREWRITE);
3352 }
3353}
3354
3355static void
3356iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
3357 uint16_t len)
3358{
3359 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3360
3361 *w = htole16(id << 12 | (len + 8));
3362 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3363 BUS_DMASYNC_PREWRITE);
3364 if (idx < IWN_SCHED_WINSZ) {
3365 *(w + IWN_TX_RING_COUNT) = *w;
3366 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3367 BUS_DMASYNC_PREWRITE);
3368 }
3369}
3370
3371#ifdef notyet
3372static void
3373iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
3374{
3375 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
3376
3377 *w = (*w & htole16(0xf000)) | htole16(1);
3378 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3379 BUS_DMASYNC_PREWRITE);
3380 if (idx < IWN_SCHED_WINSZ) {
3381 *(w + IWN_TX_RING_COUNT) = *w;
3382 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3383 BUS_DMASYNC_PREWRITE);
3384 }
3385}
3386#endif
3387
3388static int
3389iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
3390{
3391 struct iwn_ops *ops = &sc->ops;
3392 const struct ieee80211_txparam *tp;
3393 struct ieee80211vap *vap = ni->ni_vap;
3394 struct ieee80211com *ic = ni->ni_ic;
3395 struct iwn_node *wn = (void *)ni;
3396 struct iwn_tx_ring *ring;
3397 struct iwn_tx_desc *desc;
3398 struct iwn_tx_data *data;
3399 struct iwn_tx_cmd *cmd;
3400 struct iwn_cmd_data *tx;
3401 struct ieee80211_frame *wh;
3402 struct ieee80211_key *k = NULL;
3403 struct mbuf *m1;
3404 uint32_t flags;
3405 uint16_t qos;
3406 u_int hdrlen;
3407 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3408 uint8_t tid, ridx, txant, type;
3409 int ac, i, totlen, error, pad, nsegs = 0, rate;
3410
3411 IWN_LOCK_ASSERT(sc);
3412
3413 wh = mtod(m, struct ieee80211_frame *);
3414 hdrlen = ieee80211_anyhdrsize(wh);
3415 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3416
3417 /* Select EDCA Access Category and TX ring for this frame. */
3418 if (IEEE80211_QOS_HAS_SEQ(wh)) {
3419 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
3420 tid = qos & IEEE80211_QOS_TID;
3421 } else {
3422 qos = 0;
3423 tid = 0;
3424 }
3425 ac = M_WME_GETAC(m);
3426 if (m->m_flags & M_AMPDU_MPDU) {
3427 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
3428
3429 if (!IEEE80211_AMPDU_RUNNING(tap)) {
3430 m_freem(m);
3431 return EINVAL;
3432 }
3433
3434 ac = *(int *)tap->txa_private;
3435 *(uint16_t *)wh->i_seq =
3436 htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT);
3437 ni->ni_txseqs[tid]++;
3438 }
3439 ring = &sc->txq[ac];
3440 desc = &ring->desc[ring->cur];
3441 data = &ring->data[ring->cur];
3442
3443 /* Choose a TX rate index. */
3444 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
3445 if (type == IEEE80211_FC0_TYPE_MGT)
3446 rate = tp->mgmtrate;
3447 else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
3448 rate = tp->mcastrate;
3449 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
3450 rate = tp->ucastrate;
3451 else {
3452 /* XXX pass pktlen */
3453 (void) ieee80211_ratectl_rate(ni, NULL, 0);
3454 rate = ni->ni_txrate;
3455 }
3456 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
3457 rate & IEEE80211_RATE_VAL);
3458
3459 /* Encrypt the frame if need be. */
3460 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
3461 /* Retrieve key for TX. */
3462 k = ieee80211_crypto_encap(ni, m);
3463 if (k == NULL) {
3464 m_freem(m);
3465 return ENOBUFS;
3466 }
3467 /* 802.11 header may have moved. */
3468 wh = mtod(m, struct ieee80211_frame *);
3469 }
3470 totlen = m->m_pkthdr.len;
3471
3472 if (ieee80211_radiotap_active_vap(vap)) {
3473 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3474
3475 tap->wt_flags = 0;
3476 tap->wt_rate = rate;
3477 if (k != NULL)
3478 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3479
3480 ieee80211_radiotap_tx(vap, m);
3481 }
3482
3483 /* Prepare TX firmware command. */
3484 cmd = &ring->cmd[ring->cur];
3485 cmd->code = IWN_CMD_TX_DATA;
3486 cmd->flags = 0;
3487 cmd->qid = ring->qid;
3488 cmd->idx = ring->cur;
3489
3490 tx = (struct iwn_cmd_data *)cmd->data;
3491 /* NB: No need to clear tx, all fields are reinitialized here. */
3492 tx->scratch = 0; /* clear "scratch" area */
3493
3494 flags = 0;
3495 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3496 /* Unicast frame, check if an ACK is expected. */
3497 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
3498 IEEE80211_QOS_ACKPOLICY_NOACK)
3499 flags |= IWN_TX_NEED_ACK;
3500 }
3501 if ((wh->i_fc[0] &
3502 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
3503 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
3504 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
3505
3506 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
3507 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
3508
3509 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
3510 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3511 /* NB: Group frames are sent using CCK in 802.11b/g. */
3512 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
3513 flags |= IWN_TX_NEED_RTS;
3514 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
3515 ridx >= IWN_RIDX_OFDM6) {
3516 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
3517 flags |= IWN_TX_NEED_CTS;
3518 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
3519 flags |= IWN_TX_NEED_RTS;
3520 }
3521 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
3522 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3523 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3524 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
3525 flags |= IWN_TX_NEED_PROTECTION;
3526 } else
3527 flags |= IWN_TX_FULL_TXOP;
3528 }
3529 }
3530
3531 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3532 type != IEEE80211_FC0_TYPE_DATA)
3533 tx->id = sc->broadcast_id;
3534 else
3535 tx->id = wn->id;
3536
3537 if (type == IEEE80211_FC0_TYPE_MGT) {
3538 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3539
3540 /* Tell HW to set timestamp in probe responses. */
3541 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3542 flags |= IWN_TX_INSERT_TSTAMP;
3543 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3544 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3545 tx->timeout = htole16(3);
3546 else
3547 tx->timeout = htole16(2);
3548 } else
3549 tx->timeout = htole16(0);
3550
3551 if (hdrlen & 3) {
3552 /* First segment length must be a multiple of 4. */
3553 flags |= IWN_TX_NEED_PADDING;
3554 pad = 4 - (hdrlen & 3);
3555 } else
3556 pad = 0;
3557
3558 tx->len = htole16(totlen);
3559 tx->tid = tid;
3560 tx->rts_ntries = 60;
3561 tx->data_ntries = 15;
3562 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3563 tx->rate = iwn_rate_to_plcp(sc, ni, rate);
3564 if (tx->id == sc->broadcast_id) {
3565 /* Group or management frame. */
3566 tx->linkq = 0;
3567 /* XXX Alternate between antenna A and B? */
3568 txant = IWN_LSB(sc->txchainmask);
3569 tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3570 } else {
3571 tx->linkq = ni->ni_rates.rs_nrates - ridx - 1;
3572 flags |= IWN_TX_LINKQ; /* enable MRR */
3573 }
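	/*
	 * Editorial note: linkq appears to index the firmware
	 * link-quality retry table from the top, i.e.
	 * rs_nrates - ridx - 1 converts a rate index counted from the
	 * lowest rate into an offset counted from the highest rate,
	 * matching the way iwn_set_link_quality() below fills retry[0]
	 * with the highest available bit-rate.
	 */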
3574 /* Set physical address of "scratch area". */
3575 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3576 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3577
3578 /* Copy 802.11 header in TX command. */
3579 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3580
3581 /* Trim 802.11 header. */
3582 m_adj(m, hdrlen);
3583 tx->security = 0;
3584 tx->flags = htole32(flags);
3585
3586 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3587 &nsegs, BUS_DMA_NOWAIT);
3588 if (error != 0) {
3589 if (error != EFBIG) {
3590 device_printf(sc->sc_dev,
3591 "%s: can't map mbuf (error %d)\n", __func__, error);
3592 m_freem(m);
3593 return error;
3594 }
3595 /* Too many DMA segments, linearize mbuf. */
3596 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3597 if (m1 == NULL) {
3598 device_printf(sc->sc_dev,
3599 "%s: could not defrag mbuf\n", __func__);
3600 m_freem(m);
3601 return ENOBUFS;
3602 }
3603 m = m1;
3604
3605 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3606 segs, &nsegs, BUS_DMA_NOWAIT);
3607 if (error != 0) {
3608 device_printf(sc->sc_dev,
3609 "%s: can't map mbuf (error %d)\n", __func__, error);
3610 m_freem(m);
3611 return error;
3612 }
3613 }
3614
3615 data->m = m;
3616 data->ni = ni;
3617
3618 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3619 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3620
3621 /* Fill TX descriptor. */
3622 desc->nsegs = 1;
3623 if (m->m_len != 0)
3624 desc->nsegs += nsegs;
3625 /* First DMA segment is used by the TX command. */
3626 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3627 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3628 (4 + sizeof (*tx) + hdrlen + pad) << 4);
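	/*
	 * Editorial note: the 16-bit len field doubles as storage for
	 * the upper physical address bits; IWN_HIADDR() occupies the
	 * low nibble and the byte count is shifted left by 4.  The
	 * extra 4 bytes appear to cover the command header
	 * (code/flags/qid/idx) that precedes the TX command body --
	 * a reading of the code, not a statement from hardware
	 * documentation.
	 */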
3629 /* Other DMA segments are for data payload. */
3630 seg = &segs[0];
3631 for (i = 1; i <= nsegs; i++) {
3632 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3633 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
3634 seg->ds_len << 4);
3635 seg++;
3636 }
3637
3638 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3639 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3640 BUS_DMASYNC_PREWRITE);
3641 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3642 BUS_DMASYNC_PREWRITE);
3643
3644 /* Update TX scheduler. */
3645 if (ring->qid >= sc->firstaggqueue)
3646 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3647
3648 /* Kick TX ring. */
3649 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3650 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3651
3652 /* Mark TX ring as full if we reach a certain threshold. */
3653 if (++ring->queued > IWN_TX_RING_HIMARK)
3654 sc->qfullmsk |= 1 << ring->qid;
3655
3656 return 0;
3657}
3658
3659static int
3660iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3661 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
3662{
3663 struct iwn_ops *ops = &sc->ops;
3664 struct ifnet *ifp = sc->sc_ifp;
3665 struct ieee80211vap *vap = ni->ni_vap;
3666 struct ieee80211com *ic = ifp->if_l2com;
3667 struct iwn_tx_cmd *cmd;
3668 struct iwn_cmd_data *tx;
3669 struct ieee80211_frame *wh;
3670 struct iwn_tx_ring *ring;
3671 struct iwn_tx_desc *desc;
3672 struct iwn_tx_data *data;
3673 struct mbuf *m1;
3674 bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
3675 uint32_t flags;
3676 u_int hdrlen;
3677 int ac, totlen, error, pad, nsegs = 0, i, rate;
3678 uint8_t ridx, type, txant;
3679
3680 IWN_LOCK_ASSERT(sc);
3681
3682 wh = mtod(m, struct ieee80211_frame *);
3683 hdrlen = ieee80211_anyhdrsize(wh);
3684 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3685
3686 ac = params->ibp_pri & 3;
3687
3688 ring = &sc->txq[ac];
3689 desc = &ring->desc[ring->cur];
3690 data = &ring->data[ring->cur];
3691
3692 /* Choose a TX rate index. */
3693 rate = params->ibp_rate0;
3641 ridx = ic->ic_rt->rateCodeToIndex[rate];
3694 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
3695 rate & IEEE80211_RATE_VAL);
3696 if (ridx == (uint8_t)-1) {
3697 /* XXX fall back to mcast/mgmt rate? */
3698 m_freem(m);
3699 return EINVAL;
3700 }
3701
3702 totlen = m->m_pkthdr.len;
3703
3704 /* Prepare TX firmware command. */
3705 cmd = &ring->cmd[ring->cur];
3706 cmd->code = IWN_CMD_TX_DATA;
3707 cmd->flags = 0;
3708 cmd->qid = ring->qid;
3709 cmd->idx = ring->cur;
3710
3711 tx = (struct iwn_cmd_data *)cmd->data;
3712 /* NB: No need to clear tx, all fields are reinitialized here. */
3713 tx->scratch = 0; /* clear "scratch" area */
3714
3715 flags = 0;
3716 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3717 flags |= IWN_TX_NEED_ACK;
3718 if (params->ibp_flags & IEEE80211_BPF_RTS) {
3719 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3720 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3721 flags &= ~IWN_TX_NEED_RTS;
3722 flags |= IWN_TX_NEED_PROTECTION;
3723 } else
3724 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3725 }
3726 if (params->ibp_flags & IEEE80211_BPF_CTS) {
3727 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3728 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3729 flags &= ~IWN_TX_NEED_CTS;
3730 flags |= IWN_TX_NEED_PROTECTION;
3731 } else
3732 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3733 }
3734 if (type == IEEE80211_FC0_TYPE_MGT) {
3735 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3736
3737 /* Tell HW to set timestamp in probe responses. */
3738 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3739 flags |= IWN_TX_INSERT_TSTAMP;
3740
3741 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3742 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3743 tx->timeout = htole16(3);
3744 else
3745 tx->timeout = htole16(2);
3746 } else
3747 tx->timeout = htole16(0);
3748
3749 if (hdrlen & 3) {
3750 /* First segment length must be a multiple of 4. */
3751 flags |= IWN_TX_NEED_PADDING;
3752 pad = 4 - (hdrlen & 3);
3753 } else
3754 pad = 0;
3755
3756 if (ieee80211_radiotap_active_vap(vap)) {
3757 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3758
3759 tap->wt_flags = 0;
3760 tap->wt_rate = rate;
3761
3762 ieee80211_radiotap_tx(vap, m);
3763 }
3764
3765 tx->len = htole16(totlen);
3766 tx->tid = 0;
3767 tx->id = sc->broadcast_id;
3768 tx->rts_ntries = params->ibp_try1;
3769 tx->data_ntries = params->ibp_try0;
3770 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3771
3772 /* XXX should just use iwn_rate_to_plcp() */
3773 tx->rate = htole32(rate2plcp(rate));
3774 if (ridx < IWN_RIDX_OFDM6 &&
3775 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
3776 tx->rate |= htole32(IWN_RFLAG_CCK);
3777
3778 /* Group or management frame. */
3779 tx->linkq = 0;
3780 txant = IWN_LSB(sc->txchainmask);
3781 tx->rate |= htole32(IWN_RFLAG_ANT(txant));
3782
3783 /* Set physical address of "scratch area". */
3784 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3785 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3786
3787 /* Copy 802.11 header in TX command. */
3788 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3789
3790 /* Trim 802.11 header. */
3791 m_adj(m, hdrlen);
3792 tx->security = 0;
3793 tx->flags = htole32(flags);
3794
3795 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
3796 &nsegs, BUS_DMA_NOWAIT);
3797 if (error != 0) {
3798 if (error != EFBIG) {
3799 device_printf(sc->sc_dev,
3800 "%s: can't map mbuf (error %d)\n", __func__, error);
3801 m_freem(m);
3802 return error;
3803 }
3804 /* Too many DMA segments, linearize mbuf. */
3805 m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
3806 if (m1 == NULL) {
3807 device_printf(sc->sc_dev,
3808 "%s: could not defrag mbuf\n", __func__);
3809 m_freem(m);
3810 return ENOBUFS;
3811 }
3812 m = m1;
3813
3814 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3815 segs, &nsegs, BUS_DMA_NOWAIT);
3816 if (error != 0) {
3817 device_printf(sc->sc_dev,
3818 "%s: can't map mbuf (error %d)\n", __func__, error);
3819 m_freem(m);
3820 return error;
3821 }
3822 }
3823
3824 data->m = m;
3825 data->ni = ni;
3826
3827 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3828 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3829
3830 /* Fill TX descriptor. */
3831 desc->nsegs = 1;
3832 if (m->m_len != 0)
3833 desc->nsegs += nsegs;
3834 /* First DMA segment is used by the TX command. */
3835 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3836 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3837 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3838 /* Other DMA segments are for data payload. */
3839 seg = &segs[0];
3840 for (i = 1; i <= nsegs; i++) {
3841 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
3842 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
3843 seg->ds_len << 4);
3844 seg++;
3845 }
3846
3847 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3848 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3849 BUS_DMASYNC_PREWRITE);
3850 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3851 BUS_DMASYNC_PREWRITE);
3852
3853 /* Update TX scheduler. */
3854 if (ring->qid >= sc->firstaggqueue)
3855 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3856
3857 /* Kick TX ring. */
3858 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3859 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3860
3861 /* Mark TX ring as full if we reach a certain threshold. */
3862 if (++ring->queued > IWN_TX_RING_HIMARK)
3863 sc->qfullmsk |= 1 << ring->qid;
3864
3865 return 0;
3866}
3867
3868static int
3869iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3870 const struct ieee80211_bpf_params *params)
3871{
3872 struct ieee80211com *ic = ni->ni_ic;
3873 struct ifnet *ifp = ic->ic_ifp;
3874 struct iwn_softc *sc = ifp->if_softc;
3875 int error = 0;
3876
3877 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3878 ieee80211_free_node(ni);
3879 m_freem(m);
3880 return ENETDOWN;
3881 }
3882
3883 IWN_LOCK(sc);
3884 if (params == NULL) {
3885 /*
3886 * Legacy path; interpret frame contents to decide
3887 * precisely how to send the frame.
3888 */
3889 error = iwn_tx_data(sc, m, ni);
3890 } else {
3891 /*
3892 * Caller supplied explicit parameters to use in
3893 * sending the frame.
3894 */
3895 error = iwn_tx_data_raw(sc, m, ni, params);
3896 }
3897 if (error != 0) {
3898 /* NB: m is reclaimed on tx failure */
3899 ieee80211_free_node(ni);
3900 ifp->if_oerrors++;
3901 }
3902 sc->sc_tx_timer = 5;
3903
3904 IWN_UNLOCK(sc);
3905 return error;
3906}
3907
3908static void
3909iwn_start(struct ifnet *ifp)
3910{
3911 struct iwn_softc *sc = ifp->if_softc;
3912
3913 IWN_LOCK(sc);
3914 iwn_start_locked(ifp);
3915 IWN_UNLOCK(sc);
3916}
3917
3918static void
3919iwn_start_locked(struct ifnet *ifp)
3920{
3921 struct iwn_softc *sc = ifp->if_softc;
3922 struct ieee80211_node *ni;
3923 struct mbuf *m;
3924
3925 IWN_LOCK_ASSERT(sc);
3926
3927 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
3928 (ifp->if_drv_flags & IFF_DRV_OACTIVE))
3929 return;
3930
3931 for (;;) {
3932 if (sc->qfullmsk != 0) {
3933 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3934 break;
3935 }
3936 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
3937 if (m == NULL)
3938 break;
3939 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3940 if (iwn_tx_data(sc, m, ni) != 0) {
3941 ieee80211_free_node(ni);
3942 ifp->if_oerrors++;
3943 continue;
3944 }
3945 sc->sc_tx_timer = 5;
3946 }
3947}
3948
3949static void
3950iwn_watchdog(void *arg)
3951{
3952 struct iwn_softc *sc = arg;
3953 struct ifnet *ifp = sc->sc_ifp;
3954 struct ieee80211com *ic = ifp->if_l2com;
3955
3956 IWN_LOCK_ASSERT(sc);
3957
3958 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
3959
3960 if (sc->sc_tx_timer > 0) {
3961 if (--sc->sc_tx_timer == 0) {
3962 if_printf(ifp, "device timeout\n");
3963 ieee80211_runtask(ic, &sc->sc_reinit_task);
3964 return;
3965 }
3966 }
3967 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
3968}
3969
3970static int
3971iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3972{
3973 struct iwn_softc *sc = ifp->if_softc;
3974 struct ieee80211com *ic = ifp->if_l2com;
3975 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3976 struct ifreq *ifr = (struct ifreq *) data;
3977 int error = 0, startall = 0, stop = 0;
3978
3979 switch (cmd) {
3980 case SIOCGIFADDR:
3981 error = ether_ioctl(ifp, cmd, data);
3982 break;
3983 case SIOCSIFFLAGS:
3984 IWN_LOCK(sc);
3985 if (ifp->if_flags & IFF_UP) {
3986 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3987 iwn_init_locked(sc);
3988 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3989 startall = 1;
3990 else
3991 stop = 1;
3992 }
3993 } else {
3994 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3995 iwn_stop_locked(sc);
3996 }
3997 IWN_UNLOCK(sc);
3998 if (startall)
3999 ieee80211_start_all(ic);
4000 else if (vap != NULL && stop)
4001 ieee80211_stop(vap);
4002 break;
4003 case SIOCGIFMEDIA:
4004 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
4005 break;
4006 default:
4007 error = EINVAL;
4008 break;
4009 }
4010 return error;
4011}
4012
4013/*
4014 * Send a command to the firmware.
4015 */
4016static int
4017iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
4018{
4019 struct iwn_tx_ring *ring = &sc->txq[4];
4020 struct iwn_tx_desc *desc;
4021 struct iwn_tx_data *data;
4022 struct iwn_tx_cmd *cmd;
4023 struct mbuf *m;
4024 bus_addr_t paddr;
4025 int totlen, error;
4026
4027 if (async == 0)
4028 IWN_LOCK_ASSERT(sc);
4029
4030 desc = &ring->desc[ring->cur];
4031 data = &ring->data[ring->cur];
4032 totlen = 4 + size;
4033
4034 if (size > sizeof cmd->data) {
4035 /* Command is too large to fit in a descriptor. */
4036 if (totlen > MCLBYTES)
4037 return EINVAL;
4038 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
4039 if (m == NULL)
4040 return ENOMEM;
4041 cmd = mtod(m, struct iwn_tx_cmd *);
4042 error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
4043 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
4044 if (error != 0) {
4045 m_freem(m);
4046 return error;
4047 }
4048 data->m = m;
4049 } else {
4050 cmd = &ring->cmd[ring->cur];
4051 paddr = data->cmd_paddr;
4052 }
4053
4054 cmd->code = code;
4055 cmd->flags = 0;
4056 cmd->qid = ring->qid;
4057 cmd->idx = ring->cur;
4058 memcpy(cmd->data, buf, size);
4059
4060 desc->nsegs = 1;
4061 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
4062 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
4063
4064 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
4065 __func__, iwn_intr_str(cmd->code), cmd->code,
4066 cmd->flags, cmd->qid, cmd->idx);
4067
4068 if (size > sizeof cmd->data) {
4069 bus_dmamap_sync(ring->data_dmat, data->map,
4070 BUS_DMASYNC_PREWRITE);
4071 } else {
4072 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
4073 BUS_DMASYNC_PREWRITE);
4074 }
4075 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
4076 BUS_DMASYNC_PREWRITE);
4077
4078 /* Kick command ring. */
4079 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
4080 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4081
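	/*
	 * Editorial note: for synchronous commands the caller sleeps on
	 * the descriptor address with a one second (hz) timeout;
	 * presumably the command-completion path wakes it via wakeup(9)
	 * on the same address.  Asynchronous commands return right
	 * after the ring is kicked.
	 */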
4082 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
4083}
4084
4085static int
4086iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4087{
4088 struct iwn4965_node_info hnode;
4089 caddr_t src, dst;
4090
4091 /*
4092 * We use the node structure for 5000 Series internally (it is
4093 * a superset of the one for 4965AGN). We thus copy the common
4094 * fields before sending the command.
4095 */
4096 src = (caddr_t)node;
4097 dst = (caddr_t)&hnode;
4098 memcpy(dst, src, 48);
4099 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
4100 memcpy(dst + 48, src + 72, 20);
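	/*
	 * Editorial byte-layout sketch (derived from the copies above,
	 * not from a structure definition): bytes 0-47 are common to
	 * both formats, bytes 48-71 of the 5000-series layout (24 bytes
	 * of TSC/RX MIC/TX MIC) are skipped, and the remaining 20 bytes
	 * are appended directly after the common part.
	 */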
4101 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
4102}
4103
4104static int
4105iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
4106{
4107 /* Direct mapping. */
4108 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
4109}
4110
4111static int
4112iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
4113{
4114#define RV(v) ((v) & IEEE80211_RATE_VAL)
4115 struct iwn_node *wn = (void *)ni;
4116 struct ieee80211_rateset *rs = &ni->ni_rates;
4117 struct iwn_cmd_link_quality linkq;
4118 uint8_t txant;
4119 int i, rate, txrate;
4120
4121 /* Use the first valid TX antenna. */
4122 txant = IWN_LSB(sc->txchainmask);
4123
4124 memset(&linkq, 0, sizeof linkq);
4125 linkq.id = wn->id;
4126 linkq.antmsk_1stream = txant;
4127 linkq.antmsk_2stream = IWN_ANT_AB;
4128 linkq.ampdu_max = 64;
4129 linkq.ampdu_threshold = 3;
4130 linkq.ampdu_limit = htole16(4000); /* 4ms */
4131
4132 /* Start at highest available bit-rate. */
4133 if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4134 txrate = ni->ni_htrates.rs_nrates - 1;
4135 else
4136 txrate = rs->rs_nrates - 1;
4137 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
4138 uint32_t plcp;
4139
4140 if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
4141 rate = IEEE80211_RATE_MCS | txrate;
4142 else
4143 rate = RV(rs->rs_rates[txrate]);
4084 linkq.retry[i] = wn->ridx[rate];
4144
4086 if ((le32toh(wn->ridx[rate]) & IWN_RFLAG_MCS) &&
4087 RV(le32toh(wn->ridx[rate])) > 7)
4145 /* Do rate -> PLCP config mapping */
4146 plcp = iwn_rate_to_plcp(sc, ni, rate);
4147 linkq.retry[i] = plcp;
4148
4149 /* Special case for dual-stream rates? */
4150 if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
4151 RV(le32toh(plcp)) > 7)
4152 linkq.mimo = i + 1;
4153
4154 /* Next retry at immediate lower bit-rate. */
4155 if (txrate > 0)
4156 txrate--;
4157 }
4158 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
4159#undef RV
4160}
4161
4162/*
4163 * Broadcast node is used to send group-addressed and management frames.
4164 */
4165static int
4166iwn_add_broadcast_node(struct iwn_softc *sc, int async)
4167{
4168 struct iwn_ops *ops = &sc->ops;
4169 struct ifnet *ifp = sc->sc_ifp;
4170 struct ieee80211com *ic = ifp->if_l2com;
4171 struct iwn_node_info node;
4172 struct iwn_cmd_link_quality linkq;
4173 uint8_t txant;
4174 int i, error;
4175
4176 memset(&node, 0, sizeof node);
4177 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
4178 node.id = sc->broadcast_id;
4179 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
4180 if ((error = ops->add_node(sc, &node, async)) != 0)
4181 return error;
4182
4183 /* Use the first valid TX antenna. */
4184 txant = IWN_LSB(sc->txchainmask);
4185
4186 memset(&linkq, 0, sizeof linkq);
4187 linkq.id = sc->broadcast_id;
4188 linkq.antmsk_1stream = txant;
4189 linkq.antmsk_2stream = IWN_ANT_AB;
4190 linkq.ampdu_max = 64;
4191 linkq.ampdu_threshold = 3;
4192 linkq.ampdu_limit = htole16(4000); /* 4ms */
4193
4194 /* Use lowest mandatory bit-rate. */
4195 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
4196 linkq.retry[0] = htole32(0xd);
4197 else
4198 linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
4199 linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
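	/*
	 * Editorial note: the magic values above match the probe
	 * request rates used in iwn_scan() below, where 0xd is the
	 * PLCP code for 6 Mb/s OFDM and 10 | IWN_RFLAG_CCK the PLCP
	 * code for 1 Mb/s CCK, i.e. the lowest mandatory rates on
	 * 5 GHz and 2 GHz respectively.
	 */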
4200 /* Use same bit-rate for all TX retries. */
4201 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
4202 linkq.retry[i] = linkq.retry[0];
4203 }
4204 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
4205}
4206
4207static int
4208iwn_updateedca(struct ieee80211com *ic)
4209{
4210#define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
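/*
 * Editorial example: with a logarithmic ECWmin of 4, IWN_EXP2(4) yields
 * (1 << 4) - 1 = 15, i.e. a contention window of 15 slots, the usual
 * conversion from the ECW encoding used by WME/802.11e.
 */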
4211 struct iwn_softc *sc = ic->ic_ifp->if_softc;
4212 struct iwn_edca_params cmd;
4213 int aci;
4214
4215 memset(&cmd, 0, sizeof cmd);
4216 cmd.flags = htole32(IWN_EDCA_UPDATE);
4217 for (aci = 0; aci < WME_NUM_AC; aci++) {
4218 const struct wmeParams *ac =
4219 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
4220 cmd.ac[aci].aifsn = ac->wmep_aifsn;
4221 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
4222 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
4223 cmd.ac[aci].txoplimit =
4224 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
4225 }
4226 IEEE80211_UNLOCK(ic);
4227 IWN_LOCK(sc);
4228 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4229 IWN_UNLOCK(sc);
4230 IEEE80211_LOCK(ic);
4231 return 0;
4232#undef IWN_EXP2
4233}
4234
4235static void
4236iwn_update_mcast(struct ifnet *ifp)
4237{
4238 /* Ignore */
4239}
4240
4241static void
4242iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
4243{
4244 struct iwn_cmd_led led;
4245
4246 /* Clear microcode LED ownership. */
4247 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
4248
4249 led.which = which;
4250	led.unit = htole32(10000);	/* on/off in units of 100ms */
4251 led.off = off;
4252 led.on = on;
4253 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
4254}
4255
4256/*
4257 * Set the critical temperature at which the firmware will stop the radio
4258 * and notify us.
4259 */
4260static int
4261iwn_set_critical_temp(struct iwn_softc *sc)
4262{
4263 struct iwn_critical_temp crit;
4264 int32_t temp;
4265
4266 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
4267
4268 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
4269 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
4270 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
4271 temp = IWN_CTOK(110);
4272 else
4273 temp = 110;
4274 memset(&crit, 0, sizeof crit);
4275 crit.tempR = htole32(temp);
4276 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
4277 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
4278}
4279
4280static int
4281iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
4282{
4283 struct iwn_cmd_timing cmd;
4284 uint64_t val, mod;
4285
4286 memset(&cmd, 0, sizeof cmd);
4287 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
4288 cmd.bintval = htole16(ni->ni_intval);
4289 cmd.lintval = htole16(10);
4290
4291 /* Compute remaining time until next beacon. */
4292 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
4293 mod = le64toh(cmd.tstamp) % val;
4294 cmd.binitval = htole32((uint32_t)(val - mod));
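	/*
	 * Editorial worked example: with a beacon interval of 100 TU
	 * and IEEE80211_DUR_TU == 1024 us, val is 102400 us; if the
	 * last TSF timestamp modulo val is 30000 us, binitval is set
	 * to 72400 us, the time remaining until the next expected
	 * beacon.
	 */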
4295
4296 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
4297 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
4298
4299 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
4300}
4301
4302static void
4303iwn4965_power_calibration(struct iwn_softc *sc, int temp)
4304{
4305 struct ifnet *ifp = sc->sc_ifp;
4306 struct ieee80211com *ic = ifp->if_l2com;
4307
4308 /* Adjust TX power if need be (delta >= 3 degC). */
4309 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
4310 __func__, sc->temp, temp);
4311 if (abs(temp - sc->temp) >= 3) {
4312 /* Record temperature of last calibration. */
4313 sc->temp = temp;
4314 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
4315 }
4316}
4317
4318/*
4319 * Set TX power for current channel (each rate has its own power settings).
4320 * This function takes into account the regulatory information from EEPROM,
4321 * the current temperature and the current voltage.
4322 */
4323static int
4324iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4325 int async)
4326{
4327/* Fixed-point arithmetic division using an n-bit fractional part. */
4328#define fdivround(a, b, n) \
4329 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
4330/* Linear interpolation. */
4331#define interpolate(x, x1, y1, x2, y2, n) \
4332 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
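/*
 * Editorial worked example of the two macros above: fdivround(7, 2, 1)
 * evaluates to ((2 * 7) / 2 + 1) / 2 = (7 + 1) / 2 = 4, i.e. 7/2 rounded
 * to the nearest integer using one fractional bit.  Likewise
 * interpolate(40, 36, 10, 48, 20, 1) = 10 + fdivround(4 * 10, 12, 1) =
 * 10 + 3 = 13, a rounded linear interpolation between the calibration
 * points (36, 10) and (48, 20).
 */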
4333
4334 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
4335 struct iwn_ucode_info *uc = &sc->ucode_info;
4336 struct iwn4965_cmd_txpower cmd;
4337 struct iwn4965_eeprom_chan_samples *chans;
4338 const uint8_t *rf_gain, *dsp_gain;
4339 int32_t vdiff, tdiff;
4340 int i, c, grp, maxpwr;
4341 uint8_t chan;
4342
4343 /* Retrieve current channel from last RXON. */
4344 chan = sc->rxon.chan;
4345 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
4346 chan);
4347
4348 memset(&cmd, 0, sizeof cmd);
4349 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
4350 cmd.chan = chan;
4351
4352 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
4353 maxpwr = sc->maxpwr5GHz;
4354 rf_gain = iwn4965_rf_gain_5ghz;
4355 dsp_gain = iwn4965_dsp_gain_5ghz;
4356 } else {
4357 maxpwr = sc->maxpwr2GHz;
4358 rf_gain = iwn4965_rf_gain_2ghz;
4359 dsp_gain = iwn4965_dsp_gain_2ghz;
4360 }
4361
4362 /* Compute voltage compensation. */
4363 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
4364 if (vdiff > 0)
4365 vdiff *= 2;
4366 if (abs(vdiff) > 2)
4367 vdiff = 0;
4368 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4369 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
4370 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
4371
4372 /* Get channel attenuation group. */
4373 if (chan <= 20) /* 1-20 */
4374 grp = 4;
4375 else if (chan <= 43) /* 34-43 */
4376 grp = 0;
4377 else if (chan <= 70) /* 44-70 */
4378 grp = 1;
4379 else if (chan <= 124) /* 71-124 */
4380 grp = 2;
4381 else /* 125-200 */
4382 grp = 3;
4383 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4384 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
4385
4386 /* Get channel sub-band. */
4387 for (i = 0; i < IWN_NBANDS; i++)
4388 if (sc->bands[i].lo != 0 &&
4389 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
4390 break;
4391	if (i == IWN_NBANDS)	/* Cannot happen in practice. */
4392 return EINVAL;
4393 chans = sc->bands[i].chans;
4394 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4395 "%s: chan %d sub-band=%d\n", __func__, chan, i);
4396
4397 for (c = 0; c < 2; c++) {
4398 uint8_t power, gain, temp;
4399 int maxchpwr, pwr, ridx, idx;
4400
4401 power = interpolate(chan,
4402 chans[0].num, chans[0].samples[c][1].power,
4403 chans[1].num, chans[1].samples[c][1].power, 1);
4404 gain = interpolate(chan,
4405 chans[0].num, chans[0].samples[c][1].gain,
4406 chans[1].num, chans[1].samples[c][1].gain, 1);
4407 temp = interpolate(chan,
4408 chans[0].num, chans[0].samples[c][1].temp,
4409 chans[1].num, chans[1].samples[c][1].temp, 1);
4410 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4411 "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
4412 __func__, c, power, gain, temp);
4413
4414 /* Compute temperature compensation. */
4415 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
4416 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4417 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
4418 __func__, tdiff, sc->temp, temp);
4419
4420 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
4421 /* Convert dBm to half-dBm. */
4422 maxchpwr = sc->maxpwr[chan] * 2;
4423 if ((ridx / 8) & 1)
4424 maxchpwr -= 6; /* MIMO 2T: -3dB */
4425
4426 pwr = maxpwr;
4427
4428 /* Adjust TX power based on rate. */
4429 if ((ridx % 8) == 5)
4430 pwr -= 15; /* OFDM48: -7.5dB */
4431 else if ((ridx % 8) == 6)
4432 pwr -= 17; /* OFDM54: -8.5dB */
4433 else if ((ridx % 8) == 7)
4434 pwr -= 20; /* OFDM60: -10dB */
4435 else
4436 pwr -= 10; /* Others: -5dB */
4437
4438 /* Do not exceed channel max TX power. */
4439 if (pwr > maxchpwr)
4440 pwr = maxchpwr;
4441
4442 idx = gain - (pwr - power) - tdiff - vdiff;
4443 if ((ridx / 8) & 1) /* MIMO */
4444 idx += (int32_t)le32toh(uc->atten[grp][c]);
4445
4446 if (cmd.band == 0)
4447 idx += 9; /* 5GHz */
4448 if (ridx == IWN_RIDX_MAX)
4449 idx += 5; /* CCK */
4450
4451 /* Make sure idx stays in a valid range. */
4452 if (idx < 0)
4453 idx = 0;
4454 else if (idx > IWN4965_MAX_PWR_INDEX)
4455 idx = IWN4965_MAX_PWR_INDEX;
4456
4457 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4458 "%s: Tx chain %d, rate idx %d: power=%d\n",
4459 __func__, c, ridx, idx);
4460 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
4461 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
4462 }
4463 }
4464
4465 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
4466 "%s: set tx power for chan %d\n", __func__, chan);
4467 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
4468
4469#undef interpolate
4470#undef fdivround
4471}
4472
4473static int
4474iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
4475 int async)
4476{
4477 struct iwn5000_cmd_txpower cmd;
4478
4479 /*
4480 * TX power calibration is handled automatically by the firmware
4481 * for 5000 Series.
4482 */
4483 memset(&cmd, 0, sizeof cmd);
4484 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
4485 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
4486 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
4487 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
4488 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
4489}
4490
4491/*
4492 * Retrieve the maximum RSSI (in dBm) among receivers.
4493 */
4494static int
4495iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4496{
4497 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
4498 uint8_t mask, agc;
4499 int rssi;
4500
4501 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
4502 agc = (le16toh(phy->agc) >> 7) & 0x7f;
4503
4504 rssi = 0;
4505 if (mask & IWN_ANT_A)
4506 rssi = MAX(rssi, phy->rssi[0]);
4507 if (mask & IWN_ANT_B)
4508 rssi = MAX(rssi, phy->rssi[2]);
4509 if (mask & IWN_ANT_C)
4510 rssi = MAX(rssi, phy->rssi[4]);
4511
4512 DPRINTF(sc, IWN_DEBUG_RECV,
4513 "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
4514 mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
4515 rssi - agc - IWN_RSSI_TO_DBM);
4516 return rssi - agc - IWN_RSSI_TO_DBM;
4517}
4518
4519static int
4520iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
4521{
4522 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
4523 uint8_t agc;
4524 int rssi;
4525
4526 agc = (le32toh(phy->agc) >> 9) & 0x7f;
4527
4528 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
4529 le16toh(phy->rssi[1]) & 0xff);
4530 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
4531
4532 DPRINTF(sc, IWN_DEBUG_RECV,
4533 "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
4534 phy->rssi[0], phy->rssi[1], phy->rssi[2],
4535 rssi - agc - IWN_RSSI_TO_DBM);
4536 return rssi - agc - IWN_RSSI_TO_DBM;
4537}
4538
4539/*
4540 * Retrieve the average noise (in dBm) among receivers.
4541 */
4542static int
4543iwn_get_noise(const struct iwn_rx_general_stats *stats)
4544{
4545 int i, total, nbant, noise;
4546
4547 total = nbant = 0;
4548 for (i = 0; i < 3; i++) {
4549 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
4550 continue;
4551 total += noise;
4552 nbant++;
4553 }
4554 /* There should be at least one antenna but check anyway. */
4555 return (nbant == 0) ? -127 : (total / nbant) - 107;
4556}
4557
4558/*
4559 * Compute temperature (in degC) from last received statistics.
4560 */
4561static int
4562iwn4965_get_temperature(struct iwn_softc *sc)
4563{
4564 struct iwn_ucode_info *uc = &sc->ucode_info;
4565 int32_t r1, r2, r3, r4, temp;
4566
4567 r1 = le32toh(uc->temp[0].chan20MHz);
4568 r2 = le32toh(uc->temp[1].chan20MHz);
4569 r3 = le32toh(uc->temp[2].chan20MHz);
4570 r4 = le32toh(sc->rawtemp);
4571
4572 if (r1 == r3) /* Prevents division by 0 (should not happen). */
4573 return 0;
4574
4575 /* Sign-extend 23-bit R4 value to 32-bit. */
4576 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
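	/*
	 * Editorial example of the XOR/subtract trick above: a raw
	 * value of 0xffffff maps to (0xffffff ^ 0x800000) - 0x800000 =
	 * 0x7fffff - 0x800000 = -1, while 0x000005 stays 5, i.e. bit 23
	 * is treated as the two's-complement sign bit.
	 */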
4577 /* Compute temperature in Kelvin. */
4578 temp = (259 * (r4 - r2)) / (r3 - r1);
4579 temp = (temp * 97) / 100 + 8;
4580
4581 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4582 IWN_KTOC(temp));
4583 return IWN_KTOC(temp);
4584}
4585
4586static int
4587iwn5000_get_temperature(struct iwn_softc *sc)
4588{
4589 int32_t temp;
4590
4591 /*
4592 * Temperature is not used by the driver for 5000 Series because
4593 * TX power calibration is handled by firmware.
4594 */
4595 temp = le32toh(sc->rawtemp);
4596 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4597 temp = (temp / -5) + sc->temp_off;
4598 temp = IWN_KTOC(temp);
4599 }
4600 return temp;
4601}
4602
4603/*
4604 * Initialize sensitivity calibration state machine.
4605 */
4606static int
4607iwn_init_sensitivity(struct iwn_softc *sc)
4608{
4609 struct iwn_ops *ops = &sc->ops;
4610 struct iwn_calib_state *calib = &sc->calib;
4611 uint32_t flags;
4612 int error;
4613
4614 /* Reset calibration state machine. */
4615 memset(calib, 0, sizeof (*calib));
4616 calib->state = IWN_CALIB_STATE_INIT;
4617 calib->cck_state = IWN_CCK_STATE_HIFA;
4618 /* Set initial correlation values. */
4619 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
4620 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4621 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
4622 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4623 calib->cck_x4 = 125;
4624 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
4625 calib->energy_cck = sc->limits->energy_cck;
4626
4627 /* Write initial sensitivity. */
4628 if ((error = iwn_send_sensitivity(sc)) != 0)
4629 return error;
4630
4631 /* Write initial gains. */
4632 if ((error = ops->init_gains(sc)) != 0)
4633 return error;
4634
4635 /* Request statistics at each beacon interval. */
4636 flags = 0;
4637 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
4638 __func__);
4639 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4640}
4641
4642/*
4643 * Collect noise and RSSI statistics for the first 20 beacons received
4644 * after association and use them to determine connected antennas and
4645 * to set differential gains.
4646 */
4647static void
4648iwn_collect_noise(struct iwn_softc *sc,
4649 const struct iwn_rx_general_stats *stats)
4650{
4651 struct iwn_ops *ops = &sc->ops;
4652 struct iwn_calib_state *calib = &sc->calib;
4653 struct ifnet *ifp = sc->sc_ifp;
4654 struct ieee80211com *ic = ifp->if_l2com;
4655 uint32_t val;
4656 int i;
4657
4658 /* Accumulate RSSI and noise for all 3 antennas. */
4659 for (i = 0; i < 3; i++) {
4660 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4661 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4662 }
4663 /* NB: We update differential gains only once after 20 beacons. */
4664 if (++calib->nbeacons < 20)
4665 return;
4666
4667 /* Determine highest average RSSI. */
4668 val = MAX(calib->rssi[0], calib->rssi[1]);
4669 val = MAX(calib->rssi[2], val);
4670
4671 /* Determine which antennas are connected. */
4672 sc->chainmask = sc->rxchainmask;
4673 for (i = 0; i < 3; i++)
4674 if (val - calib->rssi[i] > 15 * 20)
4675 sc->chainmask &= ~(1 << i);
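	/*
	 * Editorial note: calib->rssi[] holds sums over the 20 beacons
	 * accumulated above, so the "15 * 20" threshold corresponds to
	 * an average RSSI deficit of 15 units on that antenna.
	 */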
4676 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4677 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4678 __func__, sc->rxchainmask, sc->chainmask);
4679
4680 /* If none of the TX antennas are connected, keep at least one. */
4681 if ((sc->chainmask & sc->txchainmask) == 0)
4682 sc->chainmask |= IWN_LSB(sc->txchainmask);
4683
4684 (void)ops->set_gains(sc);
4685 calib->state = IWN_CALIB_STATE_RUN;
4686
4687#ifdef notyet
4688 /* XXX Disable RX chains with no antennas connected. */
4689 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4690 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4691#endif
4692
4693 /* Enable power-saving mode if requested by user. */
4694 if (ic->ic_flags & IEEE80211_F_PMGTON)
4695 (void)iwn_set_pslevel(sc, 0, 3, 1);
4696}
4697
4698static int
4699iwn4965_init_gains(struct iwn_softc *sc)
4700{
4701 struct iwn_phy_calib_gain cmd;
4702
4703 memset(&cmd, 0, sizeof cmd);
4704 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4705 /* Differential gains initially set to 0 for all 3 antennas. */
4706 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4707 "%s: setting initial differential gains\n", __func__);
4708 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4709}
4710
4711static int
4712iwn5000_init_gains(struct iwn_softc *sc)
4713{
4714 struct iwn_phy_calib cmd;
4715
4716 memset(&cmd, 0, sizeof cmd);
4717 cmd.code = sc->reset_noise_gain;
4718 cmd.ngroups = 1;
4719 cmd.isvalid = 1;
4720 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4721 "%s: setting initial differential gains\n", __func__);
4722 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4723}
4724
4725static int
4726iwn4965_set_gains(struct iwn_softc *sc)
4727{
4728 struct iwn_calib_state *calib = &sc->calib;
4729 struct iwn_phy_calib_gain cmd;
4730 int i, delta, noise;
4731
4732 /* Get minimal noise among connected antennas. */
4733 noise = INT_MAX; /* NB: There's at least one antenna. */
4734 for (i = 0; i < 3; i++)
4735 if (sc->chainmask & (1 << i))
4736 noise = MIN(calib->noise[i], noise);
4737
4738 memset(&cmd, 0, sizeof cmd);
4739 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4740 /* Set differential gains for connected antennas. */
4741 for (i = 0; i < 3; i++) {
4742 if (sc->chainmask & (1 << i)) {
4743			/* Compute attenuation (in units of 1.5 dB). */
4744 delta = (noise - (int32_t)calib->noise[i]) / 30;
4745 /* NB: delta <= 0 */
4746 /* Limit to [-4.5dB,0]. */
4747 cmd.gain[i] = MIN(abs(delta), 3);
4748 if (delta < 0)
4749 cmd.gain[i] |= 1 << 2; /* sign bit */
4750 }
4751 }
4752 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4753 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4754 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4755 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4756}
4757
4758static int
4759iwn5000_set_gains(struct iwn_softc *sc)
4760{
4761 struct iwn_calib_state *calib = &sc->calib;
4762 struct iwn_phy_calib_gain cmd;
4763 int i, ant, div, delta;
4764
4765	/* Noise was accumulated over 20 beacons; non-6050 parts need an extra 1.5 factor (30 = 20 * 1.5). */
4766 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4767
4768 memset(&cmd, 0, sizeof cmd);
4769 cmd.code = sc->noise_gain;
4770 cmd.ngroups = 1;
4771 cmd.isvalid = 1;
4772	/* Get first available RX antenna as reference. */
4773 ant = IWN_LSB(sc->rxchainmask);
4774 /* Set differential gains for other antennas. */
4775 for (i = ant + 1; i < 3; i++) {
4776 if (sc->chainmask & (1 << i)) {
4777 /* The delta is relative to antenna "ant". */
4778 delta = ((int32_t)calib->noise[ant] -
4779 (int32_t)calib->noise[i]) / div;
4780 /* Limit to [-4.5dB,+4.5dB]. */
4781 cmd.gain[i - 1] = MIN(abs(delta), 3);
4782 if (delta < 0)
4783 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
4784 }
4785 }
4786 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4787 "setting differential gains Ant B/C: %x/%x (%x)\n",
4788 cmd.gain[0], cmd.gain[1], sc->chainmask);
4789 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4790}
4791
4792/*
4793 * Tune RF RX sensitivity based on the number of false alarms detected
4794 * during the last beacon period.
4795 */
4796static void
4797iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4798{
4799#define inc(val, inc, max) \
4800 if ((val) < (max)) { \
4801 if ((val) < (max) - (inc)) \
4802 (val) += (inc); \
4803 else \
4804 (val) = (max); \
4805 needs_update = 1; \
4806 }
4807#define dec(val, dec, min) \
4808 if ((val) > (min)) { \
4809 if ((val) > (min) + (dec)) \
4810 (val) -= (dec); \
4811 else \
4812 (val) = (min); \
4813 needs_update = 1; \
4814 }
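/*
 * Editorial example: inc(calib->cck_x4, 3, limits->max_cck_x4) adds 3
 * when that leaves room below the limit, clamps to the limit when the
 * value is within 3 of it (setting needs_update either way), and does
 * nothing once the limit is already reached; dec() mirrors this toward
 * the minimum.
 */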
4815
4816 const struct iwn_sensitivity_limits *limits = sc->limits;
4817 struct iwn_calib_state *calib = &sc->calib;
4818 uint32_t val, rxena, fa;
4819 uint32_t energy[3], energy_min;
4820 uint8_t noise[3], noise_ref;
4821 int i, needs_update = 0;
4822
4823 /* Check that we've been enabled long enough. */
4824 if ((rxena = le32toh(stats->general.load)) == 0)
4825 return;
4826
4827 /* Compute number of false alarms since last call for OFDM. */
4828 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4829 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
4830 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
4831
4832 /* Save counters values for next call. */
4833 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
4834 calib->fa_ofdm = le32toh(stats->ofdm.fa);
4835
4836 if (fa > 50 * rxena) {
4837 /* High false alarm count, decrease sensitivity. */
4838 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4839 "%s: OFDM high false alarm count: %u\n", __func__, fa);
4840 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
4841 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4842 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
4843 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4844
4845 } else if (fa < 5 * rxena) {
4846 /* Low false alarm count, increase sensitivity. */
4847 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4848 "%s: OFDM low false alarm count: %u\n", __func__, fa);
4849 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
4850 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4851 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
4852 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4853 }
4854
4855 /* Compute maximum noise among 3 receivers. */
4856 for (i = 0; i < 3; i++)
4857 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
4858 val = MAX(noise[0], noise[1]);
4859 val = MAX(noise[2], val);
4860 /* Insert it into our samples table. */
4861 calib->noise_samples[calib->cur_noise_sample] = val;
4862 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4863
4864 /* Compute maximum noise among last 20 samples. */
4865 noise_ref = calib->noise_samples[0];
4866 for (i = 1; i < 20; i++)
4867 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4868
4869 /* Compute maximum energy among 3 receivers. */
4870 for (i = 0; i < 3; i++)
4871 energy[i] = le32toh(stats->general.energy[i]);
4872 val = MIN(energy[0], energy[1]);
4873 val = MIN(energy[2], val);
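	/*
	 * Editorial note: the comments say "maximum energy" here and
	 * "minimum energy" below while the code uses MIN() and MAX()
	 * respectively.  One plausible reading is that the raw energy
	 * samples are on an inverted scale (smaller value = stronger
	 * signal); this is an assumption drawn from the code, not
	 * something confirmed by firmware documentation.
	 */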
4874 /* Insert it into our samples table. */
4875 calib->energy_samples[calib->cur_energy_sample] = val;
4876 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4877
4878 /* Compute minimum energy among last 10 samples. */
4879 energy_min = calib->energy_samples[0];
4880 for (i = 1; i < 10; i++)
4881 energy_min = MAX(energy_min, calib->energy_samples[i]);
4882 energy_min += 6;
4883
4884 /* Compute number of false alarms since last call for CCK. */
4885 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4886 fa += le32toh(stats->cck.fa) - calib->fa_cck;
4887 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
4888
4889 /* Save counters values for next call. */
4890 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
4891 calib->fa_cck = le32toh(stats->cck.fa);
4892
4893 if (fa > 50 * rxena) {
4894 /* High false alarm count, decrease sensitivity. */
4895 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4896 "%s: CCK high false alarm count: %u\n", __func__, fa);
4897 calib->cck_state = IWN_CCK_STATE_HIFA;
4898 calib->low_fa = 0;
4899
4900 if (calib->cck_x4 > 160) {
4901 calib->noise_ref = noise_ref;
4902 if (calib->energy_cck > 2)
4903 dec(calib->energy_cck, 2, energy_min);
4904 }
4905 if (calib->cck_x4 < 160) {
4906 calib->cck_x4 = 161;
4907 needs_update = 1;
4908 } else
4909 inc(calib->cck_x4, 3, limits->max_cck_x4);
4910
4911 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4912
4913 } else if (fa < 5 * rxena) {
4914 /* Low false alarm count, increase sensitivity. */
4915 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4916 "%s: CCK low false alarm count: %u\n", __func__, fa);
4917 calib->cck_state = IWN_CCK_STATE_LOFA;
4918 calib->low_fa++;
4919
4920 if (calib->cck_state != IWN_CCK_STATE_INIT &&
4921 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4922 calib->low_fa > 100)) {
4923 inc(calib->energy_cck, 2, limits->min_energy_cck);
4924 dec(calib->cck_x4, 3, limits->min_cck_x4);
4925 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4926 }
4927 } else {
4928		/* Not worth increasing or decreasing sensitivity. */
4929 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4930 "%s: CCK normal false alarm count: %u\n", __func__, fa);
4931 calib->low_fa = 0;
4932 calib->noise_ref = noise_ref;
4933
4934 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4935 /* Previous interval had many false alarms. */
4936 dec(calib->energy_cck, 8, energy_min);
4937 }
4938 calib->cck_state = IWN_CCK_STATE_INIT;
4939 }
4940
4941 if (needs_update)
4942 (void)iwn_send_sensitivity(sc);
4943#undef dec
4944#undef inc
4945}
4946
4947static int
4948iwn_send_sensitivity(struct iwn_softc *sc)
4949{
4950 struct iwn_calib_state *calib = &sc->calib;
4951 struct iwn_enhanced_sensitivity_cmd cmd;
4952 int len;
4953
4954 memset(&cmd, 0, sizeof cmd);
4955 len = sizeof (struct iwn_sensitivity_cmd);
4956 cmd.which = IWN_SENSITIVITY_WORKTBL;
4957 /* OFDM modulation. */
4958 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
4959 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
4960 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
4961 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
4962 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
4963 cmd.energy_ofdm_th = htole16(62);
4964 /* CCK modulation. */
4965 cmd.corr_cck_x4 = htole16(calib->cck_x4);
4966 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
4967 cmd.energy_cck = htole16(calib->energy_cck);
4968 /* Barker modulation: use default values. */
4969 cmd.corr_barker = htole16(190);
4970 cmd.corr_barker_mrc = htole16(390);
4971
4972 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4973 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
4974 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4975 calib->ofdm_mrc_x4, calib->cck_x4,
4976 calib->cck_mrc_x4, calib->energy_cck);
4977
4978 if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
4979 goto send;
4980 /* Enhanced sensitivity settings. */
4981 len = sizeof (struct iwn_enhanced_sensitivity_cmd);
4982 cmd.ofdm_det_slope_mrc = htole16(668);
4983 cmd.ofdm_det_icept_mrc = htole16(4);
4984 cmd.ofdm_det_slope = htole16(486);
4985 cmd.ofdm_det_icept = htole16(37);
4986 cmd.cck_det_slope_mrc = htole16(853);
4987 cmd.cck_det_icept_mrc = htole16(4);
4988 cmd.cck_det_slope = htole16(476);
4989 cmd.cck_det_icept = htole16(99);
4990send:
4991 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
4992}
4993
4994/*
4995 * Set STA mode power saving level (between 0 and 5).
4996 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4997 */
4998static int
4999iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
5000{
5001 struct iwn_pmgt_cmd cmd;
5002 const struct iwn_pmgt *pmgt;
5003 uint32_t max, skip_dtim;
5004 uint32_t reg;
5005 int i;
5006
5007 DPRINTF(sc, IWN_DEBUG_PWRSAVE,
5008 "%s: dtim=%d, level=%d, async=%d\n",
5009 __func__,
5010 dtim,
5011 level,
5012 async);
5013
5014 /* Select which PS parameters to use. */
5015 if (dtim <= 2)
5016 pmgt = &iwn_pmgt[0][level];
5017 else if (dtim <= 10)
5018 pmgt = &iwn_pmgt[1][level];
5019 else
5020 pmgt = &iwn_pmgt[2][level];
5021
5022 memset(&cmd, 0, sizeof cmd);
5023 if (level != 0) /* not CAM */
5024 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
5025 if (level == 5)
5026 cmd.flags |= htole16(IWN_PS_FAST_PD);
5027 /* Retrieve PCIe Active State Power Management (ASPM). */
5028 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
5029 if (!(reg & 0x1)) /* L0s Entry disabled. */
5030 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
5031 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
5032 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
5033
5034 if (dtim == 0) {
5035 dtim = 1;
5036 skip_dtim = 0;
5037 } else
5038 skip_dtim = pmgt->skip_dtim;
5039 if (skip_dtim != 0) {
5040 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
5041 max = pmgt->intval[4];
5042 if (max == (uint32_t)-1)
5043 max = dtim * (skip_dtim + 1);
5044 else if (max > dtim)
5045 max = (max / dtim) * dtim;
5046 } else
5047 max = dtim;
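	/*
	 * Editorial example (hypothetical values): with dtim == 3 and a
	 * table maximum of 10, the rounding above yields
	 * max = (10 / 3) * 3 = 9, keeping max a whole multiple of the
	 * DTIM period.
	 */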
5048 for (i = 0; i < 5; i++)
5049 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
5050
5051 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
5052 level);
5053 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
5054}
5055
5056static int
5057iwn_send_btcoex(struct iwn_softc *sc)
5058{
5059 struct iwn_bluetooth cmd;
5060
5061 memset(&cmd, 0, sizeof cmd);
5062 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
5063 cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
5064 cmd.max_kill = IWN_BT_MAX_KILL_DEF;
5065 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
5066 __func__);
5067 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
5068}
5069
5070static int
5071iwn_send_advanced_btcoex(struct iwn_softc *sc)
5072{
5073 static const uint32_t btcoex_3wire[12] = {
5074 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
5075 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
5076 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
5077 };
5078 struct iwn6000_btcoex_config btconfig;
5079 struct iwn_btcoex_priotable btprio;
5080 struct iwn_btcoex_prot btprot;
5081 int error, i;
5082
5083 memset(&btconfig, 0, sizeof btconfig);
5084 btconfig.flags = 145;
5085 btconfig.max_kill = 5;
5086 btconfig.bt3_t7_timer = 1;
5087 btconfig.kill_ack = htole32(0xffff0000);
5088 btconfig.kill_cts = htole32(0xffff0000);
5089 btconfig.sample_time = 2;
5090 btconfig.bt3_t2_timer = 0xc;
5091 for (i = 0; i < 12; i++)
5092 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
5093 btconfig.valid = htole16(0xff);
5094 btconfig.prio_boost = 0xf0;
5095 DPRINTF(sc, IWN_DEBUG_RESET,
5096 "%s: configuring advanced bluetooth coexistence\n", __func__);
5097 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1);
5098 if (error != 0)
5099 return error;
5100
5101 memset(&btprio, 0, sizeof btprio);
5102 btprio.calib_init1 = 0x6;
5103 btprio.calib_init2 = 0x7;
5104 btprio.calib_periodic_low1 = 0x2;
5105 btprio.calib_periodic_low2 = 0x3;
5106 btprio.calib_periodic_high1 = 0x4;
5107 btprio.calib_periodic_high2 = 0x5;
5108 btprio.dtim = 0x6;
5109 btprio.scan52 = 0x8;
5110 btprio.scan24 = 0xa;
5111 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
5112 1);
5113 if (error != 0)
5114 return error;
5115
5116 /* Force BT state machine change. */
5117	memset(&btprot, 0, sizeof btprot);
5118 btprot.open = 1;
5119 btprot.type = 1;
5120 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5121 if (error != 0)
5122 return error;
5123 btprot.open = 0;
5124 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
5125}
5126
5127static int
5128iwn5000_runtime_calib(struct iwn_softc *sc)
5129{
5130 struct iwn5000_calib_config cmd;
5131
5132 memset(&cmd, 0, sizeof cmd);
5133 cmd.ucode.once.enable = 0xffffffff;
5134 cmd.ucode.once.start = IWN5000_CALIB_DC;
5135 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5136 "%s: configuring runtime calibration\n", __func__);
5137 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
5138}
5139
5140static int
5141iwn_config(struct iwn_softc *sc)
5142{
5143 struct iwn_ops *ops = &sc->ops;
5144 struct ifnet *ifp = sc->sc_ifp;
5145 struct ieee80211com *ic = ifp->if_l2com;
5146 uint32_t txmask;
5147 uint16_t rxchain;
5148 int error;
5149
5150 if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
5151 /* Set radio temperature sensor offset. */
5152 error = iwn5000_temp_offset_calib(sc);
5153 if (error != 0) {
5154 device_printf(sc->sc_dev,
5155 "%s: could not set temperature offset\n", __func__);
5156 return error;
5157 }
5158 }
5159
5160 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5161 /* Configure runtime DC calibration. */
5162 error = iwn5000_runtime_calib(sc);
5163 if (error != 0) {
5164 device_printf(sc->sc_dev,
5165 "%s: could not configure runtime calibration\n",
5166 __func__);
5167 return error;
5168 }
5169 }
5170
5171 /* Configure valid TX chains for >=5000 Series. */
5172 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
5173 txmask = htole32(sc->txchainmask);
5174 DPRINTF(sc, IWN_DEBUG_RESET,
5175 "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
5176 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
5177 sizeof txmask, 0);
5178 if (error != 0) {
5179 device_printf(sc->sc_dev,
5180 "%s: could not configure valid TX chains, "
5181 "error %d\n", __func__, error);
5182 return error;
5183 }
5184 }
5185
5186 /* Configure bluetooth coexistence. */
5187 if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
5188 error = iwn_send_advanced_btcoex(sc);
5189 else
5190 error = iwn_send_btcoex(sc);
5191 if (error != 0) {
5192 device_printf(sc->sc_dev,
5193 "%s: could not configure bluetooth coexistence, error %d\n",
5194 __func__, error);
5195 return error;
5196 }
5197
5198 /* Set mode, channel, RX filter and enable RX. */
5199 memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
5200 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
5201 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
5202 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
5203 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5204 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
5205 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5206 switch (ic->ic_opmode) {
5207 case IEEE80211_M_STA:
5208 sc->rxon.mode = IWN_MODE_STA;
5209 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
5210 break;
5211 case IEEE80211_M_MONITOR:
5212 sc->rxon.mode = IWN_MODE_MONITOR;
5213 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
5214 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
5215 break;
5216 default:
5217		/* Should not get here. */
5218 break;
5219 }
5220 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
5221 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
5222 sc->rxon.ht_single_mask = 0xff;
5223 sc->rxon.ht_dual_mask = 0xff;
5224 sc->rxon.ht_triple_mask = 0xff;
5225 rxchain =
5226 IWN_RXCHAIN_VALID(sc->rxchainmask) |
5227 IWN_RXCHAIN_MIMO_COUNT(2) |
5228 IWN_RXCHAIN_IDLE_COUNT(2);
5229 sc->rxon.rxchain = htole16(rxchain);
5230 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
5231 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
5232 if (error != 0) {
5233 device_printf(sc->sc_dev, "%s: RXON command failed\n",
5234 __func__);
5235 return error;
5236 }
5237
5238 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
5239 device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
5240 __func__);
5241 return error;
5242 }
5243
5244 /* Configuration has changed, set TX power accordingly. */
5245 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
5246 device_printf(sc->sc_dev, "%s: could not set TX power\n",
5247 __func__);
5248 return error;
5249 }
5250
5251 if ((error = iwn_set_critical_temp(sc)) != 0) {
5252 device_printf(sc->sc_dev,
5253 "%s: could not set critical temperature\n", __func__);
5254 return error;
5255 }
5256
5257 /* Set power saving level to CAM during initialization. */
5258 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
5259 device_printf(sc->sc_dev,
5260 "%s: could not set power saving level\n", __func__);
5261 return error;
5262 }
5263 return 0;
5264}
5265
5266/*
5267 * Add an ssid element to a frame.
5268 */
5269static uint8_t *
5270ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
5271{
5272 *frm++ = IEEE80211_ELEMID_SSID;
5273 *frm++ = len;
5274 memcpy(frm, ssid, len);
5275 return frm + len;
5276}
5277
5278static int
5279iwn_scan(struct iwn_softc *sc)
5280{
5281 struct ifnet *ifp = sc->sc_ifp;
5282 struct ieee80211com *ic = ifp->if_l2com;
5283 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/
5284 struct ieee80211_node *ni = ss->ss_vap->iv_bss;
5285 struct iwn_scan_hdr *hdr;
5286 struct iwn_cmd_data *tx;
5287 struct iwn_scan_essid *essid;
5288 struct iwn_scan_chan *chan;
5289 struct ieee80211_frame *wh;
5290 struct ieee80211_rateset *rs;
5291 struct ieee80211_channel *c;
5292 uint8_t *buf, *frm;
5293 uint16_t rxchain;
5294 uint8_t txant;
5295 int buflen, error;
5296
5297 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
5298 if (buf == NULL) {
5299 device_printf(sc->sc_dev,
5300 "%s: could not allocate buffer for scan command\n",
5301 __func__);
5302 return ENOMEM;
5303 }
5304 hdr = (struct iwn_scan_hdr *)buf;
5305 /*
5306 * Move to the next channel if no frames are received within 10ms
5307 * after sending the probe request.
5308 */
5309 hdr->quiet_time = htole16(10); /* timeout in milliseconds */
5310 hdr->quiet_threshold = htole16(1); /* min # of packets */
5311
5312 /* Select antennas for scanning. */
5313 rxchain =
5314 IWN_RXCHAIN_VALID(sc->rxchainmask) |
5315 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
5316 IWN_RXCHAIN_DRIVER_FORCE;
5317 if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
5318 sc->hw_type == IWN_HW_REV_TYPE_4965) {
5319 /* Ant A must be avoided in 5GHz because of an HW bug. */
5320 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
5321 } else /* Use all available RX antennas. */
5322 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
5323 hdr->rxchain = htole16(rxchain);
5324 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
5325
5326 tx = (struct iwn_cmd_data *)(hdr + 1);
5327 tx->flags = htole32(IWN_TX_AUTO_SEQ);
5328 tx->id = sc->broadcast_id;
5329 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
5330
5331 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
5332 /* Send probe requests at 6Mbps. */
5333 tx->rate = htole32(0xd);
5334 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5335 } else {
5336 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
5337 if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
5338 sc->rxon.associd && sc->rxon.chan > 14)
5339 tx->rate = htole32(0xd);
5340 else {
5341 /* Send probe requests at 1Mbps. */
5342 tx->rate = htole32(10 | IWN_RFLAG_CCK);
5343 }
5344 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5345 }
5346 /* Use the first valid TX antenna. */
5347 txant = IWN_LSB(sc->txchainmask);
5348 tx->rate |= htole32(IWN_RFLAG_ANT(txant));
5349
5350 essid = (struct iwn_scan_essid *)(tx + 1);
5351 if (ss->ss_ssid[0].len != 0) {
5352 essid[0].id = IEEE80211_ELEMID_SSID;
5353 essid[0].len = ss->ss_ssid[0].len;
5354 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
5355 }
5356 /*
5357 * Build a probe request frame. Most of the following code is a
5358 * copy & paste of what is done in net80211.
5359 */
5360 wh = (struct ieee80211_frame *)(essid + 20);
5361 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5362 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5363 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5364 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
5365 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
5366 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
5367 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
5368 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
5369
5370 frm = (uint8_t *)(wh + 1);
5371 frm = ieee80211_add_ssid(frm, NULL, 0);
5372 frm = ieee80211_add_rates(frm, rs);
5373 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5374 frm = ieee80211_add_xrates(frm, rs);
5375 if (ic->ic_htcaps & IEEE80211_HTC_HT)
5376 frm = ieee80211_add_htcap(frm, ni);
5377
5378 /* Set length of probe request. */
5379 tx->len = htole16(frm - (uint8_t *)wh);
5380
5381 c = ic->ic_curchan;
5382 chan = (struct iwn_scan_chan *)frm;
5383 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
5384 chan->flags = 0;
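	/* Ask for one directed probe request on this channel if an SSID was given. */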
5385 if (ss->ss_nssid > 0)
5386 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
5387 chan->dsp_gain = 0x6e;
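	/*
	 * Select RF gain and dwell times according to band and whether
	 * the channel may be scanned actively.  The active/passive
	 * fields are dwell times (in milliseconds); shorter passive
	 * dwells are used while associated, presumably to limit time
	 * spent away from the serving channel.
	 */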
5388 if (IEEE80211_IS_CHAN_5GHZ(c) &&
5389 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5390 chan->rf_gain = 0x3b;
5391 chan->active = htole16(24);
5392 chan->passive = htole16(110);
5393 chan->flags |= htole32(IWN_CHAN_ACTIVE);
5394 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
5395 chan->rf_gain = 0x3b;
5396 chan->active = htole16(24);
5397 if (sc->rxon.associd)
5398 chan->passive = htole16(78);
5399 else
5400 chan->passive = htole16(110);
5401 hdr->crc_threshold = 0xffff;
5402 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
5403 chan->rf_gain = 0x28;
5404 chan->active = htole16(36);
5405 chan->passive = htole16(120);
5406 chan->flags |= htole32(IWN_CHAN_ACTIVE);
5407 } else {
5408 chan->rf_gain = 0x28;
5409 chan->active = htole16(36);
5410 if (sc->rxon.associd)
5411 chan->passive = htole16(88);
5412 else
5413 chan->passive = htole16(120);
5414 hdr->crc_threshold = 0xffff;
5415 }
5416
5417 DPRINTF(sc, IWN_DEBUG_STATE,
5418 "%s: chan %u flags 0x%x rf_gain 0x%x "
5419 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
5420 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
5421 chan->active, chan->passive);
5422
5423 hdr->nchan++;
5424 chan++;
5425 buflen = (uint8_t *)chan - buf;
5426 hdr->len = htole16(buflen);
5427
5428 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
5429 hdr->nchan);
5430 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
5431 free(buf, M_DEVBUF);
5432 return error;
5433}
5434
5435static int
5436iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
5437{
5438 struct iwn_ops *ops = &sc->ops;
5439 struct ifnet *ifp = sc->sc_ifp;
5440 struct ieee80211com *ic = ifp->if_l2com;
5441 struct ieee80211_node *ni = vap->iv_bss;
5442 int error;
5443
5444 /* Update adapter configuration. */
5445 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5446 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5447 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5448 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5449 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5450 if (ic->ic_flags & IEEE80211_F_SHSLOT)
5451 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5452 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5453 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
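	/*
	 * Select basic rates.  Each bit appears to index the firmware
	 * CCK (1/2/5.5/11 Mb/s) and OFDM (6/9/12/.../54 Mb/s) rate
	 * tables, e.g. 0x15 selects the 6/12/24 Mb/s mandatory rates.
	 */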
5454 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5455 sc->rxon.cck_mask = 0;
5456 sc->rxon.ofdm_mask = 0x15;
5457 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5458 sc->rxon.cck_mask = 0x03;
5459 sc->rxon.ofdm_mask = 0;
5460 } else {
5461 /* Assume 802.11b/g. */
5462 sc->rxon.cck_mask = 0x0f;
5463 sc->rxon.ofdm_mask = 0x15;
5464 }
5465 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
5466 sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
5467 sc->rxon.ofdm_mask);
5468 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5469 if (error != 0) {
5470 device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
5471 __func__, error);
5472 return error;
5473 }
5474
5475 /* Configuration has changed, set TX power accordingly. */
5476 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5477 device_printf(sc->sc_dev,
5478 "%s: could not set TX power, error %d\n", __func__, error);
5479 return error;
5480 }
5481 /*
5482 * Reconfiguring RXON clears the firmware nodes table so we must
5483 * add the broadcast node again.
5484 */
5485 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
5486 device_printf(sc->sc_dev,
5487 "%s: could not add broadcast node, error %d\n", __func__,
5488 error);
5489 return error;
5490 }
5491 return 0;
5492}
5493
5494static int
5495iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
5496{
5497 struct iwn_ops *ops = &sc->ops;
5498 struct ifnet *ifp = sc->sc_ifp;
5499 struct ieee80211com *ic = ifp->if_l2com;
5500 struct ieee80211_node *ni = vap->iv_bss;
5501 struct iwn_node_info node;
5502 uint32_t htflags = 0;
5503 int error;
5504
5505 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5506 /* Link LED blinks while monitoring. */
5507 iwn_set_led(sc, IWN_LED_LINK, 5, 5);
5508 return 0;
5509 }
5510 if ((error = iwn_set_timing(sc, ni)) != 0) {
5511 device_printf(sc->sc_dev,
5512 "%s: could not set timing, error %d\n", __func__, error);
5513 return error;
5514 }
5515
5516 /* Update adapter configuration. */
5517 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
5518 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
5519 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
5520 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
5521 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
5522 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
5523 if (ic->ic_flags & IEEE80211_F_SHSLOT)
5524 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
5525 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5526 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
5527 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
5528 sc->rxon.cck_mask = 0;
5529 sc->rxon.ofdm_mask = 0x15;
5530 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
5531 sc->rxon.cck_mask = 0x03;
5532 sc->rxon.ofdm_mask = 0;
5533 } else {
5534 /* Assume 802.11b/g. */
5535 sc->rxon.cck_mask = 0x0f;
5536 sc->rxon.ofdm_mask = 0x15;
5537 }
5538 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5539 htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
5540 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
5541 switch (ic->ic_curhtprotmode) {
5542 case IEEE80211_HTINFO_OPMODE_HT20PR:
5543 htflags |= IWN_RXON_HT_MODEPURE40;
5544 break;
5545 default:
5546 htflags |= IWN_RXON_HT_MODEMIXED;
5547 break;
5548 }
5549 }
5550 if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
5551 htflags |= IWN_RXON_HT_HT40MINUS;
5552 }
5553 sc->rxon.flags |= htole32(htflags);
5554 sc->rxon.filter |= htole32(IWN_FILTER_BSS);
5555 DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
5556 sc->rxon.chan, sc->rxon.flags);
5557 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
5558 if (error != 0) {
5559 device_printf(sc->sc_dev,
5560 "%s: could not update configuration, error %d\n", __func__,
5561 error);
5562 return error;
5563 }
5564
5565 /* Configuration has changed, set TX power accordingly. */
5566 if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
5567 device_printf(sc->sc_dev,
5568 "%s: could not set TX power, error %d\n", __func__, error);
5569 return error;
5570 }
5571
5572 /* Fake a join to initialize the TX rate. */
5573 ((struct iwn_node *)ni)->id = IWN_ID_BSS;
5574 iwn_newassoc(ni, 1);
5575
5576 /* Add BSS node. */
5577 memset(&node, 0, sizeof node);
5578 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
5579 node.id = IWN_ID_BSS;
5580 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
5581 switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
5582 case IEEE80211_HTCAP_SMPS_ENA:
5583 node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
5584 break;
5585 case IEEE80211_HTCAP_SMPS_DYNAMIC:
5586 node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
5587 break;
5588 }
5589 node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
5590 IWN_AMDPU_DENSITY(5)); /* 4us */
5591 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
5592 node.htflags |= htole32(IWN_NODE_HT40);
5593 }
5594 DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
5595 error = ops->add_node(sc, &node, 1);
5596 if (error != 0) {
5597 device_printf(sc->sc_dev,
5598 "%s: could not add BSS node, error %d\n", __func__, error);
5599 return error;
5600 }
5601 DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
5602 __func__, node.id);
5603 if ((error = iwn_set_link_quality(sc, ni)) != 0) {
5604 device_printf(sc->sc_dev,
5605 "%s: could not setup link quality for node %d, error %d\n",
5606 __func__, node.id, error);
5607 return error;
5608 }
5609
5610 if ((error = iwn_init_sensitivity(sc)) != 0) {
5611 device_printf(sc->sc_dev,
5612 "%s: could not set sensitivity, error %d\n", __func__,
5613 error);
5614 return error;
5615 }
5616 /* Start periodic calibration timer. */
5617 sc->calib.state = IWN_CALIB_STATE_ASSOC;
5618 sc->calib_cnt = 0;
5619 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
5620 sc);
5621
5622 /* Link LED always on while associated. */
5623 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
5624 return 0;
5625}
5626
5627/*
5628 * This function is called by the upper layer when an ADDBA request is received
5629 * from another STA and before the ADDBA response is sent.
5630 */
5631static int
5632iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
5633 int baparamset, int batimeout, int baseqctl)
5634{
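/* Extract a bit field _f from _v using the corresponding _f_S shift constant. */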
5635#define MS(_v, _f) (((_v) & _f) >> _f##_S)
5636 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5637 struct iwn_ops *ops = &sc->ops;
5638 struct iwn_node *wn = (void *)ni;
5639 struct iwn_node_info node;
5640 uint16_t ssn;
5641 uint8_t tid;
5642 int error;
5643
5644 tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
5645 ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
5646
5647 memset(&node, 0, sizeof node);
5648 node.id = wn->id;
5649 node.control = IWN_NODE_UPDATE;
5650 node.flags = IWN_FLAG_SET_ADDBA;
5651 node.addba_tid = tid;
5652 node.addba_ssn = htole16(ssn);
5653 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5654 wn->id, tid, ssn);
5655 error = ops->add_node(sc, &node, 1);
5656 if (error != 0)
5657 return error;
5658 return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
5659#undef MS
5660}
5661
5662/*
5663 * This function is called by the upper layer on teardown of an HT-immediate
5664 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
5665 */
5666static void
5667iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
5668{
5669 struct ieee80211com *ic = ni->ni_ic;
5670 struct iwn_softc *sc = ic->ic_ifp->if_softc;
5671 struct iwn_ops *ops = &sc->ops;
5672 struct iwn_node *wn = (void *)ni;
5673 struct iwn_node_info node;
5674 uint8_t tid;
5675
5676 /* XXX: tid as an argument */
5677 for (tid = 0; tid < WME_NUM_TID; tid++) {
5678 if (&ni->ni_rx_ampdu[tid] == rap)
5679 break;
5680 }
5681
5682 memset(&node, 0, sizeof node);
5683 node.id = wn->id;
5684 node.control = IWN_NODE_UPDATE;
5685 node.flags = IWN_FLAG_SET_DELBA;
5686 node.delba_tid = tid;
5687 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5688 (void)ops->add_node(sc, &node, 1);
5689 sc->sc_ampdu_rx_stop(ni, rap);
5690}
5691
5692static int
5693iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5694 int dialogtoken, int baparamset, int batimeout)
5695{
5696 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5697 int qid;
5698
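	/* Look for a free hardware aggregation queue for this RA/TID pair. */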
5699 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
5700 if (sc->qid2tap[qid] == NULL)
5701 break;
5702 }
5703 if (qid == sc->ntxqs) {
5704 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: no free aggregation queue available\n",
5705 __func__);
5706 return 0;
5707 }
5708 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
5709 if (tap->txa_private == NULL) {
5710 device_printf(sc->sc_dev,
5711 "%s: failed to alloc TX aggregation structure\n", __func__);
5712 return 0;
5713 }
5714 sc->qid2tap[qid] = tap;
5715 *(int *)tap->txa_private = qid;
5716 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5717 batimeout);
5718}
5719
5720static int
5721iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5722 int code, int baparamset, int batimeout)
5723{
5724 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5725 int qid = *(int *)tap->txa_private;
5726 uint8_t tid = tap->txa_tid;
5727 int ret;
5728
5729 if (code == IEEE80211_STATUS_SUCCESS) {
5730 ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
5731 ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
5732 if (ret != 1)
5733 return ret;
5734 } else {
5735 sc->qid2tap[qid] = NULL;
5736 free(tap->txa_private, M_DEVBUF);
5737 tap->txa_private = NULL;
5738 }
5739 return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
5740}
5741
5742/*
5743 * This function is called by the upper layer when an ADDBA response is received
5744 * from another STA.
5745 */
5746static int
5747iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5748 uint8_t tid)
5749{
5750 struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
5751 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5752 struct iwn_ops *ops = &sc->ops;
5753 struct iwn_node *wn = (void *)ni;
5754 struct iwn_node_info node;
5755 int error, qid;
5756
5757 /* Enable TX for the specified RA/TID. */
5758 wn->disable_tid &= ~(1 << tid);
5759 memset(&node, 0, sizeof node);
5760 node.id = wn->id;
5761 node.control = IWN_NODE_UPDATE;
5762 node.flags = IWN_FLAG_SET_DISABLE_TID;
5763 node.disable_tid = htole16(wn->disable_tid);
5764 error = ops->add_node(sc, &node, 1);
5765 if (error != 0)
5766 return 0;
5767
5768 if ((error = iwn_nic_lock(sc)) != 0)
5769 return 0;
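	/* Program the hardware aggregation queue reserved in iwn_addba_request(). */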
5770 qid = *(int *)tap->txa_private;
5771 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
5772 __func__, wn->id, tid, tap->txa_start, qid);
5773 ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
5774 iwn_nic_unlock(sc);
5775
5776 iwn_set_link_quality(sc, ni);
5777 return 1;
5778}
5779
5780static void
5781iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5782{
5783 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5784 struct iwn_ops *ops = &sc->ops;
5785 uint8_t tid = tap->txa_tid;
5786 int qid;
5787
5788 sc->sc_addba_stop(ni, tap);
5789
5790 if (tap->txa_private == NULL)
5791 return;
5792
5793 qid = *(int *)tap->txa_private;
5794 if (sc->txq[qid].queued != 0)
5795 return;
5796 if (iwn_nic_lock(sc) != 0)
5797 return;
5798 ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
5799 iwn_nic_unlock(sc);
5800 sc->qid2tap[qid] = NULL;
5801 free(tap->txa_private, M_DEVBUF);
5802 tap->txa_private = NULL;
5803}
5804
5805static void
5806iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5807 int qid, uint8_t tid, uint16_t ssn)
5808{
5809 struct iwn_node *wn = (void *)ni;
5810
5811 /* Stop TX scheduler while we're changing its configuration. */
5812 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5813 IWN4965_TXQ_STATUS_CHGACT);
5814
5815 /* Assign RA/TID translation to the queue. */
5816 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5817 wn->id << 4 | tid);
5818
5819 /* Enable chain-building mode for the queue. */
5820 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5821
5822 /* Set starting sequence number from the ADDBA request. */
5823 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
5824 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5825 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5826
5827 /* Set scheduler window size. */
5828 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5829 IWN_SCHED_WINSZ);
5830 /* Set scheduler frame limit. */
5831 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5832 IWN_SCHED_LIMIT << 16);
5833
5834 /* Enable interrupts for the queue. */
5835 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5836
5837 /* Mark the queue as active. */
5838 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5839 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5840 iwn_tid2fifo[tid] << 1);
5841}
5842
5843static void
5844iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
5845{
5846 /* Stop TX scheduler while we're changing its configuration. */
5847 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5848 IWN4965_TXQ_STATUS_CHGACT);
5849
5850 /* Set starting sequence number from the ADDBA request. */
5851 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5852 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5853
5854 /* Disable interrupts for the queue. */
5855 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5856
5857 /* Mark the queue as inactive. */
5858 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5859 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5860}
5861
5862static void
5863iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5864 int qid, uint8_t tid, uint16_t ssn)
5865{
5866 struct iwn_node *wn = (void *)ni;
5867
5868 /* Stop TX scheduler while we're changing its configuration. */
5869 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5870 IWN5000_TXQ_STATUS_CHGACT);
5871
5872 /* Assign RA/TID translation to the queue. */
5873 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5874 wn->id << 4 | tid);
5875
5876 /* Enable chain-building mode for the queue. */
5877 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5878
5879 /* Enable aggregation for the queue. */
5880 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5881
5882 /* Set starting sequence number from the ADDBA request. */
5883 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
5884 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5885 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5886
5887 /* Set scheduler window size and frame limit. */
5888 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5889 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5890
5891 /* Enable interrupts for the queue. */
5892 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5893
5894 /* Mark the queue as active. */
5895 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5896 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5897}
5898
5899static void
5900iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
5901{
5902 /* Stop TX scheduler while we're changing its configuration. */
5903 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5904 IWN5000_TXQ_STATUS_CHGACT);
5905
5906 /* Disable aggregation for the queue. */
5907 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5908
5909 /* Set starting sequence number from the ADDBA request. */
5910 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5911 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5912
5913 /* Disable interrupts for the queue. */
5914 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5915
5916 /* Mark the queue as inactive. */
5917 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5918 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5919}
5920
5921/*
5922 * Query calibration tables from the initialization firmware. We do this
5923 * only once at first boot. Called from a process context.
5924 */
5925static int
5926iwn5000_query_calibration(struct iwn_softc *sc)
5927{
5928 struct iwn5000_calib_config cmd;
5929 int error;
5930
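	/*
	 * Setting every bit in the enable/start/send masks asks the
	 * initialization firmware to run and report all calibrations
	 * it supports.
	 */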
5931 memset(&cmd, 0, sizeof cmd);
5932 cmd.ucode.once.enable = 0xffffffff;
5933 cmd.ucode.once.start = 0xffffffff;
5934 cmd.ucode.once.send = 0xffffffff;
5935 cmd.ucode.flags = 0xffffffff;
5936 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
5937 __func__);
5938 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5939 if (error != 0)
5940 return error;
5941
5942 /* Wait at most two seconds for calibration to complete. */
5943 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5944 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
5945 return error;
5946}
5947
5948/*
5949 * Send calibration results to the runtime firmware. These results were
5950 * obtained on first boot from the initialization firmware.
5951 */
5952static int
5953iwn5000_send_calibration(struct iwn_softc *sc)
5954{
5955 int idx, error;
5956
5957 for (idx = 0; idx < 5; idx++) {
5958 if (sc->calibcmd[idx].buf == NULL)
5959 continue; /* No results available. */
5960 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5961 "send calibration result idx=%d len=%d\n", idx,
5962 sc->calibcmd[idx].len);
5963 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
5964 sc->calibcmd[idx].len, 0);
5965 if (error != 0) {
5966 device_printf(sc->sc_dev,
5967 "%s: could not send calibration result, error %d\n",
5968 __func__, error);
5969 return error;
5970 }
5971 }
5972 return 0;
5973}
5974
5975static int
5976iwn5000_send_wimax_coex(struct iwn_softc *sc)
5977{
5978 struct iwn5000_wimax_coex wimax;
5979
5980#ifdef notyet
5981 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5982 /* Enable WiMAX coexistence for combo adapters. */
5983 wimax.flags =
5984 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5985 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5986 IWN_WIMAX_COEX_STA_TABLE_VALID |
5987 IWN_WIMAX_COEX_ENABLE;
5988 memcpy(wimax.events, iwn6050_wimax_events,
5989 sizeof iwn6050_wimax_events);
5990 } else
5991#endif
5992 {
5993 /* Disable WiMAX coexistence. */
5994 wimax.flags = 0;
5995 memset(wimax.events, 0, sizeof wimax.events);
5996 }
5997 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
5998 __func__);
5999 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
6000}
6001
6002static int
6003iwn5000_crystal_calib(struct iwn_softc *sc)
6004{
6005 struct iwn5000_phy_calib_crystal cmd;
6006
6007 memset(&cmd, 0, sizeof cmd);
6008 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
6009 cmd.ngroups = 1;
6010 cmd.isvalid = 1;
6011 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
6012 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
6013 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
6014 cmd.cap_pin[0], cmd.cap_pin[1]);
6015 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6016}
6017
6018static int
6019iwn5000_temp_offset_calib(struct iwn_softc *sc)
6020{
6021 struct iwn5000_phy_calib_temp_offset cmd;
6022
6023 memset(&cmd, 0, sizeof cmd);
6024 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
6025 cmd.ngroups = 1;
6026 cmd.isvalid = 1;
6027 if (sc->eeprom_temp != 0)
6028 cmd.offset = htole16(sc->eeprom_temp);
6029 else
6030 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
6031 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
6032 le16toh(cmd.offset));
6033 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
6034}
6035
6036/*
6037 * This function is called after the runtime firmware notifies us of its
6038 * readiness (called in a process context).
6039 */
6040static int
6041iwn4965_post_alive(struct iwn_softc *sc)
6042{
6043 int error, qid;
6044
6045 if ((error = iwn_nic_lock(sc)) != 0)
6046 return error;
6047
6048 /* Clear TX scheduler state in SRAM. */
6049 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6050 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
6051 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
6052
6053 /* Set physical address of TX scheduler rings (1KB aligned). */
6054 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6055
6056 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6057
6058 /* Disable chain mode for all our 16 queues. */
6059 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
6060
6061 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
6062 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
6063 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6064
6065 /* Set scheduler window size. */
6066 iwn_mem_write(sc, sc->sched_base +
6067 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
6068 /* Set scheduler frame limit. */
6069 iwn_mem_write(sc, sc->sched_base +
6070 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
6071 IWN_SCHED_LIMIT << 16);
6072 }
6073
6074 /* Enable interrupts for all our 16 queues. */
6075 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
6076 /* Identify TX FIFO rings (0-7). */
6077 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
6078
6079 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6080 for (qid = 0; qid < 7; qid++) {
6081 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
6082 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
6083 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
6084 }
6085 iwn_nic_unlock(sc);
6086 return 0;
6087}
6088
6089/*
6090 * This function is called after the initialization or runtime firmware
6091 * notifies us of its readiness (called in a process context).
6092 */
6093static int
6094iwn5000_post_alive(struct iwn_softc *sc)
6095{
6096 int error, qid;
6097
6098 /* Switch to using ICT interrupt mode. */
6099 iwn5000_ict_reset(sc);
6100
6101 if ((error = iwn_nic_lock(sc)) != 0)
6102 return error;
6103
6104 /* Clear TX scheduler state in SRAM. */
6105 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
6106 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
6107 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
6108
6109 /* Set physical address of TX scheduler rings (1KB aligned). */
6110 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
6111
6112 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
6113
6114 /* Enable chain mode for all queues, except command queue. */
6115 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
6116 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
6117
6118 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
6119 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
6120 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
6121
6122 iwn_mem_write(sc, sc->sched_base +
6123 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
6124 /* Set scheduler window size and frame limit. */
6125 iwn_mem_write(sc, sc->sched_base +
6126 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
6127 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
6128 }
6129
6130 /* Enable interrupts for all our 20 queues. */
6131 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
6132 /* Identify TX FIFO rings (0-7). */
6133 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
6134
6135 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
6136 for (qid = 0; qid < 7; qid++) {
6137 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
6138 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
6139 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
6140 }
6141 iwn_nic_unlock(sc);
6142
6143 /* Configure WiMAX coexistence for combo adapters. */
6144 error = iwn5000_send_wimax_coex(sc);
6145 if (error != 0) {
6146 device_printf(sc->sc_dev,
6147 "%s: could not configure WiMAX coexistence, error %d\n",
6148 __func__, error);
6149 return error;
6150 }
6151 if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
6152 /* Perform crystal calibration. */
6153 error = iwn5000_crystal_calib(sc);
6154 if (error != 0) {
6155 device_printf(sc->sc_dev,
6156 "%s: crystal calibration failed, error %d\n",
6157 __func__, error);
6158 return error;
6159 }
6160 }
6161 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
6162 /* Query calibration from the initialization firmware. */
6163 if ((error = iwn5000_query_calibration(sc)) != 0) {
6164 device_printf(sc->sc_dev,
6165 "%s: could not query calibration, error %d\n",
6166 __func__, error);
6167 return error;
6168 }
6169 /*
6170 * We have the calibration results now, reboot with the
6171 * runtime firmware (call ourselves recursively!)
6172 */
6173 iwn_hw_stop(sc);
6174 error = iwn_hw_init(sc);
6175 } else {
6176 /* Send calibration results to runtime firmware. */
6177 error = iwn5000_send_calibration(sc);
6178 }
6179 return error;
6180}
6181
6182/*
6183 * The firmware boot code is small and is intended to be copied directly into
6184 * the NIC internal memory (no DMA transfer).
6185 */
6186static int
6187iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
6188{
6189 int error, ntries;
6190
6191 size /= sizeof (uint32_t);
6192
6193 if ((error = iwn_nic_lock(sc)) != 0)
6194 return error;
6195
6196 /* Copy microcode image into NIC memory. */
6197 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
6198 (const uint32_t *)ucode, size);
6199
6200 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
6201 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
6202 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
6203
6204 /* Start boot load now. */
6205 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
6206
6207 /* Wait for transfer to complete. */
6208 for (ntries = 0; ntries < 1000; ntries++) {
6209 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
6210 IWN_BSM_WR_CTRL_START))
6211 break;
6212 DELAY(10);
6213 }
6214 if (ntries == 1000) {
6215 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6216 __func__);
6217 iwn_nic_unlock(sc);
6218 return ETIMEDOUT;
6219 }
6220
6221 /* Enable boot after power up. */
6222 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
6223
6224 iwn_nic_unlock(sc);
6225 return 0;
6226}
6227
6228static int
6229iwn4965_load_firmware(struct iwn_softc *sc)
6230{
6231 struct iwn_fw_info *fw = &sc->fw;
6232 struct iwn_dma_info *dma = &sc->fw_dma;
6233 int error;
6234
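	/*
	 * On the 4965 the Bootstrap State Machine (BSM) fetches the
	 * firmware sections from host memory at the DRAM addresses
	 * programmed below, so the init and runtime images are staged
	 * through the same DMA area one after the other.
	 */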
6235 /* Copy initialization sections into pre-allocated DMA-safe memory. */
6236 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
6237 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6238 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6239 fw->init.text, fw->init.textsz);
6240 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6241
6242 /* Tell adapter where to find initialization sections. */
6243 if ((error = iwn_nic_lock(sc)) != 0)
6244 return error;
6245 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6246 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
6247 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6248 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6249 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
6250 iwn_nic_unlock(sc);
6251
6252 /* Load firmware boot code. */
6253 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
6254 if (error != 0) {
6255 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
6256 __func__);
6257 return error;
6258 }
6259 /* Now press "execute". */
6260 IWN_WRITE(sc, IWN_RESET, 0);
6261
6262 /* Wait at most one second for first alive notification. */
6263 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6264 device_printf(sc->sc_dev,
6265 "%s: timeout waiting for adapter to initialize, error %d\n",
6266 __func__, error);
6267 return error;
6268 }
6269
6270 /* Retrieve current temperature for initial TX power calibration. */
6271 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
6272 sc->temp = iwn4965_get_temperature(sc);
6273
6274 /* Copy runtime sections into pre-allocated DMA-safe memory. */
6275 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
6276 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6277 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
6278 fw->main.text, fw->main.textsz);
6279 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6280
6281 /* Tell adapter where to find runtime sections. */
6282 if ((error = iwn_nic_lock(sc)) != 0)
6283 return error;
6284 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
6285 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
6286 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
6287 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
6288 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
6289 IWN_FW_UPDATED | fw->main.textsz);
6290 iwn_nic_unlock(sc);
6291
6292 return 0;
6293}
6294
6295static int
6296iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
6297 const uint8_t *section, int size)
6298{
6299 struct iwn_dma_info *dma = &sc->fw_dma;
6300 int error;
6301
6302 /* Copy firmware section into pre-allocated DMA-safe memory. */
6303 memcpy(dma->vaddr, section, size);
6304 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
6305
6306 if ((error = iwn_nic_lock(sc)) != 0)
6307 return error;
6308
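	/*
	 * Pause the service DMA channel, point a single transfer buffer
	 * descriptor at the staged section and at its destination in
	 * NIC SRAM, then restart the channel to perform the copy.
	 */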
6309 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6310 IWN_FH_TX_CONFIG_DMA_PAUSE);
6311
6312 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
6313 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
6314 IWN_LOADDR(dma->paddr));
6315 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
6316 IWN_HIADDR(dma->paddr) << 28 | size);
6317 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
6318 IWN_FH_TXBUF_STATUS_TBNUM(1) |
6319 IWN_FH_TXBUF_STATUS_TBIDX(1) |
6320 IWN_FH_TXBUF_STATUS_TFBD_VALID);
6321
6322 /* Kick Flow Handler to start DMA transfer. */
6323 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
6324 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
6325
6326 iwn_nic_unlock(sc);
6327
6328 /* Wait at most five seconds for FH DMA transfer to complete. */
6329 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
6330}
6331
6332static int
6333iwn5000_load_firmware(struct iwn_softc *sc)
6334{
6335 struct iwn_fw_part *fw;
6336 int error;
6337
6338 /* Load the initialization firmware on first boot only. */
6339 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
6340 &sc->fw.main : &sc->fw.init;
6341
6342 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
6343 fw->text, fw->textsz);
6344 if (error != 0) {
6345 device_printf(sc->sc_dev,
6346 "%s: could not load firmware %s section, error %d\n",
6347 __func__, ".text", error);
6348 return error;
6349 }
6350 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
6351 fw->data, fw->datasz);
6352 if (error != 0) {
6353 device_printf(sc->sc_dev,
6354 "%s: could not load firmware %s section, error %d\n",
6355 __func__, ".data", error);
6356 return error;
6357 }
6358
6359 /* Now press "execute". */
6360 IWN_WRITE(sc, IWN_RESET, 0);
6361 return 0;
6362}
6363
6364/*
6365 * Extract text and data sections from a legacy firmware image.
6366 */
6367static int
6368iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
6369{
6370 const uint32_t *ptr;
6371 size_t hdrlen = 24;
6372 uint32_t rev;
6373
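	/*
	 * A legacy image starts with a 24-byte header (28 bytes when the
	 * API version is >= 3, which inserts a build number): revision,
	 * then the sizes of the main text/data, init text/data and boot
	 * text sections, followed by the sections themselves in that
	 * order.
	 */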
6374 ptr = (const uint32_t *)fw->data;
6375 rev = le32toh(*ptr++);
6376
6377 /* Check firmware API version. */
6378 if (IWN_FW_API(rev) <= 1) {
6379 device_printf(sc->sc_dev,
6380 "%s: bad firmware, need API version >=2\n", __func__);
6381 return EINVAL;
6382 }
6383 if (IWN_FW_API(rev) >= 3) {
6384 /* Skip build number (version 2 header). */
6385 hdrlen += 4;
6386 ptr++;
6387 }
6388 if (fw->size < hdrlen) {
6389 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6390 __func__, fw->size);
6391 return EINVAL;
6392 }
6393 fw->main.textsz = le32toh(*ptr++);
6394 fw->main.datasz = le32toh(*ptr++);
6395 fw->init.textsz = le32toh(*ptr++);
6396 fw->init.datasz = le32toh(*ptr++);
6397 fw->boot.textsz = le32toh(*ptr++);
6398
6399 /* Check that all firmware sections fit. */
6400 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
6401 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
6402 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6403 __func__, fw->size);
6404 return EINVAL;
6405 }
6406
6407 /* Get pointers to firmware sections. */
6408 fw->main.text = (const uint8_t *)ptr;
6409 fw->main.data = fw->main.text + fw->main.textsz;
6410 fw->init.text = fw->main.data + fw->main.datasz;
6411 fw->init.data = fw->init.text + fw->init.textsz;
6412 fw->boot.text = fw->init.data + fw->init.datasz;
6413 return 0;
6414}
6415
6416/*
6417 * Extract text and data sections from a TLV firmware image.
6418 */
6419static int
6420iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
6421 uint16_t alt)
6422{
6423 const struct iwn_fw_tlv_hdr *hdr;
6424 const struct iwn_fw_tlv *tlv;
6425 const uint8_t *ptr, *end;
6426 uint64_t altmask;
6427 uint32_t len, tmp;
6428
6429 if (fw->size < sizeof (*hdr)) {
6430 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6431 __func__, fw->size);
6432 return EINVAL;
6433 }
6434 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
6435 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
6436 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
6437 __func__, le32toh(hdr->signature));
6438 return EINVAL;
6439 }
6440 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
6441 le32toh(hdr->build));
6442
6443 /*
6444 * Select the closest supported alternative that is less than
6445 * or equal to the specified one.
6446 */
6447 altmask = le64toh(hdr->altmask);
6448 while (alt > 0 && !(altmask & (1ULL << alt)))
6449 alt--; /* Downgrade. */
6450 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
6451
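	/*
	 * The header is followed by a sequence of TLV records, each a
	 * (type, alt, len) triple followed by len bytes of payload
	 * padded to a 4-byte boundary.
	 */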
6452 ptr = (const uint8_t *)(hdr + 1);
6453 end = (const uint8_t *)(fw->data + fw->size);
6454
6455 /* Parse type-length-value fields. */
6456 while (ptr + sizeof (*tlv) <= end) {
6457 tlv = (const struct iwn_fw_tlv *)ptr;
6458 len = le32toh(tlv->len);
6459
6460 ptr += sizeof (*tlv);
6461 if (ptr + len > end) {
6462 device_printf(sc->sc_dev,
6463 "%s: firmware too short: %zu bytes\n", __func__,
6464 fw->size);
6465 return EINVAL;
6466 }
6467 /* Skip other alternatives. */
6468 if (tlv->alt != 0 && tlv->alt != htole16(alt))
6469 goto next;
6470
6471 switch (le16toh(tlv->type)) {
6472 case IWN_FW_TLV_MAIN_TEXT:
6473 fw->main.text = ptr;
6474 fw->main.textsz = len;
6475 break;
6476 case IWN_FW_TLV_MAIN_DATA:
6477 fw->main.data = ptr;
6478 fw->main.datasz = len;
6479 break;
6480 case IWN_FW_TLV_INIT_TEXT:
6481 fw->init.text = ptr;
6482 fw->init.textsz = len;
6483 break;
6484 case IWN_FW_TLV_INIT_DATA:
6485 fw->init.data = ptr;
6486 fw->init.datasz = len;
6487 break;
6488 case IWN_FW_TLV_BOOT_TEXT:
6489 fw->boot.text = ptr;
6490 fw->boot.textsz = len;
6491 break;
6492 case IWN_FW_TLV_ENH_SENS:
6493 if (!len)
6494 sc->sc_flags |= IWN_FLAG_ENH_SENS;
6495 break;
6496 case IWN_FW_TLV_PHY_CALIB:
6497 tmp = htole32(*ptr);
6498 if (tmp < 253) {
6499 sc->reset_noise_gain = tmp;
6500 sc->noise_gain = tmp + 1;
6501 }
6502 break;
6503 default:
6504 DPRINTF(sc, IWN_DEBUG_RESET,
6505 "TLV type %d not handled\n", le16toh(tlv->type));
6506 break;
6507 }
6508 next: /* TLV fields are 32-bit aligned. */
6509 ptr += (len + 3) & ~3;
6510 }
6511 return 0;
6512}
6513
6514static int
6515iwn_read_firmware(struct iwn_softc *sc)
6516{
6517 struct iwn_fw_info *fw = &sc->fw;
6518 int error;
6519
6520 IWN_UNLOCK(sc);
6521
6522 memset(fw, 0, sizeof (*fw));
6523
6524 /* Read firmware image from filesystem. */
6525 sc->fw_fp = firmware_get(sc->fwname);
6526 if (sc->fw_fp == NULL) {
6527 device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
6528 __func__, sc->fwname);
6529 IWN_LOCK(sc);
6530 return EINVAL;
6531 }
6532 IWN_LOCK(sc);
6533
6534 fw->size = sc->fw_fp->datasize;
6535 fw->data = (const uint8_t *)sc->fw_fp->data;
6536 if (fw->size < sizeof (uint32_t)) {
6537 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
6538 __func__, fw->size);
6539 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6540 sc->fw_fp = NULL;
6541 return EINVAL;
6542 }
6543
6544 /* Retrieve text and data sections. */
6545 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
6546 error = iwn_read_firmware_leg(sc, fw);
6547 else
6548 error = iwn_read_firmware_tlv(sc, fw, 1);
6549 if (error != 0) {
6550 device_printf(sc->sc_dev,
6551 "%s: could not read firmware sections, error %d\n",
6552 __func__, error);
6553 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6554 sc->fw_fp = NULL;
6555 return error;
6556 }
6557
6558 /* Make sure text and data sections fit in hardware memory. */
6559 if (fw->main.textsz > sc->fw_text_maxsz ||
6560 fw->main.datasz > sc->fw_data_maxsz ||
6561 fw->init.textsz > sc->fw_text_maxsz ||
6562 fw->init.datasz > sc->fw_data_maxsz ||
6563 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
6564 (fw->boot.textsz & 3) != 0) {
6565 device_printf(sc->sc_dev, "%s: firmware sections too large\n",
6566 __func__);
6567 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6568 sc->fw_fp = NULL;
6569 return EINVAL;
6570 }
6571
6572 /* We can proceed with loading the firmware. */
6573 return 0;
6574}
6575
6576static int
6577iwn_clock_wait(struct iwn_softc *sc)
6578{
6579 int ntries;
6580
6581 /* Set "initialization complete" bit. */
6582 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6583
6584 /* Wait for clock stabilization. */
6585 for (ntries = 0; ntries < 2500; ntries++) {
6586 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6587 return 0;
6588 DELAY(10);
6589 }
6590 device_printf(sc->sc_dev,
6591 "%s: timeout waiting for clock stabilization\n", __func__);
6592 return ETIMEDOUT;
6593}
6594
6595static int
6596iwn_apm_init(struct iwn_softc *sc)
6597{
6598 uint32_t reg;
6599 int error;
6600
6601 /* Disable L0s exit timer (NMI bug workaround). */
6602 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6603 /* Don't wait for ICH L0s (ICH bug workaround). */
6604 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6605
6606 /* Set FH wait threshold to max (HW bug under stress workaround). */
6607 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6608
6609 /* Enable HAP INTA to move adapter from L1a to L0s. */
6610 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6611
6612 /* Retrieve PCIe Active State Power Management (ASPM) setting from the Link Control register. */
6613 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
6614 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6615 if (reg & 0x02) /* L1 Entry enabled. */
6616 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6617 else
6618 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6619
6620 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6621 sc->hw_type <= IWN_HW_REV_TYPE_1000)
6622 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6623
6624 /* Wait for clock stabilization before accessing prph. */
6625 if ((error = iwn_clock_wait(sc)) != 0)
6626 return error;
6627
6628 if ((error = iwn_nic_lock(sc)) != 0)
6629 return error;
6630 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6631 /* Enable DMA and BSM (Bootstrap State Machine). */
6632 iwn_prph_write(sc, IWN_APMG_CLK_EN,
6633 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6634 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6635 } else {
6636 /* Enable DMA. */
6637 iwn_prph_write(sc, IWN_APMG_CLK_EN,
6638 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6639 }
6640 DELAY(20);
6641 /* Disable L1-Active. */
6642 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6643 iwn_nic_unlock(sc);
6644
6645 return 0;
6646}
6647
6648static void
6649iwn_apm_stop_master(struct iwn_softc *sc)
6650{
6651 int ntries;
6652
6653 /* Stop busmaster DMA activity. */
6654 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6655 for (ntries = 0; ntries < 100; ntries++) {
6656 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6657 return;
6658 DELAY(10);
6659 }
6660 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
6661}
6662
6663static void
6664iwn_apm_stop(struct iwn_softc *sc)
6665{
6666 iwn_apm_stop_master(sc);
6667
6668 /* Reset the entire device. */
6669 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6670 DELAY(10);
6671 /* Clear "initialization complete" bit. */
6672 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6673}
6674
6675static int
6676iwn4965_nic_config(struct iwn_softc *sc)
6677{
6678 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6679 /*
6680 * I don't believe this to be correct but this is what the
6681 * vendor driver is doing. Probably the bits should not be
6682 * shifted in IWN_RFCFG_*.
6683 */
6684 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6685 IWN_RFCFG_TYPE(sc->rfcfg) |
6686 IWN_RFCFG_STEP(sc->rfcfg) |
6687 IWN_RFCFG_DASH(sc->rfcfg));
6688 }
6689 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6690 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6691 return 0;
6692}
6693
6694static int
6695iwn5000_nic_config(struct iwn_softc *sc)
6696{
6697 uint32_t tmp;
6698 int error;
6699
6700 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6701 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6702 IWN_RFCFG_TYPE(sc->rfcfg) |
6703 IWN_RFCFG_STEP(sc->rfcfg) |
6704 IWN_RFCFG_DASH(sc->rfcfg));
6705 }
6706 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6707 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6708
6709 if ((error = iwn_nic_lock(sc)) != 0)
6710 return error;
6711 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6712
6713 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6714 /*
6715 * Select first Switching Voltage Regulator (1.32V) to
6716 * solve a stability issue related to noisy DC2DC line
6717 * in the silicon of 1000 Series.
6718 */
6719 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6720 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6721 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6722 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6723 }
6724 iwn_nic_unlock(sc);
6725
6726 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6727 /* Use internal power amplifier only. */
6728 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6729 }
6730 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
6731 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
6732 /* Indicate that ROM calibration version is >=6. */
6733 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6734 }
6735 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
6736 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
6737 return 0;
6738}
6739
6740/*
6741 * Take ownership of the NIC away from Intel Active Management Technology (AMT).
6742 */
6743static int
6744iwn_hw_prepare(struct iwn_softc *sc)
6745{
6746 int ntries;
6747
6748 /* Check if hardware is ready. */
6749 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6750 for (ntries = 0; ntries < 5; ntries++) {
6751 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6752 IWN_HW_IF_CONFIG_NIC_READY)
6753 return 0;
6754 DELAY(10);
6755 }
6756
6757 /* Hardware not ready, force into ready state. */
6758 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6759 for (ntries = 0; ntries < 15000; ntries++) {
6760 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6761 IWN_HW_IF_CONFIG_PREPARE_DONE))
6762 break;
6763 DELAY(10);
6764 }
6765 if (ntries == 15000)
6766 return ETIMEDOUT;
6767
6768 /* Hardware should be ready now. */
6769 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6770 for (ntries = 0; ntries < 5; ntries++) {
6771 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6772 IWN_HW_IF_CONFIG_NIC_READY)
6773 return 0;
6774 DELAY(10);
6775 }
6776 return ETIMEDOUT;
6777}
6778
6779static int
6780iwn_hw_init(struct iwn_softc *sc)
6781{
6782 struct iwn_ops *ops = &sc->ops;
6783 int error, chnl, qid;
6784
6785 /* Clear pending interrupts. */
6786 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6787
6788 if ((error = iwn_apm_init(sc)) != 0) {
6789 device_printf(sc->sc_dev,
6790 "%s: could not power ON adapter, error %d\n", __func__,
6791 error);
6792 return error;
6793 }
6794
6795 /* Select VMAIN power source. */
6796 if ((error = iwn_nic_lock(sc)) != 0)
6797 return error;
6798 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6799 iwn_nic_unlock(sc);
6800
6801 /* Perform adapter-specific initialization. */
6802 if ((error = ops->nic_config(sc)) != 0)
6803 return error;
6804
6805 /* Initialize RX ring. */
6806 if ((error = iwn_nic_lock(sc)) != 0)
6807 return error;
6808 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6809 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6810 /* Set physical address of RX ring (256-byte aligned). */
6811 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6812 /* Set physical address of RX status (16-byte aligned). */
6813 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6814 /* Enable RX. */
6815 IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6816 IWN_FH_RX_CONFIG_ENA |
6817 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
6818 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
6819 IWN_FH_RX_CONFIG_SINGLE_FRAME |
6820 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
6821 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6822 iwn_nic_unlock(sc);
6823 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6824
6825 if ((error = iwn_nic_lock(sc)) != 0)
6826 return error;
6827
6828 /* Initialize TX scheduler. */
6829 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6830
6831 /* Set physical address of "keep warm" page (16-byte aligned). */
6832 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6833
6834 /* Initialize TX rings. */
6835 for (qid = 0; qid < sc->ntxqs; qid++) {
6836 struct iwn_tx_ring *txq = &sc->txq[qid];
6837
6838 /* Set physical address of TX ring (256-byte aligned). */
6839 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6840 txq->desc_dma.paddr >> 8);
6841 }
6842 iwn_nic_unlock(sc);
6843
6844 /* Enable DMA channels. */
6845 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6846 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6847 IWN_FH_TX_CONFIG_DMA_ENA |
6848 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6849 }
6850
6851 /* Clear "radio off" and "commands blocked" bits. */
6852 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6853 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6854
6855 /* Clear pending interrupts. */
6856 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6857 /* Enable interrupt coalescing. */
6858 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6859 /* Enable interrupts. */
6860 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6861
6862 /* _Really_ make sure "radio off" bit is cleared! */
6863 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6864 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6865
6866 /* Enable shadow registers. */
6867 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
6868 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
6869
6870 if ((error = ops->load_firmware(sc)) != 0) {
6871 device_printf(sc->sc_dev,
6872 "%s: could not load firmware, error %d\n", __func__,
6873 error);
6874 return error;
6875 }
6876 /* Wait at most one second for firmware alive notification. */
6877 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
6878 device_printf(sc->sc_dev,
6879 "%s: timeout waiting for adapter to initialize, error %d\n",
6880 __func__, error);
6881 return error;
6882 }
6883 /* Do post-firmware initialization. */
6884 return ops->post_alive(sc);
6885}
6886
6887static void
6888iwn_hw_stop(struct iwn_softc *sc)
6889{
6890 int chnl, qid, ntries;
6891
6892 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6893
6894 /* Disable interrupts. */
6895 IWN_WRITE(sc, IWN_INT_MASK, 0);
6896 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6897 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6898 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6899
6900 /* Make sure we no longer hold the NIC lock. */
6901 iwn_nic_unlock(sc);
6902
6903 /* Stop TX scheduler. */
6904 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
6905
6906 /* Stop all DMA channels. */
6907 if (iwn_nic_lock(sc) == 0) {
6908 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
6909 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
6910 for (ntries = 0; ntries < 200; ntries++) {
6911 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
6912 IWN_FH_TX_STATUS_IDLE(chnl))
6913 break;
6914 DELAY(10);
6915 }
6916 }
6917 iwn_nic_unlock(sc);
6918 }
6919
6920 /* Stop RX ring. */
6921 iwn_reset_rx_ring(sc, &sc->rxq);
6922
6923 /* Reset all TX rings. */
6924 for (qid = 0; qid < sc->ntxqs; qid++)
6925 iwn_reset_tx_ring(sc, &sc->txq[qid]);
6926
6927 if (iwn_nic_lock(sc) == 0) {
6928 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6929 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6930 iwn_nic_unlock(sc);
6931 }
6932 DELAY(5);
6933 /* Power OFF adapter. */
6934 iwn_apm_stop(sc);
6935}
6936
6937static void
6938iwn_radio_on(void *arg0, int pending)
6939{
6940 struct iwn_softc *sc = arg0;
6941 struct ifnet *ifp = sc->sc_ifp;
6942 struct ieee80211com *ic = ifp->if_l2com;
6943 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6944
6945 if (vap != NULL) {
6946 iwn_init(sc);
6947 ieee80211_init(vap);
6948 }
6949}
6950
6951static void
6952iwn_radio_off(void *arg0, int pending)
6953{
6954 struct iwn_softc *sc = arg0;
6955 struct ifnet *ifp = sc->sc_ifp;
6956 struct ieee80211com *ic = ifp->if_l2com;
6957 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6958
6959 iwn_stop(sc);
6960 if (vap != NULL)
6961 ieee80211_stop(vap);
6962
6963 /* Enable interrupts to get RF toggle notification. */
6964 IWN_LOCK(sc);
6965 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6966 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6967 IWN_UNLOCK(sc);
6968}
6969
6970static void
6971iwn_init_locked(struct iwn_softc *sc)
6972{
6973 struct ifnet *ifp = sc->sc_ifp;
6974 int error;
6975
6976 IWN_LOCK_ASSERT(sc);
6977
6978 if ((error = iwn_hw_prepare(sc)) != 0) {
6979 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
6980 __func__, error);
6981 goto fail;
6982 }
6983
6984 /* Initialize interrupt mask to default value. */
6985 sc->int_mask = IWN_INT_MASK_DEF;
6986 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6987
6988 /* Check that the radio is not disabled by hardware switch. */
6989 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6990 device_printf(sc->sc_dev,
6991 "radio is disabled by hardware switch\n");
6992 /* Enable interrupts to get RF toggle notifications. */
6993 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6994 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6995 return;
6996 }
6997
6998 /* Read firmware images from the filesystem. */
6999 if ((error = iwn_read_firmware(sc)) != 0) {
7000 device_printf(sc->sc_dev,
7001 "%s: could not read firmware, error %d\n", __func__,
7002 error);
7003 goto fail;
7004 }
7005
7006 /* Initialize hardware and upload firmware. */
7007 error = iwn_hw_init(sc);
7008 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
7009 sc->fw_fp = NULL;
7010 if (error != 0) {
7011 device_printf(sc->sc_dev,
7012 "%s: could not initialize hardware, error %d\n", __func__,
7013 error);
7014 goto fail;
7015 }
7016
7017 /* Configure adapter now that it is ready. */
7018 if ((error = iwn_config(sc)) != 0) {
7019 device_printf(sc->sc_dev,
7020 "%s: could not configure device, error %d\n", __func__,
7021 error);
7022 goto fail;
7023 }
7024
7025 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
7026 ifp->if_drv_flags |= IFF_DRV_RUNNING;
7027
7028 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
7029 return;
7030
7031fail: iwn_stop_locked(sc);
7032}
7033
7034static void
7035iwn_init(void *arg)
7036{
7037 struct iwn_softc *sc = arg;
7038 struct ifnet *ifp = sc->sc_ifp;
7039 struct ieee80211com *ic = ifp->if_l2com;
7040
7041 IWN_LOCK(sc);
7042 iwn_init_locked(sc);
7043 IWN_UNLOCK(sc);
7044
7045 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
7046 ieee80211_start_all(ic);
7047}
7048
7049static void
7050iwn_stop_locked(struct iwn_softc *sc)
7051{
7052 struct ifnet *ifp = sc->sc_ifp;
7053
7054 IWN_LOCK_ASSERT(sc);
7055
7056 sc->sc_tx_timer = 0;
7057 callout_stop(&sc->watchdog_to);
7058 callout_stop(&sc->calib_to);
7059 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
7060
7061 /* Power OFF hardware. */
7062 iwn_hw_stop(sc);
7063}
7064
7065static void
7066iwn_stop(struct iwn_softc *sc)
7067{
7068 IWN_LOCK(sc);
7069 iwn_stop_locked(sc);
7070 IWN_UNLOCK(sc);
7071}
7072
7073/*
7074 * Callback from net80211 to start a scan.
7075 */
7076static void
7077iwn_scan_start(struct ieee80211com *ic)
7078{
7079 struct ifnet *ifp = ic->ic_ifp;
7080 struct iwn_softc *sc = ifp->if_softc;
7081
7082 IWN_LOCK(sc);
7083 /* make the link LED blink while we're scanning */
7084 iwn_set_led(sc, IWN_LED_LINK, 20, 2);
7085 IWN_UNLOCK(sc);
7086}
7087
7088/*
7089 * Callback from net80211 to terminate a scan.
7090 */
7091static void
7092iwn_scan_end(struct ieee80211com *ic)
7093{
7094 struct ifnet *ifp = ic->ic_ifp;
7095 struct iwn_softc *sc = ifp->if_softc;
7096 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
7097
7098 IWN_LOCK(sc);
7099 if (vap->iv_state == IEEE80211_S_RUN) {
7100 /* Set link LED to ON status if we are associated */
7101 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
7102 }
7103 IWN_UNLOCK(sc);
7104}
7105
7106/*
7107 * Callback from net80211 to force a channel change.
7108 */
7109static void
7110iwn_set_channel(struct ieee80211com *ic)
7111{
7112 const struct ieee80211_channel *c = ic->ic_curchan;
7113 struct ifnet *ifp = ic->ic_ifp;
7114 struct iwn_softc *sc = ifp->if_softc;
7115 int error;
7116
7117 IWN_LOCK(sc);
7118 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
7119 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
7120 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
7121 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
7122
7123 /*
7124 * Only need to set the channel in Monitor mode. AP scanning and auth
7125 * are already taken care of by their respective firmware commands.
7126 */
7127 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7128 error = iwn_config(sc);
7129 if (error != 0)
7130 device_printf(sc->sc_dev,
7131 "%s: error %d settting channel\n", __func__, error);
7132 }
7133 IWN_UNLOCK(sc);
7134}
7135
7136/*
7137 * Callback from net80211 to start scanning of the current channel.
7138 */
7139static void
7140iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
7141{
7142 struct ieee80211vap *vap = ss->ss_vap;
7143 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
7144 int error;
7145
7146 IWN_LOCK(sc);
7147 error = iwn_scan(sc);
7148 IWN_UNLOCK(sc);
7149 if (error != 0)
7150 ieee80211_cancel_scan(vap);
7151}
7152
7153/*
7154 * Callback from net80211 to handle the minimum dwell time being met.
7155 * The intent is to terminate the scan but we just let the firmware
7156 * notify us when it's finished as we have no safe way to abort it.
7157 */
7158static void
7159iwn_scan_mindwell(struct ieee80211_scan_state *ss)
7160{
7161 /* NB: don't try to abort scan; wait for firmware to finish */
7162}
7163
7164static void
7165iwn_hw_reset(void *arg0, int pending)
7166{
7167 struct iwn_softc *sc = arg0;
7168 struct ifnet *ifp = sc->sc_ifp;
7169 struct ieee80211com *ic = ifp->if_l2com;
7170
7171 iwn_stop(sc);
7172 iwn_init(sc);
7173 ieee80211_notify_radio(ic, 1);
7174}