if_iwn.c revision 264945
1/*- 2 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr> 3 * Copyright (c) 2011 Intel Corporation 4 * Copyright (c) 2007-2009 5 * Damien Bergamini <damien.bergamini@free.fr> 6 * Copyright (c) 2008 7 * Benjamin Close <benjsc@FreeBSD.org> 8 * Copyright (c) 2008 Sam Leffler, Errno Consulting 9 * 10 * Permission to use, copy, modify, and distribute this software for any 11 * purpose with or without fee is hereby granted, provided that the above 12 * copyright notice and this permission notice appear in all copies. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 15 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 16 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 17 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 19 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 20 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 21 */ 22 23/* 24 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 25 * adapters. 
26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/10/sys/dev/iwn/if_iwn.c 264945 2014-04-25 21:42:52Z marius $"); 30 31#include "opt_wlan.h" 32#include "opt_iwn.h" 33 34#include <sys/param.h> 35#include <sys/sockio.h> 36#include <sys/sysctl.h> 37#include <sys/mbuf.h> 38#include <sys/kernel.h> 39#include <sys/socket.h> 40#include <sys/systm.h> 41#include <sys/malloc.h> 42#include <sys/bus.h> 43#include <sys/rman.h> 44#include <sys/endian.h> 45#include <sys/firmware.h> 46#include <sys/limits.h> 47#include <sys/module.h> 48#include <sys/queue.h> 49#include <sys/taskqueue.h> 50 51#include <machine/bus.h> 52#include <machine/resource.h> 53#include <machine/clock.h> 54 55#include <dev/pci/pcireg.h> 56#include <dev/pci/pcivar.h> 57 58#include <net/bpf.h> 59#include <net/if.h> 60#include <net/if_arp.h> 61#include <net/ethernet.h> 62#include <net/if_dl.h> 63#include <net/if_media.h> 64#include <net/if_types.h> 65 66#include <netinet/in.h> 67#include <netinet/in_systm.h> 68#include <netinet/in_var.h> 69#include <netinet/if_ether.h> 70#include <netinet/ip.h> 71 72#include <net80211/ieee80211_var.h> 73#include <net80211/ieee80211_radiotap.h> 74#include <net80211/ieee80211_regdomain.h> 75#include <net80211/ieee80211_ratectl.h> 76 77#include <dev/iwn/if_iwnreg.h> 78#include <dev/iwn/if_iwnvar.h> 79#include <dev/iwn/if_iwn_devid.h> 80 81struct iwn_ident { 82 uint16_t vendor; 83 uint16_t device; 84 const char *name; 85}; 86 87static const struct iwn_ident iwn_ident_table[] = { 88 { 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" }, 89 { 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" }, 90 { 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" }, 91 { 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" }, 92 { 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" }, 93 { 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" }, 94 { 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" }, 95 { 0x8086, IWN_DID_x030_2, 
"Intel Centrino Wireless-N 1030" }, 96 { 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" }, 97 { 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" }, 98 { 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" }, 99 { 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" }, 100 { 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" }, 101 { 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" }, 102 { 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" }, 103 { 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" }, 104 { 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" }, 105 { 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" }, 106 { 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" }, 107 { 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" }, 108 { 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" }, 109 { 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" }, 110 { 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" }, 111 { 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" }, 112 { 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" }, 113 { 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" }, 114 { 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" }, 115 { 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" }, 116 { 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" }, 117 { 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" }, 118 { 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" }, 119 { 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" }, 120 { 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" }, 121 { 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" }, 122 { 0, 0, NULL } 123}; 124 125static int iwn_probe(device_t); 126static int iwn_attach(device_t); 127static int iwn4965_attach(struct iwn_softc *, uint16_t); 128static int iwn5000_attach(struct iwn_softc *, uint16_t); 129static void iwn_radiotap_attach(struct iwn_softc *); 
130static void iwn_sysctlattach(struct iwn_softc *); 131static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, 132 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 133 const uint8_t [IEEE80211_ADDR_LEN], 134 const uint8_t [IEEE80211_ADDR_LEN]); 135static void iwn_vap_delete(struct ieee80211vap *); 136static int iwn_detach(device_t); 137static int iwn_shutdown(device_t); 138static int iwn_suspend(device_t); 139static int iwn_resume(device_t); 140static int iwn_nic_lock(struct iwn_softc *); 141static int iwn_eeprom_lock(struct iwn_softc *); 142static int iwn_init_otprom(struct iwn_softc *); 143static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 144static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); 145static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 146 void **, bus_size_t, bus_size_t); 147static void iwn_dma_contig_free(struct iwn_dma_info *); 148static int iwn_alloc_sched(struct iwn_softc *); 149static void iwn_free_sched(struct iwn_softc *); 150static int iwn_alloc_kw(struct iwn_softc *); 151static void iwn_free_kw(struct iwn_softc *); 152static int iwn_alloc_ict(struct iwn_softc *); 153static void iwn_free_ict(struct iwn_softc *); 154static int iwn_alloc_fwmem(struct iwn_softc *); 155static void iwn_free_fwmem(struct iwn_softc *); 156static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 157static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 158static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 159static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 160 int); 161static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 162static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 163static void iwn5000_ict_reset(struct iwn_softc *); 164static int iwn_read_eeprom(struct iwn_softc *, 165 uint8_t macaddr[IEEE80211_ADDR_LEN]); 166static void iwn4965_read_eeprom(struct iwn_softc *); 
167#ifdef IWN_DEBUG 168static void iwn4965_print_power_group(struct iwn_softc *, int); 169#endif 170static void iwn5000_read_eeprom(struct iwn_softc *); 171static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 172static void iwn_read_eeprom_band(struct iwn_softc *, int); 173static void iwn_read_eeprom_ht40(struct iwn_softc *, int); 174static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 175static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 176 struct ieee80211_channel *); 177static int iwn_setregdomain(struct ieee80211com *, 178 struct ieee80211_regdomain *, int, 179 struct ieee80211_channel[]); 180static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 181static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 182 const uint8_t mac[IEEE80211_ADDR_LEN]); 183static void iwn_newassoc(struct ieee80211_node *, int); 184static int iwn_media_change(struct ifnet *); 185static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 186static void iwn_calib_timeout(void *); 187static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *, 188 struct iwn_rx_data *); 189static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, 190 struct iwn_rx_data *); 191static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *, 192 struct iwn_rx_data *); 193static void iwn5000_rx_calib_results(struct iwn_softc *, 194 struct iwn_rx_desc *, struct iwn_rx_data *); 195static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *, 196 struct iwn_rx_data *); 197static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 198 struct iwn_rx_data *); 199static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 200 struct iwn_rx_data *); 201static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, 202 uint8_t); 203static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *); 204static void iwn_cmd_done(struct iwn_softc *, struct 
iwn_rx_desc *); 205static void iwn_notif_intr(struct iwn_softc *); 206static void iwn_wakeup_intr(struct iwn_softc *); 207static void iwn_rftoggle_intr(struct iwn_softc *); 208static void iwn_fatal_intr(struct iwn_softc *); 209static void iwn_intr(void *); 210static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 211 uint16_t); 212static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 213 uint16_t); 214#ifdef notyet 215static void iwn5000_reset_sched(struct iwn_softc *, int, int); 216#endif 217static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 218 struct ieee80211_node *); 219static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, 220 struct ieee80211_node *, 221 const struct ieee80211_bpf_params *params); 222static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 223 const struct ieee80211_bpf_params *); 224static void iwn_start(struct ifnet *); 225static void iwn_start_locked(struct ifnet *); 226static void iwn_watchdog(void *); 227static int iwn_ioctl(struct ifnet *, u_long, caddr_t); 228static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 229static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 230 int); 231static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 232 int); 233static int iwn_set_link_quality(struct iwn_softc *, 234 struct ieee80211_node *); 235static int iwn_add_broadcast_node(struct iwn_softc *, int); 236static int iwn_updateedca(struct ieee80211com *); 237static void iwn_update_mcast(struct ifnet *); 238static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 239static int iwn_set_critical_temp(struct iwn_softc *); 240static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 241static void iwn4965_power_calibration(struct iwn_softc *, int); 242static int iwn4965_set_txpower(struct iwn_softc *, 243 struct ieee80211_channel *, int); 244static int iwn5000_set_txpower(struct iwn_softc *, 245 struct ieee80211_channel 
*, int); 246static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 247static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 248static int iwn_get_noise(const struct iwn_rx_general_stats *); 249static int iwn4965_get_temperature(struct iwn_softc *); 250static int iwn5000_get_temperature(struct iwn_softc *); 251static int iwn_init_sensitivity(struct iwn_softc *); 252static void iwn_collect_noise(struct iwn_softc *, 253 const struct iwn_rx_general_stats *); 254static int iwn4965_init_gains(struct iwn_softc *); 255static int iwn5000_init_gains(struct iwn_softc *); 256static int iwn4965_set_gains(struct iwn_softc *); 257static int iwn5000_set_gains(struct iwn_softc *); 258static void iwn_tune_sensitivity(struct iwn_softc *, 259 const struct iwn_rx_stats *); 260static int iwn_send_sensitivity(struct iwn_softc *); 261static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 262static int iwn_send_btcoex(struct iwn_softc *); 263static int iwn_send_advanced_btcoex(struct iwn_softc *); 264static int iwn5000_runtime_calib(struct iwn_softc *); 265static int iwn_config(struct iwn_softc *); 266static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int); 267static int iwn_scan(struct iwn_softc *); 268static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 269static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 270static int iwn_ampdu_rx_start(struct ieee80211_node *, 271 struct ieee80211_rx_ampdu *, int, int, int); 272static void iwn_ampdu_rx_stop(struct ieee80211_node *, 273 struct ieee80211_rx_ampdu *); 274static int iwn_addba_request(struct ieee80211_node *, 275 struct ieee80211_tx_ampdu *, int, int, int); 276static int iwn_addba_response(struct ieee80211_node *, 277 struct ieee80211_tx_ampdu *, int, int, int); 278static int iwn_ampdu_tx_start(struct ieee80211com *, 279 struct ieee80211_node *, uint8_t); 280static void iwn_ampdu_tx_stop(struct ieee80211_node *, 281 struct ieee80211_tx_ampdu *); 282static 
void iwn4965_ampdu_tx_start(struct iwn_softc *, 283 struct ieee80211_node *, int, uint8_t, uint16_t); 284static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, 285 uint8_t, uint16_t); 286static void iwn5000_ampdu_tx_start(struct iwn_softc *, 287 struct ieee80211_node *, int, uint8_t, uint16_t); 288static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, 289 uint8_t, uint16_t); 290static int iwn5000_query_calibration(struct iwn_softc *); 291static int iwn5000_send_calibration(struct iwn_softc *); 292static int iwn5000_send_wimax_coex(struct iwn_softc *); 293static int iwn5000_crystal_calib(struct iwn_softc *); 294static int iwn5000_temp_offset_calib(struct iwn_softc *); 295static int iwn4965_post_alive(struct iwn_softc *); 296static int iwn5000_post_alive(struct iwn_softc *); 297static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 298 int); 299static int iwn4965_load_firmware(struct iwn_softc *); 300static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 301 const uint8_t *, int); 302static int iwn5000_load_firmware(struct iwn_softc *); 303static int iwn_read_firmware_leg(struct iwn_softc *, 304 struct iwn_fw_info *); 305static int iwn_read_firmware_tlv(struct iwn_softc *, 306 struct iwn_fw_info *, uint16_t); 307static int iwn_read_firmware(struct iwn_softc *); 308static int iwn_clock_wait(struct iwn_softc *); 309static int iwn_apm_init(struct iwn_softc *); 310static void iwn_apm_stop_master(struct iwn_softc *); 311static void iwn_apm_stop(struct iwn_softc *); 312static int iwn4965_nic_config(struct iwn_softc *); 313static int iwn5000_nic_config(struct iwn_softc *); 314static int iwn_hw_prepare(struct iwn_softc *); 315static int iwn_hw_init(struct iwn_softc *); 316static void iwn_hw_stop(struct iwn_softc *); 317static void iwn_radio_on(void *, int); 318static void iwn_radio_off(void *, int); 319static void iwn_init_locked(struct iwn_softc *); 320static void iwn_init(void *); 321static void iwn_stop_locked(struct iwn_softc *); 
322static void iwn_stop(struct iwn_softc *); 323static void iwn_scan_start(struct ieee80211com *); 324static void iwn_scan_end(struct ieee80211com *); 325static void iwn_set_channel(struct ieee80211com *); 326static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); 327static void iwn_scan_mindwell(struct ieee80211_scan_state *); 328static void iwn_hw_reset(void *, int); 329#ifdef IWN_DEBUG 330static char *iwn_get_csr_string(int); 331static void iwn_debug_register(struct iwn_softc *); 332#endif 333 334#ifdef IWN_DEBUG 335enum { 336 IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ 337 IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ 338 IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ 339 IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */ 340 IWN_DEBUG_RESET = 0x00000010, /* reset processing */ 341 IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */ 342 IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ 343 IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */ 344 IWN_DEBUG_INTR = 0x00000100, /* ISR */ 345 IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */ 346 IWN_DEBUG_NODE = 0x00000400, /* node management */ 347 IWN_DEBUG_LED = 0x00000800, /* led management */ 348 IWN_DEBUG_CMD = 0x00001000, /* cmd submission */ 349 IWN_DEBUG_TXRATE = 0x00002000, /* TX rate debugging */ 350 IWN_DEBUG_PWRSAVE = 0x00004000, /* Power save operations */ 351 IWN_DEBUG_REGISTER = 0x20000000, /* print chipset register */ 352 IWN_DEBUG_TRACE = 0x40000000, /* Print begin and start driver function */ 353 IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */ 354 IWN_DEBUG_ANY = 0xffffffff 355}; 356 357#define DPRINTF(sc, m, fmt, ...) 
do { \ 358 if (sc->sc_debug & (m)) \ 359 printf(fmt, __VA_ARGS__); \ 360} while (0) 361 362static const char * 363iwn_intr_str(uint8_t cmd) 364{ 365 switch (cmd) { 366 /* Notifications */ 367 case IWN_UC_READY: return "UC_READY"; 368 case IWN_ADD_NODE_DONE: return "ADD_NODE_DONE"; 369 case IWN_TX_DONE: return "TX_DONE"; 370 case IWN_START_SCAN: return "START_SCAN"; 371 case IWN_STOP_SCAN: return "STOP_SCAN"; 372 case IWN_RX_STATISTICS: return "RX_STATS"; 373 case IWN_BEACON_STATISTICS: return "BEACON_STATS"; 374 case IWN_STATE_CHANGED: return "STATE_CHANGED"; 375 case IWN_BEACON_MISSED: return "BEACON_MISSED"; 376 case IWN_RX_PHY: return "RX_PHY"; 377 case IWN_MPDU_RX_DONE: return "MPDU_RX_DONE"; 378 case IWN_RX_DONE: return "RX_DONE"; 379 380 /* Command Notifications */ 381 case IWN_CMD_RXON: return "IWN_CMD_RXON"; 382 case IWN_CMD_RXON_ASSOC: return "IWN_CMD_RXON_ASSOC"; 383 case IWN_CMD_EDCA_PARAMS: return "IWN_CMD_EDCA_PARAMS"; 384 case IWN_CMD_TIMING: return "IWN_CMD_TIMING"; 385 case IWN_CMD_LINK_QUALITY: return "IWN_CMD_LINK_QUALITY"; 386 case IWN_CMD_SET_LED: return "IWN_CMD_SET_LED"; 387 case IWN5000_CMD_WIMAX_COEX: return "IWN5000_CMD_WIMAX_COEX"; 388 case IWN5000_CMD_CALIB_CONFIG: return "IWN5000_CMD_CALIB_CONFIG"; 389 case IWN5000_CMD_CALIB_RESULT: return "IWN5000_CMD_CALIB_RESULT"; 390 case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE"; 391 case IWN_CMD_SET_POWER_MODE: return "IWN_CMD_SET_POWER_MODE"; 392 case IWN_CMD_SCAN: return "IWN_CMD_SCAN"; 393 case IWN_CMD_SCAN_RESULTS: return "IWN_CMD_SCAN_RESULTS"; 394 case IWN_CMD_TXPOWER: return "IWN_CMD_TXPOWER"; 395 case IWN_CMD_TXPOWER_DBM: return "IWN_CMD_TXPOWER_DBM"; 396 case IWN5000_CMD_TX_ANT_CONFIG: return "IWN5000_CMD_TX_ANT_CONFIG"; 397 case IWN_CMD_BT_COEX: return "IWN_CMD_BT_COEX"; 398 case IWN_CMD_SET_CRITICAL_TEMP: return "IWN_CMD_SET_CRITICAL_TEMP"; 399 case IWN_CMD_SET_SENSITIVITY: return "IWN_CMD_SET_SENSITIVITY"; 400 case IWN_CMD_PHY_CALIB: return "IWN_CMD_PHY_CALIB"; 401 
} 402 return "UNKNOWN INTR NOTIF/CMD"; 403} 404#else 405#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0) 406#endif 407 408static device_method_t iwn_methods[] = { 409 /* Device interface */ 410 DEVMETHOD(device_probe, iwn_probe), 411 DEVMETHOD(device_attach, iwn_attach), 412 DEVMETHOD(device_detach, iwn_detach), 413 DEVMETHOD(device_shutdown, iwn_shutdown), 414 DEVMETHOD(device_suspend, iwn_suspend), 415 DEVMETHOD(device_resume, iwn_resume), 416 417 DEVMETHOD_END 418}; 419 420static driver_t iwn_driver = { 421 "iwn", 422 iwn_methods, 423 sizeof(struct iwn_softc) 424}; 425static devclass_t iwn_devclass; 426 427DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); 428 429MODULE_VERSION(iwn, 1); 430 431MODULE_DEPEND(iwn, firmware, 1, 1, 1); 432MODULE_DEPEND(iwn, pci, 1, 1, 1); 433MODULE_DEPEND(iwn, wlan, 1, 1, 1); 434 435static int 436iwn_probe(device_t dev) 437{ 438 const struct iwn_ident *ident; 439 440 for (ident = iwn_ident_table; ident->name != NULL; ident++) { 441 if (pci_get_vendor(dev) == ident->vendor && 442 pci_get_device(dev) == ident->device) { 443 device_set_desc(dev, ident->name); 444 return 0; 445 } 446 } 447 return ENXIO; 448} 449 450static int 451iwn_attach(device_t dev) 452{ 453 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); 454 struct ieee80211com *ic; 455 struct ifnet *ifp; 456 uint32_t reg; 457 int i, error, rid; 458 uint8_t macaddr[IEEE80211_ADDR_LEN]; 459 460 sc->sc_dev = dev; 461 462#ifdef IWN_DEBUG 463 error = resource_int_value(device_get_name(sc->sc_dev), 464 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 465 if (error != 0) 466 sc->sc_debug = 0; 467#else 468 sc->sc_debug = 0; 469#endif 470 471 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); 472 473 /* 474 * Get the offset of the PCI Express Capability Structure in PCI 475 * Configuration Space. 
476 */ 477 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 478 if (error != 0) { 479 device_printf(dev, "PCIe capability structure not found!\n"); 480 return error; 481 } 482 483 /* Clear device-specific "PCI retry timeout" register (41h). */ 484 pci_write_config(dev, 0x41, 0, 1); 485 486 /* Hardware bug workaround. */ 487 reg = pci_read_config(dev, PCIR_COMMAND, 2); 488 if (reg & PCIM_CMD_INTxDIS) { 489 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n", 490 __func__); 491 reg &= ~PCIM_CMD_INTxDIS; 492 pci_write_config(dev, PCIR_COMMAND, reg, 2); 493 } 494 495 /* Enable bus-mastering. */ 496 pci_enable_busmaster(dev); 497 498 rid = PCIR_BAR(0); 499 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 500 RF_ACTIVE); 501 if (sc->mem == NULL) { 502 device_printf(dev, "can't map mem space\n"); 503 error = ENOMEM; 504 return error; 505 } 506 sc->sc_st = rman_get_bustag(sc->mem); 507 sc->sc_sh = rman_get_bushandle(sc->mem); 508 509 i = 1; 510 rid = 0; 511 if (pci_alloc_msi(dev, &i) == 0) 512 rid = 1; 513 /* Install interrupt handler. */ 514 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 515 (rid != 0 ? 0 : RF_SHAREABLE)); 516 if (sc->irq == NULL) { 517 device_printf(dev, "can't map interrupt\n"); 518 error = ENOMEM; 519 goto fail; 520 } 521 522 IWN_LOCK_INIT(sc); 523 524 /* Read hardware revision and attach. */ 525 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) 526 & IWN_HW_REV_TYPE_MASK; 527 sc->subdevice_id = pci_get_subdevice(dev); 528 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 529 error = iwn4965_attach(sc, pci_get_device(dev)); 530 else 531 error = iwn5000_attach(sc, pci_get_device(dev)); 532 if (error != 0) { 533 device_printf(dev, "could not attach device, error %d\n", 534 error); 535 goto fail; 536 } 537 538 if ((error = iwn_hw_prepare(sc)) != 0) { 539 device_printf(dev, "hardware not ready, error %d\n", error); 540 goto fail; 541 } 542 543 /* Allocate DMA memory for firmware transfers. 
*/ 544 if ((error = iwn_alloc_fwmem(sc)) != 0) { 545 device_printf(dev, 546 "could not allocate memory for firmware, error %d\n", 547 error); 548 goto fail; 549 } 550 551 /* Allocate "Keep Warm" page. */ 552 if ((error = iwn_alloc_kw(sc)) != 0) { 553 device_printf(dev, 554 "could not allocate keep warm page, error %d\n", error); 555 goto fail; 556 } 557 558 /* Allocate ICT table for 5000 Series. */ 559 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 560 (error = iwn_alloc_ict(sc)) != 0) { 561 device_printf(dev, "could not allocate ICT table, error %d\n", 562 error); 563 goto fail; 564 } 565 566 /* Allocate TX scheduler "rings". */ 567 if ((error = iwn_alloc_sched(sc)) != 0) { 568 device_printf(dev, 569 "could not allocate TX scheduler rings, error %d\n", error); 570 goto fail; 571 } 572 573 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ 574 for (i = 0; i < sc->ntxqs; i++) { 575 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 576 device_printf(dev, 577 "could not allocate TX ring %d, error %d\n", i, 578 error); 579 goto fail; 580 } 581 } 582 583 /* Allocate RX ring. */ 584 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { 585 device_printf(dev, "could not allocate RX ring, error %d\n", 586 error); 587 goto fail; 588 } 589 590 /* Clear pending interrupts. */ 591 IWN_WRITE(sc, IWN_INT, 0xffffffff); 592 593 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 594 if (ifp == NULL) { 595 device_printf(dev, "can not allocate ifnet structure\n"); 596 goto fail; 597 } 598 599 ic = ifp->if_l2com; 600 ic->ic_ifp = ifp; 601 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 602 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 603 604 /* Set device capabilities. 
*/ 605 ic->ic_caps = 606 IEEE80211_C_STA /* station mode supported */ 607 | IEEE80211_C_MONITOR /* monitor mode supported */ 608 | IEEE80211_C_BGSCAN /* background scanning */ 609 | IEEE80211_C_TXPMGT /* tx power management */ 610 | IEEE80211_C_SHSLOT /* short slot time supported */ 611 | IEEE80211_C_WPA 612 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 613#if 0 614 | IEEE80211_C_IBSS /* ibss/adhoc mode */ 615#endif 616 | IEEE80211_C_WME /* WME */ 617 | IEEE80211_C_PMGT /* Station-side power mgmt */ 618 ; 619 620 /* Read MAC address, channels, etc from EEPROM. */ 621 if ((error = iwn_read_eeprom(sc, macaddr)) != 0) { 622 device_printf(dev, "could not read EEPROM, error %d\n", 623 error); 624 goto fail; 625 } 626 627 /* Count the number of available chains. */ 628 sc->ntxchains = 629 ((sc->txchainmask >> 2) & 1) + 630 ((sc->txchainmask >> 1) & 1) + 631 ((sc->txchainmask >> 0) & 1); 632 sc->nrxchains = 633 ((sc->rxchainmask >> 2) & 1) + 634 ((sc->rxchainmask >> 1) & 1) + 635 ((sc->rxchainmask >> 0) & 1); 636 if (bootverbose) { 637 device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", 638 sc->ntxchains, sc->nrxchains, sc->eeprom_domain, 639 macaddr, ":"); 640 } 641 642 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 643 ic->ic_rxstream = sc->nrxchains; 644 ic->ic_txstream = sc->ntxchains; 645 646 /* 647 * The NICs we currently support cap out at 2x2 support 648 * separate from the chains being used. 649 * 650 * This is a total hack to work around that until some 651 * per-device method is implemented to return the 652 * actual stream support. 
653 */ 654 if (ic->ic_rxstream > 2) 655 ic->ic_rxstream = 2; 656 if (ic->ic_txstream > 2) 657 ic->ic_txstream = 2; 658 659 ic->ic_htcaps = 660 IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ 661 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ 662 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ 663 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ 664#ifdef notyet 665 | IEEE80211_HTCAP_GREENFIELD 666#if IWN_RBUF_SIZE == 8192 667 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ 668#else 669 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ 670#endif 671#endif 672 /* s/w capabilities */ 673 | IEEE80211_HTC_HT /* HT operation */ 674 | IEEE80211_HTC_AMPDU /* tx A-MPDU */ 675#ifdef notyet 676 | IEEE80211_HTC_AMSDU /* tx A-MSDU */ 677#endif 678 ; 679 } 680 681 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 682 ifp->if_softc = sc; 683 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 684 ifp->if_init = iwn_init; 685 ifp->if_ioctl = iwn_ioctl; 686 ifp->if_start = iwn_start; 687 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 688 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 689 IFQ_SET_READY(&ifp->if_snd); 690 691 ieee80211_ifattach(ic, macaddr); 692 ic->ic_vap_create = iwn_vap_create; 693 ic->ic_vap_delete = iwn_vap_delete; 694 ic->ic_raw_xmit = iwn_raw_xmit; 695 ic->ic_node_alloc = iwn_node_alloc; 696 sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; 697 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; 698 sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; 699 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; 700 sc->sc_addba_request = ic->ic_addba_request; 701 ic->ic_addba_request = iwn_addba_request; 702 sc->sc_addba_response = ic->ic_addba_response; 703 ic->ic_addba_response = iwn_addba_response; 704 sc->sc_addba_stop = ic->ic_addba_stop; 705 ic->ic_addba_stop = iwn_ampdu_tx_stop; 706 ic->ic_newassoc = iwn_newassoc; 707 ic->ic_wme.wme_update = iwn_updateedca; 708 ic->ic_update_mcast = iwn_update_mcast; 709 ic->ic_scan_start = iwn_scan_start; 710 
ic->ic_scan_end = iwn_scan_end; 711 ic->ic_set_channel = iwn_set_channel; 712 ic->ic_scan_curchan = iwn_scan_curchan; 713 ic->ic_scan_mindwell = iwn_scan_mindwell; 714 ic->ic_setregdomain = iwn_setregdomain; 715 716 iwn_radiotap_attach(sc); 717 718 callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); 719 callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); 720 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc); 721 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc); 722 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc); 723 724 iwn_sysctlattach(sc); 725 726 /* 727 * Hook our interrupt after all initialization is complete. 728 */ 729 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 730 NULL, iwn_intr, sc, &sc->sc_ih); 731 if (error != 0) { 732 device_printf(dev, "can't establish interrupt, error %d\n", 733 error); 734 goto fail; 735 } 736 737 if (bootverbose) 738 ieee80211_announce(ic); 739 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 740 return 0; 741fail: 742 iwn_detach(dev); 743 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 744 return error; 745} 746 747static int 748iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 749{ 750 struct iwn_ops *ops = &sc->ops; 751 752 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 753 ops->load_firmware = iwn4965_load_firmware; 754 ops->read_eeprom = iwn4965_read_eeprom; 755 ops->post_alive = iwn4965_post_alive; 756 ops->nic_config = iwn4965_nic_config; 757 ops->update_sched = iwn4965_update_sched; 758 ops->get_temperature = iwn4965_get_temperature; 759 ops->get_rssi = iwn4965_get_rssi; 760 ops->set_txpower = iwn4965_set_txpower; 761 ops->init_gains = iwn4965_init_gains; 762 ops->set_gains = iwn4965_set_gains; 763 ops->add_node = iwn4965_add_node; 764 ops->tx_done = iwn4965_tx_done; 765 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 766 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 767 sc->ntxqs = IWN4965_NTXQUEUES; 768 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; 769 sc->ndmachnls = 
IWN4965_NDMACHNLS; 770 sc->broadcast_id = IWN4965_ID_BROADCAST; 771 sc->rxonsz = IWN4965_RXONSZ; 772 sc->schedsz = IWN4965_SCHEDSZ; 773 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 774 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 775 sc->fwsz = IWN4965_FWSZ; 776 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 777 sc->limits = &iwn4965_sensitivity_limits; 778 sc->fwname = "iwn4965fw"; 779 /* Override chains masks, ROM is known to be broken. */ 780 sc->txchainmask = IWN_ANT_AB; 781 sc->rxchainmask = IWN_ANT_ABC; 782 783 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); 784 785 return 0; 786} 787 788static int 789iwn5000_attach(struct iwn_softc *sc, uint16_t pid) 790{ 791 struct iwn_ops *ops = &sc->ops; 792 793 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 794 795 ops->load_firmware = iwn5000_load_firmware; 796 ops->read_eeprom = iwn5000_read_eeprom; 797 ops->post_alive = iwn5000_post_alive; 798 ops->nic_config = iwn5000_nic_config; 799 ops->update_sched = iwn5000_update_sched; 800 ops->get_temperature = iwn5000_get_temperature; 801 ops->get_rssi = iwn5000_get_rssi; 802 ops->set_txpower = iwn5000_set_txpower; 803 ops->init_gains = iwn5000_init_gains; 804 ops->set_gains = iwn5000_set_gains; 805 ops->add_node = iwn5000_add_node; 806 ops->tx_done = iwn5000_tx_done; 807 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 808 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 809 sc->ntxqs = IWN5000_NTXQUEUES; 810 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; 811 sc->ndmachnls = IWN5000_NDMACHNLS; 812 sc->broadcast_id = IWN5000_ID_BROADCAST; 813 sc->rxonsz = IWN5000_RXONSZ; 814 sc->schedsz = IWN5000_SCHEDSZ; 815 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 816 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 817 sc->fwsz = IWN5000_FWSZ; 818 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 819 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 820 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 821 822 switch (sc->hw_type) { 823 case IWN_HW_REV_TYPE_5100: 824 sc->limits = 
&iwn5000_sensitivity_limits; 825 sc->fwname = "iwn5000fw"; 826 /* Override chains masks, ROM is known to be broken. */ 827 sc->txchainmask = IWN_ANT_B; 828 sc->rxchainmask = IWN_ANT_AB; 829 break; 830 case IWN_HW_REV_TYPE_5150: 831 sc->limits = &iwn5150_sensitivity_limits; 832 sc->fwname = "iwn5150fw"; 833 break; 834 case IWN_HW_REV_TYPE_5300: 835 case IWN_HW_REV_TYPE_5350: 836 sc->limits = &iwn5000_sensitivity_limits; 837 sc->fwname = "iwn5000fw"; 838 break; 839 case IWN_HW_REV_TYPE_1000: 840 sc->limits = &iwn1000_sensitivity_limits; 841 sc->fwname = "iwn1000fw"; 842 break; 843 case IWN_HW_REV_TYPE_6000: 844 sc->limits = &iwn6000_sensitivity_limits; 845 sc->fwname = "iwn6000fw"; 846 if (pid == 0x422c || pid == 0x4239) { 847 sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 848 /* Override chains masks, ROM is known to be broken. */ 849 sc->txchainmask = IWN_ANT_BC; 850 sc->rxchainmask = IWN_ANT_BC; 851 } 852 break; 853 case IWN_HW_REV_TYPE_6050: 854 sc->limits = &iwn6000_sensitivity_limits; 855 sc->fwname = "iwn6050fw"; 856 /* Override chains masks, ROM is known to be broken. */ 857 sc->txchainmask = IWN_ANT_AB; 858 sc->rxchainmask = IWN_ANT_AB; 859 break; 860 case IWN_HW_REV_TYPE_6005: 861 sc->limits = &iwn6000_sensitivity_limits; 862 if (pid != 0x0082 && pid != 0x0085) { 863 sc->fwname = "iwn6000g2bfw"; 864 sc->sc_flags |= IWN_FLAG_ADV_BTCOEX; 865 } else 866 sc->fwname = "iwn6000g2afw"; 867 break; 868 default: 869 device_printf(sc->sc_dev, "adapter type %d not supported\n", 870 sc->hw_type); 871 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 872 return ENOTSUP; 873 } 874 return 0; 875} 876 877/* 878 * Attach the interface to 802.11 radiotap. 
879 */ 880static void 881iwn_radiotap_attach(struct iwn_softc *sc) 882{ 883 struct ifnet *ifp = sc->sc_ifp; 884 struct ieee80211com *ic = ifp->if_l2com; 885 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 886 ieee80211_radiotap_attach(ic, 887 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 888 IWN_TX_RADIOTAP_PRESENT, 889 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 890 IWN_RX_RADIOTAP_PRESENT); 891 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 892} 893 894static void 895iwn_sysctlattach(struct iwn_softc *sc) 896{ 897#ifdef IWN_DEBUG 898 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 899 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 900 901 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 902 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 903 "control debugging printfs"); 904#endif 905} 906 907static struct ieee80211vap * 908iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 909 enum ieee80211_opmode opmode, int flags, 910 const uint8_t bssid[IEEE80211_ADDR_LEN], 911 const uint8_t mac[IEEE80211_ADDR_LEN]) 912{ 913 struct iwn_vap *ivp; 914 struct ieee80211vap *vap; 915 uint8_t mac1[IEEE80211_ADDR_LEN]; 916 struct iwn_softc *sc = ic->ic_ifp->if_softc; 917 918 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 919 return NULL; 920 921 IEEE80211_ADDR_COPY(mac1, mac); 922 923 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap), 924 M_80211_VAP, M_NOWAIT | M_ZERO); 925 if (ivp == NULL) 926 return NULL; 927 vap = &ivp->iv_vap; 928 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1); 929 ivp->ctx = IWN_RXON_BSS_CTX; 930 IEEE80211_ADDR_COPY(ivp->macaddr, mac1); 931 vap->iv_bmissthreshold = 10; /* override default */ 932 /* Override with driver methods. */ 933 ivp->iv_newstate = vap->iv_newstate; 934 vap->iv_newstate = iwn_newstate; 935 sc->ivap[IWN_RXON_BSS_CTX] = vap; 936 937 ieee80211_ratectl_init(vap); 938 /* Complete setup. 
*/ 939 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status); 940 ic->ic_opmode = opmode; 941 return vap; 942} 943 944static void 945iwn_vap_delete(struct ieee80211vap *vap) 946{ 947 struct iwn_vap *ivp = IWN_VAP(vap); 948 949 ieee80211_ratectl_deinit(vap); 950 ieee80211_vap_detach(vap); 951 free(ivp, M_80211_VAP); 952} 953 954static int 955iwn_detach(device_t dev) 956{ 957 struct iwn_softc *sc = device_get_softc(dev); 958 struct ifnet *ifp = sc->sc_ifp; 959 struct ieee80211com *ic; 960 int qid; 961 962 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 963 964 if (ifp != NULL) { 965 ic = ifp->if_l2com; 966 967 ieee80211_draintask(ic, &sc->sc_reinit_task); 968 ieee80211_draintask(ic, &sc->sc_radioon_task); 969 ieee80211_draintask(ic, &sc->sc_radiooff_task); 970 971 iwn_stop(sc); 972 callout_drain(&sc->watchdog_to); 973 callout_drain(&sc->calib_to); 974 ieee80211_ifdetach(ic); 975 } 976 977 /* Uninstall interrupt handler. */ 978 if (sc->irq != NULL) { 979 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 980 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 981 sc->irq); 982 pci_release_msi(dev); 983 } 984 985 /* Free DMA resources. 
*/ 986 iwn_free_rx_ring(sc, &sc->rxq); 987 for (qid = 0; qid < sc->ntxqs; qid++) 988 iwn_free_tx_ring(sc, &sc->txq[qid]); 989 iwn_free_sched(sc); 990 iwn_free_kw(sc); 991 if (sc->ict != NULL) 992 iwn_free_ict(sc); 993 iwn_free_fwmem(sc); 994 995 if (sc->mem != NULL) 996 bus_release_resource(dev, SYS_RES_MEMORY, 997 rman_get_rid(sc->mem), sc->mem); 998 999 if (ifp != NULL) 1000 if_free(ifp); 1001 1002 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__); 1003 IWN_LOCK_DESTROY(sc); 1004 return 0; 1005} 1006 1007static int 1008iwn_shutdown(device_t dev) 1009{ 1010 struct iwn_softc *sc = device_get_softc(dev); 1011 1012 iwn_stop(sc); 1013 return 0; 1014} 1015 1016static int 1017iwn_suspend(device_t dev) 1018{ 1019 struct iwn_softc *sc = device_get_softc(dev); 1020 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1021 1022 ieee80211_suspend_all(ic); 1023 return 0; 1024} 1025 1026static int 1027iwn_resume(device_t dev) 1028{ 1029 struct iwn_softc *sc = device_get_softc(dev); 1030 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 1031 1032 /* Clear device-specific "PCI retry timeout" register (41h). */ 1033 pci_write_config(dev, 0x41, 0, 1); 1034 1035 ieee80211_resume_all(ic); 1036 return 0; 1037} 1038 1039static int 1040iwn_nic_lock(struct iwn_softc *sc) 1041{ 1042 int ntries; 1043 1044 /* Request exclusive access to NIC. */ 1045 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1046 1047 /* Spin until we actually get the lock. 
*/ 1048 for (ntries = 0; ntries < 1000; ntries++) { 1049 if ((IWN_READ(sc, IWN_GP_CNTRL) & 1050 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 1051 IWN_GP_CNTRL_MAC_ACCESS_ENA) 1052 return 0; 1053 DELAY(10); 1054 } 1055 return ETIMEDOUT; 1056} 1057 1058static __inline void 1059iwn_nic_unlock(struct iwn_softc *sc) 1060{ 1061 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1062} 1063 1064static __inline uint32_t 1065iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 1066{ 1067 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 1068 IWN_BARRIER_READ_WRITE(sc); 1069 return IWN_READ(sc, IWN_PRPH_RDATA); 1070} 1071 1072static __inline void 1073iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1074{ 1075 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 1076 IWN_BARRIER_WRITE(sc); 1077 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 1078} 1079 1080static __inline void 1081iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1082{ 1083 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 1084} 1085 1086static __inline void 1087iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1088{ 1089 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 1090} 1091 1092static __inline void 1093iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 1094 const uint32_t *data, int count) 1095{ 1096 for (; count > 0; count--, data++, addr += 4) 1097 iwn_prph_write(sc, addr, *data); 1098} 1099 1100static __inline uint32_t 1101iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 1102{ 1103 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 1104 IWN_BARRIER_READ_WRITE(sc); 1105 return IWN_READ(sc, IWN_MEM_RDATA); 1106} 1107 1108static __inline void 1109iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1110{ 1111 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 1112 IWN_BARRIER_WRITE(sc); 1113 IWN_WRITE(sc, IWN_MEM_WDATA, data); 1114} 1115 1116static __inline void 1117iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, 
uint16_t data) 1118{ 1119 uint32_t tmp; 1120 1121 tmp = iwn_mem_read(sc, addr & ~3); 1122 if (addr & 3) 1123 tmp = (tmp & 0x0000ffff) | data << 16; 1124 else 1125 tmp = (tmp & 0xffff0000) | data; 1126 iwn_mem_write(sc, addr & ~3, tmp); 1127} 1128 1129static __inline void 1130iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 1131 int count) 1132{ 1133 for (; count > 0; count--, addr += 4) 1134 *data++ = iwn_mem_read(sc, addr); 1135} 1136 1137static __inline void 1138iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 1139 int count) 1140{ 1141 for (; count > 0; count--, addr += 4) 1142 iwn_mem_write(sc, addr, val); 1143} 1144 1145static int 1146iwn_eeprom_lock(struct iwn_softc *sc) 1147{ 1148 int i, ntries; 1149 1150 for (i = 0; i < 100; i++) { 1151 /* Request exclusive access to EEPROM. */ 1152 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 1153 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1154 1155 /* Spin until we actually get the lock. */ 1156 for (ntries = 0; ntries < 100; ntries++) { 1157 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 1158 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 1159 return 0; 1160 DELAY(10); 1161 } 1162 } 1163 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__); 1164 return ETIMEDOUT; 1165} 1166 1167static __inline void 1168iwn_eeprom_unlock(struct iwn_softc *sc) 1169{ 1170 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1171} 1172 1173/* 1174 * Initialize access by host to One Time Programmable ROM. 1175 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 1176 */ 1177static int 1178iwn_init_otprom(struct iwn_softc *sc) 1179{ 1180 uint16_t prev, base, next; 1181 int count, error; 1182 1183 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1184 1185 /* Wait for clock stabilization before accessing prph. 
*/ 1186 if ((error = iwn_clock_wait(sc)) != 0) 1187 return error; 1188 1189 if ((error = iwn_nic_lock(sc)) != 0) 1190 return error; 1191 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1192 DELAY(5); 1193 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1194 iwn_nic_unlock(sc); 1195 1196 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1197 if (sc->hw_type != IWN_HW_REV_TYPE_1000) { 1198 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1199 IWN_RESET_LINK_PWR_MGMT_DIS); 1200 } 1201 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1202 /* Clear ECC status. */ 1203 IWN_SETBITS(sc, IWN_OTP_GP, 1204 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1205 1206 /* 1207 * Find the block before last block (contains the EEPROM image) 1208 * for HW without OTP shadow RAM. 1209 */ 1210 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 1211 /* Switch to absolute addressing mode. */ 1212 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1213 base = prev = 0; 1214 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) { 1215 error = iwn_read_prom_data(sc, base, &next, 2); 1216 if (error != 0) 1217 return error; 1218 if (next == 0) /* End of linked-list. */ 1219 break; 1220 prev = base; 1221 base = le16toh(next); 1222 } 1223 if (count == 0 || count == IWN1000_OTP_NBLOCKS) 1224 return EIO; 1225 /* Skip "next" word. 
*/ 1226 sc->prom_base = prev + 1; 1227 } 1228 1229 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1230 1231 return 0; 1232} 1233 1234static int 1235iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1236{ 1237 uint8_t *out = data; 1238 uint32_t val, tmp; 1239 int ntries; 1240 1241 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1242 1243 addr += sc->prom_base; 1244 for (; count > 0; count -= 2, addr++) { 1245 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1246 for (ntries = 0; ntries < 10; ntries++) { 1247 val = IWN_READ(sc, IWN_EEPROM); 1248 if (val & IWN_EEPROM_READ_VALID) 1249 break; 1250 DELAY(5); 1251 } 1252 if (ntries == 10) { 1253 device_printf(sc->sc_dev, 1254 "timeout reading ROM at 0x%x\n", addr); 1255 return ETIMEDOUT; 1256 } 1257 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1258 /* OTPROM, check for ECC errors. */ 1259 tmp = IWN_READ(sc, IWN_OTP_GP); 1260 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1261 device_printf(sc->sc_dev, 1262 "OTPROM ECC error at 0x%x\n", addr); 1263 return EIO; 1264 } 1265 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1266 /* Correctable ECC error, clear bit. 
*/ 1267 IWN_SETBITS(sc, IWN_OTP_GP, 1268 IWN_OTP_GP_ECC_CORR_STTS); 1269 } 1270 } 1271 *out++ = val >> 16; 1272 if (count > 1) 1273 *out++ = val >> 24; 1274 } 1275 1276 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1277 1278 return 0; 1279} 1280 1281static void 1282iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1283{ 1284 if (error != 0) 1285 return; 1286 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1287 *(bus_addr_t *)arg = segs[0].ds_addr; 1288} 1289 1290static int 1291iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 1292 void **kvap, bus_size_t size, bus_size_t alignment) 1293{ 1294 int error; 1295 1296 dma->tag = NULL; 1297 dma->size = size; 1298 1299 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 1300 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1301 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 1302 if (error != 0) 1303 goto fail; 1304 1305 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1306 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 1307 if (error != 0) 1308 goto fail; 1309 1310 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 1311 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 1312 if (error != 0) 1313 goto fail; 1314 1315 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 1316 1317 if (kvap != NULL) 1318 *kvap = dma->vaddr; 1319 1320 return 0; 1321 1322fail: iwn_dma_contig_free(dma); 1323 return error; 1324} 1325 1326static void 1327iwn_dma_contig_free(struct iwn_dma_info *dma) 1328{ 1329 if (dma->map != NULL) { 1330 if (dma->vaddr != NULL) { 1331 bus_dmamap_sync(dma->tag, dma->map, 1332 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1333 bus_dmamap_unload(dma->tag, dma->map); 1334 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 1335 dma->vaddr = NULL; 1336 } 1337 bus_dmamap_destroy(dma->tag, dma->map); 1338 dma->map = NULL; 1339 } 1340 if (dma->tag != NULL) { 1341 
bus_dma_tag_destroy(dma->tag); 1342 dma->tag = NULL; 1343 } 1344} 1345 1346static int 1347iwn_alloc_sched(struct iwn_softc *sc) 1348{ 1349 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1350 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, 1351 sc->schedsz, 1024); 1352} 1353 1354static void 1355iwn_free_sched(struct iwn_softc *sc) 1356{ 1357 iwn_dma_contig_free(&sc->sched_dma); 1358} 1359 1360static int 1361iwn_alloc_kw(struct iwn_softc *sc) 1362{ 1363 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1364 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); 1365} 1366 1367static void 1368iwn_free_kw(struct iwn_softc *sc) 1369{ 1370 iwn_dma_contig_free(&sc->kw_dma); 1371} 1372 1373static int 1374iwn_alloc_ict(struct iwn_softc *sc) 1375{ 1376 /* ICT table must be aligned on a 4KB boundary. */ 1377 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, 1378 IWN_ICT_SIZE, 4096); 1379} 1380 1381static void 1382iwn_free_ict(struct iwn_softc *sc) 1383{ 1384 iwn_dma_contig_free(&sc->ict_dma); 1385} 1386 1387static int 1388iwn_alloc_fwmem(struct iwn_softc *sc) 1389{ 1390 /* Must be aligned on a 16-byte boundary. */ 1391 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); 1392} 1393 1394static void 1395iwn_free_fwmem(struct iwn_softc *sc) 1396{ 1397 iwn_dma_contig_free(&sc->fw_dma); 1398} 1399 1400static int 1401iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1402{ 1403 bus_size_t size; 1404 int i, error; 1405 1406 ring->cur = 0; 1407 1408 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1409 1410 /* Allocate RX descriptors (256-byte aligned). 
*/ 1411 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1412 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1413 size, 256); 1414 if (error != 0) { 1415 device_printf(sc->sc_dev, 1416 "%s: could not allocate RX ring DMA memory, error %d\n", 1417 __func__, error); 1418 goto fail; 1419 } 1420 1421 /* Allocate RX status area (16-byte aligned). */ 1422 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, 1423 sizeof (struct iwn_rx_status), 16); 1424 if (error != 0) { 1425 device_printf(sc->sc_dev, 1426 "%s: could not allocate RX status DMA memory, error %d\n", 1427 __func__, error); 1428 goto fail; 1429 } 1430 1431 /* Create RX buffer DMA tag. */ 1432 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1433 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1434 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL, 1435 &ring->data_dmat); 1436 if (error != 0) { 1437 device_printf(sc->sc_dev, 1438 "%s: could not create RX buf DMA tag, error %d\n", 1439 __func__, error); 1440 goto fail; 1441 } 1442 1443 /* 1444 * Allocate and map RX buffers. 
1445 */ 1446 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1447 struct iwn_rx_data *data = &ring->data[i]; 1448 bus_addr_t paddr; 1449 1450 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1451 if (error != 0) { 1452 device_printf(sc->sc_dev, 1453 "%s: could not create RX buf DMA map, error %d\n", 1454 __func__, error); 1455 goto fail; 1456 } 1457 1458 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 1459 IWN_RBUF_SIZE); 1460 if (data->m == NULL) { 1461 device_printf(sc->sc_dev, 1462 "%s: could not allocate RX mbuf\n", __func__); 1463 error = ENOBUFS; 1464 goto fail; 1465 } 1466 1467 error = bus_dmamap_load(ring->data_dmat, data->map, 1468 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1469 &paddr, BUS_DMA_NOWAIT); 1470 if (error != 0 && error != EFBIG) { 1471 device_printf(sc->sc_dev, 1472 "%s: can't not map mbuf, error %d\n", __func__, 1473 error); 1474 goto fail; 1475 } 1476 1477 /* Set physical address of RX buffer (256-byte aligned). */ 1478 ring->desc[i] = htole32(paddr >> 8); 1479 } 1480 1481 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1482 BUS_DMASYNC_PREWRITE); 1483 1484 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 1485 1486 return 0; 1487 1488fail: iwn_free_rx_ring(sc, ring); 1489 1490 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 1491 1492 return error; 1493} 1494 1495static void 1496iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1497{ 1498 int ntries; 1499 1500 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 1501 1502 if (iwn_nic_lock(sc) == 0) { 1503 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1504 for (ntries = 0; ntries < 1000; ntries++) { 1505 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1506 IWN_FH_RX_STATUS_IDLE) 1507 break; 1508 DELAY(10); 1509 } 1510 iwn_nic_unlock(sc); 1511 } 1512 ring->cur = 0; 1513 sc->last_rx_valid = 0; 1514} 1515 1516static void 1517iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1518{ 1519 int i; 1520 1521 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing 
%s \n", __func__); 1522 1523 iwn_dma_contig_free(&ring->desc_dma); 1524 iwn_dma_contig_free(&ring->stat_dma); 1525 1526 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1527 struct iwn_rx_data *data = &ring->data[i]; 1528 1529 if (data->m != NULL) { 1530 bus_dmamap_sync(ring->data_dmat, data->map, 1531 BUS_DMASYNC_POSTREAD); 1532 bus_dmamap_unload(ring->data_dmat, data->map); 1533 m_freem(data->m); 1534 data->m = NULL; 1535 } 1536 if (data->map != NULL) 1537 bus_dmamap_destroy(ring->data_dmat, data->map); 1538 } 1539 if (ring->data_dmat != NULL) { 1540 bus_dma_tag_destroy(ring->data_dmat); 1541 ring->data_dmat = NULL; 1542 } 1543} 1544 1545static int 1546iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1547{ 1548 bus_addr_t paddr; 1549 bus_size_t size; 1550 int i, error; 1551 1552 ring->qid = qid; 1553 ring->queued = 0; 1554 ring->cur = 0; 1555 1556 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1557 1558 /* Allocate TX descriptors (256-byte aligned). */ 1559 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); 1560 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1561 size, 256); 1562 if (error != 0) { 1563 device_printf(sc->sc_dev, 1564 "%s: could not allocate TX ring DMA memory, error %d\n", 1565 __func__, error); 1566 goto fail; 1567 } 1568 1569 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); 1570 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1571 size, 4); 1572 if (error != 0) { 1573 device_printf(sc->sc_dev, 1574 "%s: could not allocate TX cmd DMA memory, error %d\n", 1575 __func__, error); 1576 goto fail; 1577 } 1578 1579 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1580 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1581 IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1582 &ring->data_dmat); 1583 if (error != 0) { 1584 device_printf(sc->sc_dev, 1585 "%s: could not create TX buf DMA tag, error %d\n", 1586 __func__, error); 1587 
goto fail; 1588 } 1589 1590 paddr = ring->cmd_dma.paddr; 1591 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1592 struct iwn_tx_data *data = &ring->data[i]; 1593 1594 data->cmd_paddr = paddr; 1595 data->scratch_paddr = paddr + 12; 1596 paddr += sizeof (struct iwn_tx_cmd); 1597 1598 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1599 if (error != 0) { 1600 device_printf(sc->sc_dev, 1601 "%s: could not create TX buf DMA map, error %d\n", 1602 __func__, error); 1603 goto fail; 1604 } 1605 } 1606 1607 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1608 1609 return 0; 1610 1611fail: iwn_free_tx_ring(sc, ring); 1612 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 1613 return error; 1614} 1615 1616static void 1617iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1618{ 1619 int i; 1620 1621 DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__); 1622 1623 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1624 struct iwn_tx_data *data = &ring->data[i]; 1625 1626 if (data->m != NULL) { 1627 bus_dmamap_sync(ring->data_dmat, data->map, 1628 BUS_DMASYNC_POSTWRITE); 1629 bus_dmamap_unload(ring->data_dmat, data->map); 1630 m_freem(data->m); 1631 data->m = NULL; 1632 } 1633 } 1634 /* Clear TX descriptors. 
*/ 1635 memset(ring->desc, 0, ring->desc_dma.size); 1636 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1637 BUS_DMASYNC_PREWRITE); 1638 sc->qfullmsk &= ~(1 << ring->qid); 1639 ring->queued = 0; 1640 ring->cur = 0; 1641} 1642 1643static void 1644iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) 1645{ 1646 int i; 1647 1648 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1649 1650 iwn_dma_contig_free(&ring->desc_dma); 1651 iwn_dma_contig_free(&ring->cmd_dma); 1652 1653 for (i = 0; i < IWN_TX_RING_COUNT; i++) { 1654 struct iwn_tx_data *data = &ring->data[i]; 1655 1656 if (data->m != NULL) { 1657 bus_dmamap_sync(ring->data_dmat, data->map, 1658 BUS_DMASYNC_POSTWRITE); 1659 bus_dmamap_unload(ring->data_dmat, data->map); 1660 m_freem(data->m); 1661 } 1662 if (data->map != NULL) 1663 bus_dmamap_destroy(ring->data_dmat, data->map); 1664 } 1665 if (ring->data_dmat != NULL) { 1666 bus_dma_tag_destroy(ring->data_dmat); 1667 ring->data_dmat = NULL; 1668 } 1669} 1670 1671static void 1672iwn5000_ict_reset(struct iwn_softc *sc) 1673{ 1674 /* Disable interrupts. */ 1675 IWN_WRITE(sc, IWN_INT_MASK, 0); 1676 1677 /* Reset ICT table. */ 1678 memset(sc->ict, 0, IWN_ICT_SIZE); 1679 sc->ict_cur = 0; 1680 1681 /* Set physical address of ICT table (4KB aligned). */ 1682 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__); 1683 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | 1684 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); 1685 1686 /* Enable periodic RX interrupt. */ 1687 sc->int_mask |= IWN_INT_RX_PERIODIC; 1688 /* Switch to ICT interrupt mode in driver. */ 1689 sc->sc_flags |= IWN_FLAG_USE_ICT; 1690 1691 /* Re-enable interrupts. 
*/ 1692 IWN_WRITE(sc, IWN_INT, 0xffffffff); 1693 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 1694} 1695 1696static int 1697iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1698{ 1699 struct iwn_ops *ops = &sc->ops; 1700 uint16_t val; 1701 int error; 1702 1703 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1704 1705 /* Check whether adapter has an EEPROM or an OTPROM. */ 1706 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && 1707 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) 1708 sc->sc_flags |= IWN_FLAG_HAS_OTPROM; 1709 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", 1710 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM"); 1711 1712 /* Adapter has to be powered on for EEPROM access to work. */ 1713 if ((error = iwn_apm_init(sc)) != 0) { 1714 device_printf(sc->sc_dev, 1715 "%s: could not power ON adapter, error %d\n", __func__, 1716 error); 1717 return error; 1718 } 1719 1720 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { 1721 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); 1722 return EIO; 1723 } 1724 if ((error = iwn_eeprom_lock(sc)) != 0) { 1725 device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n", 1726 __func__, error); 1727 return error; 1728 } 1729 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1730 if ((error = iwn_init_otprom(sc)) != 0) { 1731 device_printf(sc->sc_dev, 1732 "%s: could not initialize OTPROM, error %d\n", 1733 __func__, error); 1734 return error; 1735 } 1736 } 1737 1738 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); 1739 DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val)); 1740 /* Check if HT support is bonded out. */ 1741 if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) 1742 sc->sc_flags |= IWN_FLAG_HAS_11N; 1743 1744 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); 1745 sc->rfcfg = le16toh(val); 1746 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); 1747 /* Read Tx/Rx chains from ROM unless it's known to be broken. 
*/ 1748 if (sc->txchainmask == 0) 1749 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); 1750 if (sc->rxchainmask == 0) 1751 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); 1752 1753 /* Read MAC address. */ 1754 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); 1755 1756 /* Read adapter-specific information from EEPROM. */ 1757 ops->read_eeprom(sc); 1758 1759 iwn_apm_stop(sc); /* Power OFF adapter. */ 1760 1761 iwn_eeprom_unlock(sc); 1762 1763 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1764 1765 return 0; 1766} 1767 1768static void 1769iwn4965_read_eeprom(struct iwn_softc *sc) 1770{ 1771 uint32_t addr; 1772 uint16_t val; 1773 int i; 1774 1775 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1776 1777 /* Read regulatory domain (4 ASCII characters). */ 1778 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); 1779 1780 /* Read the list of authorized channels (20MHz ones only). */ 1781 for (i = 0; i < 7; i++) { 1782 addr = iwn4965_regulatory_bands[i]; 1783 iwn_read_eeprom_channels(sc, i, addr); 1784 } 1785 1786 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ 1787 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); 1788 sc->maxpwr2GHz = val & 0xff; 1789 sc->maxpwr5GHz = val >> 8; 1790 /* Check that EEPROM values are within valid range. */ 1791 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) 1792 sc->maxpwr5GHz = 38; 1793 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) 1794 sc->maxpwr2GHz = 38; 1795 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", 1796 sc->maxpwr2GHz, sc->maxpwr5GHz); 1797 1798 /* Read samples for each TX power group. */ 1799 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, 1800 sizeof sc->bands); 1801 1802 /* Read voltage at which samples were taken. 
*/ 1803 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); 1804 sc->eeprom_voltage = (int16_t)le16toh(val); 1805 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", 1806 sc->eeprom_voltage); 1807 1808#ifdef IWN_DEBUG 1809 /* Print samples. */ 1810 if (sc->sc_debug & IWN_DEBUG_ANY) { 1811 for (i = 0; i < IWN_NBANDS; i++) 1812 iwn4965_print_power_group(sc, i); 1813 } 1814#endif 1815 1816 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1817} 1818 1819#ifdef IWN_DEBUG 1820static void 1821iwn4965_print_power_group(struct iwn_softc *sc, int i) 1822{ 1823 struct iwn4965_eeprom_band *band = &sc->bands[i]; 1824 struct iwn4965_eeprom_chan_samples *chans = band->chans; 1825 int j, c; 1826 1827 printf("===band %d===\n", i); 1828 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); 1829 printf("chan1 num=%d\n", chans[0].num); 1830 for (c = 0; c < 2; c++) { 1831 for (j = 0; j < IWN_NSAMPLES; j++) { 1832 printf("chain %d, sample %d: temp=%d gain=%d " 1833 "power=%d pa_det=%d\n", c, j, 1834 chans[0].samples[c][j].temp, 1835 chans[0].samples[c][j].gain, 1836 chans[0].samples[c][j].power, 1837 chans[0].samples[c][j].pa_det); 1838 } 1839 } 1840 printf("chan2 num=%d\n", chans[1].num); 1841 for (c = 0; c < 2; c++) { 1842 for (j = 0; j < IWN_NSAMPLES; j++) { 1843 printf("chain %d, sample %d: temp=%d gain=%d " 1844 "power=%d pa_det=%d\n", c, j, 1845 chans[1].samples[c][j].temp, 1846 chans[1].samples[c][j].gain, 1847 chans[1].samples[c][j].power, 1848 chans[1].samples[c][j].pa_det); 1849 } 1850 } 1851} 1852#endif 1853 1854static void 1855iwn5000_read_eeprom(struct iwn_softc *sc) 1856{ 1857 struct iwn5000_eeprom_calib_hdr hdr; 1858 int32_t volt; 1859 uint32_t base, addr; 1860 uint16_t val; 1861 int i; 1862 1863 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1864 1865 /* Read regulatory domain (4 ASCII characters). 
*/ 1866 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 1867 base = le16toh(val); 1868 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, 1869 sc->eeprom_domain, 4); 1870 1871 /* Read the list of authorized channels (20MHz ones only). */ 1872 for (i = 0; i < 7; i++) { 1873 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1874 addr = base + iwn6000_regulatory_bands[i]; 1875 else 1876 addr = base + iwn5000_regulatory_bands[i]; 1877 iwn_read_eeprom_channels(sc, i, addr); 1878 } 1879 1880 /* Read enhanced TX power information for 6000 Series. */ 1881 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 1882 iwn_read_eeprom_enhinfo(sc); 1883 1884 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); 1885 base = le16toh(val); 1886 iwn_read_prom_data(sc, base, &hdr, sizeof hdr); 1887 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 1888 "%s: calib version=%u pa type=%u voltage=%u\n", __func__, 1889 hdr.version, hdr.pa_type, le16toh(hdr.volt)); 1890 sc->calib_ver = hdr.version; 1891 1892 if (sc->hw_type == IWN_HW_REV_TYPE_5150) { 1893 /* Compute temperature offset. */ 1894 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); 1895 sc->eeprom_temp = le16toh(val); 1896 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); 1897 volt = le16toh(val); 1898 sc->temp_off = sc->eeprom_temp - (volt / -5); 1899 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n", 1900 sc->eeprom_temp, volt, sc->temp_off); 1901 } else { 1902 /* Read crystal calibration. */ 1903 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, 1904 &sc->eeprom_crystal, sizeof (uint32_t)); 1905 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n", 1906 le32toh(sc->eeprom_crystal)); 1907 } 1908 1909 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1910 1911} 1912 1913/* 1914 * Translate EEPROM flags to net80211. 
1915 */ 1916static uint32_t 1917iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) 1918{ 1919 uint32_t nflags; 1920 1921 nflags = 0; 1922 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) 1923 nflags |= IEEE80211_CHAN_PASSIVE; 1924 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) 1925 nflags |= IEEE80211_CHAN_NOADHOC; 1926 if (channel->flags & IWN_EEPROM_CHAN_RADAR) { 1927 nflags |= IEEE80211_CHAN_DFS; 1928 /* XXX apparently IBSS may still be marked */ 1929 nflags |= IEEE80211_CHAN_NOADHOC; 1930 } 1931 1932 return nflags; 1933} 1934 1935static void 1936iwn_read_eeprom_band(struct iwn_softc *sc, int n) 1937{ 1938 struct ifnet *ifp = sc->sc_ifp; 1939 struct ieee80211com *ic = ifp->if_l2com; 1940 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 1941 const struct iwn_chan_band *band = &iwn_bands[n]; 1942 struct ieee80211_channel *c; 1943 uint8_t chan; 1944 int i, nflags; 1945 1946 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1947 1948 for (i = 0; i < band->nchan; i++) { 1949 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 1950 DPRINTF(sc, IWN_DEBUG_RESET, 1951 "skip chan %d flags 0x%x maxpwr %d\n", 1952 band->chan[i], channels[i].flags, 1953 channels[i].maxpwr); 1954 continue; 1955 } 1956 chan = band->chan[i]; 1957 nflags = iwn_eeprom_channel_flags(&channels[i]); 1958 1959 c = &ic->ic_channels[ic->ic_nchans++]; 1960 c->ic_ieee = chan; 1961 c->ic_maxregpower = channels[i].maxpwr; 1962 c->ic_maxpower = 2*c->ic_maxregpower; 1963 1964 if (n == 0) { /* 2GHz band */ 1965 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G); 1966 /* G =>'s B is supported */ 1967 c->ic_flags = IEEE80211_CHAN_B | nflags; 1968 c = &ic->ic_channels[ic->ic_nchans++]; 1969 c[0] = c[-1]; 1970 c->ic_flags = IEEE80211_CHAN_G | nflags; 1971 } else { /* 5GHz band */ 1972 c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A); 1973 c->ic_flags = IEEE80211_CHAN_A | nflags; 1974 } 1975 1976 /* Save maximum allowed TX power for this channel. 
*/ 1977 sc->maxpwr[chan] = channels[i].maxpwr; 1978 1979 DPRINTF(sc, IWN_DEBUG_RESET, 1980 "add chan %d flags 0x%x maxpwr %d\n", chan, 1981 channels[i].flags, channels[i].maxpwr); 1982 1983 if (sc->sc_flags & IWN_FLAG_HAS_11N) { 1984 /* add HT20, HT40 added separately */ 1985 c = &ic->ic_channels[ic->ic_nchans++]; 1986 c[0] = c[-1]; 1987 c->ic_flags |= IEEE80211_CHAN_HT20; 1988 } 1989 } 1990 1991 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1992 1993} 1994 1995static void 1996iwn_read_eeprom_ht40(struct iwn_softc *sc, int n) 1997{ 1998 struct ifnet *ifp = sc->sc_ifp; 1999 struct ieee80211com *ic = ifp->if_l2com; 2000 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 2001 const struct iwn_chan_band *band = &iwn_bands[n]; 2002 struct ieee80211_channel *c, *cent, *extc; 2003 uint8_t chan; 2004 int i, nflags; 2005 2006 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__); 2007 2008 if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) { 2009 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__); 2010 return; 2011 } 2012 2013 for (i = 0; i < band->nchan; i++) { 2014 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 2015 DPRINTF(sc, IWN_DEBUG_RESET, 2016 "skip chan %d flags 0x%x maxpwr %d\n", 2017 band->chan[i], channels[i].flags, 2018 channels[i].maxpwr); 2019 continue; 2020 } 2021 chan = band->chan[i]; 2022 nflags = iwn_eeprom_channel_flags(&channels[i]); 2023 2024 /* 2025 * Each entry defines an HT40 channel pair; find the 2026 * center channel, then the extension channel above. 2027 */ 2028 cent = ieee80211_find_channel_byieee(ic, chan, 2029 (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A)); 2030 if (cent == NULL) { /* XXX shouldn't happen */ 2031 device_printf(sc->sc_dev, 2032 "%s: no entry for channel %d\n", __func__, chan); 2033 continue; 2034 } 2035 extc = ieee80211_find_channel(ic, cent->ic_freq+20, 2036 (n == 5 ? 
IEEE80211_CHAN_G : IEEE80211_CHAN_A)); 2037 if (extc == NULL) { 2038 DPRINTF(sc, IWN_DEBUG_RESET, 2039 "%s: skip chan %d, extension channel not found\n", 2040 __func__, chan); 2041 continue; 2042 } 2043 2044 DPRINTF(sc, IWN_DEBUG_RESET, 2045 "add ht40 chan %d flags 0x%x maxpwr %d\n", 2046 chan, channels[i].flags, channels[i].maxpwr); 2047 2048 c = &ic->ic_channels[ic->ic_nchans++]; 2049 c[0] = cent[0]; 2050 c->ic_extieee = extc->ic_ieee; 2051 c->ic_flags &= ~IEEE80211_CHAN_HT; 2052 c->ic_flags |= IEEE80211_CHAN_HT40U | nflags; 2053 c = &ic->ic_channels[ic->ic_nchans++]; 2054 c[0] = extc[0]; 2055 c->ic_extieee = cent->ic_ieee; 2056 c->ic_flags &= ~IEEE80211_CHAN_HT; 2057 c->ic_flags |= IEEE80211_CHAN_HT40D | nflags; 2058 } 2059 2060 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2061 2062} 2063 2064static void 2065iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 2066{ 2067 struct ifnet *ifp = sc->sc_ifp; 2068 struct ieee80211com *ic = ifp->if_l2com; 2069 2070 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], 2071 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); 2072 2073 if (n < 5) 2074 iwn_read_eeprom_band(sc, n); 2075 else 2076 iwn_read_eeprom_ht40(sc, n); 2077 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 2078} 2079 2080static struct iwn_eeprom_chan * 2081iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) 2082{ 2083 int band, chan, i, j; 2084 2085 if (IEEE80211_IS_CHAN_HT40(c)) { 2086 band = IEEE80211_IS_CHAN_5GHZ(c) ? 
6 : 5; 2087 if (IEEE80211_IS_CHAN_HT40D(c)) 2088 chan = c->ic_extieee; 2089 else 2090 chan = c->ic_ieee; 2091 for (i = 0; i < iwn_bands[band].nchan; i++) { 2092 if (iwn_bands[band].chan[i] == chan) 2093 return &sc->eeprom_channels[band][i]; 2094 } 2095 } else { 2096 for (j = 0; j < 5; j++) { 2097 for (i = 0; i < iwn_bands[j].nchan; i++) { 2098 if (iwn_bands[j].chan[i] == c->ic_ieee) 2099 return &sc->eeprom_channels[j][i]; 2100 } 2101 } 2102 } 2103 return NULL; 2104} 2105 2106/* 2107 * Enforce flags read from EEPROM. 2108 */ 2109static int 2110iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 2111 int nchan, struct ieee80211_channel chans[]) 2112{ 2113 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2114 int i; 2115 2116 for (i = 0; i < nchan; i++) { 2117 struct ieee80211_channel *c = &chans[i]; 2118 struct iwn_eeprom_chan *channel; 2119 2120 channel = iwn_find_eeprom_channel(sc, c); 2121 if (channel == NULL) { 2122 if_printf(ic->ic_ifp, 2123 "%s: invalid channel %u freq %u/0x%x\n", 2124 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 2125 return EINVAL; 2126 } 2127 c->ic_flags |= iwn_eeprom_channel_flags(channel); 2128 } 2129 2130 return 0; 2131} 2132 2133static void 2134iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 2135{ 2136 struct iwn_eeprom_enhinfo enhinfo[35]; 2137 struct ifnet *ifp = sc->sc_ifp; 2138 struct ieee80211com *ic = ifp->if_l2com; 2139 struct ieee80211_channel *c; 2140 uint16_t val, base; 2141 int8_t maxpwr; 2142 uint8_t flags; 2143 int i, j; 2144 2145 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2146 2147 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 2148 base = le16toh(val); 2149 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 2150 enhinfo, sizeof enhinfo); 2151 2152 for (i = 0; i < nitems(enhinfo); i++) { 2153 flags = enhinfo[i].flags; 2154 if (!(flags & IWN_ENHINFO_VALID)) 2155 continue; /* Skip invalid entries. 
*/ 2156 2157 maxpwr = 0; 2158 if (sc->txchainmask & IWN_ANT_A) 2159 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); 2160 if (sc->txchainmask & IWN_ANT_B) 2161 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); 2162 if (sc->txchainmask & IWN_ANT_C) 2163 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); 2164 if (sc->ntxchains == 2) 2165 maxpwr = MAX(maxpwr, enhinfo[i].mimo2); 2166 else if (sc->ntxchains == 3) 2167 maxpwr = MAX(maxpwr, enhinfo[i].mimo3); 2168 2169 for (j = 0; j < ic->ic_nchans; j++) { 2170 c = &ic->ic_channels[j]; 2171 if ((flags & IWN_ENHINFO_5GHZ)) { 2172 if (!IEEE80211_IS_CHAN_A(c)) 2173 continue; 2174 } else if ((flags & IWN_ENHINFO_OFDM)) { 2175 if (!IEEE80211_IS_CHAN_G(c)) 2176 continue; 2177 } else if (!IEEE80211_IS_CHAN_B(c)) 2178 continue; 2179 if ((flags & IWN_ENHINFO_HT40)) { 2180 if (!IEEE80211_IS_CHAN_HT40(c)) 2181 continue; 2182 } else { 2183 if (IEEE80211_IS_CHAN_HT40(c)) 2184 continue; 2185 } 2186 if (enhinfo[i].chan != 0 && 2187 enhinfo[i].chan != c->ic_ieee) 2188 continue; 2189 2190 DPRINTF(sc, IWN_DEBUG_RESET, 2191 "channel %d(%x), maxpwr %d\n", c->ic_ieee, 2192 c->ic_flags, maxpwr / 2); 2193 c->ic_maxregpower = maxpwr / 2; 2194 c->ic_maxpower = maxpwr; 2195 } 2196 } 2197 2198 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2199 2200} 2201 2202static struct ieee80211_node * 2203iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 2204{ 2205 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); 2206} 2207 2208static __inline int 2209rate2plcp(int rate) 2210{ 2211 switch (rate & 0xff) { 2212 case 12: return 0xd; 2213 case 18: return 0xf; 2214 case 24: return 0x5; 2215 case 36: return 0x7; 2216 case 48: return 0x9; 2217 case 72: return 0xb; 2218 case 96: return 0x1; 2219 case 108: return 0x3; 2220 case 2: return 10; 2221 case 4: return 20; 2222 case 11: return 55; 2223 case 22: return 110; 2224 } 2225 return 0; 2226} 2227 2228/* 2229 * Calculate the required PLCP value from the given rate, 2230 * to the 
given node. 2231 * 2232 * This will take the node configuration (eg 11n, rate table 2233 * setup, etc) into consideration. 2234 */ 2235static uint32_t 2236iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni, 2237 uint8_t rate) 2238{ 2239#define RV(v) ((v) & IEEE80211_RATE_VAL) 2240 struct ieee80211com *ic = ni->ni_ic; 2241 uint8_t txant1, txant2; 2242 uint32_t plcp = 0; 2243 int ridx; 2244 2245 /* Use the first valid TX antenna. */ 2246 txant1 = IWN_LSB(sc->txchainmask); 2247 txant2 = IWN_LSB(sc->txchainmask & ~txant1); 2248 2249 /* 2250 * If it's an MCS rate, let's set the plcp correctly 2251 * and set the relevant flags based on the node config. 2252 */ 2253 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { 2254 /* 2255 * Set the initial PLCP value to be between 0->31 for 2256 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!" 2257 * flag. 2258 */ 2259 plcp = RV(rate) | IWN_RFLAG_MCS; 2260 2261 /* 2262 * XXX the following should only occur if both 2263 * the local configuration _and_ the remote node 2264 * advertise these capabilities. Thus this code 2265 * may need fixing! 2266 */ 2267 2268 /* 2269 * Set the channel width and guard interval. 2270 */ 2271 if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { 2272 plcp |= IWN_RFLAG_HT40; 2273 if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) 2274 plcp |= IWN_RFLAG_SGI; 2275 } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) { 2276 plcp |= IWN_RFLAG_SGI; 2277 } 2278 2279 /* 2280 * If it's a two stream rate, enable TX on both 2281 * antennas. 2282 * 2283 * XXX three stream rates? 2284 */ 2285 if (rate > 0x87) 2286 plcp |= IWN_RFLAG_ANT(txant1 | txant2); 2287 else 2288 plcp |= IWN_RFLAG_ANT(txant1); 2289 } else { 2290 /* 2291 * Set the initial PLCP - fine for both 2292 * OFDM and CCK rates. 
2293 */ 2294 plcp = rate2plcp(rate); 2295 2296 /* Set CCK flag if it's CCK */ 2297 2298 /* XXX It would be nice to have a method 2299 * to map the ridx -> phy table entry 2300 * so we could just query that, rather than 2301 * this hack to check against IWN_RIDX_OFDM6. 2302 */ 2303 ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, 2304 rate & IEEE80211_RATE_VAL); 2305 if (ridx < IWN_RIDX_OFDM6 && 2306 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) 2307 plcp |= IWN_RFLAG_CCK; 2308 2309 /* Set antenna configuration */ 2310 plcp |= IWN_RFLAG_ANT(txant1); 2311 } 2312 2313 DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n", 2314 __func__, 2315 rate, 2316 plcp); 2317 2318 return (htole32(plcp)); 2319#undef RV 2320} 2321 2322static void 2323iwn_newassoc(struct ieee80211_node *ni, int isnew) 2324{ 2325 /* Doesn't do anything at the moment */ 2326} 2327 2328static int 2329iwn_media_change(struct ifnet *ifp) 2330{ 2331 int error; 2332 2333 error = ieee80211_media_change(ifp); 2334 /* NB: only the fixed rate can change and that doesn't need a reset */ 2335 return (error == ENETRESET ? 
0 : error); 2336} 2337 2338static int 2339iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 2340{ 2341 struct iwn_vap *ivp = IWN_VAP(vap); 2342 struct ieee80211com *ic = vap->iv_ic; 2343 struct iwn_softc *sc = ic->ic_ifp->if_softc; 2344 int error = 0; 2345 2346 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2347 2348 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, 2349 ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); 2350 2351 IEEE80211_UNLOCK(ic); 2352 IWN_LOCK(sc); 2353 callout_stop(&sc->calib_to); 2354 2355 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 2356 2357 switch (nstate) { 2358 case IEEE80211_S_ASSOC: 2359 if (vap->iv_state != IEEE80211_S_RUN) 2360 break; 2361 /* FALLTHROUGH */ 2362 case IEEE80211_S_AUTH: 2363 if (vap->iv_state == IEEE80211_S_AUTH) 2364 break; 2365 2366 /* 2367 * !AUTH -> AUTH transition requires state reset to handle 2368 * reassociations correctly. 2369 */ 2370 sc->rxon->associd = 0; 2371 sc->rxon->filter &= ~htole32(IWN_FILTER_BSS); 2372 sc->calib.state = IWN_CALIB_STATE_INIT; 2373 2374 if ((error = iwn_auth(sc, vap)) != 0) { 2375 device_printf(sc->sc_dev, 2376 "%s: could not move to auth state\n", __func__); 2377 } 2378 break; 2379 2380 case IEEE80211_S_RUN: 2381 /* 2382 * RUN -> RUN transition; Just restart the timers. 2383 */ 2384 if (vap->iv_state == IEEE80211_S_RUN) { 2385 sc->calib_cnt = 0; 2386 break; 2387 } 2388 2389 /* 2390 * !RUN -> RUN requires setting the association id 2391 * which is done with a firmware cmd. We also defer 2392 * starting the timers until that work is done. 
2393 */ 2394 if ((error = iwn_run(sc, vap)) != 0) { 2395 device_printf(sc->sc_dev, 2396 "%s: could not move to run state\n", __func__); 2397 } 2398 break; 2399 2400 case IEEE80211_S_INIT: 2401 sc->calib.state = IWN_CALIB_STATE_INIT; 2402 break; 2403 2404 default: 2405 break; 2406 } 2407 IWN_UNLOCK(sc); 2408 IEEE80211_LOCK(ic); 2409 if (error != 0){ 2410 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 2411 return error; 2412 } 2413 2414 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2415 2416 return ivp->iv_newstate(vap, nstate, arg); 2417} 2418 2419static void 2420iwn_calib_timeout(void *arg) 2421{ 2422 struct iwn_softc *sc = arg; 2423 2424 IWN_LOCK_ASSERT(sc); 2425 2426 /* Force automatic TX power calibration every 60 secs. */ 2427 if (++sc->calib_cnt >= 120) { 2428 uint32_t flags = 0; 2429 2430 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", 2431 "sending request for statistics"); 2432 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, 2433 sizeof flags, 1); 2434 sc->calib_cnt = 0; 2435 } 2436 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, 2437 sc); 2438} 2439 2440/* 2441 * Process an RX_PHY firmware notification. This is usually immediately 2442 * followed by an MPDU_RX_DONE notification. 2443 */ 2444static void 2445iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2446 struct iwn_rx_data *data) 2447{ 2448 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); 2449 2450 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); 2451 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2452 2453 /* Save RX statistics, they will be used on MPDU_RX_DONE. */ 2454 memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); 2455 sc->last_rx_valid = 1; 2456} 2457 2458/* 2459 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. 2460 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 
2461 */ 2462static void 2463iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2464 struct iwn_rx_data *data) 2465{ 2466 struct iwn_ops *ops = &sc->ops; 2467 struct ifnet *ifp = sc->sc_ifp; 2468 struct ieee80211com *ic = ifp->if_l2com; 2469 struct iwn_rx_ring *ring = &sc->rxq; 2470 struct ieee80211_frame *wh; 2471 struct ieee80211_node *ni; 2472 struct mbuf *m, *m1; 2473 struct iwn_rx_stat *stat; 2474 caddr_t head; 2475 bus_addr_t paddr; 2476 uint32_t flags; 2477 int error, len, rssi, nf; 2478 2479 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2480 2481 if (desc->type == IWN_MPDU_RX_DONE) { 2482 /* Check for prior RX_PHY notification. */ 2483 if (!sc->last_rx_valid) { 2484 DPRINTF(sc, IWN_DEBUG_ANY, 2485 "%s: missing RX_PHY\n", __func__); 2486 return; 2487 } 2488 stat = &sc->last_rx_stat; 2489 } else 2490 stat = (struct iwn_rx_stat *)(desc + 1); 2491 2492 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2493 2494 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { 2495 device_printf(sc->sc_dev, 2496 "%s: invalid RX statistic header, len %d\n", __func__, 2497 stat->cfg_phy_len); 2498 return; 2499 } 2500 if (desc->type == IWN_MPDU_RX_DONE) { 2501 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); 2502 head = (caddr_t)(mpdu + 1); 2503 len = le16toh(mpdu->len); 2504 } else { 2505 head = (caddr_t)(stat + 1) + stat->cfg_phy_len; 2506 len = le16toh(stat->len); 2507 } 2508 2509 flags = le32toh(*(uint32_t *)(head + len)); 2510 2511 /* Discard frames with a bad FCS early. */ 2512 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { 2513 DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", 2514 __func__, flags); 2515 ifp->if_ierrors++; 2516 return; 2517 } 2518 /* Discard frames that are too short. 
*/ 2519 if (len < sizeof (*wh)) { 2520 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", 2521 __func__, len); 2522 ifp->if_ierrors++; 2523 return; 2524 } 2525 2526 m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); 2527 if (m1 == NULL) { 2528 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", 2529 __func__); 2530 ifp->if_ierrors++; 2531 return; 2532 } 2533 bus_dmamap_unload(ring->data_dmat, data->map); 2534 2535 error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), 2536 IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 2537 if (error != 0 && error != EFBIG) { 2538 device_printf(sc->sc_dev, 2539 "%s: bus_dmamap_load failed, error %d\n", __func__, error); 2540 m_freem(m1); 2541 2542 /* Try to reload the old mbuf. */ 2543 error = bus_dmamap_load(ring->data_dmat, data->map, 2544 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 2545 &paddr, BUS_DMA_NOWAIT); 2546 if (error != 0 && error != EFBIG) { 2547 panic("%s: could not load old RX mbuf", __func__); 2548 } 2549 /* Physical address may have changed. */ 2550 ring->desc[ring->cur] = htole32(paddr >> 8); 2551 bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map, 2552 BUS_DMASYNC_PREWRITE); 2553 ifp->if_ierrors++; 2554 return; 2555 } 2556 2557 m = data->m; 2558 data->m = m1; 2559 /* Update RX descriptor. */ 2560 ring->desc[ring->cur] = htole32(paddr >> 8); 2561 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2562 BUS_DMASYNC_PREWRITE); 2563 2564 /* Finalize mbuf. */ 2565 m->m_pkthdr.rcvif = ifp; 2566 m->m_data = head; 2567 m->m_pkthdr.len = m->m_len = len; 2568 2569 /* Grab a reference to the source node. */ 2570 wh = mtod(m, struct ieee80211_frame *); 2571 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 2572 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && 2573 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; 2574 2575 rssi = ops->get_rssi(sc, stat); 2576 2577 if (ieee80211_radiotap_active(ic)) { 2578 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; 2579 2580 tap->wr_flags = 0; 2581 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) 2582 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 2583 tap->wr_dbm_antsignal = (int8_t)rssi; 2584 tap->wr_dbm_antnoise = (int8_t)nf; 2585 tap->wr_tsft = stat->tstamp; 2586 switch (stat->rate) { 2587 /* CCK rates. */ 2588 case 10: tap->wr_rate = 2; break; 2589 case 20: tap->wr_rate = 4; break; 2590 case 55: tap->wr_rate = 11; break; 2591 case 110: tap->wr_rate = 22; break; 2592 /* OFDM rates. */ 2593 case 0xd: tap->wr_rate = 12; break; 2594 case 0xf: tap->wr_rate = 18; break; 2595 case 0x5: tap->wr_rate = 24; break; 2596 case 0x7: tap->wr_rate = 36; break; 2597 case 0x9: tap->wr_rate = 48; break; 2598 case 0xb: tap->wr_rate = 72; break; 2599 case 0x1: tap->wr_rate = 96; break; 2600 case 0x3: tap->wr_rate = 108; break; 2601 /* Unknown rate: should not happen. */ 2602 default: tap->wr_rate = 0; 2603 } 2604 } 2605 2606 IWN_UNLOCK(sc); 2607 2608 /* Send the frame to the 802.11 layer. */ 2609 if (ni != NULL) { 2610 if (ni->ni_flags & IEEE80211_NODE_HT) 2611 m->m_flags |= M_AMPDU; 2612 (void)ieee80211_input(ni, m, rssi - nf, nf); 2613 /* Node is no longer needed. */ 2614 ieee80211_free_node(ni); 2615 } else 2616 (void)ieee80211_input_all(ic, m, rssi - nf, nf); 2617 2618 IWN_LOCK(sc); 2619 2620 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2621 2622} 2623 2624/* Process an incoming Compressed BlockAck. 
*/ 2625static void 2626iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2627 struct iwn_rx_data *data) 2628{ 2629 struct iwn_ops *ops = &sc->ops; 2630 struct ifnet *ifp = sc->sc_ifp; 2631 struct iwn_node *wn; 2632 struct ieee80211_node *ni; 2633 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 2634 struct iwn_tx_ring *txq; 2635 struct iwn_tx_data *txdata; 2636 struct ieee80211_tx_ampdu *tap; 2637 struct mbuf *m; 2638 uint64_t bitmap; 2639 uint16_t ssn; 2640 uint8_t tid; 2641 int ackfailcnt = 0, i, lastidx, qid, *res, shift; 2642 2643 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2644 2645 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2646 2647 qid = le16toh(ba->qid); 2648 txq = &sc->txq[ba->qid]; 2649 tap = sc->qid2tap[ba->qid]; 2650 tid = tap->txa_tid; 2651 wn = (void *)tap->txa_ni; 2652 2653 res = NULL; 2654 ssn = 0; 2655 if (!IEEE80211_AMPDU_RUNNING(tap)) { 2656 res = tap->txa_private; 2657 ssn = tap->txa_start & 0xfff; 2658 } 2659 2660 for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) { 2661 txdata = &txq->data[txq->read]; 2662 2663 /* Unmap and free mbuf. 
*/ 2664 bus_dmamap_sync(txq->data_dmat, txdata->map, 2665 BUS_DMASYNC_POSTWRITE); 2666 bus_dmamap_unload(txq->data_dmat, txdata->map); 2667 m = txdata->m, txdata->m = NULL; 2668 ni = txdata->ni, txdata->ni = NULL; 2669 2670 KASSERT(ni != NULL, ("no node")); 2671 KASSERT(m != NULL, ("no mbuf")); 2672 2673 ieee80211_tx_complete(ni, m, 1); 2674 2675 txq->queued--; 2676 txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; 2677 } 2678 2679 if (txq->queued == 0 && res != NULL) { 2680 iwn_nic_lock(sc); 2681 ops->ampdu_tx_stop(sc, qid, tid, ssn); 2682 iwn_nic_unlock(sc); 2683 sc->qid2tap[qid] = NULL; 2684 free(res, M_DEVBUF); 2685 return; 2686 } 2687 2688 if (wn->agg[tid].bitmap == 0) 2689 return; 2690 2691 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); 2692 if (shift < 0) 2693 shift += 0x100; 2694 2695 if (wn->agg[tid].nframes > (64 - shift)) 2696 return; 2697 2698 ni = tap->txa_ni; 2699 bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; 2700 for (i = 0; bitmap; i++) { 2701 if ((bitmap & 1) == 0) { 2702 ifp->if_oerrors++; 2703 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 2704 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2705 } else { 2706 ifp->if_opackets++; 2707 ieee80211_ratectl_tx_complete(ni->ni_vap, ni, 2708 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2709 } 2710 bitmap >>= 1; 2711 } 2712 2713 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2714 2715} 2716 2717/* 2718 * Process a CALIBRATION_RESULT notification sent by the initialization 2719 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 2720 */ 2721static void 2722iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2723 struct iwn_rx_data *data) 2724{ 2725 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 2726 int len, idx = -1; 2727 2728 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2729 2730 /* Runtime firmware should not send such a notification. 
*/ 2731 if (sc->sc_flags & IWN_FLAG_CALIB_DONE){ 2732 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n", 2733 __func__); 2734 return; 2735 } 2736 len = (le32toh(desc->len) & 0x3fff) - 4; 2737 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2738 2739 switch (calib->code) { 2740 case IWN5000_PHY_CALIB_DC: 2741 if ((sc->sc_flags & IWN_FLAG_INTERNAL_PA) == 0 && 2742 (sc->hw_type == IWN_HW_REV_TYPE_5150 || 2743 sc->hw_type >= IWN_HW_REV_TYPE_6000) && 2744 sc->hw_type != IWN_HW_REV_TYPE_6050) 2745 idx = 0; 2746 break; 2747 case IWN5000_PHY_CALIB_LO: 2748 idx = 1; 2749 break; 2750 case IWN5000_PHY_CALIB_TX_IQ: 2751 idx = 2; 2752 break; 2753 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 2754 if (sc->hw_type < IWN_HW_REV_TYPE_6000 && 2755 sc->hw_type != IWN_HW_REV_TYPE_5150) 2756 idx = 3; 2757 break; 2758 case IWN5000_PHY_CALIB_BASE_BAND: 2759 idx = 4; 2760 break; 2761 } 2762 if (idx == -1) /* Ignore other results. */ 2763 return; 2764 2765 /* Save calibration result. */ 2766 if (sc->calibcmd[idx].buf != NULL) 2767 free(sc->calibcmd[idx].buf, M_DEVBUF); 2768 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 2769 if (sc->calibcmd[idx].buf == NULL) { 2770 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 2771 "not enough memory for calibration result %d\n", 2772 calib->code); 2773 return; 2774 } 2775 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 2776 "saving calibration result code=%d len=%d\n", calib->code, len); 2777 sc->calibcmd[idx].len = len; 2778 memcpy(sc->calibcmd[idx].buf, calib, len); 2779} 2780 2781/* 2782 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 2783 * The latter is sent by the firmware after each received beacon. 
2784 */ 2785static void 2786iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2787 struct iwn_rx_data *data) 2788{ 2789 struct iwn_ops *ops = &sc->ops; 2790 struct ifnet *ifp = sc->sc_ifp; 2791 struct ieee80211com *ic = ifp->if_l2com; 2792 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2793 struct iwn_calib_state *calib = &sc->calib; 2794 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 2795 int temp; 2796 2797 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2798 2799 /* Ignore statistics received during a scan. */ 2800 if (vap->iv_state != IEEE80211_S_RUN || 2801 (ic->ic_flags & IEEE80211_F_SCAN)){ 2802 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n", 2803 __func__); 2804 return; 2805 } 2806 2807 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2808 2809 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received statistics, cmd %d\n", 2810 __func__, desc->type); 2811 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ 2812 2813 /* Test if temperature has changed. */ 2814 if (stats->general.temp != sc->rawtemp) { 2815 /* Convert "raw" temperature to degC. */ 2816 sc->rawtemp = stats->general.temp; 2817 temp = ops->get_temperature(sc); 2818 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 2819 __func__, temp); 2820 2821 /* Update TX power if need be (4965AGN only). */ 2822 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 2823 iwn4965_power_calibration(sc, temp); 2824 } 2825 2826 if (desc->type != IWN_BEACON_STATISTICS) 2827 return; /* Reply to a statistics request. */ 2828 2829 sc->noise = iwn_get_noise(&stats->rx.general); 2830 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 2831 2832 /* Test that RSSI and noise are present in stats report. 
*/ 2833 if (le32toh(stats->rx.general.flags) != 1) { 2834 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 2835 "received statistics without RSSI"); 2836 return; 2837 } 2838 2839 if (calib->state == IWN_CALIB_STATE_ASSOC) 2840 iwn_collect_noise(sc, &stats->rx.general); 2841 else if (calib->state == IWN_CALIB_STATE_RUN) 2842 iwn_tune_sensitivity(sc, &stats->rx); 2843 2844 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2845} 2846 2847/* 2848 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 2849 * and 5000 adapters have different incompatible TX status formats. 2850 */ 2851static void 2852iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2853 struct iwn_rx_data *data) 2854{ 2855 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 2856 struct iwn_tx_ring *ring; 2857 int qid; 2858 2859 qid = desc->qid & 0xf; 2860 ring = &sc->txq[qid]; 2861 2862 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2863 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2864 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2865 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2866 le32toh(stat->status)); 2867 2868 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2869 if (qid >= sc->firstaggqueue) { 2870 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 2871 &stat->status); 2872 } else { 2873 iwn_tx_done(sc, desc, stat->ackfailcnt, 2874 le32toh(stat->status) & 0xff); 2875 } 2876} 2877 2878static void 2879iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 2880 struct iwn_rx_data *data) 2881{ 2882 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 2883 struct iwn_tx_ring *ring; 2884 int qid; 2885 2886 qid = desc->qid & 0xf; 2887 ring = &sc->txq[qid]; 2888 2889 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 2890 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", 2891 __func__, desc->qid, desc->idx, stat->ackfailcnt, 2892 stat->btkillcnt, stat->rate, le16toh(stat->duration), 2893 
le32toh(stat->status)); 2894 2895#ifdef notyet 2896 /* Reset TX scheduler slot. */ 2897 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx); 2898#endif 2899 2900 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); 2901 if (qid >= sc->firstaggqueue) { 2902 iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, 2903 &stat->status); 2904 } else { 2905 iwn_tx_done(sc, desc, stat->ackfailcnt, 2906 le16toh(stat->status) & 0xff); 2907 } 2908} 2909 2910/* 2911 * Adapter-independent backend for TX_DONE firmware notifications. 2912 */ 2913static void 2914iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt, 2915 uint8_t status) 2916{ 2917 struct ifnet *ifp = sc->sc_ifp; 2918 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; 2919 struct iwn_tx_data *data = &ring->data[desc->idx]; 2920 struct mbuf *m; 2921 struct ieee80211_node *ni; 2922 struct ieee80211vap *vap; 2923 2924 KASSERT(data->ni != NULL, ("no node")); 2925 2926 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2927 2928 /* Unmap and free mbuf. */ 2929 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 2930 bus_dmamap_unload(ring->data_dmat, data->map); 2931 m = data->m, data->m = NULL; 2932 ni = data->ni, data->ni = NULL; 2933 vap = ni->ni_vap; 2934 2935 /* 2936 * Update rate control statistics for the node. 2937 */ 2938 if (status & IWN_TX_FAIL) { 2939 ifp->if_oerrors++; 2940 ieee80211_ratectl_tx_complete(vap, ni, 2941 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL); 2942 } else { 2943 ifp->if_opackets++; 2944 ieee80211_ratectl_tx_complete(vap, ni, 2945 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL); 2946 } 2947 2948 /* 2949 * Channels marked for "radar" require traffic to be received 2950 * to unlock before we can transmit. Until traffic is seen 2951 * any attempt to transmit is returned immediately with status 2952 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 2953 * happen on first authenticate after scanning. 
To workaround 2954 * this we ignore a failure of this sort in AUTH state so the 2955 * 802.11 layer will fall back to using a timeout to wait for 2956 * the AUTH reply. This allows the firmware time to see 2957 * traffic so a subsequent retry of AUTH succeeds. It's 2958 * unclear why the firmware does not maintain state for 2959 * channels recently visited as this would allow immediate 2960 * use of the channel after a scan (where we see traffic). 2961 */ 2962 if (status == IWN_TX_FAIL_TX_LOCKED && 2963 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 2964 ieee80211_tx_complete(ni, m, 0); 2965 else 2966 ieee80211_tx_complete(ni, m, 2967 (status & IWN_TX_FAIL) != 0); 2968 2969 sc->sc_tx_timer = 0; 2970 if (--ring->queued < IWN_TX_RING_LOMARK) { 2971 sc->qfullmsk &= ~(1 << ring->qid); 2972 if (sc->qfullmsk == 0 && 2973 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2974 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2975 iwn_start_locked(ifp); 2976 } 2977 } 2978 2979 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 2980 2981} 2982 2983/* 2984 * Process a "command done" firmware notification. This is where we wakeup 2985 * processes waiting for a synchronous command completion. 2986 */ 2987static void 2988iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 2989{ 2990 struct iwn_tx_ring *ring = &sc->txq[4]; 2991 struct iwn_tx_data *data; 2992 2993 if ((desc->qid & 0xf) != 4) 2994 return; /* Not a command ack. */ 2995 2996 data = &ring->data[desc->idx]; 2997 2998 /* If the command was mapped in an mbuf, free it. 
*/ 2999 if (data->m != NULL) { 3000 bus_dmamap_sync(ring->data_dmat, data->map, 3001 BUS_DMASYNC_POSTWRITE); 3002 bus_dmamap_unload(ring->data_dmat, data->map); 3003 m_freem(data->m); 3004 data->m = NULL; 3005 } 3006 wakeup(&ring->desc[desc->idx]); 3007} 3008 3009static void 3010iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes, 3011 void *stat) 3012{ 3013 struct iwn_ops *ops = &sc->ops; 3014 struct ifnet *ifp = sc->sc_ifp; 3015 struct iwn_tx_ring *ring = &sc->txq[qid]; 3016 struct iwn_tx_data *data; 3017 struct mbuf *m; 3018 struct iwn_node *wn; 3019 struct ieee80211_node *ni; 3020 struct ieee80211_tx_ampdu *tap; 3021 uint64_t bitmap; 3022 uint32_t *status = stat; 3023 uint16_t *aggstatus = stat; 3024 uint16_t ssn; 3025 uint8_t tid; 3026 int bit, i, lastidx, *res, seqno, shift, start; 3027 3028 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3029 3030#ifdef NOT_YET 3031 if (nframes == 1) { 3032 if ((*status & 0xff) != 1 && (*status & 0xff) != 2) 3033 printf("ieee80211_send_bar()\n"); 3034 } 3035#endif 3036 3037 bitmap = 0; 3038 start = idx; 3039 for (i = 0; i < nframes; i++) { 3040 if (le16toh(aggstatus[i * 2]) & 0xc) 3041 continue; 3042 3043 idx = le16toh(aggstatus[2*i + 1]) & 0xff; 3044 bit = idx - start; 3045 shift = 0; 3046 if (bit >= 64) { 3047 shift = 0x100 - idx + start; 3048 bit = 0; 3049 start = idx; 3050 } else if (bit <= -64) 3051 bit = 0x100 - start + idx; 3052 else if (bit < 0) { 3053 shift = start - idx; 3054 start = idx; 3055 bit = 0; 3056 } 3057 bitmap = bitmap << shift; 3058 bitmap |= 1ULL << bit; 3059 } 3060 tap = sc->qid2tap[qid]; 3061 tid = tap->txa_tid; 3062 wn = (void *)tap->txa_ni; 3063 wn->agg[tid].bitmap = bitmap; 3064 wn->agg[tid].startidx = start; 3065 wn->agg[tid].nframes = nframes; 3066 3067 res = NULL; 3068 ssn = 0; 3069 if (!IEEE80211_AMPDU_RUNNING(tap)) { 3070 res = tap->txa_private; 3071 ssn = tap->txa_start & 0xfff; 3072 } 3073 3074 seqno = le32toh(*(status + nframes)) & 0xfff; 3075 for (lastidx = 
(seqno & 0xff); ring->read != lastidx;) { 3076 data = &ring->data[ring->read]; 3077 3078 /* Unmap and free mbuf. */ 3079 bus_dmamap_sync(ring->data_dmat, data->map, 3080 BUS_DMASYNC_POSTWRITE); 3081 bus_dmamap_unload(ring->data_dmat, data->map); 3082 m = data->m, data->m = NULL; 3083 ni = data->ni, data->ni = NULL; 3084 3085 KASSERT(ni != NULL, ("no node")); 3086 KASSERT(m != NULL, ("no mbuf")); 3087 3088 ieee80211_tx_complete(ni, m, 1); 3089 3090 ring->queued--; 3091 ring->read = (ring->read + 1) % IWN_TX_RING_COUNT; 3092 } 3093 3094 if (ring->queued == 0 && res != NULL) { 3095 iwn_nic_lock(sc); 3096 ops->ampdu_tx_stop(sc, qid, tid, ssn); 3097 iwn_nic_unlock(sc); 3098 sc->qid2tap[qid] = NULL; 3099 free(res, M_DEVBUF); 3100 return; 3101 } 3102 3103 sc->sc_tx_timer = 0; 3104 if (ring->queued < IWN_TX_RING_LOMARK) { 3105 sc->qfullmsk &= ~(1 << ring->qid); 3106 if (sc->qfullmsk == 0 && 3107 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 3108 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3109 iwn_start_locked(ifp); 3110 } 3111 } 3112 3113 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3114 3115} 3116 3117/* 3118 * Process an INT_FH_RX or INT_SW_RX interrupt. 
3119 */ 3120static void 3121iwn_notif_intr(struct iwn_softc *sc) 3122{ 3123 struct iwn_ops *ops = &sc->ops; 3124 struct ifnet *ifp = sc->sc_ifp; 3125 struct ieee80211com *ic = ifp->if_l2com; 3126 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3127 uint16_t hw; 3128 3129 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 3130 BUS_DMASYNC_POSTREAD); 3131 3132 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 3133 while (sc->rxq.cur != hw) { 3134 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 3135 struct iwn_rx_desc *desc; 3136 3137 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3138 BUS_DMASYNC_POSTREAD); 3139 desc = mtod(data->m, struct iwn_rx_desc *); 3140 3141 DPRINTF(sc, IWN_DEBUG_RECV, 3142 "%s: qid %x idx %d flags %x type %d(%s) len %d\n", 3143 __func__, desc->qid & 0xf, desc->idx, desc->flags, 3144 desc->type, iwn_intr_str(desc->type), 3145 le16toh(desc->len)); 3146 3147 if (!(desc->qid & 0x80)) /* Reply to a command. */ 3148 iwn_cmd_done(sc, desc); 3149 3150 switch (desc->type) { 3151 case IWN_RX_PHY: 3152 iwn_rx_phy(sc, desc, data); 3153 break; 3154 3155 case IWN_RX_DONE: /* 4965AGN only. */ 3156 case IWN_MPDU_RX_DONE: 3157 /* An 802.11 frame has been received. */ 3158 iwn_rx_done(sc, desc, data); 3159 break; 3160 3161 case IWN_RX_COMPRESSED_BA: 3162 /* A Compressed BlockAck has been received. */ 3163 iwn_rx_compressed_ba(sc, desc, data); 3164 break; 3165 3166 case IWN_TX_DONE: 3167 /* An 802.11 frame has been transmitted. 
*/ 3168 ops->tx_done(sc, desc, data); 3169 break; 3170 3171 case IWN_RX_STATISTICS: 3172 case IWN_BEACON_STATISTICS: 3173 iwn_rx_statistics(sc, desc, data); 3174 break; 3175 3176 case IWN_BEACON_MISSED: 3177 { 3178 struct iwn_beacon_missed *miss = 3179 (struct iwn_beacon_missed *)(desc + 1); 3180 int misses; 3181 3182 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3183 BUS_DMASYNC_POSTREAD); 3184 misses = le32toh(miss->consecutive); 3185 3186 DPRINTF(sc, IWN_DEBUG_STATE, 3187 "%s: beacons missed %d/%d\n", __func__, 3188 misses, le32toh(miss->total)); 3189 /* 3190 * If more than 5 consecutive beacons are missed, 3191 * reinitialize the sensitivity state machine. 3192 */ 3193 if (vap->iv_state == IEEE80211_S_RUN && 3194 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 3195 if (misses > 5) 3196 (void)iwn_init_sensitivity(sc); 3197 if (misses >= vap->iv_bmissthreshold) { 3198 IWN_UNLOCK(sc); 3199 ieee80211_beacon_miss(ic); 3200 IWN_LOCK(sc); 3201 } 3202 } 3203 break; 3204 } 3205 case IWN_UC_READY: 3206 { 3207 struct iwn_ucode_info *uc = 3208 (struct iwn_ucode_info *)(desc + 1); 3209 3210 /* The microcontroller is ready. */ 3211 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3212 BUS_DMASYNC_POSTREAD); 3213 DPRINTF(sc, IWN_DEBUG_RESET, 3214 "microcode alive notification version=%d.%d " 3215 "subtype=%x alive=%x\n", uc->major, uc->minor, 3216 uc->subtype, le32toh(uc->valid)); 3217 3218 if (le32toh(uc->valid) != 1) { 3219 device_printf(sc->sc_dev, 3220 "microcontroller initialization failed"); 3221 break; 3222 } 3223 if (uc->subtype == IWN_UCODE_INIT) { 3224 /* Save microcontroller report. */ 3225 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 3226 } 3227 /* Save the address of the error log in SRAM. */ 3228 sc->errptr = le32toh(uc->errptr); 3229 break; 3230 } 3231 case IWN_STATE_CHANGED: 3232 { 3233 /* 3234 * State change allows hardware switch change to be 3235 * noted. However, we handle this in iwn_intr as we 3236 * get both the enable/disble intr. 
3237 */ 3238 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3239 BUS_DMASYNC_POSTREAD); 3240#ifdef IWN_DEBUG 3241 uint32_t *status = (uint32_t *)(desc + 1); 3242 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n", 3243 le32toh(*status)); 3244#endif 3245 break; 3246 } 3247 case IWN_START_SCAN: 3248 { 3249 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3250 BUS_DMASYNC_POSTREAD); 3251#ifdef IWN_DEBUG 3252 struct iwn_start_scan *scan = 3253 (struct iwn_start_scan *)(desc + 1); 3254 DPRINTF(sc, IWN_DEBUG_ANY, 3255 "%s: scanning channel %d status %x\n", 3256 __func__, scan->chan, le32toh(scan->status)); 3257#endif 3258 break; 3259 } 3260 case IWN_STOP_SCAN: 3261 { 3262 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 3263 BUS_DMASYNC_POSTREAD); 3264#ifdef IWN_DEBUG 3265 struct iwn_stop_scan *scan = 3266 (struct iwn_stop_scan *)(desc + 1); 3267 DPRINTF(sc, IWN_DEBUG_STATE, 3268 "scan finished nchan=%d status=%d chan=%d\n", 3269 scan->nchan, scan->status, scan->chan); 3270#endif 3271 3272 IWN_UNLOCK(sc); 3273 ieee80211_scan_next(vap); 3274 IWN_LOCK(sc); 3275 break; 3276 } 3277 case IWN5000_CALIBRATION_RESULT: 3278 iwn5000_rx_calib_results(sc, desc, data); 3279 break; 3280 3281 case IWN5000_CALIBRATION_DONE: 3282 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 3283 wakeup(sc); 3284 break; 3285 } 3286 3287 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 3288 } 3289 3290 /* Tell the firmware what we have processed. */ 3291 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 3292 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 3293} 3294 3295/* 3296 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 3297 * from power-down sleep mode. 3298 */ 3299static void 3300iwn_wakeup_intr(struct iwn_softc *sc) 3301{ 3302 int qid; 3303 3304 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 3305 __func__); 3306 3307 /* Wakeup RX and TX rings. 
*/ 3308 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 3309 for (qid = 0; qid < sc->ntxqs; qid++) { 3310 struct iwn_tx_ring *ring = &sc->txq[qid]; 3311 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 3312 } 3313} 3314 3315static void 3316iwn_rftoggle_intr(struct iwn_softc *sc) 3317{ 3318 struct ifnet *ifp = sc->sc_ifp; 3319 struct ieee80211com *ic = ifp->if_l2com; 3320 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL); 3321 3322 IWN_LOCK_ASSERT(sc); 3323 3324 device_printf(sc->sc_dev, "RF switch: radio %s\n", 3325 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 3326 if (tmp & IWN_GP_CNTRL_RFKILL) 3327 ieee80211_runtask(ic, &sc->sc_radioon_task); 3328 else 3329 ieee80211_runtask(ic, &sc->sc_radiooff_task); 3330} 3331 3332/* 3333 * Dump the error log of the firmware when a firmware panic occurs. Although 3334 * we can't debug the firmware because it is neither open source nor free, it 3335 * can help us to identify certain classes of problems. 3336 */ 3337static void 3338iwn_fatal_intr(struct iwn_softc *sc) 3339{ 3340 struct iwn_fw_dump dump; 3341 int i; 3342 3343 IWN_LOCK_ASSERT(sc); 3344 3345 /* Force a complete recalibration on next init. */ 3346 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 3347 3348 /* Check that the error log address is valid. */ 3349 if (sc->errptr < IWN_FW_DATA_BASE || 3350 sc->errptr + sizeof (dump) > 3351 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 3352 printf("%s: bad firmware error log address 0x%08x\n", __func__, 3353 sc->errptr); 3354 return; 3355 } 3356 if (iwn_nic_lock(sc) != 0) { 3357 printf("%s: could not read firmware error log\n", __func__); 3358 return; 3359 } 3360 /* Read firmware error log from SRAM. 
*/ 3361 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 3362 sizeof (dump) / sizeof (uint32_t)); 3363 iwn_nic_unlock(sc); 3364 3365 if (dump.valid == 0) { 3366 printf("%s: firmware error log is empty\n", __func__); 3367 return; 3368 } 3369 printf("firmware error log:\n"); 3370 printf(" error type = \"%s\" (0x%08X)\n", 3371 (dump.id < nitems(iwn_fw_errmsg)) ? 3372 iwn_fw_errmsg[dump.id] : "UNKNOWN", 3373 dump.id); 3374 printf(" program counter = 0x%08X\n", dump.pc); 3375 printf(" source line = 0x%08X\n", dump.src_line); 3376 printf(" error data = 0x%08X%08X\n", 3377 dump.error_data[0], dump.error_data[1]); 3378 printf(" branch link = 0x%08X%08X\n", 3379 dump.branch_link[0], dump.branch_link[1]); 3380 printf(" interrupt link = 0x%08X%08X\n", 3381 dump.interrupt_link[0], dump.interrupt_link[1]); 3382 printf(" time = %u\n", dump.time[0]); 3383 3384 /* Dump driver status (TX and RX rings) while we're here. */ 3385 printf("driver status:\n"); 3386 for (i = 0; i < sc->ntxqs; i++) { 3387 struct iwn_tx_ring *ring = &sc->txq[i]; 3388 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 3389 i, ring->qid, ring->cur, ring->queued); 3390 } 3391 printf(" rx ring: cur=%d\n", sc->rxq.cur); 3392} 3393 3394static void 3395iwn_intr(void *arg) 3396{ 3397 struct iwn_softc *sc = arg; 3398 struct ifnet *ifp = sc->sc_ifp; 3399 uint32_t r1, r2, tmp; 3400 3401 IWN_LOCK(sc); 3402 3403 /* Disable interrupts. */ 3404 IWN_WRITE(sc, IWN_INT_MASK, 0); 3405 3406 /* Read interrupts from ICT (fast) or from registers (slow). */ 3407 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3408 tmp = 0; 3409 while (sc->ict[sc->ict_cur] != 0) { 3410 tmp |= sc->ict[sc->ict_cur]; 3411 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 3412 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 3413 } 3414 tmp = le32toh(tmp); 3415 if (tmp == 0xffffffff) /* Shouldn't happen. */ 3416 tmp = 0; 3417 else if (tmp & 0xc0000) /* Workaround a HW bug. 
*/ 3418 tmp |= 0x8000; 3419 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 3420 r2 = 0; /* Unused. */ 3421 } else { 3422 r1 = IWN_READ(sc, IWN_INT); 3423 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 3424 return; /* Hardware gone! */ 3425 r2 = IWN_READ(sc, IWN_FH_INT); 3426 } 3427 3428 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" 3429 , r1, r2); 3430 3431 if (r1 == 0 && r2 == 0) 3432 goto done; /* Interrupt not for us. */ 3433 3434 /* Acknowledge interrupts. */ 3435 IWN_WRITE(sc, IWN_INT, r1); 3436 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 3437 IWN_WRITE(sc, IWN_FH_INT, r2); 3438 3439 if (r1 & IWN_INT_RF_TOGGLED) { 3440 iwn_rftoggle_intr(sc); 3441 goto done; 3442 } 3443 if (r1 & IWN_INT_CT_REACHED) { 3444 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 3445 __func__); 3446 } 3447 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 3448 device_printf(sc->sc_dev, "%s: fatal firmware error\n", 3449 __func__); 3450#ifdef IWN_DEBUG 3451 iwn_debug_register(sc); 3452#endif 3453 /* Dump firmware error log and stop. */ 3454 iwn_fatal_intr(sc); 3455 ifp->if_flags &= ~IFF_UP; 3456 iwn_stop_locked(sc); 3457 goto done; 3458 } 3459 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 3460 (r2 & IWN_FH_INT_RX)) { 3461 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 3462 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 3463 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 3464 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3465 IWN_INT_PERIODIC_DIS); 3466 iwn_notif_intr(sc); 3467 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 3468 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 3469 IWN_INT_PERIODIC_ENA); 3470 } 3471 } else 3472 iwn_notif_intr(sc); 3473 } 3474 3475 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 3476 if (sc->sc_flags & IWN_FLAG_USE_ICT) 3477 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 3478 wakeup(sc); /* FH DMA transfer completed. */ 3479 } 3480 3481 if (r1 & IWN_INT_ALIVE) 3482 wakeup(sc); /* Firmware is alive. 
*/ 3483 3484 if (r1 & IWN_INT_WAKEUP) 3485 iwn_wakeup_intr(sc); 3486 3487done: 3488 /* Re-enable interrupts. */ 3489 if (ifp->if_flags & IFF_UP) 3490 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 3491 3492 IWN_UNLOCK(sc); 3493} 3494 3495/* 3496 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 3497 * 5000 adapters use a slightly different format). 3498 */ 3499static void 3500iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3501 uint16_t len) 3502{ 3503 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; 3504 3505 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3506 3507 *w = htole16(len + 8); 3508 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3509 BUS_DMASYNC_PREWRITE); 3510 if (idx < IWN_SCHED_WINSZ) { 3511 *(w + IWN_TX_RING_COUNT) = *w; 3512 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3513 BUS_DMASYNC_PREWRITE); 3514 } 3515} 3516 3517static void 3518iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, 3519 uint16_t len) 3520{ 3521 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3522 3523 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3524 3525 *w = htole16(id << 12 | (len + 8)); 3526 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3527 BUS_DMASYNC_PREWRITE); 3528 if (idx < IWN_SCHED_WINSZ) { 3529 *(w + IWN_TX_RING_COUNT) = *w; 3530 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3531 BUS_DMASYNC_PREWRITE); 3532 } 3533} 3534 3535#ifdef notyet 3536static void 3537iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) 3538{ 3539 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; 3540 3541 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 3542 3543 *w = (*w & htole16(0xf000)) | htole16(1); 3544 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3545 BUS_DMASYNC_PREWRITE); 3546 if (idx < IWN_SCHED_WINSZ) { 3547 *(w + IWN_TX_RING_COUNT) = *w; 3548 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3549 
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif

/*
 * Queue one 802.11 frame for transmission on the ring selected by its
 * EDCA access category.  Builds the firmware TX command (rate, protection
 * flags, timeouts), DMA-maps the payload and fills the TX descriptor.
 * Consumes `m' on failure; returns 0 on success or an errno.
 * Called with the driver lock held.
 */
static int
iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	struct iwn_ops *ops = &sc->ops;
	const struct ieee80211_txparam *tp;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_node *wn = (void *)ni;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	uint32_t flags;
	uint16_t qos;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint8_t tid, ridx, txant, type;
	int ac, i, totlen, error, pad, nsegs = 0, rate;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Select EDCA Access Category and TX ring for this frame. */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}
	ac = M_WME_GETAC(m);
	if (m->m_flags & M_AMPDU_MPDU) {
		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];

		if (!IEEE80211_AMPDU_RUNNING(tap)) {
			/* Session torn down under us; drop the frame. */
			m_freem(m);
			return EINVAL;
		}

		/* Aggregation frames go to the dedicated agg queue. */
		ac = *(int *)tap->txa_private;
		*(uint16_t *)wh->i_seq =
		    htole16(ni->ni_txseqs[tid] << IEEE80211_SEQ_SEQ_SHIFT);
		ni->ni_txseqs[tid]++;
	}
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate index. */
	tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
	if (type == IEEE80211_FC0_TYPE_MGT)
		rate = tp->mgmtrate;
	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
		rate = tp->mcastrate;
	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rate = tp->ucastrate;
	else {
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		rate = ni->ni_txrate;
	}
	ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
	    rate & IEEE80211_RATE_VAL);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		ieee80211_radiotap_tx(vap, m);
	}

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* Unicast frame, check if an ACK is expected. */
		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
		    IEEE80211_QOS_ACKPOLICY_NOACK)
			flags |= IWN_TX_NEED_ACK;
	}
	if ((wh->i_fc[0] &
	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
		flags |= IWN_TX_IMM_BA;		/* Cannot happen yet. */

	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */

	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* NB: Group frames are sent using CCK in 802.11b/g. */
		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
			flags |= IWN_TX_NEED_RTS;
		} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
		    ridx >= IWN_RIDX_OFDM6) {
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				flags |= IWN_TX_NEED_CTS;
			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
				flags |= IWN_TX_NEED_RTS;
		}
		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
				/* 5000 autoselects RTS/CTS or CTS-to-self. */
				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
				flags |= IWN_TX_NEED_PROTECTION;
			} else
				flags |= IWN_TX_FULL_TXOP;
		}
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->id = sc->broadcast_id;
	else
		tx->id = wn->id;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->len = htole16(totlen);
	tx->tid = tid;
	tx->rts_ntries = 60;
	tx->data_ntries = 15;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
	if (tx->id == sc->broadcast_id) {
		/* Group or management frame. */
		tx->linkq = 0;
		/* XXX Alternate between antenna A and B? */
		txant = IWN_LSB(sc->txchainmask);
		tx->rate |= htole32(IWN_RFLAG_ANT(txant));
	} else {
		tx->linkq = ni->ni_rates.rs_nrates - ridx - 1;
		flags |= IWN_TX_LINKQ;	/* enable MRR */
	}
	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return ENOBUFS;
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold.
 */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Queue one raw 802.11 frame with caller-supplied BPF transmit parameters
 * (rate, retry counts, RTS/CTS protection, no-ACK).  Mirrors iwn_tx_data()
 * but takes everything from `params' instead of node/vap state.
 * Consumes `m' on failure; returns 0 on success or an errno.
 * Called with the driver lock held.
 */
static int
iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct iwn_tx_ring *ring;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct mbuf *m1;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	uint32_t flags;
	u_int hdrlen;
	int ac, totlen, error, pad, nsegs = 0, i, rate;
	uint8_t ridx, type, txant;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Access category comes from the caller, not from the frame. */
	ac = params->ibp_pri & 3;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Choose a TX rate index. */
	rate = params->ibp_rate0;
	ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
	    rate & IEEE80211_RATE_VAL);
	if (ridx == (uint8_t)-1) {
		/* XXX fall back to mcast/mgmt rate? */
		m_freem(m);
		return EINVAL;
	}

	totlen = m->m_pkthdr.len;

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	flags = 0;
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
		flags |= IWN_TX_NEED_ACK;
	if (params->ibp_flags & IEEE80211_BPF_RTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_RTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
	}
	if (params->ibp_flags & IEEE80211_BPF_CTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_CTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
	}
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWN_TX_NEED_PADDING;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;

		ieee80211_radiotap_tx(vap, m);
	}

	tx->len = htole16(totlen);
	tx->tid = 0;
	tx->id = sc->broadcast_id;
	tx->rts_ntries = params->ibp_try1;
	tx->data_ntries = params->ibp_try0;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);

	/* XXX should just use iwn_rate_to_plcp() */
	tx->rate = htole32(rate2plcp(rate));
	if (ridx < IWN_RIDX_OFDM6 &&
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		tx->rate |= htole32(IWN_RFLAG_CCK);

	/* Group or management frame. */
	tx->linkq = 0;
	txant = IWN_LSB(sc->txchainmask);
	tx->rate |= htole32(IWN_RFLAG_ANT(txant));

	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	tx->security = 0;
	tx->flags = htole32(flags);

	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return ENOBUFS;
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			m_freem(m);
			return error;
		}
	}

	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
	    __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len  = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len  = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold.
 */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * net80211 raw-transmit entry point.  Dispatches to iwn_tx_data() (legacy
 * path, frame contents decide how to send) or iwn_tx_data_raw() (explicit
 * BPF parameters).  The mbuf is always consumed; the node reference is
 * released on error.
 */
static int
iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct iwn_softc *sc = ifp->if_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ieee80211_free_node(ni);
		m_freem(m);
		return ENETDOWN;
	}

	IWN_LOCK(sc);
	if (params == NULL) {
		/*
		 * Legacy path; interpret frame contents to decide
		 * precisely how to send the frame.
		 */
		error = iwn_tx_data(sc, m, ni);
	} else {
		/*
		 * Caller supplied explicit parameters to use in
		 * sending the frame.
		 */
		error = iwn_tx_data_raw(sc, m, ni, params);
	}
	if (error != 0) {
		/* NB: m is reclaimed on tx failure */
		ieee80211_free_node(ni);
		ifp->if_oerrors++;
	}
	sc->sc_tx_timer = 5;

	IWN_UNLOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/* ifnet if_start handler: take the lock and drain the send queue. */
static void
iwn_start(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;

	IWN_LOCK(sc);
	iwn_start_locked(ifp);
	IWN_UNLOCK(sc);
}

/*
 * Dequeue frames from the interface send queue and hand them to
 * iwn_tx_data() until the queue is empty or a TX ring fills up.
 * Called with the driver lock held.
 */
static void
iwn_start_locked(struct ifnet *ifp)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *m;

	IWN_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_OACTIVE))
		return;

	for (;;) {
		if (sc->qfullmsk != 0) {
			/* Some TX ring is full; pause output. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/* net80211 stashes the node reference in rcvif. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwn_tx_data(sc, m, ni) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}
		sc->sc_tx_timer = 5;
	}
}

/*
 * Per-second watchdog callout: counts down the TX timer armed by the
 * transmit paths and triggers a device reinit on timeout.
 */
static void
iwn_watchdog(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	IWN_LOCK_ASSERT(sc);

	KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			if_printf(ifp, "device timeout\n");
			ieee80211_runtask(ic, &sc->sc_reinit_task);
			return;
		}
	}
	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
}

/*
 * ifnet ioctl handler: bring the interface up/down on SIOCSIFFLAGS and
 * pass address/media requests to the generic handlers.
 */
static int
iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iwn_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0, startall = 0, stop = 0;

	switch (cmd) {
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		IWN_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				iwn_init_locked(sc);
				/* RFKILL bit set means the radio is enabled. */
				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
					startall = 1;
				else
					stop = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				iwn_stop_locked(sc);
		}
		IWN_UNLOCK(sc);
		/* Defer net80211 up/down calls until the lock is dropped. */
		if (startall)
			ieee80211_start_all(ic);
		else if (vap != NULL && stop)
			ieee80211_stop(vap);
		break;
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Send a command to the firmware.
 */
static int
iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
{
	/* Firmware commands always go out on TX ring 4. */
	struct iwn_tx_ring *ring = &sc->txq[4];
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	struct iwn_tx_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	int totlen, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (async == 0)
		IWN_LOCK_ASSERT(sc);

	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];
	totlen = 4 + size;	/* 4-byte command header + payload */

	if (size > sizeof cmd->data) {
		/* Command is too large to fit in a descriptor. */
		if (totlen > MCLBYTES)
			return EINVAL;
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (m == NULL)
			return ENOMEM;
		cmd = mtod(m, struct iwn_tx_cmd *);
		error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
		    totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(m);
			return error;
		}
		data->m = m;
	} else {
		/* Small command: use the preallocated per-slot buffer. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->code = code;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;
	memcpy(cmd->data, buf, size);

	desc->nsegs = 1;
	desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);

	DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
	    __func__, iwn_intr_str(cmd->code), cmd->code,
	    cmd->flags, cmd->qid, cmd->idx);

	/* Flush command and descriptor to memory before kicking hardware. */
	if (size > sizeof cmd->data) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	/* Synchronous callers sleep (interruptibly) for the response. */
	return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
}

/* Add a node entry to the 4965 firmware station table. */
static int
iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{
	struct iwn4965_node_info hnode;
	caddr_t src, dst;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * We use the node structure for 5000 Series internally (it is
	 * a superset of the one for 4965AGN). We thus copy the common
	 * fields before sending the command.
	 */
	src = (caddr_t)node;
	dst = (caddr_t)&hnode;
	memcpy(dst, src, 48);
	/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
	memcpy(dst + 48, src + 72, 20);
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
}

/* Add a node entry to the 5000-series firmware station table. */
static int
iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Direct mapping. */
	return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
}

/*
 * Build and send the link-quality (rate retry table) command for a node:
 * start at the node's highest rate and step down one rate per retry slot.
 */
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
#define	RV(v)	((v) & IEEE80211_RATE_VAL)
	struct iwn_node *wn = (void *)ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, rate, txrate;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = wn->id;
	linkq.antmsk_1stream = txant;
	linkq.antmsk_2stream = IWN_ANT_AB;
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Start at highest available bit-rate. */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
		txrate = ni->ni_htrates.rs_nrates - 1;
	else
		txrate = rs->rs_nrates - 1;
	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
		uint32_t plcp;

		if (IEEE80211_IS_CHAN_HT(ni->ni_chan))
			rate = IEEE80211_RATE_MCS | txrate;
		else
			rate = RV(rs->rs_rates[txrate]);

		/* Do rate -> PLCP config mapping */
		plcp = iwn_rate_to_plcp(sc, ni, rate);
		linkq.retry[i] = plcp;

		/* Special case for dual-stream rates? */
		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
		    RV(le32toh(plcp)) > 7)
			linkq.mimo = i + 1;

		/* Next retry at immediate lower bit-rate. */
		if (txrate > 0)
			txrate--;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
#undef RV
}

/*
 * Broadcast node is used to send group-addressed and management frames.
 */
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct iwn_node_info node;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
	node.id = sc->broadcast_id;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
	if ((error = ops->add_node(sc, &node, async)) != 0)
		return error;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = sc->broadcast_id;
	linkq.antmsk_1stream = txant;
	linkq.antmsk_2stream = IWN_ANT_AB;
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Use lowest mandatory bit-rate. */
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		linkq.retry[0] = htole32(0xd);
	else
		linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
	linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
	/* Use same bit-rate for all TX retries. */
	for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
		linkq.retry[i] = linkq.retry[0];
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
}

/*
 * Push the current WME/EDCA parameters for all four access categories
 * to the firmware.  net80211 calls this with the com lock held; the lock
 * is dropped around the (potentially sleeping) firmware command.
 */
static int
iwn_updateedca(struct ieee80211com *ic)
{
#define IWN_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct iwn_softc *sc = ic->ic_ifp->if_softc;
	struct iwn_edca_params cmd;
	int aci;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = htole32(IWN_EDCA_UPDATE);
	for (aci = 0; aci < WME_NUM_AC; aci++) {
		const struct wmeParams *ac =
		    &ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
		cmd.ac[aci].aifsn = ac->wmep_aifsn;
		cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
		cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
		cmd.ac[aci].txoplimit =
		    htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
	}
	IEEE80211_UNLOCK(ic);
	IWN_LOCK(sc);
	(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
	IWN_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
#undef IWN_EXP2
}

/* Multicast filter updates are not needed by this hardware. */
static void
iwn_update_mcast(struct ifnet *ifp)
{
	/* Ignore */
}

/* Program the LED blink pattern via a firmware command. */
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	struct iwn_cmd_led led;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Clear microcode LED ownership. */
	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);

	led.which = which;
	led.unit = htole32(10000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}

/*
 * Set the critical temperature at which the firmware will stop the radio
 * and notify us.
 */
static int
iwn_set_critical_temp(struct iwn_softc *sc)
{
	struct iwn_critical_temp crit;
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);

	/*
	 * The threshold is 110 degC; each hardware family encodes it
	 * differently (the 5150 uses an inverted, offset-corrected scale,
	 * the 4965 uses Kelvin — cf. iwn5000_get_temperature()).
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		temp = IWN_CTOK(110);
	else
		temp = 110;
	memset(&crit, 0, sizeof crit);
	crit.tempR = htole32(temp);
	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
}

/* Send beacon timing parameters (interval, initial offset) to firmware. */
static int
iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
{
	struct iwn_cmd_timing cmd;
	uint64_t val, mod;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
	cmd.bintval = htole16(ni->ni_intval);
	cmd.lintval = htole16(10);

	/* Compute remaining time until next beacon. */
	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
	mod = le64toh(cmd.tstamp) % val;
	cmd.binitval = htole32((uint32_t)(val - mod));

	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));

	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
}

/*
 * 4965 temperature-driven TX power recalibration: re-run the TX power
 * computation when the temperature moved by 3 degC or more since the
 * last calibration.
 */
static void
iwn4965_power_calibration(struct iwn_softc *sc, int temp)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Adjust TX power if need be (delta >= 3 degC). */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
	    __func__, sc->temp, temp);
	if (abs(temp - sc->temp) >= 3) {
		/* Record temperature of last calibration. */
		sc->temp = temp;
		(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
	}
}

/*
 * Set TX power for current channel (each rate has its own power settings).
 * This function takes into account the regulatory information from EEPROM,
 * the current temperature and the current voltage.
 */
static int
iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n)	\
	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n)	\
	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))

	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
	struct iwn_ucode_info *uc = &sc->ucode_info;
	struct iwn4965_cmd_txpower cmd;
	struct iwn4965_eeprom_chan_samples *chans;
	const uint8_t *rf_gain, *dsp_gain;
	int32_t vdiff, tdiff;
	int i, c, grp, maxpwr;
	uint8_t chan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Retrieve current channel from last RXON. */
	chan = sc->rxon->chan;
	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
	    chan);

	memset(&cmd, 0, sizeof cmd);
	cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
	cmd.chan = chan;

	/* Select per-band max power and gain lookup tables. */
	if (IEEE80211_IS_CHAN_5GHZ(ch)) {
		maxpwr = sc->maxpwr5GHz;
		rf_gain = iwn4965_rf_gain_5ghz;
		dsp_gain = iwn4965_dsp_gain_5ghz;
	} else {
		maxpwr = sc->maxpwr2GHz;
		rf_gain = iwn4965_rf_gain_2ghz;
		dsp_gain = iwn4965_dsp_gain_2ghz;
	}

	/* Compute voltage compensation. */
	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
	if (vdiff > 0)
		vdiff *= 2;
	if (abs(vdiff) > 2)
		vdiff = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);

	/* Get channel attenuation group. */
	if (chan <= 20)		/* 1-20 */
		grp = 4;
	else if (chan <= 43)	/* 34-43 */
		grp = 0;
	else if (chan <= 70)	/* 44-70 */
		grp = 1;
	else if (chan <= 124)	/* 71-124 */
		grp = 2;
	else			/* 125-200 */
		grp = 3;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);

	/* Get channel sub-band. */
	for (i = 0; i < IWN_NBANDS; i++)
		if (sc->bands[i].lo != 0 &&
		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
			break;
	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
		return EINVAL;
	chans = sc->bands[i].chans;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d sub-band=%d\n", __func__, chan, i);

	/* Per TX chain: interpolate EEPROM samples, then fill rate table. */
	for (c = 0; c < 2; c++) {
		uint8_t power, gain, temp;
		int maxchpwr, pwr, ridx, idx;

		power = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].power,
		    chans[1].num, chans[1].samples[c][1].power, 1);
		gain = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].gain,
		    chans[1].num, chans[1].samples[c][1].gain, 1);
		temp = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].temp,
		    chans[1].num, chans[1].samples[c][1].temp, 1);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
		    __func__, c, power, gain, temp);

		/* Compute temperature compensation. */
		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
		    __func__, tdiff, sc->temp, temp);

		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
			/* Convert dBm to half-dBm. */
			maxchpwr = sc->maxpwr[chan] * 2;
			if ((ridx / 8) & 1)
				maxchpwr -= 6;	/* MIMO 2T: -3dB */

			pwr = maxpwr;

			/* Adjust TX power based on rate. */
			if ((ridx % 8) == 5)
				pwr -= 15;	/* OFDM48: -7.5dB */
			else if ((ridx % 8) == 6)
				pwr -= 17;	/* OFDM54: -8.5dB */
			else if ((ridx % 8) == 7)
				pwr -= 20;	/* OFDM60: -10dB */
			else
				pwr -= 10;	/* Others: -5dB */

			/* Do not exceed channel max TX power. */
			if (pwr > maxchpwr)
				pwr = maxchpwr;

			idx = gain - (pwr - power) - tdiff - vdiff;
			if ((ridx / 8) & 1)	/* MIMO */
				idx += (int32_t)le32toh(uc->atten[grp][c]);

			if (cmd.band == 0)
				idx += 9;	/* 5GHz */
			if (ridx == IWN_RIDX_MAX)
				idx += 5;	/* CCK */

			/* Make sure idx stays in a valid range. */
			if (idx < 0)
				idx = 0;
			else if (idx > IWN4965_MAX_PWR_INDEX)
				idx = IWN4965_MAX_PWR_INDEX;

			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
			    "%s: Tx chain %d, rate idx %d: power=%d\n",
			    __func__, c, ridx, idx);
			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
		}
	}

	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: set tx power for chan %d\n", __func__, chan);
	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);

#undef interpolate
#undef fdivround
}

static int
iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
    int async)
{
	struct iwn5000_cmd_txpower cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * TX power calibration is handled automatically by the firmware
	 * for 5000 Series.
	 */
	memset(&cmd, 0, sizeof cmd);
	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
	return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers.
 */
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t mask, agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
	agc = (le16toh(phy->agc) >> 7) & 0x7f;

	/* Take the strongest reading among the antennas flagged valid. */
	rssi = 0;
	if (mask & IWN_ANT_A)
		rssi = MAX(rssi, phy->rssi[0]);
	if (mask & IWN_ANT_B)
		rssi = MAX(rssi, phy->rssi[2]);
	if (mask & IWN_ANT_C)
		rssi = MAX(rssi, phy->rssi[4]);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	agc = (le32toh(phy->agc) >> 9) & 0x7f;

	/* Take the strongest of the three per-antenna readings. */
	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
	    le16toh(phy->rssi[1]) & 0xff);
	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
	int i, total, nbant, noise;

	total = nbant = 0;
	for (i = 0; i < 3; i++) {
		/* A zero reading means the antenna contributed nothing. */
		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
			continue;
		total += noise;
		nbant++;
	}
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ? -127 : (total / nbant) - 107;
}

/*
 * Compute temperature (in degC) from last received statistics.
 */
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
	struct iwn_ucode_info *uc = &sc->ucode_info;
	int32_t r1, r2, r3, r4, temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	r1 = le32toh(uc->temp[0].chan20MHz);
	r2 = le32toh(uc->temp[1].chan20MHz);
	r3 = le32toh(uc->temp[2].chan20MHz);
	r4 = le32toh(sc->rawtemp);

	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
		return 0;

	/* Sign-extend 23-bit R4 value to 32-bit. */
	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
	/* Compute temperature in Kelvin. */
	temp = (259 * (r4 - r2)) / (r3 - r1);
	temp = (temp * 97) / 100 + 8;

	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
	    IWN_KTOC(temp));
	return IWN_KTOC(temp);
}

static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * Temperature is not used by the driver for 5000 Series because
	 * TX power calibration is handled by firmware.
	 */
	temp = le32toh(sc->rawtemp);
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* 5150 reports on an inverted, offset scale; convert. */
		temp = (temp / -5) + sc->temp_off;
		temp = IWN_KTOC(temp);
	}
	return temp;
}

/*
 * Initialize sensitivity calibration state machine.
 */
static int
iwn_init_sensitivity(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t flags;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Reset calibration state machine. */
	memset(calib, 0, sizeof (*calib));
	calib->state = IWN_CALIB_STATE_INIT;
	calib->cck_state = IWN_CCK_STATE_HIFA;
	/* Set initial correlation values. */
	calib->ofdm_x1 = sc->limits->min_ofdm_x1;
	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
	calib->ofdm_x4 = sc->limits->min_ofdm_x4;
	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
	/*
	 * NOTE(review): cck_x4 starts at a hard-coded 125 rather than
	 * limits->min_cck_x4 like its neighbors — presumably intentional;
	 * confirm before "fixing".
	 */
	calib->cck_x4 = 125;
	calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
	calib->energy_cck = sc->limits->energy_cck;

	/* Write initial sensitivity. */
	if ((error = iwn_send_sensitivity(sc)) != 0)
		return error;

	/* Write initial gains. */
	if ((error = ops->init_gains(sc)) != 0)
		return error;

	/* Request statistics at each beacon interval. */
	flags = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
}

/*
 * Collect noise and RSSI statistics for the first 20 beacons received
 * after association and use them to determine connected antennas and
 * to set differential gains.
 */
static void
iwn_collect_noise(struct iwn_softc *sc,
    const struct iwn_rx_general_stats *stats)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Accumulate RSSI and noise for all 3 antennas. */
	for (i = 0; i < 3; i++) {
		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
	}
	/* NB: We update differential gains only once after 20 beacons. */
	if (++calib->nbeacons < 20)
		return;

	/* Determine highest average RSSI. */
	val = MAX(calib->rssi[0], calib->rssi[1]);
	val = MAX(calib->rssi[2], val);

	/* Determine which antennas are connected. */
	sc->chainmask = sc->rxchainmask;
	for (i = 0; i < 3; i++)
		if (val - calib->rssi[i] > 15 * 20)
			sc->chainmask &= ~(1 << i);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
	    __func__, sc->rxchainmask, sc->chainmask);

	/* If none of the TX antennas are connected, keep at least one. */
	if ((sc->chainmask & sc->txchainmask) == 0)
		sc->chainmask |= IWN_LSB(sc->txchainmask);

	(void)ops->set_gains(sc);
	calib->state = IWN_CALIB_STATE_RUN;

#ifdef notyet
	/* XXX Disable RX chains with no antennas connected. */
	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
#endif

	/* Enable power-saving mode if requested by user. */
	if (ic->ic_flags & IEEE80211_F_PMGTON)
		(void)iwn_set_pslevel(sc, 0, 3, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

static int
iwn4965_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib_gain cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Differential gains initially set to 0 for all 3 antennas. */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

static int
iwn5000_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->reset_noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

static int
iwn4965_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, delta, noise;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Get minimal noise among connected antennas. */
	noise = INT_MAX;	/* NB: There's at least one antenna. */
	for (i = 0; i < 3; i++)
		if (sc->chainmask & (1 << i))
			noise = MIN(calib->noise[i], noise);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Set differential gains for connected antennas. */
	for (i = 0; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* Compute attenuation (in unit of 1.5dB). */
			delta = (noise - (int32_t)calib->noise[i]) / 30;
			/* NB: delta <= 0 */
			/* Limit to [-4.5dB,0]. */
			cmd.gain[i] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

static int
iwn5000_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, ant, div, delta;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Get first available RX antenna as referential. */
	ant = IWN_LSB(sc->rxchainmask);
	/* Set differential gains for other antennas. */
	for (i = ant + 1; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* The delta is relative to antenna "ant". */
			delta = ((int32_t)calib->noise[ant] -
			    (int32_t)calib->noise[i]) / div;
			/* Limit to [-4.5dB,+4.5dB]. */
			cmd.gain[i - 1] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant B/C: %x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Tune RF RX sensitivity based on the number of false alarms detected
 * during the last beacon period.
 */
static void
iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
{
/* Bump "val" by "inc" toward "max", flagging that an update is needed. */
#define inc(val, inc, max)			\
	if ((val) < (max)) {			\
		if ((val) < (max) - (inc))	\
			(val) += (inc);		\
		else				\
			(val) = (max);		\
		needs_update = 1;		\
	}
/* Lower "val" by "dec" toward "min", flagging that an update is needed. */
#define dec(val, dec, min)			\
	if ((val) > (min)) {			\
		if ((val) > (min) + (dec))	\
			(val) -= (dec);		\
		else				\
			(val) = (min);		\
		needs_update = 1;		\
	}

	const struct iwn_sensitivity_limits *limits = sc->limits;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t val, rxena, fa;
	uint32_t energy[3], energy_min;
	uint8_t noise[3], noise_ref;
	int i, needs_update = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check that we've been enabled long enough. */
	if ((rxena = le32toh(stats->general.load)) == 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
		return;
	}

	/* Compute number of false alarms since last call for OFDM. */
	fa  = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	/* Save counters values for next call. */
	calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
	calib->fa_ofdm = le32toh(stats->ofdm.fa);

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
		inc(calib->ofdm_x1,     1, limits->max_ofdm_x1);
		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
		inc(calib->ofdm_x4,     1, limits->max_ofdm_x4);
		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
		dec(calib->ofdm_x1,     1, limits->min_ofdm_x1);
		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
		dec(calib->ofdm_x4,     1, limits->min_ofdm_x4);
		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
	}

	/* Compute maximum noise among 3 receivers. */
	for (i = 0; i < 3; i++)
		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
	val = MAX(noise[0], noise[1]);
	val = MAX(noise[2], val);
	/* Insert it into our samples table. */
	calib->noise_samples[calib->cur_noise_sample] = val;
	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;

	/* Compute maximum noise among last 20 samples. */
	noise_ref = calib->noise_samples[0];
	for (i = 1; i < 20; i++)
		noise_ref = MAX(noise_ref, calib->noise_samples[i]);

	/* Compute maximum energy among 3 receivers. */
	for (i = 0; i < 3; i++)
		energy[i] = le32toh(stats->general.energy[i]);
	val = MIN(energy[0], energy[1]);
	val = MIN(energy[2], val);
	/* Insert it into our samples table. */
	calib->energy_samples[calib->cur_energy_sample] = val;
	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;

	/*
	 * Compute minimum energy among last 10 samples.
	 * NOTE(review): MAX() under a "minimum" comment — energy readings
	 * appear to be on an inverted scale (larger raw value = less
	 * energy); confirm before changing.
	 */
	energy_min = calib->energy_samples[0];
	for (i = 1; i < 10; i++)
		energy_min = MAX(energy_min, calib->energy_samples[i]);
	energy_min += 6;

	/* Compute number of false alarms since last call for CCK. */
	fa  = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
	fa += le32toh(stats->cck.fa) - calib->fa_cck;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	/* Save counters values for next call. */
	calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
	calib->fa_cck = le32toh(stats->cck.fa);

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK high false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_HIFA;
		calib->low_fa = 0;

		if (calib->cck_x4 > 160) {
			calib->noise_ref = noise_ref;
			if (calib->energy_cck > 2)
				dec(calib->energy_cck, 2, energy_min);
		}
		if (calib->cck_x4 < 160) {
			calib->cck_x4 = 161;
			needs_update = 1;
		} else
			inc(calib->cck_x4, 3, limits->max_cck_x4);

		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK low false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_LOFA;
		calib->low_fa++;

		if (calib->cck_state != IWN_CCK_STATE_INIT &&
		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
		     calib->low_fa > 100)) {
			inc(calib->energy_cck, 2, limits->min_energy_cck);
			dec(calib->cck_x4,     3, limits->min_cck_x4);
			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
		}
	} else {
		/* Not worth to increase or decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
		calib->low_fa = 0;
		calib->noise_ref = noise_ref;

		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
			/* Previous interval had many false alarms. */
			dec(calib->energy_cck, 8, energy_min);
		}
		calib->cck_state = IWN_CCK_STATE_INIT;
	}

	if (needs_update)
		(void)iwn_send_sensitivity(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

#undef dec
#undef inc
}

/*
 * Push the current sensitivity calibration values to the firmware; uses
 * the larger "enhanced" command layout when the hardware supports it.
 */
static int
iwn_send_sensitivity(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_enhanced_sensitivity_cmd cmd;
	int len;

	memset(&cmd, 0, sizeof cmd);
	len = sizeof (struct iwn_sensitivity_cmd);
	cmd.which = IWN_SENSITIVITY_WORKTBL;
	/* OFDM modulation. */
	cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
	cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
	cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
	cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
	cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
	cmd.energy_ofdm_th = htole16(62);
	/* CCK modulation. */
	cmd.corr_cck_x4 = htole16(calib->cck_x4);
	cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
	cmd.energy_cck = htole16(calib->energy_cck);
	/* Barker modulation: use default values. */
	cmd.corr_barker = htole16(190);
	cmd.corr_barker_mrc = htole16(390);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
	    calib->ofdm_mrc_x4, calib->cck_x4,
	    calib->cck_mrc_x4, calib->energy_cck);

	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
		goto send;
	/* Enhanced sensitivity settings. */
	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
	cmd.ofdm_det_slope_mrc = htole16(668);
	cmd.ofdm_det_icept_mrc = htole16(4);
	cmd.ofdm_det_slope = htole16(486);
	cmd.ofdm_det_icept = htole16(37);
	cmd.cck_det_slope_mrc = htole16(853);
	cmd.cck_det_icept_mrc = htole16(4);
	cmd.cck_det_slope = htole16(476);
	cmd.cck_det_icept = htole16(99);
send:
	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
}

/*
 * Set STA mode power saving level (between 0 and 5).
 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
 */
static int
iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
{
	struct iwn_pmgt_cmd cmd;
	const struct iwn_pmgt *pmgt;
	uint32_t max, skip_dtim;
	uint32_t reg;
	int i;

	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
	    "%s: dtim=%d, level=%d, async=%d\n",
	    __func__,
	    dtim,
	    level,
	    async);

	/* Select which PS parameters to use. */
	if (dtim <= 2)
		pmgt = &iwn_pmgt[0][level];
	else if (dtim <= 10)
		pmgt = &iwn_pmgt[1][level];
	else
		pmgt = &iwn_pmgt[2][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
	if (level == 5)
		cmd.flags |= htole16(IWN_PS_FAST_PD);
	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	if (!(reg & 0x1))	/* L0s Entry disabled.
/*
 * Set STA mode power saving level (between 0 and 5).
 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
 */
static int
iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
{
	struct iwn_pmgt_cmd cmd;
	const struct iwn_pmgt *pmgt;
	uint32_t max, skip_dtim;
	uint32_t reg;
	int i;

	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
	    "%s: dtim=%d, level=%d, async=%d\n",
	    __func__,
	    dtim,
	    level,
	    async);

	/*
	 * Select which PS parameter table to use: the iwn_pmgt tables are
	 * indexed by DTIM-period bucket (<=2, <=10, >10) and by level.
	 */
	if (dtim <= 2)
		pmgt = &iwn_pmgt[0][level];
	else if (dtim <= 10)
		pmgt = &iwn_pmgt[1][level];
	else
		pmgt = &iwn_pmgt[2][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
	if (level == 5)
		cmd.flags |= htole16(IWN_PS_FAST_PD);
	/*
	 * Retrieve PCIe Active State Power Management (ASPM).
	 * NOTE(review): reads one byte at offset 0x10 into the PCIe
	 * capability — the Link Control register; bit 0 is the L0s
	 * ASPM enable bit.
	 */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	if (!(reg & 0x1))	/* L0s Entry disabled. */
		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
	/* Timeouts are given in units of 1024 usec. */
	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);

	if (dtim == 0) {
		/* No DTIM period known yet; behave as DTIM=1, no skipping. */
		dtim = 1;
		skip_dtim = 0;
	} else
		skip_dtim = pmgt->skip_dtim;
	if (skip_dtim != 0) {
		/*
		 * Allow sleeping over DTIM beacons; clamp the sleep
		 * intervals to a multiple of the DTIM period.
		 */
		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
		max = pmgt->intval[4];
		if (max == (uint32_t)-1)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			max = (max / dtim) * dtim;
	} else
		max = dtim;
	for (i = 0; i < 5; i++)
		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));

	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
	    level);
	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}

/*
 * Send the legacy (basic) bluetooth coexistence configuration to the
 * firmware: channel announcement and BT priority, with default lead
 * time and max-kill values.
 */
static int
iwn_send_btcoex(struct iwn_softc *sc)
{
	struct iwn_bluetooth cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}
/*
 * Configure advanced (3-wire) bluetooth coexistence.  Sends three
 * firmware commands in order: the coexistence configuration (including
 * the 3-wire lookup table), the priority table, and finally a BT
 * protection open/close toggle to force the BT state machine to pick
 * up the new configuration.
 */
static int
iwn_send_advanced_btcoex(struct iwn_softc *sc)
{
	/* 3-wire coexistence signalling lookup table (opaque firmware data). */
	static const uint32_t btcoex_3wire[12] = {
		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
		0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
	};
	struct iwn6000_btcoex_config btconfig;
	struct iwn_btcoex_priotable btprio;
	struct iwn_btcoex_prot btprot;
	int error, i;

	memset(&btconfig, 0, sizeof btconfig);
	btconfig.flags = 145;
	btconfig.max_kill = 5;
	btconfig.bt3_t7_timer = 1;
	btconfig.kill_ack = htole32(0xffff0000);
	btconfig.kill_cts = htole32(0xffff0000);
	btconfig.sample_time = 2;
	btconfig.bt3_t2_timer = 0xc;
	for (i = 0; i < 12; i++)
		btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
	btconfig.valid = htole16(0xff);
	btconfig.prio_boost = 0xf0;
	DPRINTF(sc, IWN_DEBUG_RESET,
	    "%s: configuring advanced bluetooth coexistence\n", __func__);
	error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1);
	if (error != 0)
		return error;

	memset(&btprio, 0, sizeof btprio);
	btprio.calib_init1 = 0x6;
	btprio.calib_init2 = 0x7;
	btprio.calib_periodic_low1 = 0x2;
	btprio.calib_periodic_low2 = 0x3;
	btprio.calib_periodic_high1 = 0x4;
	btprio.calib_periodic_high2 = 0x5;
	btprio.dtim = 0x6;
	btprio.scan52 = 0x8;
	btprio.scan24 = 0xa;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
	    1);
	if (error != 0)
		return error;

	/* Force BT state machine change: open, then close, protection. */
	memset(&btprot, 0, sizeof btprot);
	btprot.open = 1;
	btprot.type = 1;
	error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
	if (error != 0)
		return error;
	btprot.open = 0;
	return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
}

/*
 * Enable runtime DC calibration in the firmware (used on the 6050
 * series; see iwn_config()).
 */
static int
iwn5000_runtime_calib(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = 0xffffffff;
	cmd.ucode.once.start = IWN5000_CALIB_DC;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: configuring runtime calibration\n", __func__);
	return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
}
/*
 * Bring the adapter to its base operational configuration: per-chip
 * calibration setup, TX antenna configuration, bluetooth coexistence,
 * the initial RXON (mode/channel/filter), broadcast node, TX power,
 * critical temperature and power-save level.  Each step is ordered and
 * any failure aborts the sequence with the error returned.
 */
static int
iwn_config(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	uint32_t txmask;
	uint16_t rxchain;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (sc->hw_type == IWN_HW_REV_TYPE_6005) {
		/* Set radio temperature sensor offset. */
		error = iwn5000_temp_offset_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set temperature offset\n", __func__);
			return error;
		}
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Configure runtime DC calibration. */
		error = iwn5000_runtime_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure runtime calibration\n",
			    __func__);
			return error;
		}
	}

	/* Configure valid TX chains for >=5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
		txmask = htole32(sc->txchainmask);
		DPRINTF(sc, IWN_DEBUG_RESET,
		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
		    sizeof txmask, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure valid TX chains, "
			    "error %d\n", __func__, error);
			return error;
		}
	}

	/* Configure bluetooth coexistence. */
	if (sc->sc_flags & IWN_FLAG_ADV_BTCOEX)
		error = iwn_send_advanced_btcoex(sc);
	else
		error = iwn_send_btcoex(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure bluetooth coexistence, error %d\n",
		    __func__, error);
		return error;
	}

	/* Set mode, channel, RX filter and enable RX. */
	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
	IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp));
	IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->rxon->mode = IWN_MODE_STA;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
		break;
	case IEEE80211_M_MONITOR:
		sc->rxon->mode = IWN_MODE_MONITOR;
		sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
		    IWN_FILTER_CTL | IWN_FILTER_PROMISC);
		break;
	default:
		/* Should not get there. */
		break;
	}
	sc->rxon->cck_mask = 0x0f;	/* not yet negotiated */
	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
	sc->rxon->ht_single_mask = 0xff;
	sc->rxon->ht_dual_mask = 0xff;
	sc->rxon->ht_triple_mask = 0xff;
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_MIMO_COUNT(2) |
	    IWN_RXCHAIN_IDLE_COUNT(2);
	sc->rxon->rxchain = htole16(rxchain);
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed\n",
		    __func__);
		return error;
	}

	if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
		    __func__);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not set TX power\n",
		    __func__);
		return error;
	}

	if ((error = iwn_set_critical_temp(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set critical temperature\n", __func__);
		return error;
	}

	/* Set power saving level to CAM during initialization. */
	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set power saving level\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}
*/ 5496 if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) { 5497 device_printf(sc->sc_dev, "%s: could not set TX power\n", 5498 __func__); 5499 return error; 5500 } 5501 5502 if ((error = iwn_set_critical_temp(sc)) != 0) { 5503 device_printf(sc->sc_dev, 5504 "%s: could not set critical temperature\n", __func__); 5505 return error; 5506 } 5507 5508 /* Set power saving level to CAM during initialization. */ 5509 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { 5510 device_printf(sc->sc_dev, 5511 "%s: could not set power saving level\n", __func__); 5512 return error; 5513 } 5514 5515 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5516 5517 return 0; 5518} 5519 5520/* 5521 * Add an ssid element to a frame. 5522 */ 5523static uint8_t * 5524ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len) 5525{ 5526 *frm++ = IEEE80211_ELEMID_SSID; 5527 *frm++ = len; 5528 memcpy(frm, ssid, len); 5529 return frm + len; 5530} 5531 5532static int 5533iwn_scan(struct iwn_softc *sc) 5534{ 5535 struct ifnet *ifp = sc->sc_ifp; 5536 struct ieee80211com *ic = ifp->if_l2com; 5537 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/ 5538 struct ieee80211_node *ni = ss->ss_vap->iv_bss; 5539 struct iwn_scan_hdr *hdr; 5540 struct iwn_cmd_data *tx; 5541 struct iwn_scan_essid *essid; 5542 struct iwn_scan_chan *chan; 5543 struct ieee80211_frame *wh; 5544 struct ieee80211_rateset *rs; 5545 struct ieee80211_channel *c; 5546 uint8_t *buf, *frm; 5547 uint16_t rxchain; 5548 uint8_t txant; 5549 int buflen, error; 5550 5551 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5552 5553 sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; 5554 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); 5555 if (buf == NULL) { 5556 device_printf(sc->sc_dev, 5557 "%s: could not allocate buffer for scan command\n", 5558 __func__); 5559 return ENOMEM; 5560 } 5561 hdr = (struct iwn_scan_hdr *)buf; 5562 /* 5563 * Move to the next channel if no frames are received within 10ms 5564 * after 
/*
 * Build and send a firmware scan command for the current channel.
 * The command buffer is laid out sequentially: scan header, TX command,
 * ESSID array, probe request frame, then one channel descriptor.  The
 * buffer is allocated here and freed before returning.
 */
static int
iwn_scan(struct iwn_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_scan_state *ss = ic->ic_scan;	/*XXX*/
	struct ieee80211_node *ni = ss->ss_vap->iv_bss;
	struct iwn_scan_hdr *hdr;
	struct iwn_cmd_data *tx;
	struct iwn_scan_essid *essid;
	struct iwn_scan_chan *chan;
	struct ieee80211_frame *wh;
	struct ieee80211_rateset *rs;
	struct ieee80211_channel *c;
	uint8_t *buf, *frm;
	uint16_t rxchain;
	uint8_t txant;
	int buflen, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf == NULL) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate buffer for scan command\n",
		    __func__);
		return ENOMEM;
	}
	hdr = (struct iwn_scan_hdr *)buf;
	/*
	 * Move to the next channel if no frames are received within 10ms
	 * after sending the probe request.
	 */
	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
	hdr->quiet_threshold = htole16(1);	/* min # of packets */

	/* Select antennas for scanning. */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
	    IWN_RXCHAIN_DRIVER_FORCE;
	if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Ant A must be avoided in 5GHz because of an HW bug. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
	} else	/* Use all available RX antennas. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
	hdr->rxchain = htole16(rxchain);
	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);

	/* TX command describing how the probe request is sent. */
	tx = (struct iwn_cmd_data *)(hdr + 1);
	tx->flags = htole32(IWN_TX_AUTO_SEQ);
	tx->id = sc->broadcast_id;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);

	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) {
		/* Send probe requests at 6Mbps. */
		tx->rate = htole32(0xd);
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
	} else {
		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
		    sc->rxon->associd && sc->rxon->chan > 14)
			tx->rate = htole32(0xd);
		else {
			/* Send probe requests at 1Mbps. */
			tx->rate = htole32(10 | IWN_RFLAG_CCK);
		}
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	}
	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);
	tx->rate |= htole32(IWN_RFLAG_ANT(txant));

	/* Only the first ESSID slot is used; the rest stay zeroed. */
	essid = (struct iwn_scan_essid *)(tx + 1);
	if (ss->ss_ssid[0].len != 0) {
		essid[0].id = IEEE80211_ELEMID_SSID;
		essid[0].len = ss->ss_ssid[0].len;
		memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
	}
	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 * NOTE(review): the '20' is the number of ESSID slots reserved in
	 * the command layout — presumably a firmware constant; confirm
	 * against if_iwnreg.h.
	 */
	wh = (struct ieee80211_frame *)(essid + 20);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, NULL, 0);
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	if (ic->ic_htcaps & IEEE80211_HTC_HT)
		frm = ieee80211_add_htcap(frm, ni);

	/* Set length of probe request. */
	tx->len = htole16(frm - (uint8_t *)wh);

	/*
	 * Channel descriptor: active scan on non-passive channels,
	 * otherwise passive with a longer dwell time.  Dwell times are
	 * shortened when associated to limit time off-channel.
	 */
	c = ic->ic_curchan;
	chan = (struct iwn_scan_chan *)frm;
	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
	chan->flags = 0;
	if (ss->ss_nssid > 0)
		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
	chan->dsp_gain = 0x6e;
	if (IEEE80211_IS_CHAN_5GHZ(c) &&
	    !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
		chan->rf_gain = 0x3b;
		chan->active = htole16(24);
		chan->passive = htole16(110);
		chan->flags |= htole32(IWN_CHAN_ACTIVE);
	} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
		chan->rf_gain = 0x3b;
		chan->active = htole16(24);
		if (sc->rxon->associd)
			chan->passive = htole16(78);
		else
			chan->passive = htole16(110);
		hdr->crc_threshold = 0xffff;
	} else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
		chan->rf_gain = 0x28;
		chan->active = htole16(36);
		chan->passive = htole16(120);
		chan->flags |= htole32(IWN_CHAN_ACTIVE);
	} else {
		chan->rf_gain = 0x28;
		chan->active = htole16(36);
		if (sc->rxon->associd)
			chan->passive = htole16(88);
		else
			chan->passive = htole16(120);
		hdr->crc_threshold = 0xffff;
	}

	DPRINTF(sc, IWN_DEBUG_STATE,
	    "%s: chan %u flags 0x%x rf_gain 0x%x "
	    "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
	    chan->active, chan->passive);

	hdr->nchan++;
	chan++;
	buflen = (uint8_t *)chan - buf;
	hdr->len = htole16(buflen);

	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
	    hdr->nchan);
	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
	free(buf, M_DEVBUF);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}
/*
 * Prepare the adapter for authentication: update the RXON configuration
 * with the BSS parameters (bssid, channel, slot/preamble flags, rate
 * masks), reapply TX power and re-add the broadcast node (RXON clears
 * the firmware node table).
 */
static int
iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
	    sc->rxon->ofdm_mask);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
		    __func__, error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}
	/*
	 * Reconfiguring RXON clears the firmware nodes table so we must
	 * add the broadcast node again.
	 */
	if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add broadcast node, error %d\n", __func__,
		    error);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}
/*
 * Complete the transition to RUN state: program the association RXON
 * (associd, HT flags, BSS filter), reapply TX power, add the BSS node
 * with its HT/SMPS parameters, set up link quality, initialize
 * sensitivity and start the periodic calibration timer.
 */
static int
iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_node_info node;
	uint32_t htflags = 0;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Link LED blinks while monitoring. */
		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
		return 0;
	}
	if ((error = iwn_set_timing(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set timing, error %d\n", __func__, error);
		return error;
	}

	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			switch (ic->ic_curhtprotmode) {
			case IEEE80211_HTINFO_OPMODE_HT20PR:
				htflags |= IWN_RXON_HT_MODEPURE40;
				break;
			default:
				htflags |= IWN_RXON_HT_MODEMIXED;
				break;
			}
		}
		if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
			htflags |= IWN_RXON_HT_HT40MINUS;
	}
	sc->rxon->flags |= htole32(htflags);
	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x\n",
	    sc->rxon->chan, sc->rxon->flags);
	error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not update configuration, error %d\n", __func__,
		    error);
		return error;
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n", __func__, error);
		return error;
	}

	/* Fake a join to initialize the TX rate. */
	((struct iwn_node *)ni)->id = IWN_ID_BSS;
	iwn_newassoc(ni, 1);

	/* Add BSS node. */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
	node.id = IWN_ID_BSS;
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		/* Map the peer's SMPS mode to firmware MIMO restrictions. */
		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
		case IEEE80211_HTCAP_SMPS_ENA:
			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
			break;
		case IEEE80211_HTCAP_SMPS_DYNAMIC:
			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
			break;
		}
		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
		    IWN_AMDPU_DENSITY(5));	/* 4us */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
			node.htflags |= htole32(IWN_NODE_HT40);
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
	error = ops->add_node(sc, &node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add BSS node, error %d\n", __func__, error);
		return error;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
	    __func__, node.id);
	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not setup link quality for node %d, error %d\n",
		    __func__, node.id, error);
		return error;
	}

	if ((error = iwn_init_sensitivity(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set sensitivity, error %d\n", __func__,
		    error);
		return error;
	}
	/* Start periodic calibration timer. */
	sc->calib.state = IWN_CALIB_STATE_ASSOC;
	sc->calib_cnt = 0;
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);

	/* Link LED always on while associated. */
	iwn_set_led(sc, IWN_LED_LINK, 0, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * This function is called by upper layer when an ADDBA request is received
 * from another STA and before the ADDBA response is sent.  Tells the
 * firmware about the new RX aggregation session (TID and starting
 * sequence number) before chaining to the saved net80211 handler.
 */
static int
iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
    int baparamset, int batimeout, int baseqctl)
{
#define MS(_v, _f)	(((_v) & _f) >> _f##_S)
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	uint16_t ssn;
	uint8_t tid;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
	ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);

	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_ADDBA;
	node.addba_tid = tid;
	node.addba_ssn = htole16(ssn);
	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
	    wn->id, tid, ssn);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return error;
	return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
#undef MS
}
5939 */ 5940static void 5941iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) 5942{ 5943 struct ieee80211com *ic = ni->ni_ic; 5944 struct iwn_softc *sc = ic->ic_ifp->if_softc; 5945 struct iwn_ops *ops = &sc->ops; 5946 struct iwn_node *wn = (void *)ni; 5947 struct iwn_node_info node; 5948 uint8_t tid; 5949 5950 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5951 5952 /* XXX: tid as an argument */ 5953 for (tid = 0; tid < WME_NUM_TID; tid++) { 5954 if (&ni->ni_rx_ampdu[tid] == rap) 5955 break; 5956 } 5957 5958 memset(&node, 0, sizeof node); 5959 node.id = wn->id; 5960 node.control = IWN_NODE_UPDATE; 5961 node.flags = IWN_FLAG_SET_DELBA; 5962 node.delba_tid = tid; 5963 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); 5964 (void)ops->add_node(sc, &node, 1); 5965 sc->sc_ampdu_rx_stop(ni, rap); 5966} 5967 5968static int 5969iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 5970 int dialogtoken, int baparamset, int batimeout) 5971{ 5972 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 5973 int qid; 5974 5975 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5976 5977 for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { 5978 if (sc->qid2tap[qid] == NULL) 5979 break; 5980 } 5981 if (qid == sc->ntxqs) { 5982 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", 5983 __func__); 5984 return 0; 5985 } 5986 tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); 5987 if (tap->txa_private == NULL) { 5988 device_printf(sc->sc_dev, 5989 "%s: failed to alloc TX aggregation structure\n", __func__); 5990 return 0; 5991 } 5992 sc->qid2tap[qid] = tap; 5993 *(int *)tap->txa_private = qid; 5994 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, 5995 batimeout); 5996} 5997 5998static int 5999iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, 6000 int code, int baparamset, int batimeout) 6001{ 6002 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc; 
/*
 * Handle an incoming ADDBA response for a session we requested.  On
 * success, start TX aggregation on the reserved queue; on failure,
 * release the queue reserved by iwn_addba_request().  Chains to the
 * saved net80211 handler in both cases.
 */
static int
iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	int qid = *(int *)tap->txa_private;
	uint8_t tid = tap->txa_tid;
	int ret;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (code == IEEE80211_STATUS_SUCCESS) {
		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
		if (ret != 1)
			return ret;
	} else {
		/* Peer refused: release the reserved aggregation queue. */
		sc->qid2tap[qid] = NULL;
		free(tap->txa_private, M_DEVBUF);
		tap->txa_private = NULL;
	}
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}

/*
 * This function is called by upper layer when an ADDBA response is received
 * from another STA.  Re-enables TX for the RA/TID in the firmware node
 * table, then programs the hardware scheduler for aggregation on the
 * reserved queue.  Returns 1 on success, 0 on failure (caller treats
 * non-1 as refusal).
 */
static int
iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Enable TX for the specified RA/TID. */
	wn->disable_tid &= ~(1 << tid);
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DISABLE_TID;
	node.disable_tid = htole16(wn->disable_tid);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return 0;

	/* NIC registers are accessed under the NIC lock. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return 0;
	qid = *(int *)tap->txa_private;
	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
	    __func__, wn->id, tid, tap->txa_start, qid);
	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);

	iwn_set_link_quality(sc, ni);
	return 1;
}

/*
 * Tear down a TX aggregation session: notify net80211 first, then stop
 * the hardware queue (once drained) and release the queue mapping set
 * up by iwn_addba_request().
 */
static void
iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct iwn_ops *ops = &sc->ops;
	uint8_t tid = tap->txa_tid;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	sc->sc_addba_stop(ni, tap);

	if (tap->txa_private == NULL)
		return;

	qid = *(int *)tap->txa_private;
	/* Defer teardown while frames are still queued on the ring. */
	if (sc->txq[qid].queued != 0)
		return;
	if (iwn_nic_lock(sc) != 0)
		return;
	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);
	sc->qid2tap[qid] = NULL;
	free(tap->txa_private, M_DEVBUF);
	tap->txa_private = NULL;
}
/*
 * Program the 4965 TX scheduler to run queue 'qid' in aggregation mode
 * for the given RA/TID, starting at sequence number 'ssn'.  Caller
 * holds the NIC lock.
 */
static void
iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	struct iwn_node *wn = (void *)ni;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
	    IWN_SCHED_WINSZ);
	/* Set scheduler frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
	    iwn_tid2fifo[tid] << 1);
}

/*
 * Undo iwn4965_ampdu_tx_start(): return queue 'qid' to non-aggregation
 * operation.  Caller holds the NIC lock.
 */
static void
iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
}
*/ 6146 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6147 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); 6148} 6149 6150static void 6151iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, 6152 int qid, uint8_t tid, uint16_t ssn) 6153{ 6154 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6155 6156 struct iwn_node *wn = (void *)ni; 6157 6158 /* Stop TX scheduler while we're changing its configuration. */ 6159 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6160 IWN5000_TXQ_STATUS_CHGACT); 6161 6162 /* Assign RA/TID translation to the queue. */ 6163 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), 6164 wn->id << 4 | tid); 6165 6166 /* Enable chain-building mode for the queue. */ 6167 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); 6168 6169 /* Enable aggregation for the queue. */ 6170 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 6171 6172 /* Set starting sequence number from the ADDBA request. */ 6173 sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); 6174 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 6175 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 6176 6177 /* Set scheduler window size and frame limit. */ 6178 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 6179 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 6180 6181 /* Enable interrupts for the queue. */ 6182 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 6183 6184 /* Mark the queue as active. */ 6185 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6186 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); 6187} 6188 6189static void 6190iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) 6191{ 6192 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6193 6194 /* Stop TX scheduler while we're changing its configuration. 
*/ 6195 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6196 IWN5000_TXQ_STATUS_CHGACT); 6197 6198 /* Disable aggregation for the queue. */ 6199 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); 6200 6201 /* Set starting sequence number from the ADDBA request. */ 6202 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); 6203 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); 6204 6205 /* Disable interrupts for the queue. */ 6206 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); 6207 6208 /* Mark the queue as inactive. */ 6209 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6210 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); 6211} 6212 6213/* 6214 * Query calibration tables from the initialization firmware. We do this 6215 * only once at first boot. Called from a process context. 6216 */ 6217static int 6218iwn5000_query_calibration(struct iwn_softc *sc) 6219{ 6220 struct iwn5000_calib_config cmd; 6221 int error; 6222 6223 memset(&cmd, 0, sizeof cmd); 6224 cmd.ucode.once.enable = 0xffffffff; 6225 cmd.ucode.once.start = 0xffffffff; 6226 cmd.ucode.once.send = 0xffffffff; 6227 cmd.ucode.flags = 0xffffffff; 6228 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n", 6229 __func__); 6230 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); 6231 if (error != 0) 6232 return error; 6233 6234 /* Wait at most two seconds for calibration to complete. */ 6235 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) 6236 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz); 6237 return error; 6238} 6239 6240/* 6241 * Send calibration results to the runtime firmware. These results were 6242 * obtained on first boot from the initialization firmware. 6243 */ 6244static int 6245iwn5000_send_calibration(struct iwn_softc *sc) 6246{ 6247 int idx, error; 6248 6249 for (idx = 0; idx < 5; idx++) { 6250 if (sc->calibcmd[idx].buf == NULL) 6251 continue; /* No results available. 
*/ 6252 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 6253 "send calibration result idx=%d len=%d\n", idx, 6254 sc->calibcmd[idx].len); 6255 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, 6256 sc->calibcmd[idx].len, 0); 6257 if (error != 0) { 6258 device_printf(sc->sc_dev, 6259 "%s: could not send calibration result, error %d\n", 6260 __func__, error); 6261 return error; 6262 } 6263 } 6264 return 0; 6265} 6266 6267static int 6268iwn5000_send_wimax_coex(struct iwn_softc *sc) 6269{ 6270 struct iwn5000_wimax_coex wimax; 6271 6272#ifdef notyet 6273 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { 6274 /* Enable WiMAX coexistence for combo adapters. */ 6275 wimax.flags = 6276 IWN_WIMAX_COEX_ASSOC_WA_UNMASK | 6277 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | 6278 IWN_WIMAX_COEX_STA_TABLE_VALID | 6279 IWN_WIMAX_COEX_ENABLE; 6280 memcpy(wimax.events, iwn6050_wimax_events, 6281 sizeof iwn6050_wimax_events); 6282 } else 6283#endif 6284 { 6285 /* Disable WiMAX coexistence. */ 6286 wimax.flags = 0; 6287 memset(wimax.events, 0, sizeof wimax.events); 6288 } 6289 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", 6290 __func__); 6291 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); 6292} 6293 6294static int 6295iwn5000_crystal_calib(struct iwn_softc *sc) 6296{ 6297 struct iwn5000_phy_calib_crystal cmd; 6298 6299 memset(&cmd, 0, sizeof cmd); 6300 cmd.code = IWN5000_PHY_CALIB_CRYSTAL; 6301 cmd.ngroups = 1; 6302 cmd.isvalid = 1; 6303 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; 6304 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; 6305 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n", 6306 cmd.cap_pin[0], cmd.cap_pin[1]); 6307 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 6308} 6309 6310static int 6311iwn5000_temp_offset_calib(struct iwn_softc *sc) 6312{ 6313 struct iwn5000_phy_calib_temp_offset cmd; 6314 6315 memset(&cmd, 0, sizeof cmd); 6316 cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; 6317 cmd.ngroups = 
1; 6318 cmd.isvalid = 1; 6319 if (sc->eeprom_temp != 0) 6320 cmd.offset = htole16(sc->eeprom_temp); 6321 else 6322 cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); 6323 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n", 6324 le16toh(cmd.offset)); 6325 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); 6326} 6327 6328/* 6329 * This function is called after the runtime firmware notifies us of its 6330 * readiness (called in a process context). 6331 */ 6332static int 6333iwn4965_post_alive(struct iwn_softc *sc) 6334{ 6335 int error, qid; 6336 6337 if ((error = iwn_nic_lock(sc)) != 0) 6338 return error; 6339 6340 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6341 6342 /* Clear TX scheduler state in SRAM. */ 6343 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 6344 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, 6345 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); 6346 6347 /* Set physical address of TX scheduler rings (1KB aligned). */ 6348 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 6349 6350 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 6351 6352 /* Disable chain mode for all our 16 queues. */ 6353 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); 6354 6355 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { 6356 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); 6357 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 6358 6359 /* Set scheduler window size. */ 6360 iwn_mem_write(sc, sc->sched_base + 6361 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); 6362 /* Set scheduler frame limit. */ 6363 iwn_mem_write(sc, sc->sched_base + 6364 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, 6365 IWN_SCHED_LIMIT << 16); 6366 } 6367 6368 /* Enable interrupts for all our 16 queues. */ 6369 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); 6370 /* Identify TX FIFO rings (0-7). 
*/ 6371 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); 6372 6373 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 6374 for (qid = 0; qid < 7; qid++) { 6375 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; 6376 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), 6377 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); 6378 } 6379 iwn_nic_unlock(sc); 6380 return 0; 6381} 6382 6383/* 6384 * This function is called after the initialization or runtime firmware 6385 * notifies us of its readiness (called in a process context). 6386 */ 6387static int 6388iwn5000_post_alive(struct iwn_softc *sc) 6389{ 6390 int error, qid; 6391 6392 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 6393 6394 /* Switch to using ICT interrupt mode. */ 6395 iwn5000_ict_reset(sc); 6396 6397 if ((error = iwn_nic_lock(sc)) != 0){ 6398 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); 6399 return error; 6400 } 6401 6402 /* Clear TX scheduler state in SRAM. */ 6403 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); 6404 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, 6405 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); 6406 6407 /* Set physical address of TX scheduler rings (1KB aligned). */ 6408 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); 6409 6410 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); 6411 6412 /* Enable chain mode for all queues, except command queue. */ 6413 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); 6414 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); 6415 6416 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { 6417 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); 6418 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); 6419 6420 iwn_mem_write(sc, sc->sched_base + 6421 IWN5000_SCHED_QUEUE_OFFSET(qid), 0); 6422 /* Set scheduler window size and frame limit. 
*/ 6423 iwn_mem_write(sc, sc->sched_base + 6424 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, 6425 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); 6426 } 6427 6428 /* Enable interrupts for all our 20 queues. */ 6429 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); 6430 /* Identify TX FIFO rings (0-7). */ 6431 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); 6432 6433 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ 6434 for (qid = 0; qid < 7; qid++) { 6435 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; 6436 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), 6437 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); 6438 } 6439 iwn_nic_unlock(sc); 6440 6441 /* Configure WiMAX coexistence for combo adapters. */ 6442 error = iwn5000_send_wimax_coex(sc); 6443 if (error != 0) { 6444 device_printf(sc->sc_dev, 6445 "%s: could not configure WiMAX coexistence, error %d\n", 6446 __func__, error); 6447 return error; 6448 } 6449 if (sc->hw_type != IWN_HW_REV_TYPE_5150) { 6450 /* Perform crystal calibration. */ 6451 error = iwn5000_crystal_calib(sc); 6452 if (error != 0) { 6453 device_printf(sc->sc_dev, 6454 "%s: crystal calibration failed, error %d\n", 6455 __func__, error); 6456 return error; 6457 } 6458 } 6459 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { 6460 /* Query calibration from the initialization firmware. */ 6461 if ((error = iwn5000_query_calibration(sc)) != 0) { 6462 device_printf(sc->sc_dev, 6463 "%s: could not query calibration, error %d\n", 6464 __func__, error); 6465 return error; 6466 } 6467 /* 6468 * We have the calibration results now, reboot with the 6469 * runtime firmware (call ourselves recursively!) 6470 */ 6471 iwn_hw_stop(sc); 6472 error = iwn_hw_init(sc); 6473 } else { 6474 /* Send calibration results to runtime firmware. 
*/ 6475 error = iwn5000_send_calibration(sc); 6476 } 6477 6478 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 6479 6480 return error; 6481} 6482 6483/* 6484 * The firmware boot code is small and is intended to be copied directly into 6485 * the NIC internal memory (no DMA transfer). 6486 */ 6487static int 6488iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) 6489{ 6490 int error, ntries; 6491 6492 size /= sizeof (uint32_t); 6493 6494 if ((error = iwn_nic_lock(sc)) != 0) 6495 return error; 6496 6497 /* Copy microcode image into NIC memory. */ 6498 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, 6499 (const uint32_t *)ucode, size); 6500 6501 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); 6502 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); 6503 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); 6504 6505 /* Start boot load now. */ 6506 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); 6507 6508 /* Wait for transfer to complete. */ 6509 for (ntries = 0; ntries < 1000; ntries++) { 6510 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & 6511 IWN_BSM_WR_CTRL_START)) 6512 break; 6513 DELAY(10); 6514 } 6515 if (ntries == 1000) { 6516 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 6517 __func__); 6518 iwn_nic_unlock(sc); 6519 return ETIMEDOUT; 6520 } 6521 6522 /* Enable boot after power up. */ 6523 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); 6524 6525 iwn_nic_unlock(sc); 6526 return 0; 6527} 6528 6529static int 6530iwn4965_load_firmware(struct iwn_softc *sc) 6531{ 6532 struct iwn_fw_info *fw = &sc->fw; 6533 struct iwn_dma_info *dma = &sc->fw_dma; 6534 int error; 6535 6536 /* Copy initialization sections into pre-allocated DMA-safe memory. 
*/ 6537 memcpy(dma->vaddr, fw->init.data, fw->init.datasz); 6538 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 6539 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 6540 fw->init.text, fw->init.textsz); 6541 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 6542 6543 /* Tell adapter where to find initialization sections. */ 6544 if ((error = iwn_nic_lock(sc)) != 0) 6545 return error; 6546 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 6547 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); 6548 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 6549 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 6550 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); 6551 iwn_nic_unlock(sc); 6552 6553 /* Load firmware boot code. */ 6554 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); 6555 if (error != 0) { 6556 device_printf(sc->sc_dev, "%s: could not load boot firmware\n", 6557 __func__); 6558 return error; 6559 } 6560 /* Now press "execute". */ 6561 IWN_WRITE(sc, IWN_RESET, 0); 6562 6563 /* Wait at most one second for first alive notification. */ 6564 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { 6565 device_printf(sc->sc_dev, 6566 "%s: timeout waiting for adapter to initialize, error %d\n", 6567 __func__, error); 6568 return error; 6569 } 6570 6571 /* Retrieve current temperature for initial TX power calibration. */ 6572 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; 6573 sc->temp = iwn4965_get_temperature(sc); 6574 6575 /* Copy runtime sections into pre-allocated DMA-safe memory. */ 6576 memcpy(dma->vaddr, fw->main.data, fw->main.datasz); 6577 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 6578 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, 6579 fw->main.text, fw->main.textsz); 6580 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 6581 6582 /* Tell adapter where to find runtime sections. 
*/ 6583 if ((error = iwn_nic_lock(sc)) != 0) 6584 return error; 6585 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); 6586 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); 6587 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, 6588 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); 6589 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, 6590 IWN_FW_UPDATED | fw->main.textsz); 6591 iwn_nic_unlock(sc); 6592 6593 return 0; 6594} 6595 6596static int 6597iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, 6598 const uint8_t *section, int size) 6599{ 6600 struct iwn_dma_info *dma = &sc->fw_dma; 6601 int error; 6602 6603 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6604 6605 /* Copy firmware section into pre-allocated DMA-safe memory. */ 6606 memcpy(dma->vaddr, section, size); 6607 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 6608 6609 if ((error = iwn_nic_lock(sc)) != 0) 6610 return error; 6611 6612 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 6613 IWN_FH_TX_CONFIG_DMA_PAUSE); 6614 6615 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); 6616 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), 6617 IWN_LOADDR(dma->paddr)); 6618 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), 6619 IWN_HIADDR(dma->paddr) << 28 | size); 6620 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), 6621 IWN_FH_TXBUF_STATUS_TBNUM(1) | 6622 IWN_FH_TXBUF_STATUS_TBIDX(1) | 6623 IWN_FH_TXBUF_STATUS_TFBD_VALID); 6624 6625 /* Kick Flow Handler to start DMA transfer. */ 6626 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), 6627 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); 6628 6629 iwn_nic_unlock(sc); 6630 6631 /* Wait at most five seconds for FH DMA transfer to complete. 
*/ 6632 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz); 6633} 6634 6635static int 6636iwn5000_load_firmware(struct iwn_softc *sc) 6637{ 6638 struct iwn_fw_part *fw; 6639 int error; 6640 6641 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6642 6643 /* Load the initialization firmware on first boot only. */ 6644 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? 6645 &sc->fw.main : &sc->fw.init; 6646 6647 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, 6648 fw->text, fw->textsz); 6649 if (error != 0) { 6650 device_printf(sc->sc_dev, 6651 "%s: could not load firmware %s section, error %d\n", 6652 __func__, ".text", error); 6653 return error; 6654 } 6655 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, 6656 fw->data, fw->datasz); 6657 if (error != 0) { 6658 device_printf(sc->sc_dev, 6659 "%s: could not load firmware %s section, error %d\n", 6660 __func__, ".data", error); 6661 return error; 6662 } 6663 6664 /* Now press "execute". */ 6665 IWN_WRITE(sc, IWN_RESET, 0); 6666 return 0; 6667} 6668 6669/* 6670 * Extract text and data sections from a legacy firmware image. 6671 */ 6672static int 6673iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) 6674{ 6675 const uint32_t *ptr; 6676 size_t hdrlen = 24; 6677 uint32_t rev; 6678 6679 ptr = (const uint32_t *)fw->data; 6680 rev = le32toh(*ptr++); 6681 6682 /* Check firmware API version. */ 6683 if (IWN_FW_API(rev) <= 1) { 6684 device_printf(sc->sc_dev, 6685 "%s: bad firmware, need API version >=2\n", __func__); 6686 return EINVAL; 6687 } 6688 if (IWN_FW_API(rev) >= 3) { 6689 /* Skip build number (version 2 header). 
*/ 6690 hdrlen += 4; 6691 ptr++; 6692 } 6693 if (fw->size < hdrlen) { 6694 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 6695 __func__, fw->size); 6696 return EINVAL; 6697 } 6698 fw->main.textsz = le32toh(*ptr++); 6699 fw->main.datasz = le32toh(*ptr++); 6700 fw->init.textsz = le32toh(*ptr++); 6701 fw->init.datasz = le32toh(*ptr++); 6702 fw->boot.textsz = le32toh(*ptr++); 6703 6704 /* Check that all firmware sections fit. */ 6705 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + 6706 fw->init.textsz + fw->init.datasz + fw->boot.textsz) { 6707 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 6708 __func__, fw->size); 6709 return EINVAL; 6710 } 6711 6712 /* Get pointers to firmware sections. */ 6713 fw->main.text = (const uint8_t *)ptr; 6714 fw->main.data = fw->main.text + fw->main.textsz; 6715 fw->init.text = fw->main.data + fw->main.datasz; 6716 fw->init.data = fw->init.text + fw->init.textsz; 6717 fw->boot.text = fw->init.data + fw->init.datasz; 6718 return 0; 6719} 6720 6721/* 6722 * Extract text and data sections from a TLV firmware image. 
6723 */ 6724static int 6725iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, 6726 uint16_t alt) 6727{ 6728 const struct iwn_fw_tlv_hdr *hdr; 6729 const struct iwn_fw_tlv *tlv; 6730 const uint8_t *ptr, *end; 6731 uint64_t altmask; 6732 uint32_t len, tmp; 6733 6734 if (fw->size < sizeof (*hdr)) { 6735 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 6736 __func__, fw->size); 6737 return EINVAL; 6738 } 6739 hdr = (const struct iwn_fw_tlv_hdr *)fw->data; 6740 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { 6741 device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", 6742 __func__, le32toh(hdr->signature)); 6743 return EINVAL; 6744 } 6745 DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, 6746 le32toh(hdr->build)); 6747 6748 /* 6749 * Select the closest supported alternative that is less than 6750 * or equal to the specified one. 6751 */ 6752 altmask = le64toh(hdr->altmask); 6753 while (alt > 0 && !(altmask & (1ULL << alt))) 6754 alt--; /* Downgrade. */ 6755 DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); 6756 6757 ptr = (const uint8_t *)(hdr + 1); 6758 end = (const uint8_t *)(fw->data + fw->size); 6759 6760 /* Parse type-length-value fields. */ 6761 while (ptr + sizeof (*tlv) <= end) { 6762 tlv = (const struct iwn_fw_tlv *)ptr; 6763 len = le32toh(tlv->len); 6764 6765 ptr += sizeof (*tlv); 6766 if (ptr + len > end) { 6767 device_printf(sc->sc_dev, 6768 "%s: firmware too short: %zu bytes\n", __func__, 6769 fw->size); 6770 return EINVAL; 6771 } 6772 /* Skip other alternatives. 
*/ 6773 if (tlv->alt != 0 && tlv->alt != htole16(alt)) 6774 goto next; 6775 6776 switch (le16toh(tlv->type)) { 6777 case IWN_FW_TLV_MAIN_TEXT: 6778 fw->main.text = ptr; 6779 fw->main.textsz = len; 6780 break; 6781 case IWN_FW_TLV_MAIN_DATA: 6782 fw->main.data = ptr; 6783 fw->main.datasz = len; 6784 break; 6785 case IWN_FW_TLV_INIT_TEXT: 6786 fw->init.text = ptr; 6787 fw->init.textsz = len; 6788 break; 6789 case IWN_FW_TLV_INIT_DATA: 6790 fw->init.data = ptr; 6791 fw->init.datasz = len; 6792 break; 6793 case IWN_FW_TLV_BOOT_TEXT: 6794 fw->boot.text = ptr; 6795 fw->boot.textsz = len; 6796 break; 6797 case IWN_FW_TLV_ENH_SENS: 6798 if (!len) 6799 sc->sc_flags |= IWN_FLAG_ENH_SENS; 6800 break; 6801 case IWN_FW_TLV_PHY_CALIB: 6802 tmp = htole32(*ptr); 6803 if (tmp < 253) { 6804 sc->reset_noise_gain = tmp; 6805 sc->noise_gain = tmp + 1; 6806 } 6807 break; 6808 case IWN_FW_TLV_PAN: 6809 sc->sc_flags |= IWN_FLAG_PAN_SUPPORT; 6810 DPRINTF(sc, IWN_DEBUG_RESET, 6811 "PAN Support found: %d\n", 1); 6812 break; 6813 case IWN_FW_TLV_FLAGS : 6814 sc->tlv_feature_flags = htole32(*ptr); 6815 break; 6816 case IWN_FW_TLV_PBREQ_MAXLEN: 6817 case IWN_FW_TLV_RUNT_EVTLOG_PTR: 6818 case IWN_FW_TLV_RUNT_EVTLOG_SIZE: 6819 case IWN_FW_TLV_RUNT_ERRLOG_PTR: 6820 case IWN_FW_TLV_INIT_EVTLOG_PTR: 6821 case IWN_FW_TLV_INIT_EVTLOG_SIZE: 6822 case IWN_FW_TLV_INIT_ERRLOG_PTR: 6823 case IWN_FW_TLV_WOWLAN_INST: 6824 case IWN_FW_TLV_WOWLAN_DATA: 6825 DPRINTF(sc, IWN_DEBUG_RESET, 6826 "TLV type %d reconized but not handled\n", 6827 le16toh(tlv->type)); 6828 break; 6829 default: 6830 DPRINTF(sc, IWN_DEBUG_RESET, 6831 "TLV type %d not handled\n", le16toh(tlv->type)); 6832 break; 6833 } 6834 next: /* TLV fields are 32-bit aligned. 
*/ 6835 ptr += (len + 3) & ~3; 6836 } 6837 return 0; 6838} 6839 6840static int 6841iwn_read_firmware(struct iwn_softc *sc) 6842{ 6843 struct iwn_fw_info *fw = &sc->fw; 6844 int error; 6845 6846 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6847 6848 IWN_UNLOCK(sc); 6849 6850 memset(fw, 0, sizeof (*fw)); 6851 6852 /* Read firmware image from filesystem. */ 6853 sc->fw_fp = firmware_get(sc->fwname); 6854 if (sc->fw_fp == NULL) { 6855 device_printf(sc->sc_dev, "%s: could not read firmware %s\n", 6856 __func__, sc->fwname); 6857 IWN_LOCK(sc); 6858 return EINVAL; 6859 } 6860 IWN_LOCK(sc); 6861 6862 fw->size = sc->fw_fp->datasize; 6863 fw->data = (const uint8_t *)sc->fw_fp->data; 6864 if (fw->size < sizeof (uint32_t)) { 6865 device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", 6866 __func__, fw->size); 6867 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 6868 sc->fw_fp = NULL; 6869 return EINVAL; 6870 } 6871 6872 /* Retrieve text and data sections. */ 6873 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ 6874 error = iwn_read_firmware_leg(sc, fw); 6875 else 6876 error = iwn_read_firmware_tlv(sc, fw, 1); 6877 if (error != 0) { 6878 device_printf(sc->sc_dev, 6879 "%s: could not read firmware sections, error %d\n", 6880 __func__, error); 6881 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 6882 sc->fw_fp = NULL; 6883 return error; 6884 } 6885 6886 /* Make sure text and data sections fit in hardware memory. */ 6887 if (fw->main.textsz > sc->fw_text_maxsz || 6888 fw->main.datasz > sc->fw_data_maxsz || 6889 fw->init.textsz > sc->fw_text_maxsz || 6890 fw->init.datasz > sc->fw_data_maxsz || 6891 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || 6892 (fw->boot.textsz & 3) != 0) { 6893 device_printf(sc->sc_dev, "%s: firmware sections too large\n", 6894 __func__); 6895 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 6896 sc->fw_fp = NULL; 6897 return EINVAL; 6898 } 6899 6900 /* We can proceed with loading the firmware. 
*/ 6901 return 0; 6902} 6903 6904static int 6905iwn_clock_wait(struct iwn_softc *sc) 6906{ 6907 int ntries; 6908 6909 /* Set "initialization complete" bit. */ 6910 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 6911 6912 /* Wait for clock stabilization. */ 6913 for (ntries = 0; ntries < 2500; ntries++) { 6914 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) 6915 return 0; 6916 DELAY(10); 6917 } 6918 device_printf(sc->sc_dev, 6919 "%s: timeout waiting for clock stabilization\n", __func__); 6920 return ETIMEDOUT; 6921} 6922 6923static int 6924iwn_apm_init(struct iwn_softc *sc) 6925{ 6926 uint32_t reg; 6927 int error; 6928 6929 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 6930 6931 /* Disable L0s exit timer (NMI bug workaround). */ 6932 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); 6933 /* Don't wait for ICH L0s (ICH bug workaround). */ 6934 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); 6935 6936 /* Set FH wait threshold to max (HW bug under stress workaround). */ 6937 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); 6938 6939 /* Enable HAP INTA to move adapter from L1a to L0s. */ 6940 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); 6941 6942 /* Retrieve PCIe Active State Power Management (ASPM). */ 6943 reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1); 6944 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ 6945 if (reg & 0x02) /* L1 Entry enabled. */ 6946 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6947 else 6948 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); 6949 6950 if (sc->hw_type != IWN_HW_REV_TYPE_4965 && 6951 sc->hw_type <= IWN_HW_REV_TYPE_1000) 6952 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT); 6953 6954 /* Wait for clock stabilization before accessing prph. 
*/ 6955 if ((error = iwn_clock_wait(sc)) != 0) 6956 return error; 6957 6958 if ((error = iwn_nic_lock(sc)) != 0) 6959 return error; 6960 if (sc->hw_type == IWN_HW_REV_TYPE_4965) { 6961 /* Enable DMA and BSM (Bootstrap State Machine). */ 6962 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6963 IWN_APMG_CLK_CTRL_DMA_CLK_RQT | 6964 IWN_APMG_CLK_CTRL_BSM_CLK_RQT); 6965 } else { 6966 /* Enable DMA. */ 6967 iwn_prph_write(sc, IWN_APMG_CLK_EN, 6968 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 6969 } 6970 DELAY(20); 6971 /* Disable L1-Active. */ 6972 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); 6973 iwn_nic_unlock(sc); 6974 6975 return 0; 6976} 6977 6978static void 6979iwn_apm_stop_master(struct iwn_softc *sc) 6980{ 6981 int ntries; 6982 6983 /* Stop busmaster DMA activity. */ 6984 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); 6985 for (ntries = 0; ntries < 100; ntries++) { 6986 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) 6987 return; 6988 DELAY(10); 6989 } 6990 device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); 6991} 6992 6993static void 6994iwn_apm_stop(struct iwn_softc *sc) 6995{ 6996 iwn_apm_stop_master(sc); 6997 6998 /* Reset the entire device. */ 6999 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); 7000 DELAY(10); 7001 /* Clear "initialization complete" bit. */ 7002 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); 7003} 7004 7005static int 7006iwn4965_nic_config(struct iwn_softc *sc) 7007{ 7008 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7009 7010 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { 7011 /* 7012 * I don't believe this to be correct but this is what the 7013 * vendor driver is doing. Probably the bits should not be 7014 * shifted in IWN_RFCFG_*. 
7015 */ 7016 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7017 IWN_RFCFG_TYPE(sc->rfcfg) | 7018 IWN_RFCFG_STEP(sc->rfcfg) | 7019 IWN_RFCFG_DASH(sc->rfcfg)); 7020 } 7021 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7022 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 7023 return 0; 7024} 7025 7026static int 7027iwn5000_nic_config(struct iwn_softc *sc) 7028{ 7029 uint32_t tmp; 7030 int error; 7031 7032 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7033 7034 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { 7035 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7036 IWN_RFCFG_TYPE(sc->rfcfg) | 7037 IWN_RFCFG_STEP(sc->rfcfg) | 7038 IWN_RFCFG_DASH(sc->rfcfg)); 7039 } 7040 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 7041 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); 7042 7043 if ((error = iwn_nic_lock(sc)) != 0) 7044 return error; 7045 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); 7046 7047 if (sc->hw_type == IWN_HW_REV_TYPE_1000) { 7048 /* 7049 * Select first Switching Voltage Regulator (1.32V) to 7050 * solve a stability issue related to noisy DC2DC line 7051 * in the silicon of 1000 Series. 7052 */ 7053 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); 7054 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; 7055 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; 7056 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); 7057 } 7058 iwn_nic_unlock(sc); 7059 7060 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { 7061 /* Use internal power amplifier only. */ 7062 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); 7063 } 7064 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 || 7065 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) { 7066 /* Indicate that ROM calibration version is >=6. */ 7067 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); 7068 } 7069 if (sc->hw_type == IWN_HW_REV_TYPE_6005) 7070 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2); 7071 return 0; 7072} 7073 7074/* 7075 * Take NIC ownership over Intel Active Management Technology (AMT). 
7076 */ 7077static int 7078iwn_hw_prepare(struct iwn_softc *sc) 7079{ 7080 int ntries; 7081 7082 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7083 7084 /* Check if hardware is ready. */ 7085 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 7086 for (ntries = 0; ntries < 5; ntries++) { 7087 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 7088 IWN_HW_IF_CONFIG_NIC_READY) 7089 return 0; 7090 DELAY(10); 7091 } 7092 7093 /* Hardware not ready, force into ready state. */ 7094 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); 7095 for (ntries = 0; ntries < 15000; ntries++) { 7096 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & 7097 IWN_HW_IF_CONFIG_PREPARE_DONE)) 7098 break; 7099 DELAY(10); 7100 } 7101 if (ntries == 15000) 7102 return ETIMEDOUT; 7103 7104 /* Hardware should be ready now. */ 7105 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); 7106 for (ntries = 0; ntries < 5; ntries++) { 7107 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 7108 IWN_HW_IF_CONFIG_NIC_READY) 7109 return 0; 7110 DELAY(10); 7111 } 7112 return ETIMEDOUT; 7113} 7114 7115static int 7116iwn_hw_init(struct iwn_softc *sc) 7117{ 7118 struct iwn_ops *ops = &sc->ops; 7119 int error, chnl, qid; 7120 7121 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 7122 7123 /* Clear pending interrupts. */ 7124 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7125 7126 if ((error = iwn_apm_init(sc)) != 0) { 7127 device_printf(sc->sc_dev, 7128 "%s: could not power ON adapter, error %d\n", __func__, 7129 error); 7130 return error; 7131 } 7132 7133 /* Select VMAIN power source. */ 7134 if ((error = iwn_nic_lock(sc)) != 0) 7135 return error; 7136 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); 7137 iwn_nic_unlock(sc); 7138 7139 /* Perform adapter-specific initialization. */ 7140 if ((error = ops->nic_config(sc)) != 0) 7141 return error; 7142 7143 /* Initialize RX ring. 
*/ 7144 if ((error = iwn_nic_lock(sc)) != 0) 7145 return error; 7146 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 7147 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); 7148 /* Set physical address of RX ring (256-byte aligned). */ 7149 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); 7150 /* Set physical address of RX status (16-byte aligned). */ 7151 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); 7152 /* Enable RX. */ 7153 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 7154 IWN_FH_RX_CONFIG_ENA | 7155 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ 7156 IWN_FH_RX_CONFIG_IRQ_DST_HOST | 7157 IWN_FH_RX_CONFIG_SINGLE_FRAME | 7158 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | 7159 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); 7160 iwn_nic_unlock(sc); 7161 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); 7162 7163 if ((error = iwn_nic_lock(sc)) != 0) 7164 return error; 7165 7166 /* Initialize TX scheduler. */ 7167 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 7168 7169 /* Set physical address of "keep warm" page (16-byte aligned). */ 7170 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); 7171 7172 /* Initialize TX rings. */ 7173 for (qid = 0; qid < sc->ntxqs; qid++) { 7174 struct iwn_tx_ring *txq = &sc->txq[qid]; 7175 7176 /* Set physical address of TX ring (256-byte aligned). */ 7177 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), 7178 txq->desc_dma.paddr >> 8); 7179 } 7180 iwn_nic_unlock(sc); 7181 7182 /* Enable DMA channels. */ 7183 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 7184 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 7185 IWN_FH_TX_CONFIG_DMA_ENA | 7186 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); 7187 } 7188 7189 /* Clear "radio off" and "commands blocked" bits. */ 7190 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 7191 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); 7192 7193 /* Clear pending interrupts. */ 7194 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7195 /* Enable interrupt coalescing. 
*/ 7196 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); 7197 /* Enable interrupts. */ 7198 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 7199 7200 /* _Really_ make sure "radio off" bit is cleared! */ 7201 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 7202 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); 7203 7204 /* Enable shadow registers. */ 7205 if (sc->hw_type >= IWN_HW_REV_TYPE_6000) 7206 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); 7207 7208 if ((error = ops->load_firmware(sc)) != 0) { 7209 device_printf(sc->sc_dev, 7210 "%s: could not load firmware, error %d\n", __func__, 7211 error); 7212 return error; 7213 } 7214 /* Wait at most one second for firmware alive notification. */ 7215 if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { 7216 device_printf(sc->sc_dev, 7217 "%s: timeout waiting for adapter to initialize, error %d\n", 7218 __func__, error); 7219 return error; 7220 } 7221 /* Do post-firmware initialization. */ 7222 7223 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 7224 7225 return ops->post_alive(sc); 7226} 7227 7228static void 7229iwn_hw_stop(struct iwn_softc *sc) 7230{ 7231 int chnl, qid, ntries; 7232 7233 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7234 7235 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); 7236 7237 /* Disable interrupts. */ 7238 IWN_WRITE(sc, IWN_INT_MASK, 0); 7239 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7240 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); 7241 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 7242 7243 /* Make sure we no longer hold the NIC lock. */ 7244 iwn_nic_unlock(sc); 7245 7246 /* Stop TX scheduler. */ 7247 iwn_prph_write(sc, sc->sched_txfact_addr, 0); 7248 7249 /* Stop all DMA channels. 
*/ 7250 if (iwn_nic_lock(sc) == 0) { 7251 for (chnl = 0; chnl < sc->ndmachnls; chnl++) { 7252 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); 7253 for (ntries = 0; ntries < 200; ntries++) { 7254 if (IWN_READ(sc, IWN_FH_TX_STATUS) & 7255 IWN_FH_TX_STATUS_IDLE(chnl)) 7256 break; 7257 DELAY(10); 7258 } 7259 } 7260 iwn_nic_unlock(sc); 7261 } 7262 7263 /* Stop RX ring. */ 7264 iwn_reset_rx_ring(sc, &sc->rxq); 7265 7266 /* Reset all TX rings. */ 7267 for (qid = 0; qid < sc->ntxqs; qid++) 7268 iwn_reset_tx_ring(sc, &sc->txq[qid]); 7269 7270 if (iwn_nic_lock(sc) == 0) { 7271 iwn_prph_write(sc, IWN_APMG_CLK_DIS, 7272 IWN_APMG_CLK_CTRL_DMA_CLK_RQT); 7273 iwn_nic_unlock(sc); 7274 } 7275 DELAY(5); 7276 /* Power OFF adapter. */ 7277 iwn_apm_stop(sc); 7278} 7279 7280static void 7281iwn_radio_on(void *arg0, int pending) 7282{ 7283 struct iwn_softc *sc = arg0; 7284 struct ifnet *ifp = sc->sc_ifp; 7285 struct ieee80211com *ic = ifp->if_l2com; 7286 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 7287 7288 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7289 7290 if (vap != NULL) { 7291 iwn_init(sc); 7292 ieee80211_init(vap); 7293 } 7294} 7295 7296static void 7297iwn_radio_off(void *arg0, int pending) 7298{ 7299 struct iwn_softc *sc = arg0; 7300 struct ifnet *ifp = sc->sc_ifp; 7301 struct ieee80211com *ic = ifp->if_l2com; 7302 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 7303 7304 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7305 7306 iwn_stop(sc); 7307 if (vap != NULL) 7308 ieee80211_stop(vap); 7309 7310 /* Enable interrupts to get RF toggle notification. 
*/ 7311 IWN_LOCK(sc); 7312 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7313 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 7314 IWN_UNLOCK(sc); 7315} 7316 7317static void 7318iwn_init_locked(struct iwn_softc *sc) 7319{ 7320 struct ifnet *ifp = sc->sc_ifp; 7321 int error; 7322 7323 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 7324 7325 IWN_LOCK_ASSERT(sc); 7326 7327 if ((error = iwn_hw_prepare(sc)) != 0) { 7328 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", 7329 __func__, error); 7330 goto fail; 7331 } 7332 7333 /* Initialize interrupt mask to default value. */ 7334 sc->int_mask = IWN_INT_MASK_DEF; 7335 sc->sc_flags &= ~IWN_FLAG_USE_ICT; 7336 7337 /* Check that the radio is not disabled by hardware switch. */ 7338 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { 7339 device_printf(sc->sc_dev, 7340 "radio is disabled by hardware switch\n"); 7341 /* Enable interrupts to get RF toggle notifications. */ 7342 IWN_WRITE(sc, IWN_INT, 0xffffffff); 7343 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 7344 return; 7345 } 7346 7347 /* Read firmware images from the filesystem. */ 7348 if ((error = iwn_read_firmware(sc)) != 0) { 7349 device_printf(sc->sc_dev, 7350 "%s: could not read firmware, error %d\n", __func__, 7351 error); 7352 goto fail; 7353 } 7354 7355 /* Initialize hardware and upload firmware. */ 7356 error = iwn_hw_init(sc); 7357 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); 7358 sc->fw_fp = NULL; 7359 if (error != 0) { 7360 device_printf(sc->sc_dev, 7361 "%s: could not initialize hardware, error %d\n", __func__, 7362 error); 7363 goto fail; 7364 } 7365 7366 /* Configure adapter now that it is ready. 
*/ 7367 if ((error = iwn_config(sc)) != 0) { 7368 device_printf(sc->sc_dev, 7369 "%s: could not configure device, error %d\n", __func__, 7370 error); 7371 goto fail; 7372 } 7373 7374 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 7375 ifp->if_drv_flags |= IFF_DRV_RUNNING; 7376 7377 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); 7378 7379 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 7380 7381 return; 7382 7383fail: iwn_stop_locked(sc); 7384 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 7385} 7386 7387static void 7388iwn_init(void *arg) 7389{ 7390 struct iwn_softc *sc = arg; 7391 struct ifnet *ifp = sc->sc_ifp; 7392 struct ieee80211com *ic = ifp->if_l2com; 7393 7394 IWN_LOCK(sc); 7395 iwn_init_locked(sc); 7396 IWN_UNLOCK(sc); 7397 7398 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 7399 ieee80211_start_all(ic); 7400} 7401 7402static void 7403iwn_stop_locked(struct iwn_softc *sc) 7404{ 7405 struct ifnet *ifp = sc->sc_ifp; 7406 7407 IWN_LOCK_ASSERT(sc); 7408 7409 sc->sc_tx_timer = 0; 7410 callout_stop(&sc->watchdog_to); 7411 callout_stop(&sc->calib_to); 7412 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 7413 7414 /* Power OFF hardware. */ 7415 iwn_hw_stop(sc); 7416} 7417 7418static void 7419iwn_stop(struct iwn_softc *sc) 7420{ 7421 IWN_LOCK(sc); 7422 iwn_stop_locked(sc); 7423 IWN_UNLOCK(sc); 7424} 7425 7426/* 7427 * Callback from net80211 to start a scan. 7428 */ 7429static void 7430iwn_scan_start(struct ieee80211com *ic) 7431{ 7432 struct ifnet *ifp = ic->ic_ifp; 7433 struct iwn_softc *sc = ifp->if_softc; 7434 7435 IWN_LOCK(sc); 7436 /* make the link LED blink while we're scanning */ 7437 iwn_set_led(sc, IWN_LED_LINK, 20, 2); 7438 IWN_UNLOCK(sc); 7439} 7440 7441/* 7442 * Callback from net80211 to terminate a scan. 
7443 */ 7444static void 7445iwn_scan_end(struct ieee80211com *ic) 7446{ 7447 struct ifnet *ifp = ic->ic_ifp; 7448 struct iwn_softc *sc = ifp->if_softc; 7449 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 7450 7451 IWN_LOCK(sc); 7452 if (vap->iv_state == IEEE80211_S_RUN) { 7453 /* Set link LED to ON status if we are associated */ 7454 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 7455 } 7456 IWN_UNLOCK(sc); 7457} 7458 7459/* 7460 * Callback from net80211 to force a channel change. 7461 */ 7462static void 7463iwn_set_channel(struct ieee80211com *ic) 7464{ 7465 const struct ieee80211_channel *c = ic->ic_curchan; 7466 struct ifnet *ifp = ic->ic_ifp; 7467 struct iwn_softc *sc = ifp->if_softc; 7468 int error; 7469 7470 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7471 7472 IWN_LOCK(sc); 7473 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); 7474 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); 7475 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); 7476 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); 7477 7478 /* 7479 * Only need to set the channel in Monitor mode. AP scanning and auth 7480 * are already taken care of by their respective firmware commands. 7481 */ 7482 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 7483 error = iwn_config(sc); 7484 if (error != 0) 7485 device_printf(sc->sc_dev, 7486 "%s: error %d settting channel\n", __func__, error); 7487 } 7488 IWN_UNLOCK(sc); 7489} 7490 7491/* 7492 * Callback from net80211 to start scanning of the current channel. 7493 */ 7494static void 7495iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 7496{ 7497 struct ieee80211vap *vap = ss->ss_vap; 7498 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; 7499 int error; 7500 7501 IWN_LOCK(sc); 7502 error = iwn_scan(sc); 7503 IWN_UNLOCK(sc); 7504 if (error != 0) 7505 ieee80211_cancel_scan(vap); 7506} 7507 7508/* 7509 * Callback from net80211 to handle the minimum dwell time being met. 
7510 * The intent is to terminate the scan but we just let the firmware 7511 * notify us when it's finished as we have no safe way to abort it. 7512 */ 7513static void 7514iwn_scan_mindwell(struct ieee80211_scan_state *ss) 7515{ 7516 /* NB: don't try to abort scan; wait for firmware to finish */ 7517} 7518 7519static void 7520iwn_hw_reset(void *arg0, int pending) 7521{ 7522 struct iwn_softc *sc = arg0; 7523 struct ifnet *ifp = sc->sc_ifp; 7524 struct ieee80211com *ic = ifp->if_l2com; 7525 7526 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 7527 7528 iwn_stop(sc); 7529 iwn_init(sc); 7530 ieee80211_notify_radio(ic, 1); 7531} 7532#ifdef IWN_DEBUG 7533#define IWN_DESC(x) case x: return #x 7534#define COUNTOF(array) (sizeof(array) / sizeof(array[0])) 7535 7536/* 7537 * Translate CSR code to string 7538 */ 7539static char *iwn_get_csr_string(int csr) 7540{ 7541 switch (csr) { 7542 IWN_DESC(IWN_HW_IF_CONFIG); 7543 IWN_DESC(IWN_INT_COALESCING); 7544 IWN_DESC(IWN_INT); 7545 IWN_DESC(IWN_INT_MASK); 7546 IWN_DESC(IWN_FH_INT); 7547 IWN_DESC(IWN_GPIO_IN); 7548 IWN_DESC(IWN_RESET); 7549 IWN_DESC(IWN_GP_CNTRL); 7550 IWN_DESC(IWN_HW_REV); 7551 IWN_DESC(IWN_EEPROM); 7552 IWN_DESC(IWN_EEPROM_GP); 7553 IWN_DESC(IWN_OTP_GP); 7554 IWN_DESC(IWN_GIO); 7555 IWN_DESC(IWN_GP_UCODE); 7556 IWN_DESC(IWN_GP_DRIVER); 7557 IWN_DESC(IWN_UCODE_GP1); 7558 IWN_DESC(IWN_UCODE_GP2); 7559 IWN_DESC(IWN_LED); 7560 IWN_DESC(IWN_DRAM_INT_TBL); 7561 IWN_DESC(IWN_GIO_CHICKEN); 7562 IWN_DESC(IWN_ANA_PLL); 7563 IWN_DESC(IWN_HW_REV_WA); 7564 IWN_DESC(IWN_DBG_HPET_MEM); 7565 default: 7566 return "UNKNOWN CSR"; 7567 } 7568} 7569 7570/* 7571 * This function print firmware register 7572 */ 7573static void 7574iwn_debug_register(struct iwn_softc *sc) 7575{ 7576 int i; 7577 static const uint32_t csr_tbl[] = { 7578 IWN_HW_IF_CONFIG, 7579 IWN_INT_COALESCING, 7580 IWN_INT, 7581 IWN_INT_MASK, 7582 IWN_FH_INT, 7583 IWN_GPIO_IN, 7584 IWN_RESET, 7585 IWN_GP_CNTRL, 7586 IWN_HW_REV, 7587 IWN_EEPROM, 7588 
IWN_EEPROM_GP, 7589 IWN_OTP_GP, 7590 IWN_GIO, 7591 IWN_GP_UCODE, 7592 IWN_GP_DRIVER, 7593 IWN_UCODE_GP1, 7594 IWN_UCODE_GP2, 7595 IWN_LED, 7596 IWN_DRAM_INT_TBL, 7597 IWN_GIO_CHICKEN, 7598 IWN_ANA_PLL, 7599 IWN_HW_REV_WA, 7600 IWN_DBG_HPET_MEM, 7601 }; 7602 DPRINTF(sc, IWN_DEBUG_REGISTER, 7603 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", 7604 "\n"); 7605 for (i = 0; i < COUNTOF(csr_tbl); i++){ 7606 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", 7607 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); 7608 if ((i+1) % 3 == 0) 7609 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 7610 } 7611 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 7612} 7613#endif 7614