1/*- 2 * Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr> 3 * Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org> 4 * Copyright (c) 2008 Sam Leffler, Errno Consulting 5 * Copyright (c) 2011 Intel Corporation 6 * Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr> 7 * Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org> 8 * 9 * Permission to use, copy, modify, and distribute this software for any 10 * purpose with or without fee is hereby granted, provided that the above 11 * copyright notice and this permission notice appear in all copies. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 */ 21 22/* 23 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network 24 * adapters. 
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: releng/12.0/sys/dev/iwn/if_iwn.c 338949 2018-09-26 17:12:30Z imp $"); 29 30#include <sys/param.h> 31#include <sys/sockio.h> 32#include <sys/sysctl.h> 33#include <sys/mbuf.h> 34#include <sys/kernel.h> 35#include <sys/socket.h> 36#include <sys/systm.h> 37#include <sys/malloc.h> 38#include <sys/bus.h> 39#include <sys/conf.h> 40#include <sys/rman.h> 41#include <sys/endian.h> 42#include <sys/firmware.h> 43#include <sys/limits.h> 44#include <sys/module.h> 45#include <sys/priv.h> 46#include <sys/queue.h> 47#include <sys/taskqueue.h> 48 49#include <machine/bus.h> 50#include <machine/resource.h> 51#include <machine/clock.h> 52 53#include <dev/pci/pcireg.h> 54#include <dev/pci/pcivar.h> 55 56#include <net/if.h> 57#include <net/if_var.h> 58#include <net/if_dl.h> 59#include <net/if_media.h> 60 61#include <netinet/in.h> 62#include <netinet/if_ether.h> 63 64#include <net80211/ieee80211_var.h> 65#include <net80211/ieee80211_radiotap.h> 66#include <net80211/ieee80211_regdomain.h> 67#include <net80211/ieee80211_ratectl.h> 68 69#include <dev/iwn/if_iwnreg.h> 70#include <dev/iwn/if_iwnvar.h> 71#include <dev/iwn/if_iwn_devid.h> 72#include <dev/iwn/if_iwn_chip_cfg.h> 73#include <dev/iwn/if_iwn_debug.h> 74#include <dev/iwn/if_iwn_ioctl.h> 75 76struct iwn_ident { 77 uint16_t vendor; 78 uint16_t device; 79 const char *name; 80}; 81 82static const struct iwn_ident iwn_ident_table[] = { 83 { 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" }, 84 { 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" }, 85 { 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" }, 86 { 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" }, 87 { 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" }, 88 { 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" }, 89 { 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" }, 90 { 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" }, 91 { 0x8086, 
IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" }, 92 { 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" }, 93 { 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" }, 94 { 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" }, 95 { 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, 96 { 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, 97 /* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */ 98 { 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" }, 99 { 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" }, 100 { 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" }, 101 { 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" }, 102 { 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" }, 103 { 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" }, 104 { 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105" }, 105 { 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105" }, 106 { 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135" }, 107 { 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135" }, 108 { 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" }, 109 { 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" }, 110 { 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" }, 111 { 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" }, 112 { 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" }, 113 { 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" }, 114 { 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" }, 115 { 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" }, 116 { 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" }, 117 { 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" }, 118 { 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" }, 119 { 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" }, 120 { 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" }, 121 { 0x8086, IWN_DID_5x50_2, "Intel 
WiMAX/WiFi Link 5350" }, 122 { 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" }, 123 { 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" }, 124 { 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" }, 125 { 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" }, 126 { 0, 0, NULL } 127}; 128 129static int iwn_probe(device_t); 130static int iwn_attach(device_t); 131static void iwn4965_attach(struct iwn_softc *, uint16_t); 132static void iwn5000_attach(struct iwn_softc *, uint16_t); 133static int iwn_config_specific(struct iwn_softc *, uint16_t); 134static void iwn_radiotap_attach(struct iwn_softc *); 135static void iwn_sysctlattach(struct iwn_softc *); 136static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, 137 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 138 const uint8_t [IEEE80211_ADDR_LEN], 139 const uint8_t [IEEE80211_ADDR_LEN]); 140static void iwn_vap_delete(struct ieee80211vap *); 141static int iwn_detach(device_t); 142static int iwn_shutdown(device_t); 143static int iwn_suspend(device_t); 144static int iwn_resume(device_t); 145static int iwn_nic_lock(struct iwn_softc *); 146static int iwn_eeprom_lock(struct iwn_softc *); 147static int iwn_init_otprom(struct iwn_softc *); 148static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); 149static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); 150static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, 151 void **, bus_size_t, bus_size_t); 152static void iwn_dma_contig_free(struct iwn_dma_info *); 153static int iwn_alloc_sched(struct iwn_softc *); 154static void iwn_free_sched(struct iwn_softc *); 155static int iwn_alloc_kw(struct iwn_softc *); 156static void iwn_free_kw(struct iwn_softc *); 157static int iwn_alloc_ict(struct iwn_softc *); 158static void iwn_free_ict(struct iwn_softc *); 159static int iwn_alloc_fwmem(struct iwn_softc *); 160static void iwn_free_fwmem(struct iwn_softc *); 161static int iwn_alloc_rx_ring(struct 
iwn_softc *, struct iwn_rx_ring *); 162static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 163static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); 164static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, 165 int); 166static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 167static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); 168static void iwn_check_tx_ring(struct iwn_softc *, int); 169static void iwn5000_ict_reset(struct iwn_softc *); 170static int iwn_read_eeprom(struct iwn_softc *, 171 uint8_t macaddr[IEEE80211_ADDR_LEN]); 172static void iwn4965_read_eeprom(struct iwn_softc *); 173#ifdef IWN_DEBUG 174static void iwn4965_print_power_group(struct iwn_softc *, int); 175#endif 176static void iwn5000_read_eeprom(struct iwn_softc *); 177static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); 178static void iwn_read_eeprom_band(struct iwn_softc *, int, int, int *, 179 struct ieee80211_channel[]); 180static void iwn_read_eeprom_ht40(struct iwn_softc *, int, int, int *, 181 struct ieee80211_channel[]); 182static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); 183static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, 184 struct ieee80211_channel *); 185static void iwn_getradiocaps(struct ieee80211com *, int, int *, 186 struct ieee80211_channel[]); 187static int iwn_setregdomain(struct ieee80211com *, 188 struct ieee80211_regdomain *, int, 189 struct ieee80211_channel[]); 190static void iwn_read_eeprom_enhinfo(struct iwn_softc *); 191static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, 192 const uint8_t mac[IEEE80211_ADDR_LEN]); 193static void iwn_newassoc(struct ieee80211_node *, int); 194static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); 195static void iwn_calib_timeout(void *); 196static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *); 197static void iwn_rx_done(struct iwn_softc 
*, struct iwn_rx_desc *, 198 struct iwn_rx_data *); 199static void iwn_agg_tx_complete(struct iwn_softc *, struct iwn_tx_ring *, 200 int, int, int); 201static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *); 202static void iwn5000_rx_calib_results(struct iwn_softc *, 203 struct iwn_rx_desc *); 204static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *); 205static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 206 struct iwn_rx_data *); 207static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, 208 struct iwn_rx_data *); 209static void iwn_adj_ampdu_ptr(struct iwn_softc *, struct iwn_tx_ring *); 210static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, int, 211 uint8_t); 212static int iwn_ampdu_check_bitmap(uint64_t, int, int); 213static int iwn_ampdu_index_check(struct iwn_softc *, struct iwn_tx_ring *, 214 uint64_t, int, int); 215static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, void *); 216static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); 217static void iwn_notif_intr(struct iwn_softc *); 218static void iwn_wakeup_intr(struct iwn_softc *); 219static void iwn_rftoggle_task(void *, int); 220static void iwn_fatal_intr(struct iwn_softc *); 221static void iwn_intr(void *); 222static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, 223 uint16_t); 224static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, 225 uint16_t); 226#ifdef notyet 227static void iwn5000_reset_sched(struct iwn_softc *, int, int); 228#endif 229static int iwn_tx_data(struct iwn_softc *, struct mbuf *, 230 struct ieee80211_node *); 231static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, 232 struct ieee80211_node *, 233 const struct ieee80211_bpf_params *params); 234static int iwn_tx_cmd(struct iwn_softc *, struct mbuf *, 235 struct ieee80211_node *, struct iwn_tx_ring *); 236static void iwn_xmit_task(void *arg0, int pending); 237static int 
iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, 238 const struct ieee80211_bpf_params *); 239static int iwn_transmit(struct ieee80211com *, struct mbuf *); 240static void iwn_scan_timeout(void *); 241static void iwn_watchdog(void *); 242static int iwn_ioctl(struct ieee80211com *, u_long , void *); 243static void iwn_parent(struct ieee80211com *); 244static int iwn_cmd(struct iwn_softc *, int, const void *, int, int); 245static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, 246 int); 247static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, 248 int); 249static int iwn_set_link_quality(struct iwn_softc *, 250 struct ieee80211_node *); 251static int iwn_add_broadcast_node(struct iwn_softc *, int); 252static int iwn_updateedca(struct ieee80211com *); 253static void iwn_set_promisc(struct iwn_softc *); 254static void iwn_update_promisc(struct ieee80211com *); 255static void iwn_update_mcast(struct ieee80211com *); 256static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); 257static int iwn_set_critical_temp(struct iwn_softc *); 258static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); 259static void iwn4965_power_calibration(struct iwn_softc *, int); 260static int iwn4965_set_txpower(struct iwn_softc *, int); 261static int iwn5000_set_txpower(struct iwn_softc *, int); 262static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 263static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); 264static int iwn_get_noise(const struct iwn_rx_general_stats *); 265static int iwn4965_get_temperature(struct iwn_softc *); 266static int iwn5000_get_temperature(struct iwn_softc *); 267static int iwn_init_sensitivity(struct iwn_softc *); 268static void iwn_collect_noise(struct iwn_softc *, 269 const struct iwn_rx_general_stats *); 270static int iwn4965_init_gains(struct iwn_softc *); 271static int iwn5000_init_gains(struct iwn_softc *); 272static int iwn4965_set_gains(struct iwn_softc 
*); 273static int iwn5000_set_gains(struct iwn_softc *); 274static void iwn_tune_sensitivity(struct iwn_softc *, 275 const struct iwn_rx_stats *); 276static void iwn_save_stats_counters(struct iwn_softc *, 277 const struct iwn_stats *); 278static int iwn_send_sensitivity(struct iwn_softc *); 279static void iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *); 280static int iwn_set_pslevel(struct iwn_softc *, int, int, int); 281static int iwn_send_btcoex(struct iwn_softc *); 282static int iwn_send_advanced_btcoex(struct iwn_softc *); 283static int iwn5000_runtime_calib(struct iwn_softc *); 284static int iwn_check_bss_filter(struct iwn_softc *); 285static int iwn4965_rxon_assoc(struct iwn_softc *, int); 286static int iwn5000_rxon_assoc(struct iwn_softc *, int); 287static int iwn_send_rxon(struct iwn_softc *, int, int); 288static int iwn_config(struct iwn_softc *); 289static int iwn_scan(struct iwn_softc *, struct ieee80211vap *, 290 struct ieee80211_scan_state *, struct ieee80211_channel *); 291static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); 292static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); 293static int iwn_ampdu_rx_start(struct ieee80211_node *, 294 struct ieee80211_rx_ampdu *, int, int, int); 295static void iwn_ampdu_rx_stop(struct ieee80211_node *, 296 struct ieee80211_rx_ampdu *); 297static int iwn_addba_request(struct ieee80211_node *, 298 struct ieee80211_tx_ampdu *, int, int, int); 299static int iwn_addba_response(struct ieee80211_node *, 300 struct ieee80211_tx_ampdu *, int, int, int); 301static int iwn_ampdu_tx_start(struct ieee80211com *, 302 struct ieee80211_node *, uint8_t); 303static void iwn_ampdu_tx_stop(struct ieee80211_node *, 304 struct ieee80211_tx_ampdu *); 305static void iwn4965_ampdu_tx_start(struct iwn_softc *, 306 struct ieee80211_node *, int, uint8_t, uint16_t); 307static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, 308 uint8_t, uint16_t); 309static void iwn5000_ampdu_tx_start(struct 
iwn_softc *, 310 struct ieee80211_node *, int, uint8_t, uint16_t); 311static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, 312 uint8_t, uint16_t); 313static int iwn5000_query_calibration(struct iwn_softc *); 314static int iwn5000_send_calibration(struct iwn_softc *); 315static int iwn5000_send_wimax_coex(struct iwn_softc *); 316static int iwn5000_crystal_calib(struct iwn_softc *); 317static int iwn5000_temp_offset_calib(struct iwn_softc *); 318static int iwn5000_temp_offset_calibv2(struct iwn_softc *); 319static int iwn4965_post_alive(struct iwn_softc *); 320static int iwn5000_post_alive(struct iwn_softc *); 321static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, 322 int); 323static int iwn4965_load_firmware(struct iwn_softc *); 324static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, 325 const uint8_t *, int); 326static int iwn5000_load_firmware(struct iwn_softc *); 327static int iwn_read_firmware_leg(struct iwn_softc *, 328 struct iwn_fw_info *); 329static int iwn_read_firmware_tlv(struct iwn_softc *, 330 struct iwn_fw_info *, uint16_t); 331static int iwn_read_firmware(struct iwn_softc *); 332static void iwn_unload_firmware(struct iwn_softc *); 333static int iwn_clock_wait(struct iwn_softc *); 334static int iwn_apm_init(struct iwn_softc *); 335static void iwn_apm_stop_master(struct iwn_softc *); 336static void iwn_apm_stop(struct iwn_softc *); 337static int iwn4965_nic_config(struct iwn_softc *); 338static int iwn5000_nic_config(struct iwn_softc *); 339static int iwn_hw_prepare(struct iwn_softc *); 340static int iwn_hw_init(struct iwn_softc *); 341static void iwn_hw_stop(struct iwn_softc *); 342static void iwn_panicked(void *, int); 343static int iwn_init_locked(struct iwn_softc *); 344static int iwn_init(struct iwn_softc *); 345static void iwn_stop_locked(struct iwn_softc *); 346static void iwn_stop(struct iwn_softc *); 347static void iwn_scan_start(struct ieee80211com *); 348static void iwn_scan_end(struct ieee80211com 
*);
static void	iwn_set_channel(struct ieee80211com *);
static void	iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwn_scan_mindwell(struct ieee80211_scan_state *);
#ifdef IWN_DEBUG
static char	*iwn_get_csr_string(int);
static void	iwn_debug_register(struct iwn_softc *);
#endif

/* newbus device interface glue: probe/attach/detach plus power events. */
static device_method_t iwn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		iwn_probe),
	DEVMETHOD(device_attach,	iwn_attach),
	DEVMETHOD(device_detach,	iwn_detach),
	DEVMETHOD(device_shutdown,	iwn_shutdown),
	DEVMETHOD(device_suspend,	iwn_suspend),
	DEVMETHOD(device_resume,	iwn_resume),

	DEVMETHOD_END
};

static driver_t iwn_driver = {
	"iwn",
	iwn_methods,
	sizeof(struct iwn_softc)
};
static devclass_t iwn_devclass;

DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL);
/*
 * Export the PCI id table for devd(8)/loader autoload matching; the
 * final "- 1" drops the all-zero sentinel entry from the PNP data.
 */
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, iwn, iwn_ident_table,
    nitems(iwn_ident_table) - 1);
MODULE_VERSION(iwn, 1);

MODULE_DEPEND(iwn, firmware, 1, 1, 1);
MODULE_DEPEND(iwn, pci, 1, 1, 1);
MODULE_DEPEND(iwn, wlan, 1, 1, 1);

#ifndef __HAIKU__
/* Debug/ioctl character device; not available on the Haiku port. */
static d_ioctl_t iwn_cdev_ioctl;
static d_open_t iwn_cdev_open;
static d_close_t iwn_cdev_close;

static struct cdevsw iwn_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = iwn_cdev_open,
	.d_close = iwn_cdev_close,
	.d_ioctl = iwn_cdev_ioctl,
	.d_name = "iwn",
};
#endif

/*
 * Probe: match the PCI vendor/device pair against iwn_ident_table and,
 * on a hit, publish the human-readable adapter name.  Returns
 * BUS_PROBE_DEFAULT on a match, ENXIO otherwise.
 */
static int
iwn_probe(device_t dev)
{
	const struct iwn_ident *ident;

	/* Table is terminated by a NULL-name sentinel entry. */
	for (ident = iwn_ident_table; ident->name != NULL; ident++) {
		if (pci_get_vendor(dev) == ident->vendor &&
		    pci_get_device(dev) == ident->device) {
			device_set_desc(dev, ident->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return ENXIO;
}

/*
 * Return non-zero if the adapter supports 3 spatial streams.
 */
static int
iwn_is_3stream_device(struct iwn_softc *sc)
{
	/* XXX for now only 5300, until the 5350 can be tested */
	if (sc->hw_type == IWN_HW_REV_TYPE_5300)
		return (1);
	return (0);
}

/*
 * Attach: bring the adapter up far enough to register with net80211.
 *
 * Order matters here: PCI resources and the hardware revision are read
 * first, chip-specific parameters are selected, DMA rings are allocated,
 * then the EEPROM is read for the MAC address and capabilities before
 * ieee80211_ifattach().  The interrupt handler is hooked last, once all
 * state it may touch exists.  Any failure jumps to "fail", which relies
 * on iwn_detach() tolerating a partially-initialized softc.
 */
static int
iwn_attach(device_t dev)
{
	struct iwn_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic;
	int i, error, rid;

	sc->sc_dev = dev;

#ifdef IWN_DEBUG
	/* Pick up a per-device "debug" hint (e.g. hint.iwn.0.debug). */
	error = resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
	if (error != 0)
		sc->sc_debug = 0;
#else
	sc->sc_debug = 0;
#endif

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
	if (error != 0) {
		device_printf(dev, "PCIe capability structure not found!\n");
		return error;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	/* Enable bus-mastering. */
	pci_enable_busmaster(dev);

	/* Map BAR 0 (the device register window). */
	rid = PCIR_BAR(0);
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "can't map mem space\n");
		error = ENOMEM;
		return error;
	}
	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	/*
	 * Try a single MSI vector first (rid 1); fall back to the shared
	 * legacy INTx line (rid 0) if MSI allocation fails.
	 */
	i = 1;
	rid = 0;
	if (pci_alloc_msi(dev, &i) == 0)
		rid = 1;
	/* Install interrupt handler. */
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
	    (rid != 0 ? 0 : RF_SHAREABLE));
	if (sc->irq == NULL) {
		device_printf(dev, "can't map interrupt\n");
		error = ENOMEM;
		goto fail;
	}

	IWN_LOCK_INIT(sc);

	/* Read hardware revision and attach. */
	sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT)
	    & IWN_HW_REV_TYPE_MASK;
	sc->subdevice_id = pci_get_subdevice(dev);

	/*
	 * 4965 versus 5000 and later have different methods.
	 * Let's set those up first.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		iwn4965_attach(sc, pci_get_device(dev));
	else
		iwn5000_attach(sc, pci_get_device(dev));

	/*
	 * Next, let's setup the various parameters of each NIC.
	 */
	error = iwn_config_specific(sc, pci_get_device(dev));
	if (error != 0) {
		device_printf(dev, "could not attach device, error %d\n",
		    error);
		goto fail;
	}

	if ((error = iwn_hw_prepare(sc)) != 0) {
		device_printf(dev, "hardware not ready, error %d\n", error);
		goto fail;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwn_alloc_fwmem(sc)) != 0) {
		device_printf(dev,
		    "could not allocate memory for firmware, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwn_alloc_kw(sc)) != 0) {
		device_printf(dev,
		    "could not allocate keep warm page, error %d\n", error);
		goto fail;
	}

	/* Allocate ICT table for 5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    (error = iwn_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table, error %d\n",
		    error);
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwn_alloc_sched(sc)) != 0) {
		device_printf(dev,
		    "could not allocate TX scheduler rings, error %d\n", error);
		goto fail;
	}

	/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
	for (i = 0; i < sc->ntxqs; i++) {
		if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d, error %d\n", i,
			    error);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring, error %d\n",
		    error);
		goto fail;
	}

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	ic = &sc->sc_ic;
	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode supported */
		| IEEE80211_C_MONITOR		/* monitor mode supported */
#if 0
		| IEEE80211_C_BGSCAN		/* background scanning */
#endif
		| IEEE80211_C_TXPMGT		/* tx power management */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
#if 0
		| IEEE80211_C_IBSS		/* ibss/adhoc mode */
#endif
		| IEEE80211_C_WME		/* WME */
		| IEEE80211_C_PMGT		/* Station-side power mgmt */
		;

	/* Read MAC address, channels, etc from EEPROM. */
	if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) {
		device_printf(dev, "could not read EEPROM, error %d\n",
		    error);
		goto fail;
	}

	/*
	 * Count the number of available chains by summing the low three
	 * bits of each antenna chain mask (chains A, B and C).
	 */
	sc->ntxchains =
	    ((sc->txchainmask >> 2) & 1) +
	    ((sc->txchainmask >> 1) & 1) +
	    ((sc->txchainmask >> 0) & 1);
	sc->nrxchains =
	    ((sc->rxchainmask >> 2) & 1) +
	    ((sc->rxchainmask >> 1) & 1) +
	    ((sc->rxchainmask >> 0) & 1);
	if (bootverbose) {
		device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
		    sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
		    ic->ic_macaddr, ":");
	}

	if (sc->sc_flags & IWN_FLAG_HAS_11N) {
		ic->ic_rxstream = sc->nrxchains;
		ic->ic_txstream = sc->ntxchains;

		/*
		 * Some of the 3 antenna devices (ie, the 4965) only supports
		 * 2x2 operation. So correct the number of streams if
		 * it's not a 3-stream device.
		 */
		if (! iwn_is_3stream_device(sc)) {
			if (ic->ic_rxstream > 2)
				ic->ic_rxstream = 2;
			if (ic->ic_txstream > 2)
				ic->ic_txstream = 2;
		}

		ic->ic_htcaps =
			  IEEE80211_HTCAP_SMPS_OFF	/* SMPS mode disabled */
			| IEEE80211_HTCAP_SHORTGI20	/* short GI in 20MHz */
			| IEEE80211_HTCAP_CHWIDTH40	/* 40MHz channel width*/
			| IEEE80211_HTCAP_SHORTGI40	/* short GI in 40MHz */
#ifdef notyet
			| IEEE80211_HTCAP_GREENFIELD
#if IWN_RBUF_SIZE == 8192
			| IEEE80211_HTCAP_MAXAMSDU_7935	/* max A-MSDU length */
#else
			| IEEE80211_HTCAP_MAXAMSDU_3839	/* max A-MSDU length */
#endif
#endif
			/* s/w capabilities */
			| IEEE80211_HTC_HT		/* HT operation */
			| IEEE80211_HTC_AMPDU		/* tx A-MPDU */
#ifdef notyet
			| IEEE80211_HTC_AMSDU		/* tx A-MSDU */
#endif
			;
	}

	ieee80211_ifattach(ic);
	/*
	 * Override net80211 methods; the A-MPDU/ADDBA defaults are saved
	 * in the softc first so our wrappers can chain to them.
	 */
	ic->ic_vap_create = iwn_vap_create;
	ic->ic_ioctl = iwn_ioctl;
	ic->ic_parent = iwn_parent;
	ic->ic_vap_delete = iwn_vap_delete;
	ic->ic_transmit = iwn_transmit;
	ic->ic_raw_xmit = iwn_raw_xmit;
	ic->ic_node_alloc = iwn_node_alloc;
	sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
	ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
	sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
	ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = iwn_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = iwn_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = iwn_ampdu_tx_stop;
	ic->ic_newassoc = iwn_newassoc;
	ic->ic_wme.wme_update = iwn_updateedca;
	ic->ic_update_promisc = iwn_update_promisc;
	ic->ic_update_mcast = iwn_update_mcast;
	ic->ic_scan_start = iwn_scan_start;
	ic->ic_scan_end = iwn_scan_end;
	ic->ic_set_channel = iwn_set_channel;
	ic->ic_scan_curchan = iwn_scan_curchan;
	ic->ic_scan_mindwell = iwn_scan_mindwell;
	ic->ic_getradiocaps = iwn_getradiocaps;
	ic->ic_setregdomain = iwn_setregdomain;

	iwn_radiotap_attach(sc);

	/* Timers and deferred-work tasks, all serialized on sc_mtx. */
	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->scan_timeout, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_rftoggle_task, 0, iwn_rftoggle_task, sc);
	TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc);
	TASK_INIT(&sc->sc_xmit_task, 0, iwn_xmit_task, sc);

	mbufq_init(&sc->sc_xmit_queue, 1024);

	sc->sc_tq = taskqueue_create("iwn_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq");
	if (error != 0) {
		device_printf(dev, "can't start threads, error %d\n", error);
		goto fail;
	}

	iwn_sysctlattach(sc);

	/*
	 * Hook our interrupt after all initialization is complete.
	 */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, iwn_intr, sc, &sc->sc_ih);
	if (error != 0) {
		device_printf(dev, "can't establish interrupt, error %d\n",
		    error);
		goto fail;
	}

#if 0
	device_printf(sc->sc_dev, "%s: rx_stats=%d, rx_stats_bt=%d\n",
	    __func__,
	    sizeof(struct iwn_stats),
	    sizeof(struct iwn_stats_bt));
#endif

	if (bootverbose)
		ieee80211_announce(ic);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

#ifndef __HAIKU__
	/* Add debug ioctl right at the end */
	sc->sc_cdev = make_dev(&iwn_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
	if (sc->sc_cdev == NULL) {
		/* Non-fatal: the driver works without the debug device. */
		device_printf(dev, "failed to create debug character device\n");
	} else {
		sc->sc_cdev->si_drv1 = sc;
	}
#else
	sc->sc_cdev = NULL;
#endif
	return 0;
fail:
	/* iwn_detach() unwinds whatever was set up before the failure. */
	iwn_detach(dev);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
	return error;
}

/*
 * Define specific configuration based on device id and subdevice id
 * pid : PCI device id
 */
745static int 746iwn_config_specific(struct iwn_softc *sc, uint16_t pid) 747{ 748 749 switch (pid) { 750/* 4965 series */ 751 case IWN_DID_4965_1: 752 case IWN_DID_4965_2: 753 case IWN_DID_4965_3: 754 case IWN_DID_4965_4: 755 sc->base_params = &iwn4965_base_params; 756 sc->limits = &iwn4965_sensitivity_limits; 757 sc->fwname = "iwn4965fw"; 758 /* Override chains masks, ROM is known to be broken. */ 759 sc->txchainmask = IWN_ANT_AB; 760 sc->rxchainmask = IWN_ANT_ABC; 761 /* Enable normal btcoex */ 762 sc->sc_flags |= IWN_FLAG_BTCOEX; 763 break; 764/* 1000 Series */ 765 case IWN_DID_1000_1: 766 case IWN_DID_1000_2: 767 switch(sc->subdevice_id) { 768 case IWN_SDID_1000_1: 769 case IWN_SDID_1000_2: 770 case IWN_SDID_1000_3: 771 case IWN_SDID_1000_4: 772 case IWN_SDID_1000_5: 773 case IWN_SDID_1000_6: 774 case IWN_SDID_1000_7: 775 case IWN_SDID_1000_8: 776 case IWN_SDID_1000_9: 777 case IWN_SDID_1000_10: 778 case IWN_SDID_1000_11: 779 case IWN_SDID_1000_12: 780 sc->limits = &iwn1000_sensitivity_limits; 781 sc->base_params = &iwn1000_base_params; 782 sc->fwname = "iwn1000fw"; 783 break; 784 default: 785 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 786 "0x%04x rev %d not supported (subdevice)\n", pid, 787 sc->subdevice_id,sc->hw_type); 788 return ENOTSUP; 789 } 790 break; 791/* 6x00 Series */ 792 case IWN_DID_6x00_2: 793 case IWN_DID_6x00_4: 794 case IWN_DID_6x00_1: 795 case IWN_DID_6x00_3: 796 sc->fwname = "iwn6000fw"; 797 sc->limits = &iwn6000_sensitivity_limits; 798 switch(sc->subdevice_id) { 799 case IWN_SDID_6x00_1: 800 case IWN_SDID_6x00_2: 801 case IWN_SDID_6x00_8: 802 //iwl6000_3agn_cfg 803 sc->base_params = &iwn_6000_base_params; 804 break; 805 case IWN_SDID_6x00_3: 806 case IWN_SDID_6x00_6: 807 case IWN_SDID_6x00_9: 808 ////iwl6000i_2agn 809 case IWN_SDID_6x00_4: 810 case IWN_SDID_6x00_7: 811 case IWN_SDID_6x00_10: 812 //iwl6000i_2abg_cfg 813 case IWN_SDID_6x00_5: 814 //iwl6000i_2bg_cfg 815 sc->base_params = &iwn_6000i_base_params; 816 
sc->sc_flags |= IWN_FLAG_INTERNAL_PA; 817 sc->txchainmask = IWN_ANT_BC; 818 sc->rxchainmask = IWN_ANT_BC; 819 break; 820 default: 821 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 822 "0x%04x rev %d not supported (subdevice)\n", pid, 823 sc->subdevice_id,sc->hw_type); 824 return ENOTSUP; 825 } 826 break; 827/* 6x05 Series */ 828 case IWN_DID_6x05_1: 829 case IWN_DID_6x05_2: 830 switch(sc->subdevice_id) { 831 case IWN_SDID_6x05_1: 832 case IWN_SDID_6x05_4: 833 case IWN_SDID_6x05_6: 834 //iwl6005_2agn_cfg 835 case IWN_SDID_6x05_2: 836 case IWN_SDID_6x05_5: 837 case IWN_SDID_6x05_7: 838 //iwl6005_2abg_cfg 839 case IWN_SDID_6x05_3: 840 //iwl6005_2bg_cfg 841 case IWN_SDID_6x05_8: 842 case IWN_SDID_6x05_9: 843 //iwl6005_2agn_sff_cfg 844 case IWN_SDID_6x05_10: 845 //iwl6005_2agn_d_cfg 846 case IWN_SDID_6x05_11: 847 //iwl6005_2agn_mow1_cfg 848 case IWN_SDID_6x05_12: 849 //iwl6005_2agn_mow2_cfg 850 sc->fwname = "iwn6000g2afw"; 851 sc->limits = &iwn6000_sensitivity_limits; 852 sc->base_params = &iwn_6000g2_base_params; 853 break; 854 default: 855 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 856 "0x%04x rev %d not supported (subdevice)\n", pid, 857 sc->subdevice_id,sc->hw_type); 858 return ENOTSUP; 859 } 860 break; 861/* 6x35 Series */ 862 case IWN_DID_6035_1: 863 case IWN_DID_6035_2: 864 switch(sc->subdevice_id) { 865 case IWN_SDID_6035_1: 866 case IWN_SDID_6035_2: 867 case IWN_SDID_6035_3: 868 case IWN_SDID_6035_4: 869 case IWN_SDID_6035_5: 870 sc->fwname = "iwn6000g2bfw"; 871 sc->limits = &iwn6235_sensitivity_limits; 872 sc->base_params = &iwn_6235_base_params; 873 break; 874 default: 875 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 876 "0x%04x rev %d not supported (subdevice)\n", pid, 877 sc->subdevice_id,sc->hw_type); 878 return ENOTSUP; 879 } 880 break; 881/* 6x50 WiFi/WiMax Series */ 882 case IWN_DID_6050_1: 883 case IWN_DID_6050_2: 884 switch(sc->subdevice_id) { 885 case IWN_SDID_6050_1: 886 case IWN_SDID_6050_3: 887 
case IWN_SDID_6050_5: 888 //iwl6050_2agn_cfg 889 case IWN_SDID_6050_2: 890 case IWN_SDID_6050_4: 891 case IWN_SDID_6050_6: 892 //iwl6050_2abg_cfg 893 sc->fwname = "iwn6050fw"; 894 sc->txchainmask = IWN_ANT_AB; 895 sc->rxchainmask = IWN_ANT_AB; 896 sc->limits = &iwn6000_sensitivity_limits; 897 sc->base_params = &iwn_6050_base_params; 898 break; 899 default: 900 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 901 "0x%04x rev %d not supported (subdevice)\n", pid, 902 sc->subdevice_id,sc->hw_type); 903 return ENOTSUP; 904 } 905 break; 906/* 6150 WiFi/WiMax Series */ 907 case IWN_DID_6150_1: 908 case IWN_DID_6150_2: 909 switch(sc->subdevice_id) { 910 case IWN_SDID_6150_1: 911 case IWN_SDID_6150_3: 912 case IWN_SDID_6150_5: 913 // iwl6150_bgn_cfg 914 case IWN_SDID_6150_2: 915 case IWN_SDID_6150_4: 916 case IWN_SDID_6150_6: 917 //iwl6150_bg_cfg 918 sc->fwname = "iwn6050fw"; 919 sc->limits = &iwn6000_sensitivity_limits; 920 sc->base_params = &iwn_6150_base_params; 921 break; 922 default: 923 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 924 "0x%04x rev %d not supported (subdevice)\n", pid, 925 sc->subdevice_id,sc->hw_type); 926 return ENOTSUP; 927 } 928 break; 929/* 6030 Series and 1030 Series */ 930 case IWN_DID_x030_1: 931 case IWN_DID_x030_2: 932 case IWN_DID_x030_3: 933 case IWN_DID_x030_4: 934 switch(sc->subdevice_id) { 935 case IWN_SDID_x030_1: 936 case IWN_SDID_x030_3: 937 case IWN_SDID_x030_5: 938 // iwl1030_bgn_cfg 939 case IWN_SDID_x030_2: 940 case IWN_SDID_x030_4: 941 case IWN_SDID_x030_6: 942 //iwl1030_bg_cfg 943 case IWN_SDID_x030_7: 944 case IWN_SDID_x030_10: 945 case IWN_SDID_x030_14: 946 //iwl6030_2agn_cfg 947 case IWN_SDID_x030_8: 948 case IWN_SDID_x030_11: 949 case IWN_SDID_x030_15: 950 // iwl6030_2bgn_cfg 951 case IWN_SDID_x030_9: 952 case IWN_SDID_x030_12: 953 case IWN_SDID_x030_16: 954 // iwl6030_2abg_cfg 955 case IWN_SDID_x030_13: 956 //iwl6030_2bg_cfg 957 sc->fwname = "iwn6000g2bfw"; 958 sc->limits = 
&iwn6000_sensitivity_limits; 959 sc->base_params = &iwn_6000g2b_base_params; 960 break; 961 default: 962 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 963 "0x%04x rev %d not supported (subdevice)\n", pid, 964 sc->subdevice_id,sc->hw_type); 965 return ENOTSUP; 966 } 967 break; 968/* 130 Series WiFi */ 969/* XXX: This series will need adjustment for rate. 970 * see rx_with_siso_diversity in linux kernel 971 */ 972 case IWN_DID_130_1: 973 case IWN_DID_130_2: 974 switch(sc->subdevice_id) { 975 case IWN_SDID_130_1: 976 case IWN_SDID_130_3: 977 case IWN_SDID_130_5: 978 //iwl130_bgn_cfg 979 case IWN_SDID_130_2: 980 case IWN_SDID_130_4: 981 case IWN_SDID_130_6: 982 //iwl130_bg_cfg 983 sc->fwname = "iwn6000g2bfw"; 984 sc->limits = &iwn6000_sensitivity_limits; 985 sc->base_params = &iwn_6000g2b_base_params; 986 break; 987 default: 988 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 989 "0x%04x rev %d not supported (subdevice)\n", pid, 990 sc->subdevice_id,sc->hw_type); 991 return ENOTSUP; 992 } 993 break; 994/* 100 Series WiFi */ 995 case IWN_DID_100_1: 996 case IWN_DID_100_2: 997 switch(sc->subdevice_id) { 998 case IWN_SDID_100_1: 999 case IWN_SDID_100_2: 1000 case IWN_SDID_100_3: 1001 case IWN_SDID_100_4: 1002 case IWN_SDID_100_5: 1003 case IWN_SDID_100_6: 1004 sc->limits = &iwn1000_sensitivity_limits; 1005 sc->base_params = &iwn1000_base_params; 1006 sc->fwname = "iwn100fw"; 1007 break; 1008 default: 1009 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1010 "0x%04x rev %d not supported (subdevice)\n", pid, 1011 sc->subdevice_id,sc->hw_type); 1012 return ENOTSUP; 1013 } 1014 break; 1015 1016/* 105 Series */ 1017/* XXX: This series will need adjustment for rate. 
1018 * see rx_with_siso_diversity in linux kernel 1019 */ 1020 case IWN_DID_105_1: 1021 case IWN_DID_105_2: 1022 switch(sc->subdevice_id) { 1023 case IWN_SDID_105_1: 1024 case IWN_SDID_105_2: 1025 case IWN_SDID_105_3: 1026 //iwl105_bgn_cfg 1027 case IWN_SDID_105_4: 1028 //iwl105_bgn_d_cfg 1029 sc->limits = &iwn2030_sensitivity_limits; 1030 sc->base_params = &iwn2000_base_params; 1031 sc->fwname = "iwn105fw"; 1032 break; 1033 default: 1034 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1035 "0x%04x rev %d not supported (subdevice)\n", pid, 1036 sc->subdevice_id,sc->hw_type); 1037 return ENOTSUP; 1038 } 1039 break; 1040 1041/* 135 Series */ 1042/* XXX: This series will need adjustment for rate. 1043 * see rx_with_siso_diversity in linux kernel 1044 */ 1045 case IWN_DID_135_1: 1046 case IWN_DID_135_2: 1047 switch(sc->subdevice_id) { 1048 case IWN_SDID_135_1: 1049 case IWN_SDID_135_2: 1050 case IWN_SDID_135_3: 1051 sc->limits = &iwn2030_sensitivity_limits; 1052 sc->base_params = &iwn2030_base_params; 1053 sc->fwname = "iwn135fw"; 1054 break; 1055 default: 1056 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1057 "0x%04x rev %d not supported (subdevice)\n", pid, 1058 sc->subdevice_id,sc->hw_type); 1059 return ENOTSUP; 1060 } 1061 break; 1062 1063/* 2x00 Series */ 1064 case IWN_DID_2x00_1: 1065 case IWN_DID_2x00_2: 1066 switch(sc->subdevice_id) { 1067 case IWN_SDID_2x00_1: 1068 case IWN_SDID_2x00_2: 1069 case IWN_SDID_2x00_3: 1070 //iwl2000_2bgn_cfg 1071 case IWN_SDID_2x00_4: 1072 //iwl2000_2bgn_d_cfg 1073 sc->limits = &iwn2030_sensitivity_limits; 1074 sc->base_params = &iwn2000_base_params; 1075 sc->fwname = "iwn2000fw"; 1076 break; 1077 default: 1078 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1079 "0x%04x rev %d not supported (subdevice) \n", 1080 pid, sc->subdevice_id, sc->hw_type); 1081 return ENOTSUP; 1082 } 1083 break; 1084/* 2x30 Series */ 1085 case IWN_DID_2x30_1: 1086 case IWN_DID_2x30_2: 1087 
switch(sc->subdevice_id) { 1088 case IWN_SDID_2x30_1: 1089 case IWN_SDID_2x30_3: 1090 case IWN_SDID_2x30_5: 1091 //iwl100_bgn_cfg 1092 case IWN_SDID_2x30_2: 1093 case IWN_SDID_2x30_4: 1094 case IWN_SDID_2x30_6: 1095 //iwl100_bg_cfg 1096 sc->limits = &iwn2030_sensitivity_limits; 1097 sc->base_params = &iwn2030_base_params; 1098 sc->fwname = "iwn2030fw"; 1099 break; 1100 default: 1101 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1102 "0x%04x rev %d not supported (subdevice)\n", pid, 1103 sc->subdevice_id,sc->hw_type); 1104 return ENOTSUP; 1105 } 1106 break; 1107/* 5x00 Series */ 1108 case IWN_DID_5x00_1: 1109 case IWN_DID_5x00_2: 1110 case IWN_DID_5x00_3: 1111 case IWN_DID_5x00_4: 1112 sc->limits = &iwn5000_sensitivity_limits; 1113 sc->base_params = &iwn5000_base_params; 1114 sc->fwname = "iwn5000fw"; 1115 switch(sc->subdevice_id) { 1116 case IWN_SDID_5x00_1: 1117 case IWN_SDID_5x00_2: 1118 case IWN_SDID_5x00_3: 1119 case IWN_SDID_5x00_4: 1120 case IWN_SDID_5x00_9: 1121 case IWN_SDID_5x00_10: 1122 case IWN_SDID_5x00_11: 1123 case IWN_SDID_5x00_12: 1124 case IWN_SDID_5x00_17: 1125 case IWN_SDID_5x00_18: 1126 case IWN_SDID_5x00_19: 1127 case IWN_SDID_5x00_20: 1128 //iwl5100_agn_cfg 1129 sc->txchainmask = IWN_ANT_B; 1130 sc->rxchainmask = IWN_ANT_AB; 1131 break; 1132 case IWN_SDID_5x00_5: 1133 case IWN_SDID_5x00_6: 1134 case IWN_SDID_5x00_13: 1135 case IWN_SDID_5x00_14: 1136 case IWN_SDID_5x00_21: 1137 case IWN_SDID_5x00_22: 1138 //iwl5100_bgn_cfg 1139 sc->txchainmask = IWN_ANT_B; 1140 sc->rxchainmask = IWN_ANT_AB; 1141 break; 1142 case IWN_SDID_5x00_7: 1143 case IWN_SDID_5x00_8: 1144 case IWN_SDID_5x00_15: 1145 case IWN_SDID_5x00_16: 1146 case IWN_SDID_5x00_23: 1147 case IWN_SDID_5x00_24: 1148 //iwl5100_abg_cfg 1149 sc->txchainmask = IWN_ANT_B; 1150 sc->rxchainmask = IWN_ANT_AB; 1151 break; 1152 case IWN_SDID_5x00_25: 1153 case IWN_SDID_5x00_26: 1154 case IWN_SDID_5x00_27: 1155 case IWN_SDID_5x00_28: 1156 case IWN_SDID_5x00_29: 1157 case 
IWN_SDID_5x00_30: 1158 case IWN_SDID_5x00_31: 1159 case IWN_SDID_5x00_32: 1160 case IWN_SDID_5x00_33: 1161 case IWN_SDID_5x00_34: 1162 case IWN_SDID_5x00_35: 1163 case IWN_SDID_5x00_36: 1164 //iwl5300_agn_cfg 1165 sc->txchainmask = IWN_ANT_ABC; 1166 sc->rxchainmask = IWN_ANT_ABC; 1167 break; 1168 default: 1169 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1170 "0x%04x rev %d not supported (subdevice)\n", pid, 1171 sc->subdevice_id,sc->hw_type); 1172 return ENOTSUP; 1173 } 1174 break; 1175/* 5x50 Series */ 1176 case IWN_DID_5x50_1: 1177 case IWN_DID_5x50_2: 1178 case IWN_DID_5x50_3: 1179 case IWN_DID_5x50_4: 1180 sc->limits = &iwn5000_sensitivity_limits; 1181 sc->base_params = &iwn5000_base_params; 1182 sc->fwname = "iwn5000fw"; 1183 switch(sc->subdevice_id) { 1184 case IWN_SDID_5x50_1: 1185 case IWN_SDID_5x50_2: 1186 case IWN_SDID_5x50_3: 1187 //iwl5350_agn_cfg 1188 sc->limits = &iwn5000_sensitivity_limits; 1189 sc->base_params = &iwn5000_base_params; 1190 sc->fwname = "iwn5000fw"; 1191 break; 1192 case IWN_SDID_5x50_4: 1193 case IWN_SDID_5x50_5: 1194 case IWN_SDID_5x50_8: 1195 case IWN_SDID_5x50_9: 1196 case IWN_SDID_5x50_10: 1197 case IWN_SDID_5x50_11: 1198 //iwl5150_agn_cfg 1199 case IWN_SDID_5x50_6: 1200 case IWN_SDID_5x50_7: 1201 case IWN_SDID_5x50_12: 1202 case IWN_SDID_5x50_13: 1203 //iwl5150_abg_cfg 1204 sc->limits = &iwn5000_sensitivity_limits; 1205 sc->fwname = "iwn5150fw"; 1206 sc->base_params = &iwn_5x50_base_params; 1207 break; 1208 default: 1209 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" 1210 "0x%04x rev %d not supported (subdevice)\n", pid, 1211 sc->subdevice_id,sc->hw_type); 1212 return ENOTSUP; 1213 } 1214 break; 1215 default: 1216 device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x" 1217 "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id, 1218 sc->hw_type); 1219 return ENOTSUP; 1220 } 1221 return 0; 1222} 1223 1224static void 1225iwn4965_attach(struct iwn_softc *sc, uint16_t pid) 1226{ 
1227 struct iwn_ops *ops = &sc->ops; 1228 1229 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1230 1231 ops->load_firmware = iwn4965_load_firmware; 1232 ops->read_eeprom = iwn4965_read_eeprom; 1233 ops->post_alive = iwn4965_post_alive; 1234 ops->nic_config = iwn4965_nic_config; 1235 ops->update_sched = iwn4965_update_sched; 1236 ops->get_temperature = iwn4965_get_temperature; 1237 ops->get_rssi = iwn4965_get_rssi; 1238 ops->set_txpower = iwn4965_set_txpower; 1239 ops->init_gains = iwn4965_init_gains; 1240 ops->set_gains = iwn4965_set_gains; 1241 ops->rxon_assoc = iwn4965_rxon_assoc; 1242 ops->add_node = iwn4965_add_node; 1243 ops->tx_done = iwn4965_tx_done; 1244 ops->ampdu_tx_start = iwn4965_ampdu_tx_start; 1245 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; 1246 sc->ntxqs = IWN4965_NTXQUEUES; 1247 sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; 1248 sc->ndmachnls = IWN4965_NDMACHNLS; 1249 sc->broadcast_id = IWN4965_ID_BROADCAST; 1250 sc->rxonsz = IWN4965_RXONSZ; 1251 sc->schedsz = IWN4965_SCHEDSZ; 1252 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; 1253 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; 1254 sc->fwsz = IWN4965_FWSZ; 1255 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT; 1256 sc->limits = &iwn4965_sensitivity_limits; 1257 sc->fwname = "iwn4965fw"; 1258 /* Override chains masks, ROM is known to be broken. 
*/ 1259 sc->txchainmask = IWN_ANT_AB; 1260 sc->rxchainmask = IWN_ANT_ABC; 1261 /* Enable normal btcoex */ 1262 sc->sc_flags |= IWN_FLAG_BTCOEX; 1263 1264 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); 1265} 1266 1267static void 1268iwn5000_attach(struct iwn_softc *sc, uint16_t pid) 1269{ 1270 struct iwn_ops *ops = &sc->ops; 1271 1272 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1273 1274 ops->load_firmware = iwn5000_load_firmware; 1275 ops->read_eeprom = iwn5000_read_eeprom; 1276 ops->post_alive = iwn5000_post_alive; 1277 ops->nic_config = iwn5000_nic_config; 1278 ops->update_sched = iwn5000_update_sched; 1279 ops->get_temperature = iwn5000_get_temperature; 1280 ops->get_rssi = iwn5000_get_rssi; 1281 ops->set_txpower = iwn5000_set_txpower; 1282 ops->init_gains = iwn5000_init_gains; 1283 ops->set_gains = iwn5000_set_gains; 1284 ops->rxon_assoc = iwn5000_rxon_assoc; 1285 ops->add_node = iwn5000_add_node; 1286 ops->tx_done = iwn5000_tx_done; 1287 ops->ampdu_tx_start = iwn5000_ampdu_tx_start; 1288 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; 1289 sc->ntxqs = IWN5000_NTXQUEUES; 1290 sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; 1291 sc->ndmachnls = IWN5000_NDMACHNLS; 1292 sc->broadcast_id = IWN5000_ID_BROADCAST; 1293 sc->rxonsz = IWN5000_RXONSZ; 1294 sc->schedsz = IWN5000_SCHEDSZ; 1295 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; 1296 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; 1297 sc->fwsz = IWN5000_FWSZ; 1298 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; 1299 sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; 1300 sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; 1301 1302 DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); 1303} 1304 1305/* 1306 * Attach the interface to 802.11 radiotap. 
1307 */ 1308static void 1309iwn_radiotap_attach(struct iwn_softc *sc) 1310{ 1311 1312 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1313 ieee80211_radiotap_attach(&sc->sc_ic, 1314 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 1315 IWN_TX_RADIOTAP_PRESENT, 1316 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 1317 IWN_RX_RADIOTAP_PRESENT); 1318 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1319} 1320 1321static void 1322iwn_sysctlattach(struct iwn_softc *sc) 1323{ 1324#ifdef IWN_DEBUG 1325 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 1326 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 1327 1328 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1329 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 1330 "control debugging printfs"); 1331#endif 1332} 1333 1334static struct ieee80211vap * 1335iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 1336 enum ieee80211_opmode opmode, int flags, 1337 const uint8_t bssid[IEEE80211_ADDR_LEN], 1338 const uint8_t mac[IEEE80211_ADDR_LEN]) 1339{ 1340 struct iwn_softc *sc = ic->ic_softc; 1341 struct iwn_vap *ivp; 1342 struct ieee80211vap *vap; 1343 1344 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 1345 return NULL; 1346 1347 ivp = malloc(sizeof(struct iwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); 1348 vap = &ivp->iv_vap; 1349 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 1350 ivp->ctx = IWN_RXON_BSS_CTX; 1351 vap->iv_bmissthreshold = 10; /* override default */ 1352 /* Override with driver methods. */ 1353 ivp->iv_newstate = vap->iv_newstate; 1354 vap->iv_newstate = iwn_newstate; 1355 sc->ivap[IWN_RXON_BSS_CTX] = vap; 1356 vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; 1357 vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4; /* 4uS */ 1358 1359 ieee80211_ratectl_init(vap); 1360 /* Complete setup. 
*/ 1361 ieee80211_vap_attach(vap, ieee80211_media_change, 1362 ieee80211_media_status, mac); 1363 ic->ic_opmode = opmode; 1364 return vap; 1365} 1366 1367static void 1368iwn_vap_delete(struct ieee80211vap *vap) 1369{ 1370 struct iwn_vap *ivp = IWN_VAP(vap); 1371 1372 ieee80211_ratectl_deinit(vap); 1373 ieee80211_vap_detach(vap); 1374 free(ivp, M_80211_VAP); 1375} 1376 1377static void 1378iwn_xmit_queue_drain(struct iwn_softc *sc) 1379{ 1380 struct mbuf *m; 1381 struct ieee80211_node *ni; 1382 1383 IWN_LOCK_ASSERT(sc); 1384 while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) { 1385 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 1386 ieee80211_free_node(ni); 1387 m_freem(m); 1388 } 1389} 1390 1391static int 1392iwn_xmit_queue_enqueue(struct iwn_softc *sc, struct mbuf *m) 1393{ 1394 1395 IWN_LOCK_ASSERT(sc); 1396 return (mbufq_enqueue(&sc->sc_xmit_queue, m)); 1397} 1398 1399static int 1400iwn_detach(device_t dev) 1401{ 1402 struct iwn_softc *sc = device_get_softc(dev); 1403 int qid; 1404 1405 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1406 1407 if (sc->sc_ic.ic_softc != NULL) { 1408 /* Free the mbuf queue and node references */ 1409 IWN_LOCK(sc); 1410 iwn_xmit_queue_drain(sc); 1411 IWN_UNLOCK(sc); 1412 1413 iwn_stop(sc); 1414 1415 taskqueue_drain_all(sc->sc_tq); 1416 taskqueue_free(sc->sc_tq); 1417 1418 callout_drain(&sc->watchdog_to); 1419 callout_drain(&sc->scan_timeout); 1420 callout_drain(&sc->calib_to); 1421 ieee80211_ifdetach(&sc->sc_ic); 1422 } 1423 1424 /* Uninstall interrupt handler. */ 1425 if (sc->irq != NULL) { 1426 bus_teardown_intr(dev, sc->irq, sc->sc_ih); 1427 bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), 1428 sc->irq); 1429 pci_release_msi(dev); 1430 } 1431 1432 /* Free DMA resources. 
*/ 1433 iwn_free_rx_ring(sc, &sc->rxq); 1434 for (qid = 0; qid < sc->ntxqs; qid++) 1435 iwn_free_tx_ring(sc, &sc->txq[qid]); 1436 iwn_free_sched(sc); 1437 iwn_free_kw(sc); 1438 if (sc->ict != NULL) 1439 iwn_free_ict(sc); 1440 iwn_free_fwmem(sc); 1441 1442 if (sc->mem != NULL) 1443 bus_release_resource(dev, SYS_RES_MEMORY, 1444 rman_get_rid(sc->mem), sc->mem); 1445 1446#ifndef __HAIKU__ 1447 if (sc->sc_cdev) { 1448 destroy_dev(sc->sc_cdev); 1449 sc->sc_cdev = NULL; 1450 } 1451#endif 1452 1453 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__); 1454 IWN_LOCK_DESTROY(sc); 1455 return 0; 1456} 1457 1458static int 1459iwn_shutdown(device_t dev) 1460{ 1461 struct iwn_softc *sc = device_get_softc(dev); 1462 1463 iwn_stop(sc); 1464 return 0; 1465} 1466 1467static int 1468iwn_suspend(device_t dev) 1469{ 1470 struct iwn_softc *sc = device_get_softc(dev); 1471 1472 ieee80211_suspend_all(&sc->sc_ic); 1473 return 0; 1474} 1475 1476static int 1477iwn_resume(device_t dev) 1478{ 1479 struct iwn_softc *sc = device_get_softc(dev); 1480 1481 /* Clear device-specific "PCI retry timeout" register (41h). */ 1482 pci_write_config(dev, 0x41, 0, 1); 1483 1484 ieee80211_resume_all(&sc->sc_ic); 1485 return 0; 1486} 1487 1488static int 1489iwn_nic_lock(struct iwn_softc *sc) 1490{ 1491 int ntries; 1492 1493 /* Request exclusive access to NIC. */ 1494 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1495 1496 /* Spin until we actually get the lock. 
*/ 1497 for (ntries = 0; ntries < 1000; ntries++) { 1498 if ((IWN_READ(sc, IWN_GP_CNTRL) & 1499 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == 1500 IWN_GP_CNTRL_MAC_ACCESS_ENA) 1501 return 0; 1502 DELAY(10); 1503 } 1504 return ETIMEDOUT; 1505} 1506 1507static __inline void 1508iwn_nic_unlock(struct iwn_softc *sc) 1509{ 1510 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); 1511} 1512 1513static __inline uint32_t 1514iwn_prph_read(struct iwn_softc *sc, uint32_t addr) 1515{ 1516 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); 1517 IWN_BARRIER_READ_WRITE(sc); 1518 return IWN_READ(sc, IWN_PRPH_RDATA); 1519} 1520 1521static __inline void 1522iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1523{ 1524 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); 1525 IWN_BARRIER_WRITE(sc); 1526 IWN_WRITE(sc, IWN_PRPH_WDATA, data); 1527} 1528 1529static __inline void 1530iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1531{ 1532 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); 1533} 1534 1535static __inline void 1536iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) 1537{ 1538 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); 1539} 1540 1541static __inline void 1542iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, 1543 const uint32_t *data, int count) 1544{ 1545 for (; count > 0; count--, data++, addr += 4) 1546 iwn_prph_write(sc, addr, *data); 1547} 1548 1549static __inline uint32_t 1550iwn_mem_read(struct iwn_softc *sc, uint32_t addr) 1551{ 1552 IWN_WRITE(sc, IWN_MEM_RADDR, addr); 1553 IWN_BARRIER_READ_WRITE(sc); 1554 return IWN_READ(sc, IWN_MEM_RDATA); 1555} 1556 1557static __inline void 1558iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) 1559{ 1560 IWN_WRITE(sc, IWN_MEM_WADDR, addr); 1561 IWN_BARRIER_WRITE(sc); 1562 IWN_WRITE(sc, IWN_MEM_WDATA, data); 1563} 1564 1565static __inline void 1566iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, 
uint16_t data) 1567{ 1568 uint32_t tmp; 1569 1570 tmp = iwn_mem_read(sc, addr & ~3); 1571 if (addr & 3) 1572 tmp = (tmp & 0x0000ffff) | data << 16; 1573 else 1574 tmp = (tmp & 0xffff0000) | data; 1575 iwn_mem_write(sc, addr & ~3, tmp); 1576} 1577 1578static __inline void 1579iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, 1580 int count) 1581{ 1582 for (; count > 0; count--, addr += 4) 1583 *data++ = iwn_mem_read(sc, addr); 1584} 1585 1586static __inline void 1587iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, 1588 int count) 1589{ 1590 for (; count > 0; count--, addr += 4) 1591 iwn_mem_write(sc, addr, val); 1592} 1593 1594static int 1595iwn_eeprom_lock(struct iwn_softc *sc) 1596{ 1597 int i, ntries; 1598 1599 for (i = 0; i < 100; i++) { 1600 /* Request exclusive access to EEPROM. */ 1601 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, 1602 IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1603 1604 /* Spin until we actually get the lock. */ 1605 for (ntries = 0; ntries < 100; ntries++) { 1606 if (IWN_READ(sc, IWN_HW_IF_CONFIG) & 1607 IWN_HW_IF_CONFIG_EEPROM_LOCKED) 1608 return 0; 1609 DELAY(10); 1610 } 1611 } 1612 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__); 1613 return ETIMEDOUT; 1614} 1615 1616static __inline void 1617iwn_eeprom_unlock(struct iwn_softc *sc) 1618{ 1619 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); 1620} 1621 1622/* 1623 * Initialize access by host to One Time Programmable ROM. 1624 * NB: This kind of ROM can be found on 1000 or 6000 Series only. 1625 */ 1626static int 1627iwn_init_otprom(struct iwn_softc *sc) 1628{ 1629 uint16_t prev, base, next; 1630 int count, error; 1631 1632 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1633 1634 /* Wait for clock stabilization before accessing prph. 
*/ 1635 if ((error = iwn_clock_wait(sc)) != 0) 1636 return error; 1637 1638 if ((error = iwn_nic_lock(sc)) != 0) 1639 return error; 1640 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1641 DELAY(5); 1642 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); 1643 iwn_nic_unlock(sc); 1644 1645 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */ 1646 if (sc->base_params->shadow_ram_support) { 1647 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, 1648 IWN_RESET_LINK_PWR_MGMT_DIS); 1649 } 1650 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); 1651 /* Clear ECC status. */ 1652 IWN_SETBITS(sc, IWN_OTP_GP, 1653 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); 1654 1655 /* 1656 * Find the block before last block (contains the EEPROM image) 1657 * for HW without OTP shadow RAM. 1658 */ 1659 if (! sc->base_params->shadow_ram_support) { 1660 /* Switch to absolute addressing mode. */ 1661 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); 1662 base = prev = 0; 1663 for (count = 0; count < sc->base_params->max_ll_items; 1664 count++) { 1665 error = iwn_read_prom_data(sc, base, &next, 2); 1666 if (error != 0) 1667 return error; 1668 if (next == 0) /* End of linked-list. */ 1669 break; 1670 prev = base; 1671 base = le16toh(next); 1672 } 1673 if (count == 0 || count == sc->base_params->max_ll_items) 1674 return EIO; 1675 /* Skip "next" word. 
*/ 1676 sc->prom_base = prev + 1; 1677 } 1678 1679 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1680 1681 return 0; 1682} 1683 1684static int 1685iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) 1686{ 1687 uint8_t *out = data; 1688 uint32_t val, tmp; 1689 int ntries; 1690 1691 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1692 1693 addr += sc->prom_base; 1694 for (; count > 0; count -= 2, addr++) { 1695 IWN_WRITE(sc, IWN_EEPROM, addr << 2); 1696 for (ntries = 0; ntries < 20; ntries++) { 1697 val = IWN_READ(sc, IWN_EEPROM); 1698 if (val & IWN_EEPROM_READ_VALID) 1699 break; 1700 DELAY(5); 1701 } 1702 if (ntries == 20) { 1703 device_printf(sc->sc_dev, 1704 "timeout reading ROM at 0x%x\n", addr); 1705 return ETIMEDOUT; 1706 } 1707 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { 1708 /* OTPROM, check for ECC errors. */ 1709 tmp = IWN_READ(sc, IWN_OTP_GP); 1710 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { 1711 device_printf(sc->sc_dev, 1712 "OTPROM ECC error at 0x%x\n", addr); 1713 return EIO; 1714 } 1715 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { 1716 /* Correctable ECC error, clear bit. 
*/ 1717 IWN_SETBITS(sc, IWN_OTP_GP, 1718 IWN_OTP_GP_ECC_CORR_STTS); 1719 } 1720 } 1721 *out++ = val >> 16; 1722 if (count > 1) 1723 *out++ = val >> 24; 1724 } 1725 1726 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 1727 1728 return 0; 1729} 1730 1731static void 1732iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1733{ 1734 if (error != 0) 1735 return; 1736 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 1737 *(bus_addr_t *)arg = segs[0].ds_addr; 1738} 1739 1740static int 1741iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, 1742 void **kvap, bus_size_t size, bus_size_t alignment) 1743{ 1744 int error; 1745 1746 dma->tag = NULL; 1747 dma->size = size; 1748 1749 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 1750 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1751 1, size, 0, NULL, NULL, &dma->tag); 1752 if (error != 0) 1753 goto fail; 1754 1755 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 1756 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 1757 if (error != 0) 1758 goto fail; 1759 1760 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 1761 iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 1762 if (error != 0) 1763 goto fail; 1764 1765 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 1766 1767 if (kvap != NULL) 1768 *kvap = dma->vaddr; 1769 1770 return 0; 1771 1772fail: iwn_dma_contig_free(dma); 1773 return error; 1774} 1775 1776static void 1777iwn_dma_contig_free(struct iwn_dma_info *dma) 1778{ 1779 if (dma->vaddr != NULL) { 1780 bus_dmamap_sync(dma->tag, dma->map, 1781 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1782 bus_dmamap_unload(dma->tag, dma->map); 1783 bus_dmamem_free(dma->tag, dma->vaddr, dma->map); 1784 dma->vaddr = NULL; 1785 } 1786 if (dma->tag != NULL) { 1787 bus_dma_tag_destroy(dma->tag); 1788 dma->tag = NULL; 1789 } 1790} 1791 1792static int 1793iwn_alloc_sched(struct iwn_softc *sc) 1794{ 
1795 /* TX scheduler rings must be aligned on a 1KB boundary. */ 1796 return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, 1797 sc->schedsz, 1024); 1798} 1799 1800static void 1801iwn_free_sched(struct iwn_softc *sc) 1802{ 1803 iwn_dma_contig_free(&sc->sched_dma); 1804} 1805 1806static int 1807iwn_alloc_kw(struct iwn_softc *sc) 1808{ 1809 /* "Keep Warm" page must be aligned on a 4KB boundary. */ 1810 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); 1811} 1812 1813static void 1814iwn_free_kw(struct iwn_softc *sc) 1815{ 1816 iwn_dma_contig_free(&sc->kw_dma); 1817} 1818 1819static int 1820iwn_alloc_ict(struct iwn_softc *sc) 1821{ 1822 /* ICT table must be aligned on a 4KB boundary. */ 1823 return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, 1824 IWN_ICT_SIZE, 4096); 1825} 1826 1827static void 1828iwn_free_ict(struct iwn_softc *sc) 1829{ 1830 iwn_dma_contig_free(&sc->ict_dma); 1831} 1832 1833static int 1834iwn_alloc_fwmem(struct iwn_softc *sc) 1835{ 1836 /* Must be aligned on a 16-byte boundary. */ 1837 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); 1838} 1839 1840static void 1841iwn_free_fwmem(struct iwn_softc *sc) 1842{ 1843 iwn_dma_contig_free(&sc->fw_dma); 1844} 1845 1846static int 1847iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1848{ 1849 bus_size_t size; 1850 int i, error; 1851 1852 ring->cur = 0; 1853 1854 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 1855 1856 /* Allocate RX descriptors (256-byte aligned). */ 1857 size = IWN_RX_RING_COUNT * sizeof (uint32_t); 1858 error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1859 size, 256); 1860 if (error != 0) { 1861 device_printf(sc->sc_dev, 1862 "%s: could not allocate RX ring DMA memory, error %d\n", 1863 __func__, error); 1864 goto fail; 1865 } 1866 1867 /* Allocate RX status area (16-byte aligned). 
*/ 1868 error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, 1869 sizeof (struct iwn_rx_status), 16); 1870 if (error != 0) { 1871 device_printf(sc->sc_dev, 1872 "%s: could not allocate RX status DMA memory, error %d\n", 1873 __func__, error); 1874 goto fail; 1875 } 1876 1877 /* Create RX buffer DMA tag. */ 1878 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1879 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1880 IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat); 1881 if (error != 0) { 1882 device_printf(sc->sc_dev, 1883 "%s: could not create RX buf DMA tag, error %d\n", 1884 __func__, error); 1885 goto fail; 1886 } 1887 1888 /* 1889 * Allocate and map RX buffers. 1890 */ 1891 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1892 struct iwn_rx_data *data = &ring->data[i]; 1893 bus_addr_t paddr; 1894 1895 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1896 if (error != 0) { 1897 device_printf(sc->sc_dev, 1898 "%s: could not create RX buf DMA map, error %d\n", 1899 __func__, error); 1900 goto fail; 1901 } 1902 1903 data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 1904 IWN_RBUF_SIZE); 1905 if (data->m == NULL) { 1906 device_printf(sc->sc_dev, 1907 "%s: could not allocate RX mbuf\n", __func__); 1908 error = ENOBUFS; 1909 goto fail; 1910 } 1911 1912 error = bus_dmamap_load(ring->data_dmat, data->map, 1913 mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, 1914 &paddr, BUS_DMA_NOWAIT); 1915 if (error != 0 && error != EFBIG) { 1916 device_printf(sc->sc_dev, 1917 "%s: can't map mbuf, error %d\n", __func__, 1918 error); 1919 goto fail; 1920 } 1921 1922 bus_dmamap_sync(ring->data_dmat, data->map, 1923 BUS_DMASYNC_PREREAD); 1924 1925 /* Set physical address of RX buffer (256-byte aligned). 
*/ 1926 ring->desc[i] = htole32(paddr >> 8); 1927 } 1928 1929 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1930 BUS_DMASYNC_PREWRITE); 1931 1932 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 1933 1934 return 0; 1935 1936fail: iwn_free_rx_ring(sc, ring); 1937 1938 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); 1939 1940 return error; 1941} 1942 1943static void 1944iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1945{ 1946 int ntries; 1947 1948 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 1949 1950 if (iwn_nic_lock(sc) == 0) { 1951 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); 1952 for (ntries = 0; ntries < 1000; ntries++) { 1953 if (IWN_READ(sc, IWN_FH_RX_STATUS) & 1954 IWN_FH_RX_STATUS_IDLE) 1955 break; 1956 DELAY(10); 1957 } 1958 iwn_nic_unlock(sc); 1959 } 1960 ring->cur = 0; 1961 sc->last_rx_valid = 0; 1962} 1963 1964static void 1965iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) 1966{ 1967 int i; 1968 1969 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); 1970 1971 iwn_dma_contig_free(&ring->desc_dma); 1972 iwn_dma_contig_free(&ring->stat_dma); 1973 1974 for (i = 0; i < IWN_RX_RING_COUNT; i++) { 1975 struct iwn_rx_data *data = &ring->data[i]; 1976 1977 if (data->m != NULL) { 1978 bus_dmamap_sync(ring->data_dmat, data->map, 1979 BUS_DMASYNC_POSTREAD); 1980 bus_dmamap_unload(ring->data_dmat, data->map); 1981 m_freem(data->m); 1982 data->m = NULL; 1983 } 1984 if (data->map != NULL) 1985 bus_dmamap_destroy(ring->data_dmat, data->map); 1986 } 1987 if (ring->data_dmat != NULL) { 1988 bus_dma_tag_destroy(ring->data_dmat); 1989 ring->data_dmat = NULL; 1990 } 1991} 1992 1993static int 1994iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) 1995{ 1996 bus_addr_t paddr; 1997 bus_size_t size; 1998 int i, error; 1999 2000 ring->qid = qid; 2001 ring->queued = 0; 2002 ring->cur = 0; 2003 2004 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2005 2006 /* Allocate TX 
descriptors (256-byte aligned). */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
	error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
	    size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* TX commands only need 4-byte alignment. */
	size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
	error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
	    size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate TX cmd DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * DMA tag for frame payloads: up to MCLBYTES per frame, spread
	 * over at most IWN_MAX_SCATTER - 1 segments.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    IWN_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create TX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Precompute the physical address of each slot's command and
	 * scratch area within the contiguous command buffer.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		/* NOTE(review): 12 is presumably the offset of the scratch
		 * area inside struct iwn_tx_cmd — confirm in if_iwnreg.h. */
		data->scratch_paddr = paddr + 12;
		paddr += sizeof (struct iwn_tx_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create TX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;

fail:	iwn_free_tx_ring(sc, ring);
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
	return error;
}

/*
 * Reset a TX ring to an empty state: release queued mbufs and node
 * references, clear the descriptors and drop the queue-full bit for
 * this ring.  The per-slot DMA maps are kept for reuse.
 */
static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->ni != NULL) {
			ieee80211_free_node(data->ni);
			data->ni = NULL;
		}
		data->remapped = 0;
		data->long_retries = 0;
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

/*
 * Free all DMA resources of a TX ring: descriptor and command buffers,
 * any still-loaded mbufs, the per-slot DMA maps and the data DMA tag.
 */
static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);

	iwn_dma_contig_free(&ring->desc_dma);
	iwn_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWN_TX_RING_COUNT; i++) {
		struct iwn_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(ring->data_dmat, data->map);
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * Post-completion housekeeping for TX queue 'qid': on aggregation
 * queues, tear down an A-MPDU session that is no longer running once
 * its queue has drained; below the low watermark, clear the queue-full
 * bit and rearm or stop the TX watchdog timer.
 */
static void
iwn_check_tx_ring(struct iwn_softc *sc, int qid)
{
	struct iwn_tx_ring *ring = &sc->txq[qid];

	KASSERT(ring->queued >= 0, ("%s: ring->queued (%d) for queue %d < 0!",
	    __func__, ring->queued, qid));

	if (qid >= sc->firstaggqueue) {
		struct iwn_ops *ops = &sc->ops;
		struct ieee80211_tx_ampdu *tap = sc->qid2tap[qid];

		/*
		 * Queue drained and session not running: stop the
		 * firmware-side A-MPDU TX machinery and release the
		 * queue-to-tap mapping.
		 */
		if (ring->queued == 0 && !IEEE80211_AMPDU_RUNNING(tap)) {
			uint16_t ssn = tap->txa_start & 0xfff;
			uint8_t tid = tap->txa_tid;
			int *res = tap->txa_private;

			iwn_nic_lock(sc);
			ops->ampdu_tx_stop(sc, qid, tid, ssn);
			iwn_nic_unlock(sc);

			sc->qid2tap[qid] = NULL;
			free(res, M_DEVBUF);
		}
	}

	if (ring->queued < IWN_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << qid);

		/* Stop the watchdog once empty, otherwise rearm it. */
		if (ring->queued == 0)
			sc->sc_tx_timer = 0;
		else
			sc->sc_tx_timer = 5;
	}
}

/*
 * Reset and re-enable the ICT (interrupt) table used by 5000-series
 * and later NICs, then switch the driver to ICT interrupt mode.
 * Interrupts are masked for the duration of the reset.
 */
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);

	/* Reset ICT table. */
	memset(sc->ict, 0, IWN_ICT_SIZE);
	sc->ict_cur = 0;

	bus_dmamap_sync(sc->ict_dma.tag, sc->ict_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Set physical address of ICT table (4KB aligned). */
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
	IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
	    IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);

	/* Enable periodic RX interrupt. */
	sc->int_mask |= IWN_INT_RX_PERIODIC;
	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWN_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}

/*
 * Read the adapter's EEPROM/OTPROM: SKU capabilities, radio
 * configuration (TX/RX chain masks), MAC address, and chip-specific
 * data via ops->read_eeprom().  The adapter is powered on for the
 * duration of the access and powered off again before returning.
 * Returns 0 on success or an errno on failure.
 */
static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
	struct iwn_ops *ops = &sc->ops;
	uint16_t val;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check whether adapter has an EEPROM or an OTPROM. */
	if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
	    (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
		sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
	    (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");

	/* Adapter has to be powered on for EEPROM access to work.
 */
	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
		device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
		return EIO;
	}
	if ((error = iwn_eeprom_lock(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
		if ((error = iwn_init_otprom(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not initialize OTPROM, error %d\n",
			    __func__, error);
			return error;
		}
	}

	iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
	DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
	/* Check if HT support is bonded out. */
	if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
		sc->sc_flags |= IWN_FLAG_HAS_11N;

	iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
	sc->rfcfg = le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
	/* Read Tx/Rx chains from ROM unless it's known to be broken. */
	if (sc->txchainmask == 0)
		sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
	if (sc->rxchainmask == 0)
		sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);

	/* Read MAC address. */
	iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);

	/* Read adapter-specific information from EEPROM. */
	ops->read_eeprom(sc);

	iwn_apm_stop(sc);	/* Power OFF adapter. */

	iwn_eeprom_unlock(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

	return 0;
}

/*
 * 4965-specific EEPROM parsing: regulatory domain, channel lists,
 * per-band maximum TX power, TX power calibration samples and the
 * voltage at which those samples were taken.
 */
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
	uint32_t addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz & 40MHz). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = iwn4965_regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
	sc->maxpwr2GHz = val & 0xff;
	sc->maxpwr5GHz = val >> 8;
	/* Check that EEPROM values are within valid range. */
	if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
		sc->maxpwr5GHz = 38;
	if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
		sc->maxpwr2GHz = 38;
	DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
	    sc->maxpwr2GHz, sc->maxpwr5GHz);

	/* Read samples for each TX power group. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
	    sizeof sc->bands);

	/* Read voltage at which samples were taken. */
	iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
	sc->eeprom_voltage = (int16_t)le16toh(val);
	DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
	    sc->eeprom_voltage);

#ifdef IWN_DEBUG
	/* Print samples. */
	if (sc->sc_debug & IWN_DEBUG_ANY) {
		for (i = 0; i < IWN_NBANDS - 1; i++)
			iwn4965_print_power_group(sc, i);
	}
#endif

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}

#ifdef IWN_DEBUG
/*
 * Dump the TX power calibration samples of EEPROM band group 'i'
 * (both channels, both chains) to the console.  Debug builds only.
 */
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
	struct iwn4965_eeprom_band *band = &sc->bands[i];
	struct iwn4965_eeprom_chan_samples *chans = band->chans;
	int j, c;

	printf("===band %d===\n", i);
	printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
	printf("chan1 num=%d\n", chans[0].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[0].samples[c][j].temp,
			    chans[0].samples[c][j].gain,
			    chans[0].samples[c][j].power,
			    chans[0].samples[c][j].pa_det);
		}
	}
	printf("chan2 num=%d\n", chans[1].num);
	for (c = 0; c < 2; c++) {
		for (j = 0; j < IWN_NSAMPLES; j++) {
			printf("chain %d, sample %d: temp=%d gain=%d "
			    "power=%d pa_det=%d\n", c, j,
			    chans[1].samples[c][j].temp,
			    chans[1].samples[c][j].gain,
			    chans[1].samples[c][j].power,
			    chans[1].samples[c][j].pa_det);
		}
	}
}
#endif

/*
 * 5000-series (and later) EEPROM parsing: regulatory domain and
 * channel lists, optional enhanced TX power data (6000 Series),
 * calibration header, and either a temperature offset (5150) or the
 * crystal calibration values.
 */
static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
	struct iwn5000_eeprom_calib_hdr hdr;
	int32_t volt;
	uint32_t base, addr;
	uint16_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Read regulatory domain (4 ASCII characters). */
	iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
	    sc->eeprom_domain, 4);

	/* Read the list of authorized channels (20MHz & 40MHz). */
	for (i = 0; i < IWN_NBANDS - 1; i++) {
		addr = base + sc->base_params->regulatory_bands[i];
		iwn_read_eeprom_channels(sc, i, addr);
	}

	/* Read enhanced TX power information for 6000 Series. */
	if (sc->base_params->enhanced_TX_power)
		iwn_read_eeprom_enhinfo(sc);

	iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
	base = le16toh(val);
	iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: calib version=%u pa type=%u voltage=%u\n", __func__,
	    hdr.version, hdr.pa_type, le16toh(hdr.volt));
	sc->calib_ver = hdr.version;

	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		sc->eeprom_voltage = le16toh(hdr.volt);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp_high=le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		sc->eeprom_temp = le16toh(val);
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/* Compute temperature offset. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
		sc->eeprom_temp = le16toh(val);
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
		volt = le16toh(val);
		sc->temp_off = sc->eeprom_temp - (volt / -5);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
		    sc->eeprom_temp, volt, sc->temp_off);
	} else {
		/* Read crystal calibration. */
		iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
		    &sc->eeprom_crystal, sizeof (uint32_t));
		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
		    le32toh(sc->eeprom_crystal));
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * Translate EEPROM flags to net80211.
2412 */ 2413static uint32_t 2414iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) 2415{ 2416 uint32_t nflags; 2417 2418 nflags = 0; 2419 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) 2420 nflags |= IEEE80211_CHAN_PASSIVE; 2421 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) 2422 nflags |= IEEE80211_CHAN_NOADHOC; 2423 if (channel->flags & IWN_EEPROM_CHAN_RADAR) { 2424 nflags |= IEEE80211_CHAN_DFS; 2425 /* XXX apparently IBSS may still be marked */ 2426 nflags |= IEEE80211_CHAN_NOADHOC; 2427 } 2428 2429 return nflags; 2430} 2431 2432static void 2433iwn_read_eeprom_band(struct iwn_softc *sc, int n, int maxchans, int *nchans, 2434 struct ieee80211_channel chans[]) 2435{ 2436 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 2437 const struct iwn_chan_band *band = &iwn_bands[n]; 2438 uint8_t bands[IEEE80211_MODE_BYTES]; 2439 uint8_t chan; 2440 int i, error, nflags; 2441 2442 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2443 2444 memset(bands, 0, sizeof(bands)); 2445 if (n == 0) { 2446 setbit(bands, IEEE80211_MODE_11B); 2447 setbit(bands, IEEE80211_MODE_11G); 2448 if (sc->sc_flags & IWN_FLAG_HAS_11N) 2449 setbit(bands, IEEE80211_MODE_11NG); 2450 } else { 2451 setbit(bands, IEEE80211_MODE_11A); 2452 if (sc->sc_flags & IWN_FLAG_HAS_11N) 2453 setbit(bands, IEEE80211_MODE_11NA); 2454 } 2455 2456 for (i = 0; i < band->nchan; i++) { 2457 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 2458 DPRINTF(sc, IWN_DEBUG_RESET, 2459 "skip chan %d flags 0x%x maxpwr %d\n", 2460 band->chan[i], channels[i].flags, 2461 channels[i].maxpwr); 2462 continue; 2463 } 2464 2465 chan = band->chan[i]; 2466 nflags = iwn_eeprom_channel_flags(&channels[i]); 2467 error = ieee80211_add_channel(chans, maxchans, nchans, 2468 chan, 0, channels[i].maxpwr, nflags, bands); 2469 if (error != 0) 2470 break; 2471 2472 /* Save maximum allowed TX power for this channel. 
*/ 2473 /* XXX wrong */ 2474 sc->maxpwr[chan] = channels[i].maxpwr; 2475 2476 DPRINTF(sc, IWN_DEBUG_RESET, 2477 "add chan %d flags 0x%x maxpwr %d\n", chan, 2478 channels[i].flags, channels[i].maxpwr); 2479 } 2480 2481 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2482 2483} 2484 2485static void 2486iwn_read_eeprom_ht40(struct iwn_softc *sc, int n, int maxchans, int *nchans, 2487 struct ieee80211_channel chans[]) 2488{ 2489 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; 2490 const struct iwn_chan_band *band = &iwn_bands[n]; 2491 uint8_t chan; 2492 int i, error, nflags; 2493 2494 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__); 2495 2496 if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) { 2497 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__); 2498 return; 2499 } 2500 2501 for (i = 0; i < band->nchan; i++) { 2502 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { 2503 DPRINTF(sc, IWN_DEBUG_RESET, 2504 "skip chan %d flags 0x%x maxpwr %d\n", 2505 band->chan[i], channels[i].flags, 2506 channels[i].maxpwr); 2507 continue; 2508 } 2509 2510 chan = band->chan[i]; 2511 nflags = iwn_eeprom_channel_flags(&channels[i]); 2512 nflags |= (n == 5 ? 
IEEE80211_CHAN_G : IEEE80211_CHAN_A); 2513 error = ieee80211_add_channel_ht40(chans, maxchans, nchans, 2514 chan, channels[i].maxpwr, nflags); 2515 switch (error) { 2516 case EINVAL: 2517 device_printf(sc->sc_dev, 2518 "%s: no entry for channel %d\n", __func__, chan); 2519 continue; 2520 case ENOENT: 2521 DPRINTF(sc, IWN_DEBUG_RESET, 2522 "%s: skip chan %d, extension channel not found\n", 2523 __func__, chan); 2524 continue; 2525 case ENOBUFS: 2526 device_printf(sc->sc_dev, 2527 "%s: channel table is full!\n", __func__); 2528 break; 2529 case 0: 2530 DPRINTF(sc, IWN_DEBUG_RESET, 2531 "add ht40 chan %d flags 0x%x maxpwr %d\n", 2532 chan, channels[i].flags, channels[i].maxpwr); 2533 /* FALLTHROUGH */ 2534 default: 2535 break; 2536 } 2537 } 2538 2539 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); 2540 2541} 2542 2543static void 2544iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) 2545{ 2546 struct ieee80211com *ic = &sc->sc_ic; 2547 2548 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], 2549 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); 2550 2551 if (n < 5) { 2552 iwn_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, 2553 ic->ic_channels); 2554 } else { 2555 iwn_read_eeprom_ht40(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, 2556 ic->ic_channels); 2557 } 2558 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); 2559} 2560 2561static struct iwn_eeprom_chan * 2562iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) 2563{ 2564 int band, chan, i, j; 2565 2566 if (IEEE80211_IS_CHAN_HT40(c)) { 2567 band = IEEE80211_IS_CHAN_5GHZ(c) ? 
6 : 5; 2568 if (IEEE80211_IS_CHAN_HT40D(c)) 2569 chan = c->ic_extieee; 2570 else 2571 chan = c->ic_ieee; 2572 for (i = 0; i < iwn_bands[band].nchan; i++) { 2573 if (iwn_bands[band].chan[i] == chan) 2574 return &sc->eeprom_channels[band][i]; 2575 } 2576 } else { 2577 for (j = 0; j < 5; j++) { 2578 for (i = 0; i < iwn_bands[j].nchan; i++) { 2579 if (iwn_bands[j].chan[i] == c->ic_ieee && 2580 ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1) 2581 return &sc->eeprom_channels[j][i]; 2582 } 2583 } 2584 } 2585 return NULL; 2586} 2587 2588static void 2589iwn_getradiocaps(struct ieee80211com *ic, 2590 int maxchans, int *nchans, struct ieee80211_channel chans[]) 2591{ 2592 struct iwn_softc *sc = ic->ic_softc; 2593 int i; 2594 2595 /* Parse the list of authorized channels. */ 2596 for (i = 0; i < 5 && *nchans < maxchans; i++) 2597 iwn_read_eeprom_band(sc, i, maxchans, nchans, chans); 2598 for (i = 5; i < IWN_NBANDS - 1 && *nchans < maxchans; i++) 2599 iwn_read_eeprom_ht40(sc, i, maxchans, nchans, chans); 2600} 2601 2602/* 2603 * Enforce flags read from EEPROM. 
2604 */ 2605static int 2606iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, 2607 int nchan, struct ieee80211_channel chans[]) 2608{ 2609 struct iwn_softc *sc = ic->ic_softc; 2610 int i; 2611 2612 for (i = 0; i < nchan; i++) { 2613 struct ieee80211_channel *c = &chans[i]; 2614 struct iwn_eeprom_chan *channel; 2615 2616 channel = iwn_find_eeprom_channel(sc, c); 2617 if (channel == NULL) { 2618 ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", 2619 __func__, c->ic_ieee, c->ic_freq, c->ic_flags); 2620 return EINVAL; 2621 } 2622 c->ic_flags |= iwn_eeprom_channel_flags(channel); 2623 } 2624 2625 return 0; 2626} 2627 2628static void 2629iwn_read_eeprom_enhinfo(struct iwn_softc *sc) 2630{ 2631 struct iwn_eeprom_enhinfo enhinfo[35]; 2632 struct ieee80211com *ic = &sc->sc_ic; 2633 struct ieee80211_channel *c; 2634 uint16_t val, base; 2635 int8_t maxpwr; 2636 uint8_t flags; 2637 int i, j; 2638 2639 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 2640 2641 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); 2642 base = le16toh(val); 2643 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, 2644 enhinfo, sizeof enhinfo); 2645 2646 for (i = 0; i < nitems(enhinfo); i++) { 2647 flags = enhinfo[i].flags; 2648 if (!(flags & IWN_ENHINFO_VALID)) 2649 continue; /* Skip invalid entries. 
 */

		/* Highest allowed power over the configured TX chains. */
		maxpwr = 0;
		if (sc->txchainmask & IWN_ANT_A)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
		if (sc->txchainmask & IWN_ANT_B)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
		if (sc->txchainmask & IWN_ANT_C)
			maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
		if (sc->ntxchains == 2)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
		else if (sc->ntxchains == 3)
			maxpwr = MAX(maxpwr, enhinfo[i].mimo3);

		for (j = 0; j < ic->ic_nchans; j++) {
			c = &ic->ic_channels[j];
			/* Match the entry's band/modulation flags. */
			if ((flags & IWN_ENHINFO_5GHZ)) {
				if (!IEEE80211_IS_CHAN_A(c))
					continue;
			} else if ((flags & IWN_ENHINFO_OFDM)) {
				if (!IEEE80211_IS_CHAN_G(c))
					continue;
			} else if (!IEEE80211_IS_CHAN_B(c))
				continue;
			/* Match the channel width (HT40 or not). */
			if ((flags & IWN_ENHINFO_HT40)) {
				if (!IEEE80211_IS_CHAN_HT40(c))
					continue;
			} else {
				if (IEEE80211_IS_CHAN_HT40(c))
					continue;
			}
			/* A zero channel number matches any channel. */
			if (enhinfo[i].chan != 0 &&
			    enhinfo[i].chan != c->ic_ieee)
				continue;

			/* NOTE(review): maxpwr appears to be in half-dBm
			 * (halved for ic_maxregpower) — confirm. */
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "channel %d(%x), maxpwr %d\n", c->ic_ieee,
			    c->ic_flags, maxpwr / 2);
			c->ic_maxregpower = maxpwr / 2;
			c->ic_maxpower = maxpwr;
		}
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);

}

/*
 * net80211 node allocation callback: allocate an iwn_node with the
 * firmware station id initialized to "undefined".
 */
static struct ieee80211_node *
iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwn_node *wn;

	wn = malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO);
	if (wn == NULL)
		return (NULL);

	wn->id = IWN_ID_UNDEFINED;

	return (&wn->ni);
}

/*
 * Map a legacy net80211 rate (in 500kb/s units) to the hardware PLCP
 * code; returns 0 for unknown rates.
 */
static __inline int
rate2plcp(int rate)
{
	switch (rate & 0xff) {
	/* OFDM rates. */
	case 12:	return 0xd;
	case 18:	return 0xf;
	case 24:	return 0x5;
	case 36:	return 0x7;
	case 48:	return 0x9;
	case 72:	return 0xb;
	case 96:	return 0x1;
	case 108:	return 0x3;
	/* CCK rates. */
	case 2:		return 10;
	case 4:		return 20;
	case 11:	return 55;
	case 22:	return 110;
	}
	return 0;
}

/*
 * Inverse of rate2plcp(): map a hardware PLCP code back to the
 * net80211 rate; returns 0 for unknown codes.
 */
static __inline uint8_t
plcp2rate(const uint8_t rate_plcp)
{
	switch (rate_plcp) {
	case 0xd:	return 12;
	case 0xf:	return 18;
	case 0x5:	return 24;
	case 0x7:	return 36;
	case 0x9:	return 48;
	case 0xb:	return 72;
	case 0x1:	return 96;
	case 0x3:	return 108;
	case 10:	return 2;
	case 20:	return 4;
	case 55:	return 11;
	case 110:	return 22;
	default:	return 0;
	}
}

/*
 * Antenna mask for single-stream transmission: the lowest-numbered
 * antenna of the configured TX chain mask.
 */
static int
iwn_get_1stream_tx_antmask(struct iwn_softc *sc)
{

	return IWN_LSB(sc->txchainmask);
}

/*
 * Antenna mask for two-stream transmission; see the comment below for
 * the per-NIC quirks.
 */
static int
iwn_get_2stream_tx_antmask(struct iwn_softc *sc)
{
	int tx;

	/*
	 * The '2 stream' setup is a bit .. odd.
	 *
	 * For NICs that support only 1 antenna, default to IWN_ANT_AB or
	 * the firmware panics (eg Intel 5100.)
	 *
	 * For NICs that support two antennas, we use ANT_AB.
	 *
	 * For NICs that support three antennas, we use the two that
	 * wasn't the default one.
	 *
	 * XXX TODO: if bluetooth (full concurrent) is enabled, restrict
	 * this to only one antenna.
	 */

	/* Default - transmit on the other antennas */
	tx = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));

	/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
	if (tx == 0)
		tx = IWN_ANT_AB;

	/*
	 * If the NIC is a two-stream TX NIC, configure the TX mask to
	 * the default chainmask
	 */
	else if (sc->ntxchains == 2)
		tx = sc->txchainmask;

	return (tx);
}



/*
 * Calculate the required PLCP value from the given rate,
 * to the given node.
 *
 * This will take the node configuration (eg 11n, rate table
 * setup, etc) into consideration.
 */
static uint32_t
iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
	struct ieee80211com *ic = ni->ni_ic;
	uint32_t plcp = 0;
	int ridx;

	/*
	 * If it's an MCS rate, let's set the plcp correctly
	 * and set the relevant flags based on the node config.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		/*
		 * Set the initial PLCP value to be between 0->31 for
		 * MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
		 * flag.
		 */
		plcp = IEEE80211_RV(rate) | IWN_RFLAG_MCS;

		/*
		 * XXX the following should only occur if both
		 * the local configuration _and_ the remote node
		 * advertise these capabilities. Thus this code
		 * may need fixing!
		 */

		/*
		 * Set the channel width and guard interval.
		 */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
			plcp |= IWN_RFLAG_HT40;
			if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
				plcp |= IWN_RFLAG_SGI;
		} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
			plcp |= IWN_RFLAG_SGI;
		}

		/*
		 * Ensure the selected rate matches the link quality
		 * table entries being used.
		 */
		if (rate > 0x8f)
			plcp |= IWN_RFLAG_ANT(sc->txchainmask);
		else if (rate > 0x87)
			plcp |= IWN_RFLAG_ANT(iwn_get_2stream_tx_antmask(sc));
		else
			plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
	} else {
		/*
		 * Set the initial PLCP - fine for both
		 * OFDM and CCK rates.
		 */
		plcp = rate2plcp(rate);

		/* Set CCK flag if it's CCK */

		/*
		 * XXX It would be nice to have a method
		 * to map the ridx -> phy table entry
		 * so we could just query that, rather than
		 * this hack to check against IWN_RIDX_OFDM6.
		 */
		ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
		    rate & IEEE80211_RATE_VAL);
		if (ridx < IWN_RIDX_OFDM6 &&
		    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
			plcp |= IWN_RFLAG_CCK;

		/* Set antenna configuration */
		/* XXX TODO: is this the right antenna to use for legacy? */
		plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
	}

	DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
	    __func__,
	    rate,
	    plcp);

	/* Returned in little-endian, ready for the firmware. */
	return (htole32(plcp));
}

static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
	/* Doesn't do anything at the moment */
}

/*
 * net80211 VAP state-change handler.  Performs the device-side work
 * for AUTH/RUN/INIT transitions under the driver lock (with the
 * net80211 lock temporarily dropped), then chains to net80211's own
 * newstate handler on success.
 */
static int
iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwn_vap *ivp = IWN_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwn_softc *sc = ic->ic_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);

	IEEE80211_UNLOCK(ic);
	IWN_LOCK(sc);
	callout_stop(&sc->calib_to);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	switch (nstate) {
	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		if (vap->iv_state == IEEE80211_S_AUTH)
			break;

		/*
		 * !AUTH -> AUTH transition requires state reset to handle
		 * reassociations correctly.
		 */
		sc->rxon->associd = 0;
		sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
		sc->calib.state = IWN_CALIB_STATE_INIT;

		/* Wait until we hear a beacon before we transmit */
		if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan))
			sc->sc_beacon_wait = 1;

		if ((error = iwn_auth(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state\n", __func__);
		}
		break;

	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition; Just restart the timers.
		 */
		if (vap->iv_state == IEEE80211_S_RUN) {
			sc->calib_cnt = 0;
			break;
		}

		/* Wait until we hear a beacon before we transmit */
		if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan))
			sc->sc_beacon_wait = 1;

		/*
		 * !RUN -> RUN requires setting the association id
		 * which is done with a firmware cmd. We also defer
		 * starting the timers until that work is done.
		 */
		if ((error = iwn_run(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to run state\n", __func__);
		}
		break;

	case IEEE80211_S_INIT:
		sc->calib.state = IWN_CALIB_STATE_INIT;
		/*
		 * Purge the xmit queue so we don't have old frames
		 * during a new association attempt.
		 */
		sc->sc_beacon_wait = 0;
		iwn_xmit_queue_drain(sc);
		break;

	default:
		break;
	}
	IWN_UNLOCK(sc);
	IEEE80211_LOCK(ic);
	if (error != 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ivp->iv_newstate(vap, nstate, arg);
}

/*
 * Periodic calibration callout, rearmed every 500ms while running.
 * Must be called with the driver lock held.
 */
static void
iwn_calib_timeout(void *arg)
{
	struct iwn_softc *sc = arg;

	IWN_LOCK_ASSERT(sc);

	/* Force automatic TX power calibration every 60 secs.
 */
	if (++sc->calib_cnt >= 120) {
		uint32_t flags = 0;

		DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
		    "sending request for statistics");
		(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
		    sizeof flags, 1);
		sc->calib_cnt = 0;
	}
	/* Rearm: 120 invocations at 500ms each is the 60s period above. */
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);
}

/*
 * Process an RX_PHY firmware notification. This is usually immediately
 * followed by an MPDU_RX_DONE notification.
 */
static void
iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc)
{
	struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);

	/* Save RX statistics, they will be used on MPDU_RX_DONE. */
	memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
	sc->last_rx_valid = 1;
}

/*
 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
 */
static void
iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
    struct iwn_rx_data *data)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwn_rx_ring *ring = &sc->rxq;
	struct ieee80211_frame_min *wh;
	struct ieee80211_node *ni;
	struct mbuf *m, *m1;
	struct iwn_rx_stat *stat;
	caddr_t head;
	bus_addr_t paddr;
	uint32_t flags;
	int error, len, rssi, nf;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	if (desc->type == IWN_MPDU_RX_DONE) {
		/* Check for prior RX_PHY notification. */
		if (!sc->last_rx_valid) {
			DPRINTF(sc, IWN_DEBUG_ANY,
			    "%s: missing RX_PHY\n", __func__);
			return;
		}
		stat = &sc->last_rx_stat;
	} else
		stat = (struct iwn_rx_stat *)(desc + 1);

	if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
		device_printf(sc->sc_dev,
		    "%s: invalid RX statistic header, len %d\n", __func__,
		    stat->cfg_phy_len);
		return;
	}
	/* Locate the frame body and length within the notification. */
	if (desc->type == IWN_MPDU_RX_DONE) {
		struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
		head = (caddr_t)(mpdu + 1);
		len = le16toh(mpdu->len);
	} else {
		head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
		len = le16toh(stat->len);
	}

	/* RX status flags follow immediately after the frame body. */
	flags = le32toh(*(uint32_t *)(head + len));

	/* Discard frames with a bad FCS early. */
	if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
		DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
		    __func__, flags);
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}
	/* Discard frames that are too short. */
	if (len < sizeof (struct ieee80211_frame_ack)) {
		DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
		    __func__, len);
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}

	/* Allocate a replacement mbuf before giving up the current one. */
	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
	if (m1 == NULL) {
		DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
		    __func__);
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}
	bus_dmamap_unload(ring->data_dmat, data->map);

	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
	    IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		device_printf(sc->sc_dev,
		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
		m_freem(m1);

		/* Try to reload the old mbuf. */
		error = bus_dmamap_load(ring->data_dmat, data->map,
		    mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
		    &paddr, BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG) {
			panic("%s: could not load old RX mbuf", __func__);
		}
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_PREREAD);
		/* Physical address may have changed. */
		ring->desc[ring->cur] = htole32(paddr >> 8);
		bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
		    BUS_DMASYNC_PREWRITE);
		counter_u64_add(ic->ic_ierrors, 1);
		return;
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREREAD);

	/* Swap the received mbuf for the freshly-loaded replacement. */
	m = data->m;
	data->m = m1;
	/* Update RX descriptor. */
	ring->desc[ring->cur] = htole32(paddr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Finalize mbuf. */
	m->m_data = head;
	m->m_pkthdr.len = m->m_len = len;

	/* Grab a reference to the source node. */
	wh = mtod(m, struct ieee80211_frame_min *);
	if (len >= sizeof(struct ieee80211_frame_min))
		ni = ieee80211_find_rxnode(ic, wh);
	else
		ni = NULL;
	/* Use the measured noise floor when associated and not scanning. */
	nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
	    (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;

	rssi = ops->get_rssi(sc, stat);

	if (ieee80211_radiotap_active(ic)) {
		struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
		uint32_t rate = le32toh(stat->rate);

		tap->wr_flags = 0;
		if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)nf;
		tap->wr_tsft = stat->tstamp;
		if (rate & IWN_RFLAG_MCS) {
			tap->wr_rate = rate & IWN_RFLAG_RATE_MCS;
			tap->wr_rate |= IEEE80211_RATE_MCS;
		} else
			tap->wr_rate = plcp2rate(rate & IWN_RFLAG_RATE);
	}

	/*
	 * If it's a beacon and we're waiting, then do the
	 * wakeup. This should unblock raw_xmit/start.
	 */
	if (sc->sc_beacon_wait) {
		uint8_t type, subtype;
		/* NB: Re-assign wh */
		wh = mtod(m, struct ieee80211_frame_min *);
		type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		/*
		 * This assumes at this point we've received our own
		 * beacon.
		 */
		DPRINTF(sc, IWN_DEBUG_TRACE,
		    "%s: beacon_wait, type=%d, subtype=%d\n",
		    __func__, type, subtype);
		if (type == IEEE80211_FC0_TYPE_MGT &&
		    subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
			DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
			    "%s: waking things up\n", __func__);
			/* queue taskqueue to transmit! */
			taskqueue_enqueue(sc->sc_tq, &sc->sc_xmit_task);
		}
	}

	/* Drop the driver lock across the net80211 input path. */
	IWN_UNLOCK(sc);

	/* Send the frame to the 802.11 layer. */
	if (ni != NULL) {
		if (ni->ni_flags & IEEE80211_NODE_HT)
			m->m_flags |= M_AMPDU;
		(void)ieee80211_input(ni, m, rssi - nf, nf);
		/* Node is no longer needed. */
		ieee80211_free_node(ni);
	} else
		(void)ieee80211_input_all(ic, m, rssi - nf, nf);

	IWN_LOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * Complete one frame of an A-MPDU TX attempt: unmap and free the mbuf,
 * report the outcome to the rate control module, and hand the frame
 * back to net80211.
 */
static void
iwn_agg_tx_complete(struct iwn_softc *sc, struct iwn_tx_ring *ring, int tid,
    int idx, int success)
{
	struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
	struct iwn_tx_data *data = &ring->data[idx];
	struct iwn_node *wn;
	struct mbuf *m;
	struct ieee80211_node *ni;

	KASSERT(data->ni != NULL, ("idx %d: no node", idx));
	KASSERT(data->m != NULL, ("idx %d: no mbuf", idx));

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, data->map);
	m = data->m, data->m = NULL;
	ni = data->ni, data->ni = NULL;
	wn = (void *)ni;

#if 0
	/* XXX causes significant performance degradation. */
	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
	    IEEE80211_RATECTL_STATUS_LONG_RETRY;
	txs->long_retries = data->long_retries - 1;
#else
	txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY;
#endif
	txs->short_retries = wn->agg[tid].short_retries;
	if (success)
		txs->status = IEEE80211_RATECTL_TX_SUCCESS;
	else
		txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;

	wn->agg[tid].short_retries = 0;
	data->long_retries = 0;

	DPRINTF(sc, IWN_DEBUG_AMPDU, "%s: freeing m %p ni %p idx %d qid %d\n",
	    __func__, m, ni, idx, ring->qid);
	ieee80211_ratectl_tx_complete(ni, txs);
	ieee80211_tx_complete(ni, m, !success);
}

/* Process an incoming Compressed BlockAck.
*/ 3251static void 3252iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc) 3253{ 3254 struct iwn_tx_ring *ring; 3255 struct iwn_tx_data *data; 3256 struct iwn_node *wn; 3257 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); 3258 struct ieee80211_tx_ampdu *tap; 3259 uint64_t bitmap; 3260 uint8_t tid; 3261 int i, qid, shift; 3262 int tx_ok = 0; 3263 3264 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3265 3266 qid = le16toh(ba->qid); 3267 tap = sc->qid2tap[qid]; 3268 ring = &sc->txq[qid]; 3269 tid = tap->txa_tid; 3270 wn = (void *)tap->txa_ni; 3271 3272 DPRINTF(sc, IWN_DEBUG_AMPDU, "%s: qid %d tid %d seq %04X ssn %04X\n" 3273 "bitmap: ba %016jX wn %016jX, start %d\n", 3274 __func__, qid, tid, le16toh(ba->seq), le16toh(ba->ssn), 3275 (uintmax_t)le64toh(ba->bitmap), (uintmax_t)wn->agg[tid].bitmap, 3276 wn->agg[tid].startidx); 3277 3278 if (wn->agg[tid].bitmap == 0) 3279 return; 3280 3281 shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); 3282 if (shift <= -64) 3283 shift += 0x100; 3284 3285 /* 3286 * Walk the bitmap and calculate how many successful attempts 3287 * are made. 3288 * 3289 * Yes, the rate control code doesn't know these are A-MPDU 3290 * subframes; due to that long_retries stats are not used here. 3291 */ 3292 bitmap = le64toh(ba->bitmap); 3293 if (shift >= 0) 3294 bitmap >>= shift; 3295 else 3296 bitmap <<= -shift; 3297 bitmap &= wn->agg[tid].bitmap; 3298 wn->agg[tid].bitmap = 0; 3299 3300 for (i = wn->agg[tid].startidx; 3301 bitmap; 3302 bitmap >>= 1, i = (i + 1) % IWN_TX_RING_COUNT) { 3303 if ((bitmap & 1) == 0) 3304 continue; 3305 3306 data = &ring->data[i]; 3307 if (__predict_false(data->m == NULL)) { 3308 /* 3309 * There is no frame; skip this entry. 3310 * 3311 * NB: it is "ok" to have both 3312 * 'tx done' + 'compressed BA' replies for frame 3313 * with STATE_SCD_QUERY status. 
3314 */ 3315 DPRINTF(sc, IWN_DEBUG_AMPDU, 3316 "%s: ring %d: no entry %d\n", __func__, qid, i); 3317 continue; 3318 } 3319 3320 tx_ok++; 3321 iwn_agg_tx_complete(sc, ring, tid, i, 1); 3322 } 3323 3324 ring->queued -= tx_ok; 3325 iwn_check_tx_ring(sc, qid); 3326 3327 DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_AMPDU, 3328 "->%s: end; %d ok\n",__func__, tx_ok); 3329} 3330 3331/* 3332 * Process a CALIBRATION_RESULT notification sent by the initialization 3333 * firmware on response to a CMD_CALIB_CONFIG command (5000 only). 3334 */ 3335static void 3336iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc) 3337{ 3338 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); 3339 int len, idx = -1; 3340 3341 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3342 3343 /* Runtime firmware should not send such a notification. */ 3344 if (sc->sc_flags & IWN_FLAG_CALIB_DONE){ 3345 DPRINTF(sc, IWN_DEBUG_TRACE, 3346 "->%s received after calib done\n", __func__); 3347 return; 3348 } 3349 len = (le32toh(desc->len) & 0x3fff) - 4; 3350 3351 switch (calib->code) { 3352 case IWN5000_PHY_CALIB_DC: 3353 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC) 3354 idx = 0; 3355 break; 3356 case IWN5000_PHY_CALIB_LO: 3357 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO) 3358 idx = 1; 3359 break; 3360 case IWN5000_PHY_CALIB_TX_IQ: 3361 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ) 3362 idx = 2; 3363 break; 3364 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: 3365 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC) 3366 idx = 3; 3367 break; 3368 case IWN5000_PHY_CALIB_BASE_BAND: 3369 if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND) 3370 idx = 4; 3371 break; 3372 } 3373 if (idx == -1) /* Ignore other results. */ 3374 return; 3375 3376 /* Save calibration result. 
*/ 3377 if (sc->calibcmd[idx].buf != NULL) 3378 free(sc->calibcmd[idx].buf, M_DEVBUF); 3379 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); 3380 if (sc->calibcmd[idx].buf == NULL) { 3381 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3382 "not enough memory for calibration result %d\n", 3383 calib->code); 3384 return; 3385 } 3386 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 3387 "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len); 3388 sc->calibcmd[idx].len = len; 3389 memcpy(sc->calibcmd[idx].buf, calib, len); 3390} 3391 3392static void 3393iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib, 3394 struct iwn_stats *stats, int len) 3395{ 3396 struct iwn_stats_bt *stats_bt; 3397 struct iwn_stats *lstats; 3398 3399 /* 3400 * First - check whether the length is the bluetooth or normal. 3401 * 3402 * If it's normal - just copy it and bump out. 3403 * Otherwise we have to convert things. 3404 */ 3405 3406 if (len == sizeof(struct iwn_stats) + 4) { 3407 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats)); 3408 sc->last_stat_valid = 1; 3409 return; 3410 } 3411 3412 /* 3413 * If it's not the bluetooth size - log, then just copy. 3414 */ 3415 if (len != sizeof(struct iwn_stats_bt) + 4) { 3416 DPRINTF(sc, IWN_DEBUG_STATS, 3417 "%s: size of rx statistics (%d) not an expected size!\n", 3418 __func__, 3419 len); 3420 memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats)); 3421 sc->last_stat_valid = 1; 3422 return; 3423 } 3424 3425 /* 3426 * Ok. Time to copy. 
3427 */ 3428 stats_bt = (struct iwn_stats_bt *) stats; 3429 lstats = &sc->last_stat; 3430 3431 /* flags */ 3432 lstats->flags = stats_bt->flags; 3433 /* rx_bt */ 3434 memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm, 3435 sizeof(struct iwn_rx_phy_stats)); 3436 memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck, 3437 sizeof(struct iwn_rx_phy_stats)); 3438 memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common, 3439 sizeof(struct iwn_rx_general_stats)); 3440 memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht, 3441 sizeof(struct iwn_rx_ht_phy_stats)); 3442 /* tx */ 3443 memcpy(&lstats->tx, &stats_bt->tx, 3444 sizeof(struct iwn_tx_stats)); 3445 /* general */ 3446 memcpy(&lstats->general, &stats_bt->general, 3447 sizeof(struct iwn_general_stats)); 3448 3449 /* XXX TODO: Squirrel away the extra bluetooth stats somewhere */ 3450 sc->last_stat_valid = 1; 3451} 3452 3453/* 3454 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. 3455 * The latter is sent by the firmware after each received beacon. 3456 */ 3457static void 3458iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc) 3459{ 3460 struct iwn_ops *ops = &sc->ops; 3461 struct ieee80211com *ic = &sc->sc_ic; 3462 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3463 struct iwn_calib_state *calib = &sc->calib; 3464 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); 3465 struct iwn_stats *lstats; 3466 int temp; 3467 3468 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3469 3470 /* Ignore statistics received during a scan. */ 3471 if (vap->iv_state != IEEE80211_S_RUN || 3472 (ic->ic_flags & IEEE80211_F_SCAN)){ 3473 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n", 3474 __func__); 3475 return; 3476 } 3477 3478 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS, 3479 "%s: received statistics, cmd %d, len %d\n", 3480 __func__, desc->type, le16toh(desc->len)); 3481 sc->calib_cnt = 0; /* Reset TX power calibration timeout. 
*/ 3482 3483 /* 3484 * Collect/track general statistics for reporting. 3485 * 3486 * This takes care of ensuring that the bluetooth sized message 3487 * will be correctly converted to the legacy sized message. 3488 */ 3489 iwn_stats_update(sc, calib, stats, le16toh(desc->len)); 3490 3491 /* 3492 * And now, let's take a reference of it to use! 3493 */ 3494 lstats = &sc->last_stat; 3495 3496 /* Test if temperature has changed. */ 3497 if (lstats->general.temp != sc->rawtemp) { 3498 /* Convert "raw" temperature to degC. */ 3499 sc->rawtemp = stats->general.temp; 3500 temp = ops->get_temperature(sc); 3501 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", 3502 __func__, temp); 3503 3504 /* Update TX power if need be (4965AGN only). */ 3505 if (sc->hw_type == IWN_HW_REV_TYPE_4965) 3506 iwn4965_power_calibration(sc, temp); 3507 } 3508 3509 if (desc->type != IWN_BEACON_STATISTICS) 3510 return; /* Reply to a statistics request. */ 3511 3512 sc->noise = iwn_get_noise(&lstats->rx.general); 3513 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); 3514 3515 /* Test that RSSI and noise are present in stats report. */ 3516 if (le32toh(lstats->rx.general.flags) != 1) { 3517 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", 3518 "received statistics without RSSI"); 3519 return; 3520 } 3521 3522 if (calib->state == IWN_CALIB_STATE_ASSOC) 3523 iwn_collect_noise(sc, &lstats->rx.general); 3524 else if (calib->state == IWN_CALIB_STATE_RUN) { 3525 iwn_tune_sensitivity(sc, &lstats->rx); 3526 /* 3527 * XXX TODO: Only run the RX recovery if we're associated! 3528 */ 3529 iwn_check_rx_recovery(sc, lstats); 3530 iwn_save_stats_counters(sc, lstats); 3531 } 3532 3533 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3534} 3535 3536/* 3537 * Save the relevant statistic counters for the next calibration 3538 * pass. 
3539 */ 3540static void 3541iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs) 3542{ 3543 struct iwn_calib_state *calib = &sc->calib; 3544 3545 /* Save counters values for next call. */ 3546 calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp); 3547 calib->fa_cck = le32toh(rs->rx.cck.fa); 3548 calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp); 3549 calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp); 3550 calib->fa_ofdm = le32toh(rs->rx.ofdm.fa); 3551 3552 /* Last time we received these tick values */ 3553 sc->last_calib_ticks = ticks; 3554} 3555 3556/* 3557 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN 3558 * and 5000 adapters have different incompatible TX status formats. 3559 */ 3560static void 3561iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3562 struct iwn_rx_data *data) 3563{ 3564 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); 3565 int qid = desc->qid & IWN_RX_DESC_QID_MSK; 3566 3567 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3568 "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n", 3569 __func__, desc->qid, desc->idx, 3570 stat->rtsfailcnt, 3571 stat->ackfailcnt, 3572 stat->btkillcnt, 3573 stat->rate, le16toh(stat->duration), 3574 le32toh(stat->status)); 3575 3576 if (qid >= sc->firstaggqueue && stat->nframes != 1) { 3577 iwn_ampdu_tx_done(sc, qid, stat->nframes, stat->rtsfailcnt, 3578 &stat->status); 3579 } else { 3580 iwn_tx_done(sc, desc, stat->rtsfailcnt, stat->ackfailcnt, 3581 le32toh(stat->status) & 0xff); 3582 } 3583} 3584 3585static void 3586iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, 3587 struct iwn_rx_data *data) 3588{ 3589 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); 3590 int qid = desc->qid & IWN_RX_DESC_QID_MSK; 3591 3592 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " 3593 "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n", 3594 __func__, desc->qid, desc->idx, 3595 
stat->rtsfailcnt, 3596 stat->ackfailcnt, 3597 stat->btkillcnt, 3598 stat->rate, le16toh(stat->duration), 3599 le32toh(stat->status)); 3600 3601#ifdef notyet 3602 /* Reset TX scheduler slot. */ 3603 iwn5000_reset_sched(sc, qid, desc->idx); 3604#endif 3605 3606 if (qid >= sc->firstaggqueue && stat->nframes != 1) { 3607 iwn_ampdu_tx_done(sc, qid, stat->nframes, stat->rtsfailcnt, 3608 &stat->status); 3609 } else { 3610 iwn_tx_done(sc, desc, stat->rtsfailcnt, stat->ackfailcnt, 3611 le16toh(stat->status) & 0xff); 3612 } 3613} 3614 3615static void 3616iwn_adj_ampdu_ptr(struct iwn_softc *sc, struct iwn_tx_ring *ring) 3617{ 3618 int i; 3619 3620 for (i = ring->read; i != ring->cur; i = (i + 1) % IWN_TX_RING_COUNT) { 3621 struct iwn_tx_data *data = &ring->data[i]; 3622 3623 if (data->m != NULL) 3624 break; 3625 3626 data->remapped = 0; 3627 } 3628 3629 ring->read = i; 3630} 3631 3632/* 3633 * Adapter-independent backend for TX_DONE firmware notifications. 3634 */ 3635static void 3636iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int rtsfailcnt, 3637 int ackfailcnt, uint8_t status) 3638{ 3639 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; 3640 struct iwn_tx_ring *ring = &sc->txq[desc->qid & IWN_RX_DESC_QID_MSK]; 3641 struct iwn_tx_data *data = &ring->data[desc->idx]; 3642 struct mbuf *m; 3643 struct ieee80211_node *ni; 3644 3645 if (__predict_false(data->m == NULL && 3646 ring->qid >= sc->firstaggqueue)) { 3647 /* 3648 * There is no frame; skip this entry. 3649 */ 3650 DPRINTF(sc, IWN_DEBUG_AMPDU, "%s: ring %d: no entry %d\n", 3651 __func__, ring->qid, desc->idx); 3652 return; 3653 } 3654 3655 KASSERT(data->ni != NULL, ("no node")); 3656 KASSERT(data->m != NULL, ("no mbuf")); 3657 3658 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3659 3660 /* Unmap and free mbuf. 
*/ 3661 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); 3662 bus_dmamap_unload(ring->data_dmat, data->map); 3663 m = data->m, data->m = NULL; 3664 ni = data->ni, data->ni = NULL; 3665 3666 data->long_retries = 0; 3667 3668 if (ring->qid >= sc->firstaggqueue) 3669 iwn_adj_ampdu_ptr(sc, ring); 3670 3671 /* 3672 * XXX f/w may hang (device timeout) when desc->idx - ring->read == 64 3673 * (aggregation queues only). 3674 */ 3675 3676 ring->queued--; 3677 iwn_check_tx_ring(sc, ring->qid); 3678 3679 /* 3680 * Update rate control statistics for the node. 3681 */ 3682 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY | 3683 IEEE80211_RATECTL_STATUS_LONG_RETRY; 3684 txs->short_retries = rtsfailcnt; 3685 txs->long_retries = ackfailcnt; 3686 if (!(status & IWN_TX_FAIL)) 3687 txs->status = IEEE80211_RATECTL_TX_SUCCESS; 3688 else { 3689 switch (status) { 3690 case IWN_TX_FAIL_SHORT_LIMIT: 3691 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT; 3692 break; 3693 case IWN_TX_FAIL_LONG_LIMIT: 3694 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; 3695 break; 3696 case IWN_TX_STATUS_FAIL_LIFE_EXPIRE: 3697 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED; 3698 break; 3699 default: 3700 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; 3701 break; 3702 } 3703 } 3704 ieee80211_ratectl_tx_complete(ni, txs); 3705 3706 /* 3707 * Channels marked for "radar" require traffic to be received 3708 * to unlock before we can transmit. Until traffic is seen 3709 * any attempt to transmit is returned immediately with status 3710 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily 3711 * happen on first authenticate after scanning. To workaround 3712 * this we ignore a failure of this sort in AUTH state so the 3713 * 802.11 layer will fall back to using a timeout to wait for 3714 * the AUTH reply. This allows the firmware time to see 3715 * traffic so a subsequent retry of AUTH succeeds. 
It's 3716 * unclear why the firmware does not maintain state for 3717 * channels recently visited as this would allow immediate 3718 * use of the channel after a scan (where we see traffic). 3719 */ 3720 if (status == IWN_TX_FAIL_TX_LOCKED && 3721 ni->ni_vap->iv_state == IEEE80211_S_AUTH) 3722 ieee80211_tx_complete(ni, m, 0); 3723 else 3724 ieee80211_tx_complete(ni, m, 3725 (status & IWN_TX_FAIL) != 0); 3726 3727 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3728} 3729 3730/* 3731 * Process a "command done" firmware notification. This is where we wakeup 3732 * processes waiting for a synchronous command completion. 3733 */ 3734static void 3735iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) 3736{ 3737 struct iwn_tx_ring *ring; 3738 struct iwn_tx_data *data; 3739 int cmd_queue_num; 3740 3741 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 3742 cmd_queue_num = IWN_PAN_CMD_QUEUE; 3743 else 3744 cmd_queue_num = IWN_CMD_QUEUE_NUM; 3745 3746 if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num) 3747 return; /* Not a command ack. */ 3748 3749 ring = &sc->txq[cmd_queue_num]; 3750 data = &ring->data[desc->idx]; 3751 3752 /* If the command was mapped in an mbuf, free it. 
*/ 3753 if (data->m != NULL) { 3754 bus_dmamap_sync(ring->data_dmat, data->map, 3755 BUS_DMASYNC_POSTWRITE); 3756 bus_dmamap_unload(ring->data_dmat, data->map); 3757 m_freem(data->m); 3758 data->m = NULL; 3759 } 3760 wakeup(&ring->desc[desc->idx]); 3761} 3762 3763static int 3764iwn_ampdu_check_bitmap(uint64_t bitmap, int start, int idx) 3765{ 3766 int bit, shift; 3767 3768 bit = idx - start; 3769 shift = 0; 3770 if (bit >= 64) { 3771 shift = 0x100 - bit; 3772 bit = 0; 3773 } else if (bit <= -64) 3774 bit = 0x100 + bit; 3775 else if (bit < 0) { 3776 shift = -bit; 3777 bit = 0; 3778 } 3779 3780 if (bit - shift >= 64) 3781 return (0); 3782 3783 return ((bitmap & (1ULL << (bit - shift))) != 0); 3784} 3785 3786/* 3787 * Firmware bug workaround: in case if 'retries' counter 3788 * overflows 'seqno' field will be incremented: 3789 * status|sequence|status|sequence|status|sequence 3790 * 0000 0A48 0001 0A49 0000 0A6A 3791 * 1000 0A48 1000 0A49 1000 0A6A 3792 * 2000 0A48 2000 0A49 2000 0A6A 3793 * ... 3794 * E000 0A48 E000 0A49 E000 0A6A 3795 * F000 0A48 F000 0A49 F000 0A6A 3796 * 0000 0A49 0000 0A49 0000 0A6B 3797 * 1000 0A49 1000 0A49 1000 0A6B 3798 * ... 3799 * D000 0A49 D000 0A49 D000 0A6B 3800 * E000 0A49 E001 0A49 E000 0A6B 3801 * F000 0A49 F001 0A49 F000 0A6B 3802 * 0000 0A4A 0000 0A4B 0000 0A6A 3803 * 1000 0A4A 1000 0A4B 1000 0A6A 3804 * ... 3805 * 3806 * Odd 'seqno' numbers are incremened by 2 every 2 overflows. 3807 * For even 'seqno' % 4 != 0 overflow is cyclic (0 -> +1 -> 0). 3808 * Not checked with nretries >= 64. 
3809 * 3810 */ 3811static int 3812iwn_ampdu_index_check(struct iwn_softc *sc, struct iwn_tx_ring *ring, 3813 uint64_t bitmap, int start, int idx) 3814{ 3815 struct ieee80211com *ic = &sc->sc_ic; 3816 struct iwn_tx_data *data; 3817 int diff, min_retries, max_retries, new_idx, loop_end; 3818 3819 new_idx = idx - IWN_LONG_RETRY_LIMIT_LOG; 3820 if (new_idx < 0) 3821 new_idx += IWN_TX_RING_COUNT; 3822 3823 /* 3824 * Corner case: check if retry count is not too big; 3825 * reset device otherwise. 3826 */ 3827 if (!iwn_ampdu_check_bitmap(bitmap, start, new_idx)) { 3828 data = &ring->data[new_idx]; 3829 if (data->long_retries > IWN_LONG_RETRY_LIMIT) { 3830 device_printf(sc->sc_dev, 3831 "%s: retry count (%d) for idx %d/%d overflow, " 3832 "resetting...\n", __func__, data->long_retries, 3833 ring->qid, new_idx); 3834 ieee80211_restart_all(ic); 3835 return (-1); 3836 } 3837 } 3838 3839 /* Correct index if needed. */ 3840 loop_end = idx; 3841 do { 3842 data = &ring->data[new_idx]; 3843 diff = idx - new_idx; 3844 if (diff < 0) 3845 diff += IWN_TX_RING_COUNT; 3846 3847 min_retries = IWN_LONG_RETRY_FW_OVERFLOW * diff; 3848 if ((new_idx % 2) == 0) 3849 max_retries = IWN_LONG_RETRY_FW_OVERFLOW * (diff + 1); 3850 else 3851 max_retries = IWN_LONG_RETRY_FW_OVERFLOW * (diff + 2); 3852 3853 if (!iwn_ampdu_check_bitmap(bitmap, start, new_idx) && 3854 ((data->long_retries >= min_retries && 3855 data->long_retries < max_retries) || 3856 (diff == 1 && 3857 (new_idx & 0x03) == 0x02 && 3858 data->long_retries >= IWN_LONG_RETRY_FW_OVERFLOW))) { 3859 DPRINTF(sc, IWN_DEBUG_AMPDU, 3860 "%s: correcting index %d -> %d in queue %d" 3861 " (retries %d)\n", __func__, idx, new_idx, 3862 ring->qid, data->long_retries); 3863 return (new_idx); 3864 } 3865 3866 new_idx = (new_idx + 1) % IWN_TX_RING_COUNT; 3867 } while (new_idx != loop_end); 3868 3869 return (idx); 3870} 3871 3872static void 3873iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int nframes, int rtsfailcnt, 3874 void *stat) 3875{ 3876 struct 
iwn_tx_ring *ring = &sc->txq[qid]; 3877 struct ieee80211_tx_ampdu *tap = sc->qid2tap[qid]; 3878 struct iwn_node *wn = (void *)tap->txa_ni; 3879 struct iwn_tx_data *data; 3880 uint64_t bitmap = 0; 3881 uint16_t *aggstatus = stat; 3882 uint8_t tid = tap->txa_tid; 3883 int bit, i, idx, shift, start, tx_err; 3884 3885 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 3886 3887 start = le16toh(*(aggstatus + nframes * 2)) & 0xff; 3888 3889 for (i = 0; i < nframes; i++) { 3890 uint16_t status = le16toh(aggstatus[i * 2]); 3891 3892 if (status & IWN_AGG_TX_STATE_IGNORE_MASK) 3893 continue; 3894 3895 idx = le16toh(aggstatus[i * 2 + 1]) & 0xff; 3896 data = &ring->data[idx]; 3897 if (data->remapped) { 3898 idx = iwn_ampdu_index_check(sc, ring, bitmap, start, idx); 3899 if (idx == -1) { 3900 /* skip error (device will be restarted anyway). */ 3901 continue; 3902 } 3903 3904 /* Index may have changed. */ 3905 data = &ring->data[idx]; 3906 } 3907 3908 /* 3909 * XXX Sometimes (rarely) some frames are excluded from events. 3910 * XXX Due to that long_retries counter may be wrong. 3911 */ 3912 data->long_retries &= ~0x0f; 3913 data->long_retries += IWN_AGG_TX_TRY_COUNT(status) + 1; 3914 3915 if (data->long_retries >= IWN_LONG_RETRY_FW_OVERFLOW) { 3916 int diff, wrong_idx; 3917 3918 diff = data->long_retries / IWN_LONG_RETRY_FW_OVERFLOW; 3919 wrong_idx = (idx + diff) % IWN_TX_RING_COUNT; 3920 3921 /* 3922 * Mark the entry so the above code will check it 3923 * next time. 3924 */ 3925 ring->data[wrong_idx].remapped = 1; 3926 } 3927 3928 if (status & IWN_AGG_TX_STATE_UNDERRUN_MSK) { 3929 /* 3930 * NB: count retries but postpone - it was not 3931 * transmitted. 
3932 */ 3933 continue; 3934 } 3935 3936 bit = idx - start; 3937 shift = 0; 3938 if (bit >= 64) { 3939 shift = 0x100 - bit; 3940 bit = 0; 3941 } else if (bit <= -64) 3942 bit = 0x100 + bit; 3943 else if (bit < 0) { 3944 shift = -bit; 3945 bit = 0; 3946 } 3947 bitmap = bitmap << shift; 3948 bitmap |= 1ULL << bit; 3949 } 3950 wn->agg[tid].startidx = start; 3951 wn->agg[tid].bitmap = bitmap; 3952 wn->agg[tid].short_retries = rtsfailcnt; 3953 3954 DPRINTF(sc, IWN_DEBUG_AMPDU, "%s: nframes %d start %d bitmap %016jX\n", 3955 __func__, nframes, start, (uintmax_t)bitmap); 3956 3957 i = ring->read; 3958 3959 for (tx_err = 0; 3960 i != wn->agg[tid].startidx; 3961 i = (i + 1) % IWN_TX_RING_COUNT) { 3962 data = &ring->data[i]; 3963 data->remapped = 0; 3964 if (data->m == NULL) 3965 continue; 3966 3967 tx_err++; 3968 iwn_agg_tx_complete(sc, ring, tid, i, 0); 3969 } 3970 3971 ring->read = wn->agg[tid].startidx; 3972 ring->queued -= tx_err; 3973 3974 iwn_check_tx_ring(sc, qid); 3975 3976 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 3977} 3978 3979/* 3980 * Process an INT_FH_RX or INT_SW_RX interrupt. 
3981 */ 3982static void 3983iwn_notif_intr(struct iwn_softc *sc) 3984{ 3985 struct iwn_ops *ops = &sc->ops; 3986 struct ieee80211com *ic = &sc->sc_ic; 3987 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3988 uint16_t hw; 3989 int is_stopped; 3990 3991 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 3992 BUS_DMASYNC_POSTREAD); 3993 3994 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; 3995 while (sc->rxq.cur != hw) { 3996 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; 3997 struct iwn_rx_desc *desc; 3998 3999 bus_dmamap_sync(sc->rxq.data_dmat, data->map, 4000 BUS_DMASYNC_POSTREAD); 4001 desc = mtod(data->m, struct iwn_rx_desc *); 4002 4003 DPRINTF(sc, IWN_DEBUG_RECV, 4004 "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", 4005 __func__, sc->rxq.cur, desc->qid & IWN_RX_DESC_QID_MSK, 4006 desc->idx, desc->flags, desc->type, 4007 iwn_intr_str(desc->type), le16toh(desc->len)); 4008 4009 if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */ 4010 iwn_cmd_done(sc, desc); 4011 4012 switch (desc->type) { 4013 case IWN_RX_PHY: 4014 iwn_rx_phy(sc, desc); 4015 break; 4016 4017 case IWN_RX_DONE: /* 4965AGN only. */ 4018 case IWN_MPDU_RX_DONE: 4019 /* An 802.11 frame has been received. */ 4020 iwn_rx_done(sc, desc, data); 4021 4022 is_stopped = (sc->sc_flags & IWN_FLAG_RUNNING) == 0; 4023 if (__predict_false(is_stopped)) 4024 return; 4025 4026 break; 4027 4028 case IWN_RX_COMPRESSED_BA: 4029 /* A Compressed BlockAck has been received. */ 4030 iwn_rx_compressed_ba(sc, desc); 4031 break; 4032 4033 case IWN_TX_DONE: 4034 /* An 802.11 frame has been transmitted. 
*/ 4035 ops->tx_done(sc, desc, data); 4036 break; 4037 4038 case IWN_RX_STATISTICS: 4039 case IWN_BEACON_STATISTICS: 4040 iwn_rx_statistics(sc, desc); 4041 break; 4042 4043 case IWN_BEACON_MISSED: 4044 { 4045 struct iwn_beacon_missed *miss = 4046 (struct iwn_beacon_missed *)(desc + 1); 4047 int misses; 4048 4049 misses = le32toh(miss->consecutive); 4050 4051 DPRINTF(sc, IWN_DEBUG_STATE, 4052 "%s: beacons missed %d/%d\n", __func__, 4053 misses, le32toh(miss->total)); 4054 /* 4055 * If more than 5 consecutive beacons are missed, 4056 * reinitialize the sensitivity state machine. 4057 */ 4058 if (vap->iv_state == IEEE80211_S_RUN && 4059 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 4060 if (misses > 5) 4061 (void)iwn_init_sensitivity(sc); 4062 if (misses >= vap->iv_bmissthreshold) { 4063 IWN_UNLOCK(sc); 4064 ieee80211_beacon_miss(ic); 4065 IWN_LOCK(sc); 4066 4067 is_stopped = (sc->sc_flags & 4068 IWN_FLAG_RUNNING) == 0; 4069 if (__predict_false(is_stopped)) 4070 return; 4071 } 4072 } 4073 break; 4074 } 4075 case IWN_UC_READY: 4076 { 4077 struct iwn_ucode_info *uc = 4078 (struct iwn_ucode_info *)(desc + 1); 4079 4080 /* The microcontroller is ready. */ 4081 DPRINTF(sc, IWN_DEBUG_RESET, 4082 "microcode alive notification version=%d.%d " 4083 "subtype=%x alive=%x\n", uc->major, uc->minor, 4084 uc->subtype, le32toh(uc->valid)); 4085 4086 if (le32toh(uc->valid) != 1) { 4087 device_printf(sc->sc_dev, 4088 "microcontroller initialization failed"); 4089 break; 4090 } 4091 if (uc->subtype == IWN_UCODE_INIT) { 4092 /* Save microcontroller report. */ 4093 memcpy(&sc->ucode_info, uc, sizeof (*uc)); 4094 } 4095 /* Save the address of the error log in SRAM. */ 4096 sc->errptr = le32toh(uc->errptr); 4097 break; 4098 } 4099#ifdef IWN_DEBUG 4100 case IWN_STATE_CHANGED: 4101 { 4102 /* 4103 * State change allows hardware switch change to be 4104 * noted. However, we handle this in iwn_intr as we 4105 * get both the enable/disble intr. 
4106 */ 4107 uint32_t *status = (uint32_t *)(desc + 1); 4108 DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE, 4109 "state changed to %x\n", 4110 le32toh(*status)); 4111 break; 4112 } 4113 case IWN_START_SCAN: 4114 { 4115 struct iwn_start_scan *scan = 4116 (struct iwn_start_scan *)(desc + 1); 4117 DPRINTF(sc, IWN_DEBUG_ANY, 4118 "%s: scanning channel %d status %x\n", 4119 __func__, scan->chan, le32toh(scan->status)); 4120 break; 4121 } 4122#endif 4123 case IWN_STOP_SCAN: 4124 { 4125#ifdef IWN_DEBUG 4126 struct iwn_stop_scan *scan = 4127 (struct iwn_stop_scan *)(desc + 1); 4128 DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN, 4129 "scan finished nchan=%d status=%d chan=%d\n", 4130 scan->nchan, scan->status, scan->chan); 4131#endif 4132 sc->sc_is_scanning = 0; 4133 callout_stop(&sc->scan_timeout); 4134 IWN_UNLOCK(sc); 4135 ieee80211_scan_next(vap); 4136 IWN_LOCK(sc); 4137 4138 is_stopped = (sc->sc_flags & IWN_FLAG_RUNNING) == 0; 4139 if (__predict_false(is_stopped)) 4140 return; 4141 4142 break; 4143 } 4144 case IWN5000_CALIBRATION_RESULT: 4145 iwn5000_rx_calib_results(sc, desc); 4146 break; 4147 4148 case IWN5000_CALIBRATION_DONE: 4149 sc->sc_flags |= IWN_FLAG_CALIB_DONE; 4150 wakeup(sc); 4151 break; 4152 } 4153 4154 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; 4155 } 4156 4157 /* Tell the firmware what we have processed. */ 4158 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; 4159 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); 4160} 4161 4162/* 4163 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up 4164 * from power-down sleep mode. 4165 */ 4166static void 4167iwn_wakeup_intr(struct iwn_softc *sc) 4168{ 4169 int qid; 4170 4171 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", 4172 __func__); 4173 4174 /* Wakeup RX and TX rings. 
*/ 4175 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); 4176 for (qid = 0; qid < sc->ntxqs; qid++) { 4177 struct iwn_tx_ring *ring = &sc->txq[qid]; 4178 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); 4179 } 4180} 4181 4182static void 4183iwn_rftoggle_task(void *arg, int npending) 4184{ 4185 struct iwn_softc *sc = arg; 4186 struct ieee80211com *ic = &sc->sc_ic; 4187 uint32_t tmp; 4188 4189 IWN_LOCK(sc); 4190 tmp = IWN_READ(sc, IWN_GP_CNTRL); 4191 IWN_UNLOCK(sc); 4192 4193 device_printf(sc->sc_dev, "RF switch: radio %s\n", 4194 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); 4195 if (!(tmp & IWN_GP_CNTRL_RFKILL)) { 4196 ieee80211_suspend_all(ic); 4197 4198 /* Enable interrupts to get RF toggle notification. */ 4199 IWN_LOCK(sc); 4200 IWN_WRITE(sc, IWN_INT, 0xffffffff); 4201 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 4202 IWN_UNLOCK(sc); 4203 } else 4204 ieee80211_resume_all(ic); 4205} 4206 4207/* 4208 * Dump the error log of the firmware when a firmware panic occurs. Although 4209 * we can't debug the firmware because it is neither open source nor free, it 4210 * can help us to identify certain classes of problems. 4211 */ 4212static void 4213iwn_fatal_intr(struct iwn_softc *sc) 4214{ 4215 struct iwn_fw_dump dump; 4216 int i; 4217 4218 IWN_LOCK_ASSERT(sc); 4219 4220 /* Force a complete recalibration on next init. */ 4221 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; 4222 4223 /* Check that the error log address is valid. */ 4224 if (sc->errptr < IWN_FW_DATA_BASE || 4225 sc->errptr + sizeof (dump) > 4226 IWN_FW_DATA_BASE + sc->fw_data_maxsz) { 4227 printf("%s: bad firmware error log address 0x%08x\n", __func__, 4228 sc->errptr); 4229 return; 4230 } 4231 if (iwn_nic_lock(sc) != 0) { 4232 printf("%s: could not read firmware error log\n", __func__); 4233 return; 4234 } 4235 /* Read firmware error log from SRAM. 
*/ 4236 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, 4237 sizeof (dump) / sizeof (uint32_t)); 4238 iwn_nic_unlock(sc); 4239 4240 if (dump.valid == 0) { 4241 printf("%s: firmware error log is empty\n", __func__); 4242 return; 4243 } 4244 printf("firmware error log:\n"); 4245 printf(" error type = \"%s\" (0x%08X)\n", 4246 (dump.id < nitems(iwn_fw_errmsg)) ? 4247 iwn_fw_errmsg[dump.id] : "UNKNOWN", 4248 dump.id); 4249 printf(" program counter = 0x%08X\n", dump.pc); 4250 printf(" source line = 0x%08X\n", dump.src_line); 4251 printf(" error data = 0x%08X%08X\n", 4252 dump.error_data[0], dump.error_data[1]); 4253 printf(" branch link = 0x%08X%08X\n", 4254 dump.branch_link[0], dump.branch_link[1]); 4255 printf(" interrupt link = 0x%08X%08X\n", 4256 dump.interrupt_link[0], dump.interrupt_link[1]); 4257 printf(" time = %u\n", dump.time[0]); 4258 4259 /* Dump driver status (TX and RX rings) while we're here. */ 4260 printf("driver status:\n"); 4261 for (i = 0; i < sc->ntxqs; i++) { 4262 struct iwn_tx_ring *ring = &sc->txq[i]; 4263 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 4264 i, ring->qid, ring->cur, ring->queued); 4265 } 4266 printf(" rx ring: cur=%d\n", sc->rxq.cur); 4267} 4268 4269static void 4270iwn_intr(void *arg) 4271{ 4272 struct iwn_softc *sc = arg; 4273 uint32_t r1, r2, tmp; 4274 4275 IWN_LOCK(sc); 4276 4277#ifndef __HAIKU__ 4278 /* Disable interrupts. */ 4279 IWN_WRITE(sc, IWN_INT_MASK, 0); 4280 4281 /* Read interrupts from ICT (fast) or from registers (slow). */ 4282 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 4283 bus_dmamap_sync(sc->ict_dma.tag, sc->ict_dma.map, 4284 BUS_DMASYNC_POSTREAD); 4285 tmp = 0; 4286 while (sc->ict[sc->ict_cur] != 0) { 4287 tmp |= sc->ict[sc->ict_cur]; 4288 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ 4289 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; 4290 } 4291 tmp = le32toh(tmp); 4292 if (tmp == 0xffffffff) /* Shouldn't happen. */ 4293 tmp = 0; 4294 else if (tmp & 0xc0000) /* Workaround a HW bug. 
*/ 4295 tmp |= 0x8000; 4296 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); 4297 r2 = 0; /* Unused. */ 4298 } else { 4299 r1 = IWN_READ(sc, IWN_INT); 4300 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) { 4301 IWN_UNLOCK(sc); 4302 return; /* Hardware gone! */ 4303 } 4304 r2 = IWN_READ(sc, IWN_FH_INT); 4305 } 4306#else 4307 r1 = atomic_get((int32 *)&sc->sc_intr_status_1); 4308 r2 = atomic_get((int32 *)&sc->sc_intr_status_2); 4309#endif 4310 4311 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" 4312 , r1, r2); 4313 4314 if (r1 == 0 && r2 == 0) 4315 goto done; /* Interrupt not for us. */ 4316 4317 /* Acknowledge interrupts. */ 4318 IWN_WRITE(sc, IWN_INT, r1); 4319 if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) 4320 IWN_WRITE(sc, IWN_FH_INT, r2); 4321 4322 if (r1 & IWN_INT_RF_TOGGLED) { 4323 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task); 4324 goto done; 4325 } 4326 if (r1 & IWN_INT_CT_REACHED) { 4327 device_printf(sc->sc_dev, "%s: critical temperature reached!\n", 4328 __func__); 4329 } 4330 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { 4331 device_printf(sc->sc_dev, "%s: fatal firmware error\n", 4332 __func__); 4333#ifdef IWN_DEBUG 4334 iwn_debug_register(sc); 4335#endif 4336 /* Dump firmware error log and stop. 
*/ 4337 iwn_fatal_intr(sc); 4338 4339 taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task); 4340 goto done; 4341 } 4342 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || 4343 (r2 & IWN_FH_INT_RX)) { 4344 if (sc->sc_flags & IWN_FLAG_USE_ICT) { 4345 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) 4346 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); 4347 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 4348 IWN_INT_PERIODIC_DIS); 4349 iwn_notif_intr(sc); 4350 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { 4351 IWN_WRITE_1(sc, IWN_INT_PERIODIC, 4352 IWN_INT_PERIODIC_ENA); 4353 } 4354 } else 4355 iwn_notif_intr(sc); 4356 } 4357 4358 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { 4359 if (sc->sc_flags & IWN_FLAG_USE_ICT) 4360 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); 4361 wakeup(sc); /* FH DMA transfer completed. */ 4362 } 4363 4364 if (r1 & IWN_INT_ALIVE) 4365 wakeup(sc); /* Firmware is alive. */ 4366 4367 if (r1 & IWN_INT_WAKEUP) 4368 iwn_wakeup_intr(sc); 4369 4370done: 4371 /* Re-enable interrupts. */ 4372 if (sc->sc_flags & IWN_FLAG_RUNNING) 4373 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); 4374 4375 IWN_UNLOCK(sc); 4376} 4377 4378/* 4379 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and 4380 * 5000 adapters use a slightly different format). 
 */
/*
 * Write the frame length (plus TX command overhead) into the 4965 byte-count
 * table entry for slot `idx' of queue `qid' and sync it for the device.
 */
static void
iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
    uint16_t len)
{
	uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	*w = htole16(len + 8);
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);
	if (idx < IWN_SCHED_WINSZ) {
		/* Mirror the entry past the ring end so the scheduler
		 * window can wrap without special-casing. */
		*(w + IWN_TX_RING_COUNT) = *w;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}

/*
 * 5000-series variant: the byte-count entry additionally encodes the
 * station id in the top nibble.
 */
static void
iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
    uint16_t len)
{
	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	*w = htole16(id << 12 | (len + 8));
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);
	if (idx < IWN_SCHED_WINSZ) {
		/* Mirror for scheduler window wraparound (see above). */
		*(w + IWN_TX_RING_COUNT) = *w;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}

#ifdef notyet
/*
 * Reset a 5000-series byte-count entry, keeping the station id nibble.
 * Currently unused (compiled out).
 */
static void
iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
{
	uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	*w = (*w & htole16(0xf000)) | htole16(1);
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);
	if (idx < IWN_SCHED_WINSZ) {
		*(w + IWN_TX_RING_COUNT) = *w;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif

/*
 * Check whether OFDM 11g protection will be enabled for the given rate.
 *
 * The original driver code only enabled protection for OFDM rates.
 * It didn't check to see whether it was operating in 11a or 11bg mode.
 */
static int
iwn_check_rate_needs_protection(struct iwn_softc *sc,
    struct ieee80211vap *vap, uint8_t rate)
{
	struct ieee80211com *ic = vap->iv_ic;

	/*
	 * Not in 2GHz mode? Then there's no need to enable OFDM
	 * 11bg protection.
	 */
	if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
		return (0);
	}

	/*
	 * 11bg protection not enabled? Then don't use it.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
		return (0);

	/*
	 * If it's an 11n rate - no protection.
	 * We'll do it via a specific 11n check.
	 */
	if (rate & IEEE80211_RATE_MCS) {
		return (0);
	}

	/*
	 * Do a rate table lookup. If the PHY is CCK,
	 * don't do protection.
	 */
	if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
		return (0);

	/*
	 * Yup, enable protection.
	 */
	return (1);
}

/*
 * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into
 * the link quality table that reflects this particular entry.
 */
static int
iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
    uint8_t rate)
{
	struct ieee80211_rateset *rs;
	int is_11n;
	int nr;
	int i;
	uint8_t cmp_rate;

	/*
	 * Figure out if we're using 11n or not here.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
		is_11n = 1;
	else
		is_11n = 0;

	/*
	 * Use the correct rate table.
	 */
	if (is_11n) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		nr = ni->ni_htrates.rs_nrates;
	} else {
		rs = &ni->ni_rates;
		nr = rs->rs_nrates;
	}

	/*
	 * Find the relevant link quality entry in the table.
	 */
	for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
		/*
		 * The link quality table index starts at 0 == highest
		 * rate, so we walk the rate table backwards.
		 */
		cmp_rate = rs->rs_rates[(nr - 1) - i];
		if (rate & IEEE80211_RATE_MCS)
			cmp_rate |= IEEE80211_RATE_MCS;

#if 0
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
		    __func__,
		    i,
		    nr,
		    rate,
		    cmp_rate);
#endif

		if (cmp_rate == rate)
			return (i);
	}

	/* Failed? Start at the end */
	return (IWN_MAX_TX_RETRIES - 1);
}

/*
 * Normal (net80211-driven) transmit path: pick a rate, handle A-MPDU
 * sequencing, encryption and protection flags, then build the TX command
 * and hand the frame to iwn_tx_cmd().  Called with the driver lock held.
 */
static int
iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_node *wn = (void *)ni;
	struct iwn_tx_ring *ring;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	uint32_t flags;
	uint16_t qos;
	uint8_t tid, type;
	int ac, totlen, rate;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Select EDCA Access Category and TX ring for this frame. */
	if (IEEE80211_QOS_HAS_SEQ(wh)) {
		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
		tid = qos & IEEE80211_QOS_TID;
	} else {
		qos = 0;
		tid = 0;
	}

	/* Choose a TX rate index.
 */
	if (type == IEEE80211_FC0_TYPE_MGT ||
	    type == IEEE80211_FC0_TYPE_CTL ||
	    (m->m_flags & M_EAPOL) != 0)
		rate = tp->mgmtrate;
	else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
		rate = tp->mcastrate;
	else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rate = tp->ucastrate;
	else {
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		rate = ni->ni_txrate;
	}

	/*
	 * XXX TODO: Group addressed frames aren't aggregated and must
	 * go to the normal non-aggregation queue, and have a NONQOS TID
	 * assigned from net80211.
	 */

	ac = M_WME_GETAC(m);
	if (m->m_flags & M_AMPDU_MPDU) {
		struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];

		if (!IEEE80211_AMPDU_RUNNING(tap))
			return (EINVAL);

		/* Redirect to the hardware queue negotiated for this TID. */
		ac = *(int *)tap->txa_private;
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			return ENOBUFS;
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		ieee80211_radiotap_tx(vap, m);
	}

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* Unicast frame, check if an ACK is expected. */
		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
		    IEEE80211_QOS_ACKPOLICY_NOACK)
			flags |= IWN_TX_NEED_ACK;
	}
	if ((wh->i_fc[0] &
	    (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
	    (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
		flags |= IWN_TX_IMM_BA;	/* Cannot happen yet. */

	if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
		flags |= IWN_TX_MORE_FRAG;	/* Cannot happen yet. */

	/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		/* NB: Group frames are sent using CCK in 802.11b/g. */
		if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
			flags |= IWN_TX_NEED_RTS;
		} else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
			if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
				flags |= IWN_TX_NEED_CTS;
			else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
				flags |= IWN_TX_NEED_RTS;
		} else if ((rate & IEEE80211_RATE_MCS) &&
		    (ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) {
			flags |= IWN_TX_NEED_RTS;
		}

		/* XXX HT protection? */

		if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
			if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
				/* 5000 autoselects RTS/CTS or CTS-to-self. */
				flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
				flags |= IWN_TX_NEED_PROTECTION;
			} else
				flags |= IWN_TX_FULL_TXOP;
		}
	}

	ring = &sc->txq[ac];
	if (m->m_flags & M_AMPDU_MPDU) {
		uint16_t seqno = ni->ni_txseqs[tid];

		if (ring->queued > IWN_TX_RING_COUNT / 2 &&
		    (ring->cur + 1) % IWN_TX_RING_COUNT == ring->read) {
			DPRINTF(sc, IWN_DEBUG_AMPDU, "%s: no more space "
			    "(queued %d) left in %d queue!\n",
			    __func__, ring->queued, ac);
			return (ENOBUFS);
		}

		/*
		 * Queue this frame to the hardware ring that we've
		 * negotiated AMPDU TX on.
		 *
		 * Note that the sequence number must match the TX slot
		 * being used!
		 */
		if ((seqno % 256) != ring->cur) {
			device_printf(sc->sc_dev,
			    "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
			    __func__,
			    m,
			    seqno,
			    seqno % 256,
			    ring->cur);

			/* XXX until D9195 will not be committed */
			/* Resynchronize the TID sequence number with the
			 * ring slot so firmware accepts the frame. */
			ni->ni_txseqs[tid] &= ~0xff;
			ni->ni_txseqs[tid] += ring->cur;
			seqno = ni->ni_txseqs[tid];
		}

		*(uint16_t *)wh->i_seq =
		    htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
		ni->ni_txseqs[tid]++;
	}

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	tx = (struct iwn_cmd_data *)cmd->data;

	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->id = sc->broadcast_id;
	else
		tx->id = wn->id;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	if (tx->id == sc->broadcast_id) {
		/* Group or management frame. */
		tx->linkq = 0;
	} else {
		tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
		flags |= IWN_TX_LINKQ;	/* enable MRR */
	}

	tx->tid = tid;
	tx->rts_ntries = 60;
	tx->data_ntries = 15;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
	tx->security = 0;
	tx->flags = htole32(flags);

	return (iwn_tx_cmd(sc, m, ni, ring));
}

/*
 * Raw transmit path: rate, ACK policy and RTS/CTS protection come from the
 * caller-supplied ieee80211_bpf_params instead of net80211 state.  Called
 * with the driver lock held.
 */
static int
iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
    struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct iwn_tx_ring *ring;
	uint32_t flags;
	int ac, rate;
	uint8_t type;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	ac = params->ibp_pri & 3;

	/* Choose a TX rate. */
	rate = params->ibp_rate0;

	flags = 0;
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
		flags |= IWN_TX_NEED_ACK;
	if (params->ibp_flags & IEEE80211_BPF_RTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self. */
			flags &= ~IWN_TX_NEED_RTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
	}
	if (params->ibp_flags & IEEE80211_BPF_CTS) {
		if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
			/* 5000 autoselects RTS/CTS or CTS-to-self.
 */
			flags &= ~IWN_TX_NEED_CTS;
			flags |= IWN_TX_NEED_PROTECTION;
		} else
			flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_rate = rate;

		ieee80211_radiotap_tx(vap, m);
	}

	ring = &sc->txq[ac];
	cmd = &ring->cmd[ring->cur];

	tx = (struct iwn_cmd_data *)cmd->data;
	/* NB: No need to clear tx, all fields are reinitialized here. */
	tx->scratch = 0;	/* clear "scratch" area */

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Tell HW to set timestamp in probe responses. */
		if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			flags |= IWN_TX_INSERT_TSTAMP;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->timeout = htole16(3);
		else
			tx->timeout = htole16(2);
	} else
		tx->timeout = htole16(0);

	tx->tid = 0;
	tx->id = sc->broadcast_id;
	tx->rts_ntries = params->ibp_try1;
	tx->data_ntries = params->ibp_try0;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
	tx->rate = iwn_rate_to_plcp(sc, ni, rate);
	tx->security = 0;
	tx->flags = htole32(flags);

	/* Group or management frame. */
	tx->linkq = 0;

	return (iwn_tx_cmd(sc, m, ni, ring));
}

/*
 * DMA-map `m', finish the TX command prepared by the caller in the current
 * ring slot, fill the TX descriptor and kick the ring.  On success the
 * mbuf/node references are stored in the ring slot (freed at completion).
 * NOTE(review): on the second mapping failure the mbuf and node are freed
 * here and 0 is returned, so the caller must not free them again.
 */
static int
iwn_tx_cmd(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
    struct iwn_tx_ring *ring)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_tx_cmd *cmd;
	struct iwn_cmd_data *tx;
	struct ieee80211_frame *wh;
	struct iwn_tx_desc *desc;
	struct iwn_tx_data *data;
	bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
	struct mbuf *m1;
	u_int hdrlen;
	int totlen, error, pad, nsegs = 0, i;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	totlen = m->m_pkthdr.len;

	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	/* Sanity check: the slot must be free. */
	if (__predict_false(data->m != NULL || data->ni != NULL)) {
		device_printf(sc->sc_dev, "%s: ni (%p) or m (%p) for idx %d "
		    "in queue %d is not NULL!\n", __func__, data->ni, data->m,
		    ring->cur, ring->qid);
		return EIO;
	}

	/* Prepare TX firmware command. */
	cmd = &ring->cmd[ring->cur];
	cmd->code = IWN_CMD_TX_DATA;
	cmd->flags = 0;
	cmd->qid = ring->qid;
	cmd->idx = ring->cur;

	tx = (struct iwn_cmd_data *)cmd->data;
	tx->len = htole16(totlen);

	/* Set physical address of "scratch area". */
	tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
	tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		tx->flags |= htole32(IWN_TX_NEED_PADDING);
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	/* Copy 802.11 header in TX command. */
	memcpy((uint8_t *)(tx + 1), wh, hdrlen);

	/* Trim 802.11 header.
 */
	m_adj(m, hdrlen);

	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__, error);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			return ENOBUFS;
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			/* XXX fix this */
			/*
			 * NB: Do not return error;
			 * original mbuf does not exist anymore.
			 */
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n",
			    __func__, error);
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			ieee80211_free_node(ni);
			m_freem(m);
			return 0;
		}
	}

	/* Ownership of m/ni passes to the ring slot; released at TX done. */
	data->m = m;
	data->ni = ni;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d "
	    "plcp %d\n",
	    __func__, ring->qid, ring->cur, totlen, nsegs, tx->rate);

	/* Fill TX descriptor. */
	desc->nsegs = 1;
	if (m->m_len != 0)
		desc->nsegs += nsegs;
	/* First DMA segment is used by the TX command. */
	desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
	desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
	    (4 + sizeof (*tx) + hdrlen + pad) << 4);
	/* Other DMA segments are for data payload. */
	seg = &segs[0];
	for (i = 1; i <= nsegs; i++) {
		desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
		desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
		    seg->ds_len << 4);
		seg++;
	}

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Update TX scheduler. */
	if (ring->qid >= sc->firstaggqueue)
		ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWN_TX_RING_HIMARK)
		sc->qfullmsk |= 1 << ring->qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Taskqueue handler that drains the deferred transmit queue (frames queued
 * while waiting for a beacon) and clears sc_beacon_wait when done.
 */
static void
iwn_xmit_task(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ieee80211_node *ni;
	struct mbuf *m;
	int error;
	struct ieee80211_bpf_params p;
	int have_p;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);

	IWN_LOCK(sc);
	/*
	 * Dequeue frames, attempt to transmit,
	 * then disable beaconwait when we're done.
 */
	while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
		have_p = 0;
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;

		/* Get xmit params if appropriate */
		if (ieee80211_get_xmit_params(m, &p) == 0)
			have_p = 1;

		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: m=%p, have_p=%d\n",
		    __func__, m, have_p);

		/* If we have xmit params, use them */
		if (have_p)
			error = iwn_tx_data_raw(sc, m, ni, &p);
		else
			error = iwn_tx_data(sc, m, ni);

		if (error != 0) {
			/* On failure we own the mbuf and node reference. */
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			ieee80211_free_node(ni);
			m_freem(m);
		}
	}

	sc->sc_beacon_wait = 0;
	IWN_UNLOCK(sc);
}

/*
 * raw frame xmit - free node/reference if failed.
 */
static int
iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_softc *sc = ic->ic_softc;
	int error = 0;

	DPRINTF(sc, IWN_DEBUG_XMIT | IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK(sc);
	if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) {
		m_freem(m);
		IWN_UNLOCK(sc);
		return (ENETDOWN);
	}

	/* queue frame if we have to */
	if (sc->sc_beacon_wait) {
		if (iwn_xmit_queue_enqueue(sc, m) != 0) {
			m_freem(m);
			IWN_UNLOCK(sc);
			return (ENOBUFS);
		}
		/* Queued, so just return OK */
		IWN_UNLOCK(sc);
		return (0);
	}

	if (params == NULL) {
		/*
		 * Legacy path; interpret frame contents to decide
		 * precisely how to send the frame.
		 */
		error = iwn_tx_data(sc, m, ni);
	} else {
		/*
		 * Caller supplied explicit parameters to use in
		 * sending the frame.
		 */
		error = iwn_tx_data_raw(sc, m, ni, params);
	}
	if (error == 0)
		sc->sc_tx_timer = 5;
	else
		m_freem(m);

	IWN_UNLOCK(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end\n",__func__);

	return (error);
}

/*
 * transmit - don't free mbuf if failed; don't free node ref if failed.
 */
static int
iwn_transmit(struct ieee80211com *ic, struct mbuf *m)
{
	struct iwn_softc *sc = ic->ic_softc;
	struct ieee80211_node *ni;
	int error;

	ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;

	IWN_LOCK(sc);
	if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0 || sc->sc_beacon_wait) {
		IWN_UNLOCK(sc);
		return (ENXIO);
	}

	/* Some TX ring is over its high-water mark; push back. */
	if (sc->qfullmsk) {
		IWN_UNLOCK(sc);
		return (ENOBUFS);
	}

	error = iwn_tx_data(sc, m, ni);
	if (!error)
		sc->sc_tx_timer = 5;
	IWN_UNLOCK(sc);
	return (error);
}

/*
 * Scan watchdog: fires if a scan never completes; restart the interface.
 */
static void
iwn_scan_timeout(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	ic_printf(ic, "scan timeout\n");
	ieee80211_restart_all(ic);
}

/*
 * Per-second TX watchdog callout: counts down sc_tx_timer (armed on each
 * transmit) and restarts the interface if it expires.
 */
static void
iwn_watchdog(void *arg)
{
	struct iwn_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	IWN_LOCK_ASSERT(sc);

	KASSERT(sc->sc_flags & IWN_FLAG_RUNNING, ("not running"));

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			ic_printf(ic, "device timeout\n");
			ieee80211_restart_all(ic);
			return;
		}
	}
	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
}

/* Character-device open: no per-open state needed. */
static int
iwn_cdev_open(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}

/* Character-device close: nothing to tear down. */
static int
iwn_cdev_close(struct cdev *dev, int flags, int type, struct thread *td)
{

	return (0);
}

#ifndef __HAIKU__
5169static int 5170iwn_cdev_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 5171 struct thread *td) 5172{ 5173 int rc; 5174 struct iwn_softc *sc = dev->si_drv1; 5175 struct iwn_ioctl_data *d; 5176 5177 rc = priv_check(td, PRIV_DRIVER); 5178 if (rc != 0) 5179 return (0); 5180 5181 switch (cmd) { 5182 case SIOCGIWNSTATS: 5183 d = (struct iwn_ioctl_data *) data; 5184 IWN_LOCK(sc); 5185 /* XXX validate permissions/memory/etc? */ 5186 rc = copyout(&sc->last_stat, d->dst_addr, sizeof(struct iwn_stats)); 5187 IWN_UNLOCK(sc); 5188 break; 5189 case SIOCZIWNSTATS: 5190 IWN_LOCK(sc); 5191 memset(&sc->last_stat, 0, sizeof(struct iwn_stats)); 5192 IWN_UNLOCK(sc); 5193 break; 5194 default: 5195 rc = EINVAL; 5196 break; 5197 } 5198 return (rc); 5199} 5200#endif 5201 5202static int 5203iwn_ioctl(struct ieee80211com *ic, u_long cmd, void *data) 5204{ 5205 5206 return (ENOTTY); 5207} 5208 5209static void 5210iwn_parent(struct ieee80211com *ic) 5211{ 5212 struct iwn_softc *sc = ic->ic_softc; 5213 struct ieee80211vap *vap; 5214 int error; 5215 5216 if (ic->ic_nrunning > 0) { 5217 error = iwn_init(sc); 5218 5219 switch (error) { 5220 case 0: 5221 ieee80211_start_all(ic); 5222 break; 5223 case 1: 5224 /* radio is disabled via RFkill switch */ 5225 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task); 5226 break; 5227 default: 5228 vap = TAILQ_FIRST(&ic->ic_vaps); 5229 if (vap != NULL) 5230 ieee80211_stop(vap); 5231 break; 5232 } 5233 } else 5234 iwn_stop(sc); 5235} 5236 5237/* 5238 * Send a command to the firmware. 
5239 */ 5240static int 5241iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) 5242{ 5243 struct iwn_tx_ring *ring; 5244 struct iwn_tx_desc *desc; 5245 struct iwn_tx_data *data; 5246 struct iwn_tx_cmd *cmd; 5247 struct mbuf *m; 5248 bus_addr_t paddr; 5249 int totlen, error; 5250 int cmd_queue_num; 5251 5252 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5253 5254 if (async == 0) 5255 IWN_LOCK_ASSERT(sc); 5256 5257 if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) 5258 cmd_queue_num = IWN_PAN_CMD_QUEUE; 5259 else 5260 cmd_queue_num = IWN_CMD_QUEUE_NUM; 5261 5262 ring = &sc->txq[cmd_queue_num]; 5263 desc = &ring->desc[ring->cur]; 5264 data = &ring->data[ring->cur]; 5265 totlen = 4 + size; 5266 5267 if (size > sizeof cmd->data) { 5268 /* Command is too large to fit in a descriptor. */ 5269 if (totlen > MCLBYTES) 5270 return EINVAL; 5271 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 5272 if (m == NULL) 5273 return ENOMEM; 5274 cmd = mtod(m, struct iwn_tx_cmd *); 5275 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 5276 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 5277 if (error != 0) { 5278 m_freem(m); 5279 return error; 5280 } 5281 data->m = m; 5282 } else { 5283 cmd = &ring->cmd[ring->cur]; 5284 paddr = data->cmd_paddr; 5285 } 5286 5287 cmd->code = code; 5288 cmd->flags = 0; 5289 cmd->qid = ring->qid; 5290 cmd->idx = ring->cur; 5291 memcpy(cmd->data, buf, size); 5292 5293 desc->nsegs = 1; 5294 desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); 5295 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); 5296 5297 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", 5298 __func__, iwn_intr_str(cmd->code), cmd->code, 5299 cmd->flags, cmd->qid, cmd->idx); 5300 5301 if (size > sizeof cmd->data) { 5302 bus_dmamap_sync(ring->data_dmat, data->map, 5303 BUS_DMASYNC_PREWRITE); 5304 } else { 5305 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 5306 BUS_DMASYNC_PREWRITE); 5307 } 5308 
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 5309 BUS_DMASYNC_PREWRITE); 5310 5311 /* Kick command ring. */ 5312 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; 5313 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 5314 5315 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5316 5317 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); 5318} 5319 5320static int 5321iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 5322{ 5323 struct iwn4965_node_info hnode; 5324 caddr_t src, dst; 5325 5326 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5327 5328 /* 5329 * We use the node structure for 5000 Series internally (it is 5330 * a superset of the one for 4965AGN). We thus copy the common 5331 * fields before sending the command. 5332 */ 5333 src = (caddr_t)node; 5334 dst = (caddr_t)&hnode; 5335 memcpy(dst, src, 48); 5336 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ 5337 memcpy(dst + 48, src + 72, 20); 5338 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); 5339} 5340 5341static int 5342iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) 5343{ 5344 5345 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 5346 5347 /* Direct mapping. */ 5348 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); 5349} 5350 5351static int 5352iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) 5353{ 5354 struct iwn_node *wn = (void *)ni; 5355 struct ieee80211_rateset *rs; 5356 struct iwn_cmd_link_quality linkq; 5357 int i, rate, txrate; 5358 int is_11n; 5359 5360 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5361 5362 memset(&linkq, 0, sizeof linkq); 5363 linkq.id = wn->id; 5364 linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc); 5365 linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc); 5366 5367 linkq.ampdu_max = 32; /* XXX negotiated? 
 */
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	DPRINTF(sc, IWN_DEBUG_XMIT,
	    "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
	    __func__,
	    linkq.antmsk_1stream,
	    linkq.antmsk_2stream,
	    sc->ntxchains);

	/*
	 * Are we using 11n rates? Ensure the channel is
	 * 11n _and_ we have some 11n rates, or don't
	 * try.
	 */
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
		rs = (struct ieee80211_rateset *) &ni->ni_htrates;
		is_11n = 1;
	} else {
		rs = &ni->ni_rates;
		is_11n = 0;
	}

	/* Start at highest available bit-rate. */
	/*
	 * XXX this is all very dirty!
	 */
	if (is_11n)
		txrate = ni->ni_htrates.rs_nrates - 1;
	else
		txrate = rs->rs_nrates - 1;
	for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
		uint32_t plcp;

		/*
		 * XXX TODO: ensure the last two slots are the two lowest
		 * rate entries, just for now.
		 */
		if (i == 14 || i == 15)
			txrate = 0;

		if (is_11n)
			rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
		else
			rate = IEEE80211_RV(rs->rs_rates[txrate]);

		/* Do rate -> PLCP config mapping */
		plcp = iwn_rate_to_plcp(sc, ni, rate);
		linkq.retry[i] = plcp;
		DPRINTF(sc, IWN_DEBUG_XMIT,
		    "%s: i=%d, txrate=%d, rate=0x%02x, plcp=0x%08x\n",
		    __func__,
		    i,
		    txrate,
		    rate,
		    le32toh(plcp));

		/*
		 * The mimo field is an index into the table which
		 * indicates the first index where it and subsequent entries
		 * will not be using MIMO.
		 *
		 * Since we're filling linkq from 0..15 and we're filling
		 * from the highest MCS rates to the lowest rates, if we
		 * _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
		 * the next entry.) That way if the next entry is a non-MIMO
		 * entry, we're already pointing at it.
		 */
		if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
		    IEEE80211_RV(le32toh(plcp)) > 7)
			linkq.mimo = i + 1;

		/* Next retry at immediate lower bit-rate. */
		if (txrate > 0)
			txrate--;
	}
	/*
	 * If we reached the end of the list and indeed we hit
	 * all MIMO rates (eg 5300 doing MCS23-15) then yes,
	 * set mimo to 15. Setting it to 16 panics the firmware.
	 */
	if (linkq.mimo > 15)
		linkq.mimo = 15;

	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: mimo = %d\n", __func__, linkq.mimo);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
}

/*
 * Broadcast node is used to send group-addressed and management frames.
 */
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwn_node_info node;
	struct iwn_cmd_link_quality linkq;
	uint8_t txant;
	int i, error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];

	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
	node.id = sc->broadcast_id;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
	if ((error = ops->add_node(sc, &node, async)) != 0)
		return error;

	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);

	memset(&linkq, 0, sizeof linkq);
	linkq.id = sc->broadcast_id;
	linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
	linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
	linkq.ampdu_max = 64;
	linkq.ampdu_threshold = 3;
	linkq.ampdu_limit = htole16(4000);	/* 4ms */

	/* Use lowest mandatory bit-rate. */
	/* XXX rate table lookup?
*/ 5496 if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) 5497 linkq.retry[0] = htole32(0xd); 5498 else 5499 linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK); 5500 linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant)); 5501 /* Use same bit-rate for all TX retries. */ 5502 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { 5503 linkq.retry[i] = linkq.retry[0]; 5504 } 5505 5506 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5507 5508 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); 5509} 5510 5511static int 5512iwn_updateedca(struct ieee80211com *ic) 5513{ 5514#define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 5515 struct iwn_softc *sc = ic->ic_softc; 5516 struct iwn_edca_params cmd; 5517 struct chanAccParams chp; 5518 int aci; 5519 5520 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); 5521 5522 ieee80211_wme_ic_getparams(ic, &chp); 5523 5524 memset(&cmd, 0, sizeof cmd); 5525 cmd.flags = htole32(IWN_EDCA_UPDATE); 5526 5527 IEEE80211_LOCK(ic); 5528 for (aci = 0; aci < WME_NUM_AC; aci++) { 5529 const struct wmeParams *ac = &chp.cap_wmeParams[aci]; 5530 cmd.ac[aci].aifsn = ac->wmep_aifsn; 5531 cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin)); 5532 cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax)); 5533 cmd.ac[aci].txoplimit = 5534 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 5535 } 5536 IEEE80211_UNLOCK(ic); 5537 5538 IWN_LOCK(sc); 5539 (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 5540 IWN_UNLOCK(sc); 5541 5542 DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); 5543 5544 return 0; 5545#undef IWN_EXP2 5546} 5547 5548static void 5549iwn_set_promisc(struct iwn_softc *sc) 5550{ 5551 struct ieee80211com *ic = &sc->sc_ic; 5552 uint32_t promisc_filter; 5553 5554 promisc_filter = IWN_FILTER_CTL | IWN_FILTER_PROMISC; 5555 if (ic->ic_promisc > 0 || ic->ic_opmode == IEEE80211_M_MONITOR) 5556 sc->rxon->filter |= htole32(promisc_filter); 5557 else 5558 sc->rxon->filter &= ~htole32(promisc_filter); 5559} 5560 5561static 
void
iwn_update_promisc(struct ieee80211com *ic)
{
	struct iwn_softc *sc = ic->ic_softc;
	int error;

	/*
	 * Monitor mode already receives everything; no RXON update needed.
	 */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return;		/* nothing to do */

	IWN_LOCK(sc);
	if (!(sc->sc_flags & IWN_FLAG_RUNNING)) {
		IWN_UNLOCK(sc);
		return;
	}

	/* Refresh the filter bits, then push the new RXON to the firmware. */
	iwn_set_promisc(sc);
	if ((error = iwn_send_rxon(sc, 1, 1)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not send RXON, error %d\n",
		    __func__, error);
	}
	IWN_UNLOCK(sc);
}

static void
iwn_update_mcast(struct ieee80211com *ic)
{
	/* Ignore */
}

/*
 * Program a blink pattern for the given LED: clear microcode ownership
 * of the LED, then send a SET_LED command with the requested on/off
 * durations.
 */
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	struct iwn_cmd_led led;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

#if 0
	/* XXX don't set LEDs during scan? */
	if (sc->sc_is_scanning)
		return;
#endif

	/* Clear microcode LED ownership. */
	IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);

	led.which = which;
	led.unit = htole32(10000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}

/*
 * Set the critical temperature at which the firmware will stop the radio
 * and notify us.
 */
static int
iwn_set_critical_temp(struct iwn_softc *sc)
{
	struct iwn_critical_temp crit;
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);

	/*
	 * Threshold is 110 degC.  NOTE(review): the 5150 value is derived
	 * from sc->temp_off with a -5 scale factor — presumably a
	 * device-specific raw representation; confirm against the vendor
	 * driver.  The 4965 wants Kelvin, the rest plain degC.
	 */
	if (sc->hw_type == IWN_HW_REV_TYPE_5150)
		temp = (IWN_CTOK(110) - sc->temp_off) * -5;
	else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
		temp = IWN_CTOK(110);
	else
		temp = 110;
	memset(&crit, 0, sizeof crit);
	crit.tempR = htole32(temp);
	DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
	return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
}

/*
 * Send beacon timing synchronization (beacon/listen intervals and the
 * time until the next beacon) to the firmware for the given node.
 */
static int
iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
{
	struct iwn_cmd_timing cmd;
	uint64_t val, mod;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
	cmd.bintval = htole16(ni->ni_intval);
	cmd.lintval = htole16(10);

	/* Compute remaining time until next beacon. */
	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
	mod = le64toh(cmd.tstamp) % val;
	cmd.binitval = htole32((uint32_t)(val - mod));

	DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));

	return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
}

/*
 * 4965 only: re-run TX power calibration when the measured temperature
 * has drifted 3 degC or more from the last calibration point.
 */
static void
iwn4965_power_calibration(struct iwn_softc *sc, int temp)
{

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Adjust TX power if need be (delta >= 3 degC). */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
	    __func__, sc->temp, temp);
	if (abs(temp - sc->temp) >= 3) {
		/* Record temperature of last calibration.
 */
		sc->temp = temp;
		(void)iwn4965_set_txpower(sc, 1);
	}
}

/*
 * Set TX power for current channel (each rate has its own power settings).
 * This function takes into account the regulatory information from EEPROM,
 * the current temperature and the current voltage.
 */
static int
iwn4965_set_txpower(struct iwn_softc *sc, int async)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n)	\
	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n)	\
	((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))

	/* Per-attenuation-group divisor used for temperature compensation. */
	static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
	struct iwn_ucode_info *uc = &sc->ucode_info;
	struct iwn4965_cmd_txpower cmd;
	struct iwn4965_eeprom_chan_samples *chans;
	const uint8_t *rf_gain, *dsp_gain;
	int32_t vdiff, tdiff;
	int i, is_chan_5ghz, c, grp, maxpwr;
	uint8_t chan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Retrieve current channel from last RXON. */
	chan = sc->rxon->chan;
	is_chan_5ghz = (sc->rxon->flags & htole32(IWN_RXON_24GHZ)) == 0;
	DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
	    chan);

	memset(&cmd, 0, sizeof cmd);
	cmd.band = is_chan_5ghz ? 0 : 1;
	cmd.chan = chan;

	/* Pick band-specific power limit and gain tables. */
	if (is_chan_5ghz) {
		maxpwr = sc->maxpwr5GHz;
		rf_gain = iwn4965_rf_gain_5ghz;
		dsp_gain = iwn4965_dsp_gain_5ghz;
	} else {
		maxpwr = sc->maxpwr2GHz;
		rf_gain = iwn4965_rf_gain_2ghz;
		dsp_gain = iwn4965_dsp_gain_2ghz;
	}

	/* Compute voltage compensation.
 */
	vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
	if (vdiff > 0)
		vdiff *= 2;
	/* Compensation is only applied for small drifts. */
	if (abs(vdiff) > 2)
		vdiff = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
	    __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);

	/* Get channel attenuation group. */
	if (chan <= 20)		/* 1-20 */
		grp = 4;
	else if (chan <= 43)	/* 34-43 */
		grp = 0;
	else if (chan <= 70)	/* 44-70 */
		grp = 1;
	else if (chan <= 124)	/* 71-124 */
		grp = 2;
	else			/* 125-200 */
		grp = 3;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);

	/* Get channel sub-band. */
	for (i = 0; i < IWN_NBANDS; i++)
		if (sc->bands[i].lo != 0 &&
		    sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
			break;
	if (i == IWN_NBANDS)	/* Can't happen in real-life. */
		return EINVAL;
	chans = sc->bands[i].chans;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: chan %d sub-band=%d\n", __func__, chan, i);

	/*
	 * For each of the two TX chains, interpolate the EEPROM calibration
	 * samples of the sub-band's two boundary channels at this channel.
	 */
	for (c = 0; c < 2; c++) {
		uint8_t power, gain, temp;
		int maxchpwr, pwr, ridx, idx;

		power = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].power,
		    chans[1].num, chans[1].samples[c][1].power, 1);
		gain = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].gain,
		    chans[1].num, chans[1].samples[c][1].gain, 1);
		temp = interpolate(chan,
		    chans[0].num, chans[0].samples[c][1].temp,
		    chans[1].num, chans[1].samples[c][1].temp, 1);
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
		    __func__, c, power, gain, temp);

		/* Compute temperature compensation.
 */
		tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
		DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
		    "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
		    __func__, tdiff, sc->temp, temp);

		/* Compute a gain table index for every rate. */
		for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
			/* Convert dBm to half-dBm. */
			maxchpwr = sc->maxpwr[chan] * 2;
			if ((ridx / 8) & 1)
				maxchpwr -= 6;	/* MIMO 2T: -3dB */

			pwr = maxpwr;

			/* Adjust TX power based on rate. */
			if ((ridx % 8) == 5)
				pwr -= 15;	/* OFDM48: -7.5dB */
			else if ((ridx % 8) == 6)
				pwr -= 17;	/* OFDM54: -8.5dB */
			else if ((ridx % 8) == 7)
				pwr -= 20;	/* OFDM60: -10dB */
			else
				pwr -= 10;	/* Others: -5dB */

			/* Do not exceed channel max TX power. */
			if (pwr > maxchpwr)
				pwr = maxchpwr;

			/* Combine all compensation terms into a gain index. */
			idx = gain - (pwr - power) - tdiff - vdiff;
			if ((ridx / 8) & 1)	/* MIMO */
				idx += (int32_t)le32toh(uc->atten[grp][c]);

			if (cmd.band == 0)
				idx += 9;	/* 5GHz */
			if (ridx == IWN_RIDX_MAX)
				idx += 5;	/* CCK */

			/* Make sure idx stays in a valid range.
 */
			if (idx < 0)
				idx = 0;
			else if (idx > IWN4965_MAX_PWR_INDEX)
				idx = IWN4965_MAX_PWR_INDEX;

			DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
			    "%s: Tx chain %d, rate idx %d: power=%d\n",
			    __func__, c, ridx, idx);
			cmd.power[ridx].rf_gain[c] = rf_gain[idx];
			cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
		}
	}

	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
	    "%s: set tx power for chan %d\n", __func__, chan);
	return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);

#undef interpolate
#undef fdivround
}

/*
 * 5000 Series and newer: the firmware performs TX power calibration
 * itself, so only the global power limit is sent.
 */
static int
iwn5000_set_txpower(struct iwn_softc *sc, int async)
{
	struct iwn5000_cmd_txpower cmd;
	int cmdid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * TX power calibration is handled automatically by the firmware
	 * for 5000 Series.
	 */
	memset(&cmd, 0, sizeof cmd);
	cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM;	/* 16 dBm */
	cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
	cmd.srv_limit = IWN5000_TXPOWER_AUTO;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
	    "%s: setting TX power; rev=%d\n",
	    __func__,
	    IWN_UCODE_API(sc->ucode_rev));
	/* API v1 firmware uses a different command id for this command. */
	if (IWN_UCODE_API(sc->ucode_rev) == 1)
		cmdid = IWN_CMD_TXPOWER_DBM_V1;
	else
		cmdid = IWN_CMD_TXPOWER_DBM;
	return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async);
}

/*
 * Retrieve the maximum RSSI (in dBm) among receivers.
 */
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t mask, agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Extract the connected-antenna mask and the AGC value. */
	mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
	agc = (le16toh(phy->agc) >> 7) & 0x7f;

	/* Take the maximum RSSI among the reported antennas. */
	rssi = 0;
	if (mask & IWN_ANT_A)
		rssi = MAX(rssi, phy->rssi[0]);
	if (mask & IWN_ANT_B)
		rssi = MAX(rssi, phy->rssi[2]);
	if (mask & IWN_ANT_C)
		rssi = MAX(rssi, phy->rssi[4]);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
	    mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
	    rssi - agc - IWN_RSSI_TO_DBM);
	/* Convert to dBm by subtracting AGC and a fixed offset. */
	return rssi - agc - IWN_RSSI_TO_DBM;
}

static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
	struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
	uint8_t agc;
	int rssi;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	agc = (le32toh(phy->agc) >> 9) & 0x7f;

	/* Take the maximum RSSI among the three receivers. */
	rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
	    le16toh(phy->rssi[1]) & 0xff);
	rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);

	DPRINTF(sc, IWN_DEBUG_RECV,
	    "%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
	    phy->rssi[0], phy->rssi[1], phy->rssi[2],
	    rssi - agc - IWN_RSSI_TO_DBM);
	return rssi - agc - IWN_RSSI_TO_DBM;
}

/*
 * Retrieve the average noise (in dBm) among receivers.
 */
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
	int i, total, nbant, noise;

	/* Average only antennas that reported a non-zero noise value. */
	total = nbant = 0;
	for (i = 0; i < 3; i++) {
		if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
			continue;
		total += noise;
		nbant++;
	}
	/* There should be at least one antenna but check anyway. */
	return (nbant == 0) ?
-127 : (total / nbant) - 107;
}

/*
 * Compute temperature (in degC) from last received statistics.
 */
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
	struct iwn_ucode_info *uc = &sc->ucode_info;
	int32_t r1, r2, r3, r4, temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* R1-R3 are calibration points from the ucode, R4 the raw reading. */
	r1 = le32toh(uc->temp[0].chan20MHz);
	r2 = le32toh(uc->temp[1].chan20MHz);
	r3 = le32toh(uc->temp[2].chan20MHz);
	r4 = le32toh(sc->rawtemp);

	if (r1 == r3)	/* Prevents division by 0 (should not happen). */
		return 0;

	/* Sign-extend 23-bit R4 value to 32-bit. */
	r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
	/* Compute temperature in Kelvin. */
	temp = (259 * (r4 - r2)) / (r3 - r1);
	temp = (temp * 97) / 100 + 8;

	DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
	    IWN_KTOC(temp));
	return IWN_KTOC(temp);
}

static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
	int32_t temp;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/*
	 * Temperature is not used by the driver for 5000 Series because
	 * TX power calibration is handled by firmware.
	 */
	temp = le32toh(sc->rawtemp);
	if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
		/*
		 * NOTE(review): 5150 raw value appears to use an inverted,
		 * offset scale (/-5 plus temp_off) — confirm against the
		 * vendor driver.
		 */
		temp = (temp / -5) + sc->temp_off;
		temp = IWN_KTOC(temp);
	}
	return temp;
}

/*
 * Initialize sensitivity calibration state machine.
 */
static int
iwn_init_sensitivity(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t flags;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Reset calibration state machine. */
	memset(calib, 0, sizeof (*calib));
	calib->state = IWN_CALIB_STATE_INIT;
	calib->cck_state = IWN_CCK_STATE_HIFA;
	/* Set initial correlation values.
 */
	calib->ofdm_x1 = sc->limits->min_ofdm_x1;
	calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
	calib->ofdm_x4 = sc->limits->min_ofdm_x4;
	calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
	/* NOTE(review): cck_x4 starts at a fixed 125, not a limits member. */
	calib->cck_x4 = 125;
	calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
	calib->energy_cck = sc->limits->energy_cck;

	/* Write initial sensitivity. */
	if ((error = iwn_send_sensitivity(sc)) != 0)
		return error;

	/* Write initial gains. */
	if ((error = ops->init_gains(sc)) != 0)
		return error;

	/* Request statistics at each beacon interval. */
	flags = 0;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
}

/*
 * Collect noise and RSSI statistics for the first 20 beacons received
 * after association and use them to determine connected antennas and
 * to set differential gains.
 */
static void
iwn_collect_noise(struct iwn_softc *sc,
    const struct iwn_rx_general_stats *stats)
{
	struct iwn_ops *ops = &sc->ops;
	struct iwn_calib_state *calib = &sc->calib;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t val;
	int i;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Accumulate RSSI and noise for all 3 antennas. */
	for (i = 0; i < 3; i++) {
		calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
		calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
	}
	/* NB: We update differential gains only once after 20 beacons. */
	if (++calib->nbeacons < 20)
		return;

	/* Determine highest average RSSI. */
	val = MAX(calib->rssi[0], calib->rssi[1]);
	val = MAX(calib->rssi[2], val);

	/* Determine which antennas are connected.
 */
	sc->chainmask = sc->rxchainmask;
	/* Drop antennas whose accumulated RSSI is >15 below the best (x20). */
	for (i = 0; i < 3; i++)
		if (val - calib->rssi[i] > 15 * 20)
			sc->chainmask &= ~(1 << i);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
	    "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
	    __func__, sc->rxchainmask, sc->chainmask);

	/* If none of the TX antennas are connected, keep at least one. */
	if ((sc->chainmask & sc->txchainmask) == 0)
		sc->chainmask |= IWN_LSB(sc->txchainmask);

	(void)ops->set_gains(sc);
	calib->state = IWN_CALIB_STATE_RUN;

#ifdef notyet
	/* XXX Disable RX chains with no antennas connected. */
	sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
	if (sc->sc_is_scanning)
		device_printf(sc->sc_dev,
		    "%s: is_scanning set, before RXON\n",
		    __func__);
	(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
#endif

	/* Enable power-saving mode if requested by user. */
	if (ic->ic_flags & IEEE80211_F_PMGTON)
		(void)iwn_set_pslevel(sc, 0, 3, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

}

/*
 * 4965: send the initial differential-gain calibration command.
 */
static int
iwn4965_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib_gain cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Differential gains initially set to 0 for all 3 antennas.
 */
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * 5000 Series: reset the noise-gain calibration in the firmware.
 */
static int
iwn5000_init_gains(struct iwn_softc *sc)
{
	struct iwn_phy_calib cmd;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->reset_noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: setting initial differential gains\n", __func__);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * 4965: compute and send per-antenna differential gains based on the
 * noise figures accumulated by iwn_collect_noise().
 */
static int
iwn4965_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, delta, noise;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Get minimal noise among connected antennas. */
	noise = INT_MAX;	/* NB: There's at least one antenna. */
	for (i = 0; i < 3; i++)
		if (sc->chainmask & (1 << i))
			noise = MIN(calib->noise[i], noise);

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
	/* Set differential gains for connected antennas. */
	for (i = 0; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* Compute attenuation (in unit of 1.5dB). */
			delta = (noise - (int32_t)calib->noise[i]) / 30;
			/* NB: delta <= 0 */
			/* Limit to [-4.5dB,0].
 */
			cmd.gain[i] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * 5000 Series: compute and send differential gains relative to the
 * first available RX antenna.
 */
static int
iwn5000_set_gains(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_phy_calib_gain cmd;
	int i, ant, div, delta;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* We collected 20 beacons and !=6050 need a 1.5 factor. */
	div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = sc->noise_gain;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Get first available RX antenna as referential. */
	ant = IWN_LSB(sc->rxchainmask);
	/* Set differential gains for other antennas. */
	for (i = ant + 1; i < 3; i++) {
		if (sc->chainmask & (1 << i)) {
			/* The delta is relative to antenna "ant". */
			delta = ((int32_t)calib->noise[ant] -
			    (int32_t)calib->noise[i]) / div;
			/* Limit to [-4.5dB,+4.5dB]. */
			cmd.gain[i - 1] = MIN(abs(delta), 3);
			if (delta < 0)
				cmd.gain[i - 1] |= 1 << 2;	/* sign bit */
		}
	}
	DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
	    "setting differential gains Ant B/C: %x/%x (%x)\n",
	    cmd.gain[0], cmd.gain[1], sc->chainmask);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}

/*
 * Tune RF RX sensitivity based on the number of false alarms detected
 * during the last beacon period.
 */
static void
iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
{
/* Saturating increment/decrement; both set needs_update on any change. */
#define inc(val, inc, max)			\
	if ((val) < (max)) {			\
		if ((val) < (max) - (inc))	\
			(val) += (inc);		\
		else				\
			(val) = (max);		\
		needs_update = 1;		\
	}
#define dec(val, dec, min)			\
	if ((val) > (min)) {			\
		if ((val) > (min) + (dec))	\
			(val) -= (dec);		\
		else				\
			(val) = (min);		\
		needs_update = 1;		\
	}

	const struct iwn_sensitivity_limits *limits = sc->limits;
	struct iwn_calib_state *calib = &sc->calib;
	uint32_t val, rxena, fa;
	uint32_t energy[3], energy_min;
	uint8_t noise[3], noise_ref;
	int i, needs_update = 0;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Check that we've been enabled long enough. */
	if ((rxena = le32toh(stats->general.load)) == 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
		return;
	}

	/* Compute number of false alarms since last call for OFDM. */
	fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
	fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM high false alarm count: %u\n", __func__, fa);
		inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
		inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
		inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
		inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity.
 */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: OFDM low false alarm count: %u\n", __func__, fa);
		dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
		dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
		dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
		dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
	}

	/* Compute maximum noise among 3 receivers. */
	for (i = 0; i < 3; i++)
		noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
	val = MAX(noise[0], noise[1]);
	val = MAX(noise[2], val);
	/* Insert it into our samples table. */
	calib->noise_samples[calib->cur_noise_sample] = val;
	calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;

	/* Compute maximum noise among last 20 samples. */
	noise_ref = calib->noise_samples[0];
	for (i = 1; i < 20; i++)
		noise_ref = MAX(noise_ref, calib->noise_samples[i]);

	/*
	 * Compute maximum energy among 3 receivers.
	 * NOTE(review): MIN/MAX look inverted relative to the comments here
	 * and below — the raw energy values appear to be on an inverted
	 * scale (smaller raw value = stronger signal).  Confirm against the
	 * vendor driver before changing.
	 */
	for (i = 0; i < 3; i++)
		energy[i] = le32toh(stats->general.energy[i]);
	val = MIN(energy[0], energy[1]);
	val = MIN(energy[2], val);
	/* Insert it into our samples table. */
	calib->energy_samples[calib->cur_energy_sample] = val;
	calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;

	/* Compute minimum energy among last 10 samples. */
	energy_min = calib->energy_samples[0];
	for (i = 1; i < 10; i++)
		energy_min = MAX(energy_min, calib->energy_samples[i]);
	energy_min += 6;

	/* Compute number of false alarms since last call for CCK. */
	fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
	fa += le32toh(stats->cck.fa) - calib->fa_cck;
	fa *= 200 * IEEE80211_DUR_TU;	/* 200TU */

	if (fa > 50 * rxena) {
		/* High false alarm count, decrease sensitivity.
 */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK high false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_HIFA;
		calib->low_fa = 0;

		if (calib->cck_x4 > 160) {
			calib->noise_ref = noise_ref;
			if (calib->energy_cck > 2)
				dec(calib->energy_cck, 2, energy_min);
		}
		if (calib->cck_x4 < 160) {
			/* Jump straight past the 160 threshold. */
			calib->cck_x4 = 161;
			needs_update = 1;
		} else
			inc(calib->cck_x4, 3, limits->max_cck_x4);

		inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);

	} else if (fa < 5 * rxena) {
		/* Low false alarm count, increase sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK low false alarm count: %u\n", __func__, fa);
		calib->cck_state = IWN_CCK_STATE_LOFA;
		calib->low_fa++;

		/* Only adjust once noise has settled or after 100 periods. */
		if (calib->cck_state != IWN_CCK_STATE_INIT &&
		    (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
		    calib->low_fa > 100)) {
			inc(calib->energy_cck, 2, limits->min_energy_cck);
			dec(calib->cck_x4,     3, limits->min_cck_x4);
			dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
		}
	} else {
		/* Not worth to increase or decrease sensitivity. */
		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "%s: CCK normal false alarm count: %u\n", __func__, fa);
		calib->low_fa = 0;
		calib->noise_ref = noise_ref;

		if (calib->cck_state == IWN_CCK_STATE_HIFA) {
			/* Previous interval had many false alarms.
 */
			dec(calib->energy_cck, 8, energy_min);
		}
		calib->cck_state = IWN_CCK_STATE_INIT;
	}

	/* Push new values to the firmware only if something changed. */
	if (needs_update)
		(void)iwn_send_sensitivity(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

#undef dec
#undef inc
}

/*
 * Send the current sensitivity calibration values to the firmware.
 * Sends only the basic command unless the device supports enhanced
 * sensitivity (IWN_FLAG_ENH_SENS), in which case the extended command
 * with fixed detection slope/intercept values is used.
 */
static int
iwn_send_sensitivity(struct iwn_softc *sc)
{
	struct iwn_calib_state *calib = &sc->calib;
	struct iwn_enhanced_sensitivity_cmd cmd;
	int len;

	memset(&cmd, 0, sizeof cmd);
	/* Default: basic command length; bumped below for enhanced. */
	len = sizeof (struct iwn_sensitivity_cmd);
	cmd.which = IWN_SENSITIVITY_WORKTBL;
	/* OFDM modulation. */
	cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
	cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
	cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
	cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
	cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
	cmd.energy_ofdm_th = htole16(62);
	/* CCK modulation. */
	cmd.corr_cck_x4 = htole16(calib->cck_x4);
	cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
	cmd.energy_cck = htole16(calib->energy_cck);
	/* Barker modulation: use default values. */
	cmd.corr_barker = htole16(190);
	cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
	    calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
	    calib->ofdm_mrc_x4, calib->cck_x4,
	    calib->cck_mrc_x4, calib->energy_cck);

	if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
		goto send;
	/* Enhanced sensitivity settings.
 */
	len = sizeof (struct iwn_enhanced_sensitivity_cmd);
	cmd.ofdm_det_slope_mrc = htole16(668);
	cmd.ofdm_det_icept_mrc = htole16(4);
	cmd.ofdm_det_slope = htole16(486);
	cmd.ofdm_det_icept = htole16(37);
	cmd.cck_det_slope_mrc = htole16(853);
	cmd.cck_det_icept_mrc = htole16(4);
	cmd.cck_det_slope = htole16(476);
	cmd.cck_det_icept = htole16(99);
send:
	return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
}

/*
 * Look at the increase of PLCP errors over time; if it exceeds
 * a programmed threshold then trigger an RF retune.
 */
static void
iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs)
{
	int32_t delta_ofdm, delta_ht, delta_cck;
	struct iwn_calib_state *calib = &sc->calib;
	int delta_ticks, cur_ticks;
	int delta_msec;
	int thresh;

	/*
	 * Calculate the difference between the current and
	 * previous statistics.
	 */
	delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck;
	delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm;
	delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht;

	/*
	 * Calculate the delta in time between successive statistics
	 * messages.  Yes, it can roll over; so we make sure that
	 * this doesn't happen.
	 *
	 * XXX go figure out what to do about rollover
	 * XXX go figure out what to do if ticks rolls over to -ve instead!
	 * XXX go stab signed integer overflow undefined-ness in the face.
	 */
	cur_ticks = ticks;
	delta_ticks = cur_ticks - sc->last_calib_ticks;

	/*
	 * If any are negative, then the firmware likely reset; so just
	 * bail.  We'll pick this up next time.
	 */
	if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0)
		return;

	/*
	 * delta_ticks is in ticks; we need to convert it up to milliseconds
	 * so we can do some useful math with it.
 */
	delta_msec = ticks_to_msecs(delta_ticks);

	/*
	 * Calculate what our threshold is given the current delta_msec.
	 */
	thresh = sc->base_params->plcp_err_threshold * delta_msec;

	DPRINTF(sc, IWN_DEBUG_STATE,
	    "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n",
	    __func__,
	    delta_msec,
	    delta_cck,
	    delta_ofdm,
	    delta_ht,
	    (delta_msec + delta_cck + delta_ofdm + delta_ht),
	    thresh);

	/*
	 * If we need a retune, then schedule a single channel scan
	 * to a channel that isn't the currently active one!
	 *
	 * The math from linux iwlwifi:
	 *
	 * if ((delta * 100 / msecs) > threshold)
	 */
	if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) {
		/* NOTE(review): only logs; the retune itself is not done here. */
		DPRINTF(sc, IWN_DEBUG_ANY,
		    "%s: PLCP error threshold raw (%d) comparison (%d) "
		    "over limit (%d); retune!\n",
		    __func__,
		    (delta_cck + delta_ofdm + delta_ht),
		    (delta_cck + delta_ofdm + delta_ht) * 100,
		    thresh);
	}
}

/*
 * Set STA mode power saving level (between 0 and 5).
 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
 */
static int
iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
{
	struct iwn_pmgt_cmd cmd;
	const struct iwn_pmgt *pmgt;
	uint32_t max, skip_dtim;
	uint32_t reg;
	int i;

	DPRINTF(sc, IWN_DEBUG_PWRSAVE,
	    "%s: dtim=%d, level=%d, async=%d\n",
	    __func__,
	    dtim,
	    level,
	    async);

	/* Select which PS parameters to use.
 */
	if (dtim <= 2)
		pmgt = &iwn_pmgt[0][level];
	else if (dtim <= 10)
		pmgt = &iwn_pmgt[1][level];
	else
		pmgt = &iwn_pmgt[2][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
	if (level == 5)
		cmd.flags |= htole16(IWN_PS_FAST_PD);
	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 4);
	if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S))	/* L0s Entry disabled. */
		cmd.flags |= htole16(IWN_PS_PCI_PMGT);
	/* Timeouts are stored in units of 1024us in the pmgt table. */
	cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
	cmd.txtimeout = htole32(pmgt->txtimeout * 1024);

	if (dtim == 0) {
		dtim = 1;
		skip_dtim = 0;
	} else
		skip_dtim = pmgt->skip_dtim;
	/* Compute the maximum sleep interval, in DTIM units. */
	if (skip_dtim != 0) {
		cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
		max = pmgt->intval[4];
		if (max == (uint32_t)-1)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			max = rounddown(max, dtim);
	} else
		max = dtim;
	for (i = 0; i < 5; i++)
		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));

	DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
	    level);
	return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}

/*
 * Configure basic (legacy) bluetooth coexistence parameters.
 */
static int
iwn_send_btcoex(struct iwn_softc *sc)
{
	struct iwn_bluetooth cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
	cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
	cmd.max_kill = IWN_BT_MAX_KILL_DEF;
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}

static int
iwn_send_advanced_btcoex(struct iwn_softc *sc)
{
	/* 3-wire coexistence lookup table (mirrors linux iwlwifi). */
	static const uint32_t btcoex_3wire[12] = {
		0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
		0xcc00ff28, 0x0000aaaa, 0xcc00aaaa,
0x0000aaaa, 6553 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, 6554 }; 6555 struct iwn6000_btcoex_config btconfig; 6556 struct iwn2000_btcoex_config btconfig2k; 6557 struct iwn_btcoex_priotable btprio; 6558 struct iwn_btcoex_prot btprot; 6559 int error, i; 6560 uint8_t flags; 6561 6562 memset(&btconfig, 0, sizeof btconfig); 6563 memset(&btconfig2k, 0, sizeof btconfig2k); 6564 6565 flags = IWN_BT_FLAG_COEX6000_MODE_3W << 6566 IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2 6567 6568 if (sc->base_params->bt_sco_disable) 6569 flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE; 6570 else 6571 flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE; 6572 6573 flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION; 6574 6575 /* Default flags result is 145 as old value */ 6576 6577 /* 6578 * Flags value has to be review. Values must change if we 6579 * which to disable it 6580 */ 6581 if (sc->base_params->bt_session_2) { 6582 btconfig2k.flags = flags; 6583 btconfig2k.max_kill = 5; 6584 btconfig2k.bt3_t7_timer = 1; 6585 btconfig2k.kill_ack = htole32(0xffff0000); 6586 btconfig2k.kill_cts = htole32(0xffff0000); 6587 btconfig2k.sample_time = 2; 6588 btconfig2k.bt3_t2_timer = 0xc; 6589 6590 for (i = 0; i < 12; i++) 6591 btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]); 6592 btconfig2k.valid = htole16(0xff); 6593 btconfig2k.prio_boost = htole32(0xf0); 6594 DPRINTF(sc, IWN_DEBUG_RESET, 6595 "%s: configuring advanced bluetooth coexistence" 6596 " session 2, flags : 0x%x\n", 6597 __func__, 6598 flags); 6599 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k, 6600 sizeof(btconfig2k), 1); 6601 } else { 6602 btconfig.flags = flags; 6603 btconfig.max_kill = 5; 6604 btconfig.bt3_t7_timer = 1; 6605 btconfig.kill_ack = htole32(0xffff0000); 6606 btconfig.kill_cts = htole32(0xffff0000); 6607 btconfig.sample_time = 2; 6608 btconfig.bt3_t2_timer = 0xc; 6609 6610 for (i = 0; i < 12; i++) 6611 btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); 6612 btconfig.valid = htole16(0xff); 6613 btconfig.prio_boost 
= 0xf0; 6614 DPRINTF(sc, IWN_DEBUG_RESET, 6615 "%s: configuring advanced bluetooth coexistence," 6616 " flags : 0x%x\n", 6617 __func__, 6618 flags); 6619 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, 6620 sizeof(btconfig), 1); 6621 } 6622 6623 if (error != 0) 6624 return error; 6625 6626 memset(&btprio, 0, sizeof btprio); 6627 btprio.calib_init1 = 0x6; 6628 btprio.calib_init2 = 0x7; 6629 btprio.calib_periodic_low1 = 0x2; 6630 btprio.calib_periodic_low2 = 0x3; 6631 btprio.calib_periodic_high1 = 0x4; 6632 btprio.calib_periodic_high2 = 0x5; 6633 btprio.dtim = 0x6; 6634 btprio.scan52 = 0x8; 6635 btprio.scan24 = 0xa; 6636 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 6637 1); 6638 if (error != 0) 6639 return error; 6640 6641 /* Force BT state machine change. */ 6642 memset(&btprot, 0, sizeof btprot); 6643 btprot.open = 1; 6644 btprot.type = 1; 6645 error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 6646 if (error != 0) 6647 return error; 6648 btprot.open = 0; 6649 return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); 6650} 6651 6652static int 6653iwn5000_runtime_calib(struct iwn_softc *sc) 6654{ 6655 struct iwn5000_calib_config cmd; 6656 6657 memset(&cmd, 0, sizeof cmd); 6658 cmd.ucode.once.enable = 0xffffffff; 6659 cmd.ucode.once.start = IWN5000_CALIB_DC; 6660 DPRINTF(sc, IWN_DEBUG_CALIBRATE, 6661 "%s: configuring runtime calibration\n", __func__); 6662 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); 6663} 6664 6665static uint32_t 6666iwn_get_rxon_ht_flags(struct iwn_softc *sc, struct ieee80211_channel *c) 6667{ 6668 struct ieee80211com *ic = &sc->sc_ic; 6669 uint32_t htflags = 0; 6670 6671 if (! 
IEEE80211_IS_CHAN_HT(c))
		return (0);

	htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);

	if (IEEE80211_IS_CHAN_HT40(c)) {
		switch (ic->ic_curhtprotmode) {
		case IEEE80211_HTINFO_OPMODE_HT20PR:
			htflags |= IWN_RXON_HT_MODEPURE40;
			break;
		default:
			htflags |= IWN_RXON_HT_MODEMIXED;
			break;
		}
	}
	if (IEEE80211_IS_CHAN_HT40D(c))
		htflags |= IWN_RXON_HT_HT40MINUS;

	return (htflags);
}

/* Return non-zero if the BSS filter bit is set in the current RXON. */
static int
iwn_check_bss_filter(struct iwn_softc *sc)
{
	return ((sc->rxon->filter & htole32(IWN_FILTER_BSS)) != 0);
}

/* Build and send a 4965-layout RXON_ASSOC command from the cached RXON. */
static int
iwn4965_rxon_assoc(struct iwn_softc *sc, int async)
{
	struct iwn4965_rxon_assoc cmd;
	struct iwn_rxon *rxon = sc->rxon;

	cmd.flags = rxon->flags;
	cmd.filter = rxon->filter;
	cmd.ofdm_mask = rxon->ofdm_mask;
	cmd.cck_mask = rxon->cck_mask;
	cmd.ht_single_mask = rxon->ht_single_mask;
	cmd.ht_dual_mask = rxon->ht_dual_mask;
	cmd.rxchain = rxon->rxchain;
	cmd.reserved = 0;

	return (iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &cmd, sizeof(cmd), async));
}

/* Build and send a 5000-layout RXON_ASSOC command from the cached RXON. */
static int
iwn5000_rxon_assoc(struct iwn_softc *sc, int async)
{
	struct iwn5000_rxon_assoc cmd;
	struct iwn_rxon *rxon = sc->rxon;

	cmd.flags = rxon->flags;
	cmd.filter = rxon->filter;
	cmd.ofdm_mask = rxon->ofdm_mask;
	cmd.cck_mask = rxon->cck_mask;
	cmd.reserved1 = 0;
	cmd.ht_single_mask = rxon->ht_single_mask;
	cmd.ht_dual_mask = rxon->ht_dual_mask;
	cmd.ht_triple_mask = rxon->ht_triple_mask;
	cmd.reserved2 = 0;
	cmd.rxchain = rxon->rxchain;
	cmd.acquisition = rxon->acquisition;
	cmd.reserved3 = 0;

	return (iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &cmd, sizeof(cmd), async));
}

/*
 * Push the current RXON state to the firmware: either a lightweight
 * RXON_ASSOC update (when 'assoc' and the BSS filter is already set) or
 * a full RXON.  A full RXON wipes the firmware node table, so the
 * broadcast node is re-added; TX power is reprogrammed in both cases.
 */
static int
iwn_send_rxon(struct iwn_softc *sc, int assoc, int async)
{
	struct iwn_ops *ops = &sc->ops;
	int error;

	IWN_LOCK_ASSERT(sc);

	if (assoc && iwn_check_bss_filter(sc) != 0) {
		error = ops->rxon_assoc(sc, async);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: RXON_ASSOC command failed, error %d\n",
			    __func__, error);
			return (error);
		}
	} else {
		if (sc->sc_is_scanning)
			device_printf(sc->sc_dev,
			    "%s: is_scanning set, before RXON\n",
			    __func__);

		error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, async);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: RXON command failed, error %d\n",
			    __func__, error);
			return (error);
		}

		/*
		 * Reconfiguring RXON clears the firmware nodes table so
		 * we must add the broadcast node again.
		 */
		if (iwn_check_bss_filter(sc) == 0 &&
		    (error = iwn_add_broadcast_node(sc, async)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not add broadcast node, error %d\n",
			    __func__, error);
			return (error);
		}
	}

	/* Configuration has changed, set TX power accordingly. */
	if ((error = ops->set_txpower(sc, async)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set TX power, error %d\n",
		    __func__, error);
		return (error);
	}

	return (0);
}

/*
 * Set up the adapter's initial configuration: calibration, TX chains,
 * bluetooth coexistence, RXON, critical temperature and power saving.
 */
static int
iwn_config(struct iwn_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	const uint8_t *macaddr;
	uint32_t txmask;
	uint16_t rxchain;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* The two temperature-offset calibrations are mutually exclusive. */
	if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
	    && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
		device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are"
		    " exclusive each together. Review NIC config file. Conf"
		    " : 0x%08x Flags : 0x%08x \n", __func__,
		    sc->base_params->calib_need,
		    (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
		    IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
		return (EINVAL);
	}

	/* Compute temperature calib if needed. Will be send by send calib */
	if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
		error = iwn5000_temp_offset_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set temperature offset\n", __func__);
			return (error);
		}
	} else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
		error = iwn5000_temp_offset_calibv2(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not compute temperature offset v2\n",
			    __func__);
			return (error);
		}
	}

	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Configure runtime DC calibration. */
		error = iwn5000_runtime_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure runtime calibration\n",
			    __func__);
			return error;
		}
	}

	/* Configure valid TX chains for >=5000 Series. */
	if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
	    IWN_UCODE_API(sc->ucode_rev) > 1) {
		txmask = htole32(sc->txchainmask);
		DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
		    "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
		error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
		    sizeof txmask, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not configure valid TX chains, "
			    "error %d\n", __func__, error);
			return error;
		}
	}

	/* Configure bluetooth coexistence. */
	error = 0;

	/* Configure bluetooth coexistence if needed.
 */
	if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
		error = iwn_send_advanced_btcoex(sc);
	if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
		error = iwn_send_btcoex(sc);

	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure bluetooth coexistence, error %d\n",
		    __func__, error);
		return error;
	}

	/* Set mode, channel, RX filter and enable RX. */
	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	memset(sc->rxon, 0, sizeof (struct iwn_rxon));
	macaddr = vap ? vap->iv_myaddr : ic->ic_macaddr;
	IEEE80211_ADDR_COPY(sc->rxon->myaddr, macaddr);
	IEEE80211_ADDR_COPY(sc->rxon->wlap, macaddr);
	sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);

	sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->rxon->mode = IWN_MODE_STA;
		break;
	case IEEE80211_M_MONITOR:
		sc->rxon->mode = IWN_MODE_MONITOR;
		break;
	default:
		/* Should not get there. */
		break;
	}
	iwn_set_promisc(sc);
	sc->rxon->cck_mask = 0x0f;	/* not yet negotiated */
	sc->rxon->ofdm_mask = 0xff;	/* not yet negotiated */
	sc->rxon->ht_single_mask = 0xff;
	sc->rxon->ht_dual_mask = 0xff;
	sc->rxon->ht_triple_mask = 0xff;
	/*
	 * In active association mode, ensure that
	 * all the receive chains are enabled.
	 *
	 * Since we're not yet doing SMPS, don't allow the
	 * number of idle RX chains to be less than the active
	 * number.
	 */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
	    IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
	sc->rxon->rxchain = htole16(rxchain);
	DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
	    "%s: rxchainmask=0x%x, nrxchains=%d\n",
	    __func__,
	    sc->rxchainmask,
	    sc->nrxchains);

	sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));

	DPRINTF(sc, IWN_DEBUG_RESET,
	    "%s: setting configuration; flags=0x%08x\n",
	    __func__, le32toh(sc->rxon->flags));
	if ((error = iwn_send_rxon(sc, 0, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not send RXON\n",
		    __func__);
		return error;
	}

	if ((error = iwn_set_critical_temp(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set critical temperature\n", __func__);
		return error;
	}

	/* Set power saving level to CAM during initialization. */
	if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set power saving level\n", __func__);
		return error;
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * Return the active-scan dwell time (ms) for channel 'c', scaled by
 * the number of probe requests to send.
 */
static uint16_t
iwn_get_active_dwell_time(struct iwn_softc *sc,
    struct ieee80211_channel *c, uint8_t n_probes)
{
	/* No channel? Default to 2GHz settings */
	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		return (IWN_ACTIVE_DWELL_TIME_2GHZ +
		    IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
	}

	/* 5GHz dwell time */
	return (IWN_ACTIVE_DWELL_TIME_5GHZ +
	    IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
}

/*
 * Limit the total dwell time to 85% of the beacon interval.
 *
 * Returns the dwell time in milliseconds.
 */
static uint16_t
iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = NULL;
	int bintval = 0;

	/* NOTE(review): the dwell_time argument is currently unused. */

	/* bintval is in TU (1.024mS) */
	if (! TAILQ_EMPTY(&ic->ic_vaps)) {
		vap = TAILQ_FIRST(&ic->ic_vaps);
		bintval = vap->iv_bss->ni_intval;
	}

	/*
	 * If it's non-zero, we should calculate the minimum of
	 * it and the DWELL_BASE.
	 *
	 * XXX Yes, the math should take into account that bintval
	 * is 1.024mS, not 1mS..
	 */
	if (bintval > 0) {
		DPRINTF(sc, IWN_DEBUG_SCAN,
		    "%s: bintval=%d\n",
		    __func__,
		    bintval);
		return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
	}

	/* No association context? Default */
	return (IWN_PASSIVE_DWELL_BASE);
}

/* Return the passive-scan dwell time (ms), clamped via iwn_limit_dwell(). */
static uint16_t
iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
{
	uint16_t passive;

	if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
	} else {
		passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
	}

	/* Clamp to the beacon interval if we're associated */
	return (iwn_limit_dwell(sc, passive));
}

/*
 * Build and send a firmware scan command for a single channel.
 * Note: the 'c' argument is overridden with ic->ic_curchan below.
 */
static int
iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap,
    struct ieee80211_scan_state *ss, struct ieee80211_channel *c)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_scan_hdr *hdr;
	struct iwn_cmd_data *tx;
	struct iwn_scan_essid *essid;
	struct iwn_scan_chan *chan;
	struct ieee80211_frame *wh;
	struct ieee80211_rateset *rs;
	uint8_t *buf, *frm;
	uint16_t rxchain;
	uint8_t txant;
	int buflen, error;
	int is_active;
	uint16_t dwell_active, dwell_passive;
	uint32_t extra, scan_service_time;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/*
	 * We are absolutely not allowed to send a scan command when another
	 * scan command is pending.
	 */
	if (sc->sc_is_scanning) {
		device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
		    __func__);
		return (EAGAIN);
	}

	/* Assign the scan channel */
	c = ic->ic_curchan;

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf == NULL) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate buffer for scan command\n",
		    __func__);
		return ENOMEM;
	}
	hdr = (struct iwn_scan_hdr *)buf;
	/*
	 * Move to the next channel if no frames are received within 10ms
	 * after sending the probe request.
	 */
	hdr->quiet_time = htole16(10);		/* timeout in milliseconds */
	hdr->quiet_threshold = htole16(1);	/* min # of packets */
	/*
	 * Max needs to be greater than active and passive and quiet!
	 * It's also in microseconds!
	 */
	hdr->max_svc = htole32(250 * 1024);

	/*
	 * Reset scan: interval=100
	 * Normal scan: interval=becaon interval
	 * suspend_time: 100 (TU)
	 *
	 */
	/* NOTE(review): 'extra' is computed but unused; superseded by the
	 * hardcoded value below. */
	extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22;
	//scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024);
	scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */
	hdr->pause_svc = htole32(scan_service_time);

	/* Select antennas for scanning. */
	rxchain =
	    IWN_RXCHAIN_VALID(sc->rxchainmask) |
	    IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
	    IWN_RXCHAIN_DRIVER_FORCE;
	if (IEEE80211_IS_CHAN_A(c) &&
	    sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Ant A must be avoided in 5GHz because of an HW bug. */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
	} else	/* Use all available RX antennas.
 */
		rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
	hdr->rxchain = htole16(rxchain);
	hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);

	tx = (struct iwn_cmd_data *)(hdr + 1);
	tx->flags = htole32(IWN_TX_AUTO_SEQ);
	tx->id = sc->broadcast_id;
	tx->lifetime = htole32(IWN_LIFETIME_INFINITE);

	if (IEEE80211_IS_CHAN_5GHZ(c)) {
		/* Send probe requests at 6Mbps. */
		tx->rate = htole32(0xd);
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
	} else {
		hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
		if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
		    sc->rxon->associd && sc->rxon->chan > 14)
			tx->rate = htole32(0xd);
		else {
			/* Send probe requests at 1Mbps. */
			tx->rate = htole32(10 | IWN_RFLAG_CCK);
		}
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	}
	/* Use the first valid TX antenna. */
	txant = IWN_LSB(sc->txchainmask);
	tx->rate |= htole32(IWN_RFLAG_ANT(txant));

	/*
	 * Only do active scanning if we're announcing a probe request
	 * for a given SSID (or more, if we ever add it to the driver.)
	 */
	is_active = 0;

	/*
	 * If we're scanning for a specific SSID, add it to the command.
	 *
	 * XXX maybe look at adding support for scanning multiple SSIDs?
	 */
	essid = (struct iwn_scan_essid *)(tx + 1);
	if (ss != NULL) {
		if (ss->ss_ssid[0].len != 0) {
			essid[0].id = IEEE80211_ELEMID_SSID;
			essid[0].len = ss->ss_ssid[0].len;
			memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
		}

		DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n",
		    __func__,
		    ss->ss_ssid[0].len,
		    ss->ss_ssid[0].len,
		    ss->ss_ssid[0].ssid);

		if (ss->ss_nssid > 0)
			is_active = 1;
	}

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	/* NOTE(review): 20 is presumably the ESSID slot count in the scan
	 * command layout -- confirm against if_iwnreg.h. */
	wh = (struct ieee80211_frame *)(essid + 20);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_ifp->if_broadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(vap->iv_ifp));
	IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_ifp->if_broadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, NULL, 0);
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	if (ic->ic_htcaps & IEEE80211_HTC_HT)
		frm = ieee80211_add_htcap(frm, ni);

	/* Set length of probe request. */
	tx->len = htole16(frm - (uint8_t *)wh);

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue.  Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 *
	 * This was fixed in later versions along with some other
	 * scan changes, and the threshold behaves as a flag in those
	 * versions.
	 */

	/*
	 * If we're doing active scanning, set the crc_threshold
	 * to a suitable value.  This is different to active versus
	 * passive scanning depending upon the channel flags; the
	 * firmware will obey that particular check for us.
	 */
	if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
		hdr->crc_threshold = is_active ?
		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
	else
		hdr->crc_threshold = is_active ?
		    IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;

	chan = (struct iwn_scan_chan *)frm;
	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
	chan->flags = 0;
	/* NOTE(review): 'ss' is dereferenced here without the NULL check
	 * used earlier in this function -- verify ss != NULL is guaranteed
	 * by all callers. */
	if (ss->ss_nssid > 0)
		chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
	chan->dsp_gain = 0x6e;

	/*
	 * Set the passive/active flag depending upon the channel mode.
	 * XXX TODO: take the is_active flag into account as well?
	 */
	if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
		chan->flags |= htole32(IWN_CHAN_PASSIVE);
	else
		chan->flags |= htole32(IWN_CHAN_ACTIVE);

	/*
	 * Calculate the active/passive dwell times.
 */

	dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid);
	dwell_passive = iwn_get_passive_dwell_time(sc, c);

	/* Make sure they're valid */
	if (dwell_passive <= dwell_active)
		dwell_passive = dwell_active + 1;

	chan->active = htole16(dwell_active);
	chan->passive = htole16(dwell_passive);

	if (IEEE80211_IS_CHAN_5GHZ(c))
		chan->rf_gain = 0x3b;
	else
		chan->rf_gain = 0x28;

	DPRINTF(sc, IWN_DEBUG_STATE,
	    "%s: chan %u flags 0x%x rf_gain 0x%x "
	    "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x "
	    "isactive=%d numssid=%d\n", __func__,
	    chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
	    dwell_active, dwell_passive, scan_service_time,
	    hdr->crc_threshold, is_active, ss->ss_nssid);

	hdr->nchan++;
	chan++;
	buflen = (uint8_t *)chan - buf;
	hdr->len = htole16(buflen);

	if (sc->sc_is_scanning) {
		device_printf(sc->sc_dev,
		    "%s: called with is_scanning set!\n",
		    __func__);
	}
	sc->sc_is_scanning = 1;

	DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
	    hdr->nchan);
	error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
	free(buf, M_DEVBUF);
	/* Arm a watchdog in case the firmware never completes the scan. */
	if (error == 0)
		callout_reset(&sc->scan_timeout, 5*hz, iwn_scan_timeout, sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * Prepare for authentication: program the RXON with the BSS' channel
 * and BSSID and the matching CCK/OFDM basic-rate masks.
 */
static int
iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = vap->iv_bss;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	/* Update adapter configuration.
 */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0x15;
	}

	/* try HT */
	sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));

	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
	    sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
	    sc->rxon->ofdm_mask);

	if ((error = iwn_send_rxon(sc, 0, 1)) != 0) {
		device_printf(sc->sc_dev, "%s: could not send RXON\n",
		    __func__);
		return (error);
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return (0);
}

/*
 * Transition to RUN state: program the RXON for association, add the
 * BSS node, set link quality and start periodic calibration.
 */
static int
iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
{
	struct iwn_ops *ops = &sc->ops;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = vap->iv_bss;
	struct iwn_node_info node;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Link LED blinks while monitoring.
 */
		iwn_set_led(sc, IWN_LED_LINK, 5, 5);
		return 0;
	}
	if ((error = iwn_set_timing(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set timing, error %d\n", __func__, error);
		return error;
	}

	/* Update adapter configuration. */
	IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
	sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
	sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
	sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
		sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
		sc->rxon->cck_mask = 0;
		sc->rxon->ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
		sc->rxon->cck_mask = 0x03;
		sc->rxon->ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon->cck_mask = 0x0f;
		sc->rxon->ofdm_mask = 0x15;
	}
	/* try HT */
	sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ni->ni_chan));
	sc->rxon->filter |= htole32(IWN_FILTER_BSS);
	DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x, curhtprotmode=%d\n",
	    sc->rxon->chan, le32toh(sc->rxon->flags), ic->ic_curhtprotmode);

	if ((error = iwn_send_rxon(sc, 0, 1)) != 0) {
		device_printf(sc->sc_dev, "%s: could not send RXON\n",
		    __func__);
		return error;
	}

	/* Fake a join to initialize the TX rate. */
	((struct iwn_node *)ni)->id = IWN_ID_BSS;
	iwn_newassoc(ni, 1);

	/* Add BSS node.
 */
	memset(&node, 0, sizeof node);
	IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
	node.id = IWN_ID_BSS;
	if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
		/* Mirror the peer's static/dynamic SMPS mode, if any. */
		switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
		case IEEE80211_HTCAP_SMPS_ENA:
			node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
			break;
		case IEEE80211_HTCAP_SMPS_DYNAMIC:
			node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
			break;
		}
		node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
		    IWN_AMDPU_DENSITY(5));	/* 4us */
		if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
			node.htflags |= htole32(IWN_NODE_HT40);
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
	error = ops->add_node(sc, &node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not add BSS node, error %d\n", __func__, error);
		return error;
	}
	DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
	    __func__, node.id);
	if ((error = iwn_set_link_quality(sc, ni)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not setup link quality for node %d, error %d\n",
		    __func__, node.id, error);
		return error;
	}

	if ((error = iwn_init_sensitivity(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set sensitivity, error %d\n", __func__,
		    error);
		return error;
	}
	/* Start periodic calibration timer. */
	sc->calib.state = IWN_CALIB_STATE_ASSOC;
	sc->calib_cnt = 0;
	callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
	    sc);

	/* Link LED always on while associated. */
	iwn_set_led(sc, IWN_LED_LINK, 0, 1);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return 0;
}

/*
 * This function is called by upper layer when an ADDBA request is received
 * from another STA and before the ADDBA response is sent.
 */
static int
iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
    int baparamset, int batimeout, int baseqctl)
{
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	uint16_t ssn;
	uint8_t tid;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Extract TID and starting sequence number from the BA parameters. */
	tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
	ssn = _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);

	if (wn->id == IWN_ID_UNDEFINED)
		return (ENOENT);

	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_ADDBA;
	node.addba_tid = tid;
	node.addba_ssn = htole16(ssn);
	DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
	    wn->id, tid, ssn);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return error;
	/* Chain to the saved net80211 handler. */
	return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
}

/*
 * This function is called by upper layer on teardown of an HT-immediate
 * Block Ack agreement (eg. uppon receipt of a DELBA frame).
 */
static void
iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct iwn_softc *sc = ic->ic_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	uint8_t tid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (wn->id == IWN_ID_UNDEFINED)
		goto end;

	/* XXX: tid as an argument */
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		if (&ni->ni_rx_ampdu[tid] == rap)
			break;
	}

	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DELBA;
	node.delba_tid = tid;
	DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
	(void)ops->add_node(sc, &node, 1);
end:
	/* Chain to the saved net80211 handler. */
	sc->sc_ampdu_rx_stop(ni, rap);
}

/*
 * Reserve a free aggregation queue for an outgoing ADDBA request, then
 * hand off to net80211.  Returns 0 (request not sent) if no queue or
 * memory is available.
 */
static int
iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int dialogtoken, int baparamset, int batimeout)
{
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
		if (sc->qid2tap[qid] == NULL)
			break;
	}
	if (qid == sc->ntxqs) {
		DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
		    __func__);
		return 0;
	}
	tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	if (tap->txa_private == NULL) {
		device_printf(sc->sc_dev,
		    "%s: failed to alloc TX aggregation structure\n", __func__);
		return 0;
	}
	sc->qid2tap[qid] = tap;
	*(int *)tap->txa_private = qid;
	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
	    batimeout);
}

/*
 * Handle an ADDBA response: on success start TX aggregation, otherwise
 * release the queue reserved by iwn_addba_request().
 */
static int
iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
    int code, int baparamset, int batimeout)
{
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	int qid = *(int *)tap->txa_private;
	uint8_t tid = tap->txa_tid;
	int ret;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (code == IEEE80211_STATUS_SUCCESS) {
		/* Peer accepted: sync TX sequence and start aggregation. */
		ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
		ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
		if (ret != 1)
			return ret;
	} else {
		/* Peer refused: release the reserved aggregation queue. */
		sc->qid2tap[qid] = NULL;
		free(tap->txa_private, M_DEVBUF);
		tap->txa_private = NULL;
	}
	return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}

/*
 * This function is called by upper layer when an ADDBA response is received
 * from another STA.
 */
static int
iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	struct iwn_ops *ops = &sc->ops;
	struct iwn_node *wn = (void *)ni;
	struct iwn_node_info node;
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Returns 0 (failure) when the firmware node entry is missing. */
	if (wn->id == IWN_ID_UNDEFINED)
		return (0);

	/* Enable TX for the specified RA/TID. */
	wn->disable_tid &= ~(1 << tid);
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = IWN_NODE_UPDATE;
	node.flags = IWN_FLAG_SET_DISABLE_TID;
	node.disable_tid = htole16(wn->disable_tid);
	error = ops->add_node(sc, &node, 1);
	if (error != 0)
		return 0;

	/* Program the hardware scheduler for the aggregation queue. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return 0;
	qid = *(int *)tap->txa_private;
	DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
	    __func__, wn->id, tid, tap->txa_start, qid);
	ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);

	iwn_set_link_quality(sc, ni);
	return 1;
}

/*
 * Teardown of a TX Block Ack session: stop aggregation on the hardware
 * queue and release it back to the free pool.
 */
static void
iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
	struct iwn_softc *sc = ni->ni_ic->ic_softc;
	struct iwn_ops *ops = &sc->ops;
	uint8_t tid = tap->txa_tid;
	int qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	sc->sc_addba_stop(ni, tap);

	/* No queue was ever reserved for this session. */
	if (tap->txa_private == NULL)
		return;

	/*
	 * NOTE(review): the early returns below leave qid2tap/txa_private
	 * in place when the queue is still busy or the NIC lock cannot be
	 * taken -- presumably a later teardown path retries; confirm.
	 */
	qid = *(int *)tap->txa_private;
	if (sc->txq[qid].queued != 0)
		return;
	if (iwn_nic_lock(sc) != 0)
		return;
	ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
	iwn_nic_unlock(sc);
	sc->qid2tap[qid] = NULL;
	free(tap->txa_private, M_DEVBUF);
	tap->txa_private = NULL;
}

/*
 * 4965-specific: configure hardware TX queue `qid' for aggregation on the
 * given RA/TID, starting at sequence number `ssn'.  Called with the NIC
 * lock held (see iwn_ampdu_tx_start()).
 */
static void
iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	struct iwn_node *wn = (void *)ni;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue.
 */
	iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
	    IWN_SCHED_WINSZ);
	/* Set scheduler frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active, with aggregation enabled. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
	    iwn_tid2fifo[tid] << 1);
}

/*
 * 4965-specific: undo iwn4965_ampdu_tx_start() and return TX queue `qid'
 * to normal (non-aggregation) operation.
 */
static void
iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_CHGACT);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
	    IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
}

/*
 * 5000-series counterpart of iwn4965_ampdu_tx_start(): configure hardware
 * TX queue `qid' for aggregation on the given RA/TID starting at `ssn'.
 */
static void
iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
    int qid, uint8_t tid, uint16_t ssn)
{
	struct iwn_node *wn = (void *)ni;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Assign RA/TID translation to the queue. */
	iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
	    wn->id << 4 | tid);

	/* Enable chain-building mode for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);

	/* Enable aggregation for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Set scheduler window size and frame limit. */
	iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
	    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);

	/* Enable interrupts for the queue. */
	iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as active. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
}

/*
 * 5000-series counterpart of iwn4965_ampdu_tx_stop(): return TX queue
 * `qid' to normal (non-aggregation) operation.
 */
static void
iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Stop TX scheduler while we're changing its configuration. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_CHGACT);

	/* Disable aggregation for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);

	/* Set starting sequence number from the ADDBA request. */
	IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);

	/* Disable interrupts for the queue. */
	iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);

	/* Mark the queue as inactive. */
	iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
	    IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
}

/*
 * Query calibration tables from the initialization firmware.  We do this
 * only once at first boot.  Called from a process context.
 */
static int
iwn5000_query_calibration(struct iwn_softc *sc)
{
	struct iwn5000_calib_config cmd;
	int error;

	/* Request every calibration the firmware can provide. */
	memset(&cmd, 0, sizeof cmd);
	cmd.ucode.once.enable = htole32(0xffffffff);
	cmd.ucode.once.start = htole32(0xffffffff);
	cmd.ucode.once.send = htole32(0xffffffff);
	cmd.ucode.flags = htole32(0xffffffff);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
	    __func__);
	error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
	if (error != 0)
		return error;

	/* Wait at most two seconds for calibration to complete. */
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
		error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
	return error;
}

/*
 * Send calibration results to the runtime firmware.  These results were
 * obtained on first boot from the initialization firmware.
 */
static int
iwn5000_send_calibration(struct iwn_softc *sc)
{
	int idx, error;

	/* Replay each stored calibration result this chip requires. */
	for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
		if (!(sc->base_params->calib_need & (1<<idx))) {
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "No need of calib %d\n",
			    idx);
			continue; /* no need for this calib */
		}
		if (sc->calibcmd[idx].buf == NULL) {
			/* Required but never captured: skip, don't fail. */
			DPRINTF(sc, IWN_DEBUG_CALIBRATE,
			    "Need calib idx : %d but no available data\n",
			    idx);
			continue;
		}

		DPRINTF(sc, IWN_DEBUG_CALIBRATE,
		    "send calibration result idx=%d len=%d\n", idx,
		    sc->calibcmd[idx].len);
		error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
		    sc->calibcmd[idx].len, 0);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not send calibration result, error %d\n",
			    __func__, error);
			return error;
		}
	}
	return 0;
}

/*
 * Configure WiMAX coexistence (currently always disabled; the 6050 combo
 * path is compiled out below).
 */
static int
iwn5000_send_wimax_coex(struct iwn_softc *sc)
{
	struct iwn5000_wimax_coex wimax;

	/*
	 * NOTE(review): only wimax.flags and wimax.events are initialized
	 * below; if struct iwn5000_wimax_coex has further members they are
	 * sent uninitialized -- consider memset(&wimax, 0, sizeof wimax).
	 */
#if 0
	if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
		/* Enable WiMAX coexistence for combo adapters. */
		wimax.flags =
		    IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
		    IWN_WIMAX_COEX_STA_TABLE_VALID |
		    IWN_WIMAX_COEX_ENABLE;
		memcpy(wimax.events, iwn6050_wimax_events,
		    sizeof iwn6050_wimax_events);
	} else
#endif
	{
		/* Disable WiMAX coexistence. */
		wimax.flags = 0;
		memset(wimax.events, 0, sizeof wimax.events);
	}
	DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
	    __func__);
	return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
}

/*
 * Send the crystal capacitor calibration values read from EEPROM to the
 * firmware.
 */
static int
iwn5000_crystal_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_crystal cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	/* Two capacitor values are packed into the EEPROM word. */
	cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
	cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
	    cmd.cap_pin[0], cmd.cap_pin[1]);
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * Send the radio temperature sensor offset (from EEPROM, or a default)
 * to the firmware.
 */
static int
iwn5000_temp_offset_calib(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offset cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0)
		cmd.offset = htole16(sc->eeprom_temp);
	else
		cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
	DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
	    le16toh(cmd.offset));
	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * Version-2 variant of the temperature offset calibration: sends low/high
 * offsets plus the burnt voltage reference.
 */
static int
iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
{
	struct iwn5000_phy_calib_temp_offsetv2 cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
	cmd.ngroups = 1;
	cmd.isvalid = 1;
	if (sc->eeprom_temp != 0) {
		cmd.offset_low = htole16(sc->eeprom_temp);
		cmd.offset_high = htole16(sc->eeprom_temp_high);
	} else {
		cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
		cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
	}
	cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);

	DPRINTF(sc, IWN_DEBUG_CALIBRATE,
	    "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
	    le16toh(cmd.offset_low),
	    le16toh(cmd.offset_high),
	    le16toh(cmd.burnt_voltage_ref));

	return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}

/*
 * This function is called after the runtime firmware notifies us of its
 * readiness (called in a process context).
 */
static int
iwn4965_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
	    IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Disable chain mode for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);

	for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
		/* Reset read and write pointers for each queue. */
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		/* Set scheduler window size. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
		/* Set scheduler frame limit. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16);
	}

	/* Enable interrupts for all our 16 queues. */
	iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active.
 */
	for (qid = 0; qid < 7; qid++) {
		/* Per-queue FIFO assignment for the 4965. */
		static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
		iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
		    IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
	}
	iwn_nic_unlock(sc);
	return 0;
}

/*
 * This function is called after the initialization or runtime firmware
 * notifies us of its readiness (called in a process context).
 */
static int
iwn5000_post_alive(struct iwn_softc *sc)
{
	int error, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

#ifndef __HAIKU__
	/* Switch to using ICT interrupt mode. */
	iwn5000_ict_reset(sc);
#endif

	if ((error = iwn_nic_lock(sc)) != 0){
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
		return error;
	}

	/* Clear TX scheduler state in SRAM. */
	sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
	iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
	    IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);

	IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);

	/* Enable chain mode for all queues, except command queue. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf);
	else
		iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
	iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);

	for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
		/* Reset read and write pointers for each queue. */
		iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
		IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);

		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwn_mem_write(sc, sc->sched_base +
		    IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
		    IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
	}

	/* Enable interrupts for all our 20 queues. */
	iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
	/* Identify TX FIFO rings (0-7). */
	iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);

	/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
	if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) {
		/* Mark TX rings as active. */
		for (qid = 0; qid < 11; qid++) {
			/* Per-queue FIFO assignment with PAN support. */
			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 };
			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
		}
	} else {
		/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
		for (qid = 0; qid < 7; qid++) {
			static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
			iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
			    IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
		}
	}
	iwn_nic_unlock(sc);

	/* Configure WiMAX coexistence for combo adapters. */
	error = iwn5000_send_wimax_coex(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure WiMAX coexistence, error %d\n",
		    __func__, error);
		return error;
	}
	if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
		/* Perform crystal calibration. */
		error = iwn5000_crystal_calib(sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: crystal calibration failed, error %d\n",
			    __func__, error);
			return error;
		}
	}
	if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
		/* Query calibration from the initialization firmware. */
		if ((error = iwn5000_query_calibration(sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not query calibration, error %d\n",
			    __func__, error);
			return error;
		}
		/*
		 * We have the calibration results now, reboot with the
		 * runtime firmware (call ourselves recursively!)
		 */
		iwn_hw_stop(sc);
		error = iwn_hw_init(sc);
	} else {
		/* Send calibration results to runtime firmware. */
		error = iwn5000_send_calibration(sc);
	}

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return error;
}

/*
 * The firmware boot code is small and is intended to be copied directly into
 * the NIC internal memory (no DMA transfer).
 */
static int
iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
{
	int error, ntries;

	/* BSM transfers are counted in 32-bit words. */
	size /= sizeof (uint32_t);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Copy microcode image into NIC memory. */
	iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
	    (const uint32_t *)ucode, size);

	iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
	iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
	iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);

	/* Start boot load now. */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);

	/* Wait for transfer to complete. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
		    IWN_BSM_WR_CTRL_START))
			break;
		DELAY(10);
	}
	if (ntries == 1000) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		iwn_nic_unlock(sc);
		return ETIMEDOUT;
	}

	/* Enable boot after power up.
 */
	iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);

	iwn_nic_unlock(sc);
	return 0;
}

/*
 * Load initialization firmware through the BSM, wait for the adapter to
 * come alive, then load the runtime firmware sections.
 */
static int
iwn4965_load_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy initialization sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->init.text, fw->init.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find initialization sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
	iwn_nic_unlock(sc);

	/* Load firmware boot code. */
	error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		return error;
	}
	/* Now press "execute". */
	IWN_WRITE(sc, IWN_RESET, 0);

	/* Wait at most one second for first alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}

	/* Retrieve current temperature for initial TX power calibration. */
	sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
	sc->temp = iwn4965_get_temperature(sc);

	/* Copy runtime sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
	    fw->main.text, fw->main.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find runtime sections. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
	    (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
	iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
	    IWN_FW_UPDATED | fw->main.textsz);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * DMA one firmware section to SRAM address `dst' via the Flow Handler
 * service channel, then sleep until the transfer-complete interrupt wakes
 * us (or the five second timeout expires).
 */
static int
iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
    const uint8_t *section, int size)
{
	struct iwn_dma_info *dma = &sc->fw_dma;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, size);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Pause the service DMA channel while reprogramming it. */
	IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
	    IWN_FH_TX_CONFIG_DMA_PAUSE);

	/* Program source buffer, destination and transfer size. */
	IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
	    IWN_LOADDR(dma->paddr));
	IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
	    IWN_HIADDR(dma->paddr) << 28 | size);
	IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
	    IWN_FH_TXBUF_STATUS_TBNUM(1) |
	    IWN_FH_TXBUF_STATUS_TBIDX(1) |
	    IWN_FH_TXBUF_STATUS_TFBD_VALID);

	/* Kick Flow Handler to start DMA transfer.
 */
		hdrlen += 4;
		ptr++;
	}
	if (fw->size < hdrlen) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	/* Section sizes follow the header, in little-endian words. */
	fw->main.textsz = le32toh(*ptr++);
	fw->main.datasz = le32toh(*ptr++);
	fw->init.textsz = le32toh(*ptr++);
	fw->init.datasz = le32toh(*ptr++);
	fw->boot.textsz = le32toh(*ptr++);

	/* Check that all firmware sections fit. */
	if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}

	/* Get pointers to firmware sections. */
	fw->main.text = (const uint8_t *)ptr;
	fw->main.data = fw->main.text + fw->main.textsz;
	fw->init.text = fw->main.data + fw->main.datasz;
	fw->init.data = fw->init.text + fw->init.textsz;
	fw->boot.text = fw->init.data + fw->init.datasz;
	return 0;
}

/*
 * Extract text and data sections from a TLV firmware image.
 */
static int
iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
    uint16_t alt)
{
	const struct iwn_fw_tlv_hdr *hdr;
	const struct iwn_fw_tlv *tlv;
	const uint8_t *ptr, *end;
	uint64_t altmask;
	uint32_t len, tmp;

	if (fw->size < sizeof (*hdr)) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		return EINVAL;
	}
	hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
	if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
		device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
		    __func__, le32toh(hdr->signature));
		return EINVAL;
	}
	DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
	    le32toh(hdr->build));
	sc->ucode_rev = le32toh(hdr->rev);

	/*
	 * Select the closest supported alternative that is less than
	 * or equal to the specified one.
	 */
	altmask = le64toh(hdr->altmask);
	while (alt > 0 && !(altmask & (1ULL << alt)))
		alt--;	/* Downgrade. */
	DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);

	ptr = (const uint8_t *)(hdr + 1);
	end = (const uint8_t *)(fw->data + fw->size);

	/* Parse type-length-value fields. */
	while (ptr + sizeof (*tlv) <= end) {
		tlv = (const struct iwn_fw_tlv *)ptr;
		len = le32toh(tlv->len);

		ptr += sizeof (*tlv);
		/*
		 * NOTE(review): `ptr + len' can overflow (UB) for a hostile
		 * len close to UINTPTR_MAX; `len > (size_t)(end - ptr)'
		 * would be the overflow-safe form of this bounds check.
		 */
		if (ptr + len > end) {
			device_printf(sc->sc_dev,
			    "%s: firmware too short: %zu bytes\n", __func__,
			    fw->size);
			return EINVAL;
		}
		/* Skip other alternatives. */
		if (tlv->alt != 0 && tlv->alt != htole16(alt))
			goto next;

		switch (le16toh(tlv->type)) {
		case IWN_FW_TLV_MAIN_TEXT:
			fw->main.text = ptr;
			fw->main.textsz = len;
			break;
		case IWN_FW_TLV_MAIN_DATA:
			fw->main.data = ptr;
			fw->main.datasz = len;
			break;
		case IWN_FW_TLV_INIT_TEXT:
			fw->init.text = ptr;
			fw->init.textsz = len;
			break;
		case IWN_FW_TLV_INIT_DATA:
			fw->init.data = ptr;
			fw->init.datasz = len;
			break;
		case IWN_FW_TLV_BOOT_TEXT:
			fw->boot.text = ptr;
			fw->boot.textsz = len;
			break;
		case IWN_FW_TLV_ENH_SENS:
			/* Zero-length TLV marks enhanced sensitivity. */
			if (!len)
				sc->sc_flags |= IWN_FLAG_ENH_SENS;
			break;
		case IWN_FW_TLV_PHY_CALIB:
			tmp = le32toh(*ptr);
			if (tmp < 253) {
				sc->reset_noise_gain = tmp;
				sc->noise_gain = tmp + 1;
			}
			break;
		case IWN_FW_TLV_PAN:
			sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "PAN Support found: %d\n", 1);
			break;
		case IWN_FW_TLV_FLAGS:
			/* Payload must be a whole number of 32-bit words. */
			if (len < sizeof(uint32_t))
				break;
			if (len % sizeof(uint32_t))
				break;
			sc->tlv_feature_flags = le32toh(*ptr);
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "%s: feature: 0x%08x\n",
			    __func__,
			    sc->tlv_feature_flags);
			break;
		case IWN_FW_TLV_PBREQ_MAXLEN:
		case IWN_FW_TLV_RUNT_EVTLOG_PTR:
		case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
		case IWN_FW_TLV_RUNT_ERRLOG_PTR:
		case IWN_FW_TLV_INIT_EVTLOG_PTR:
		case IWN_FW_TLV_INIT_EVTLOG_SIZE:
		case IWN_FW_TLV_INIT_ERRLOG_PTR:
		case IWN_FW_TLV_WOWLAN_INST:
		case IWN_FW_TLV_WOWLAN_DATA:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "TLV type %d recognized but not handled\n",
			    le16toh(tlv->type));
			break;
		default:
			DPRINTF(sc, IWN_DEBUG_RESET,
			    "TLV type %d not handled\n", le16toh(tlv->type));
			break;
		}
 next:		/* TLV fields are 32-bit aligned. */
		ptr += (len + 3) & ~3;
	}
	return 0;
}

/*
 * Fetch the firmware image from the filesystem, parse it (legacy or TLV
 * format) and validate the section sizes against hardware limits.
 */
static int
iwn_read_firmware(struct iwn_softc *sc)
{
	struct iwn_fw_info *fw = &sc->fw;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* firmware_get() may sleep; drop the driver lock around it. */
	IWN_UNLOCK(sc);

	memset(fw, 0, sizeof (*fw));

	/* Read firmware image from filesystem. */
	sc->fw_fp = firmware_get(sc->fwname);
	if (sc->fw_fp == NULL) {
		device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
		    __func__, sc->fwname);
		IWN_LOCK(sc);
		return EINVAL;
	}
	IWN_LOCK(sc);

	fw->size = sc->fw_fp->datasize;
	fw->data = (const uint8_t *)sc->fw_fp->data;
	if (fw->size < sizeof (uint32_t)) {
		device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
		    __func__, fw->size);
		error = EINVAL;
		goto fail;
	}

	/* Retrieve text and data sections. */
	if (*(const uint32_t *)fw->data != 0)	/* Legacy image. */
		error = iwn_read_firmware_leg(sc, fw);
	else
		error = iwn_read_firmware_tlv(sc, fw, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware sections, error %d\n",
		    __func__, error);
		goto fail;
	}

	device_printf(sc->sc_dev, "%s: ucode rev=0x%08x\n", __func__, sc->ucode_rev);

	/* Make sure text and data sections fit in hardware memory. */
	if (fw->main.textsz > sc->fw_text_maxsz ||
	    fw->main.datasz > sc->fw_data_maxsz ||
	    fw->init.textsz > sc->fw_text_maxsz ||
	    fw->init.datasz > sc->fw_data_maxsz ||
	    fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
	    (fw->boot.textsz & 3) != 0) {
		device_printf(sc->sc_dev, "%s: firmware sections too large\n",
		    __func__);
		error = EINVAL;
		goto fail;
	}

	/* We can proceed with loading the firmware.
 */
	return 0;

fail:	iwn_unload_firmware(sc);
	return error;
}

/*
 * Release the firmware image obtained by iwn_read_firmware().
 * NOTE(review): no NULL check on sc->fw_fp -- presumably callers only
 * invoke this after a successful firmware_get(); confirm.
 */
static void
iwn_unload_firmware(struct iwn_softc *sc)
{
	firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
	sc->fw_fp = NULL;
}

/*
 * Request MAC clock and busy-wait (up to 25ms) for it to stabilize.
 */
static int
iwn_clock_wait(struct iwn_softc *sc)
{
	int ntries;

	/* Set "initialization complete" bit. */
	IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);

	/* Wait for clock stabilization. */
	for (ntries = 0; ntries < 2500; ntries++) {
		if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
			return 0;
		DELAY(10);
	}
	device_printf(sc->sc_dev,
	    "%s: timeout waiting for clock stabilization\n", __func__);
	return ETIMEDOUT;
}

/*
 * Initialize the Always-Powered-Module (APM): apply hardware bug
 * workarounds, bring up the clocks and enable DMA.
 */
static int
iwn_apm_init(struct iwn_softc *sc)
{
	uint32_t reg;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Disable L0s exit timer (NMI bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
	/* Don't wait for ICH L0s (ICH bug workaround). */
	IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);

	/* Set FH wait threshold to max (HW bug under stress workaround). */
	IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);

	/* Enable HAP INTA to move adapter from L1a to L0s. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);

	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 4);
	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
	if (reg & PCIEM_LINK_CTL_ASPMC_L1)	/* L1 Entry enabled. */
		IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
	else
		IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);

	if (sc->base_params->pll_cfg_val)
		IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = iwn_clock_wait(sc)) != 0)
		return error;

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
		/* Enable DMA and BSM (Bootstrap State Machine). */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
		    IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
	} else {
		/* Enable DMA. */
		iwn_prph_write(sc, IWN_APMG_CLK_EN,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
	}
	DELAY(20);
	/* Disable L1-Active. */
	iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
	iwn_nic_unlock(sc);

	return 0;
}

/*
 * Stop busmaster DMA and busy-wait (up to 1ms) for it to quiesce.
 */
static void
iwn_apm_stop_master(struct iwn_softc *sc)
{
	int ntries;

	/* Stop busmaster DMA activity. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
	for (ntries = 0; ntries < 100; ntries++) {
		if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
			return;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
}

/*
 * Shut down the APM: quiesce DMA, then software-reset the device.
 */
static void
iwn_apm_stop(struct iwn_softc *sc)
{
	iwn_apm_stop_master(sc);

	/* Reset the entire device. */
	IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
	DELAY(10);
	/* Clear "initialization complete" bit. */
	IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
}

/*
 * 4965-specific hardware interface configuration from the EEPROM radio
 * configuration word.
 */
static int
iwn4965_nic_config(struct iwn_softc *sc)
{
	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
		/*
		 * I don't believe this to be correct but this is what the
		 * vendor driver is doing.  Probably the bits should not be
		 * shifted in IWN_RFCFG_*.
		 */
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
	return 0;
}

/*
 * 5000-series hardware interface configuration: radio configuration,
 * power-off handling, and chip-specific workarounds.
 */
static int
iwn5000_nic_config(struct iwn_softc *sc)
{
	uint32_t tmp;
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
		IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
		    IWN_RFCFG_TYPE(sc->rfcfg) |
		    IWN_RFCFG_STEP(sc->rfcfg) |
		    IWN_RFCFG_DASH(sc->rfcfg));
	}
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
	    IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);

	if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
		/*
		 * Select first Switching Voltage Regulator (1.32V) to
		 * solve a stability issue related to noisy DC2DC line
		 * in the silicon of 1000 Series.
		 */
		tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
		tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
		tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
		iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
	}
	iwn_nic_unlock(sc);

	if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
		/* Use internal power amplifier only. */
		IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
	}
	if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) {
		/* Indicate that ROM calibration version is >=6. */
		IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
	}
	if (sc->base_params->additional_gp_drv_bit)
		IWN_SETBITS(sc, IWN_GP_DRIVER,
		    sc->base_params->additional_gp_drv_bit);
	return 0;
}

/*
 * Take NIC ownership over Intel Active Management Technology (AMT).
 */
static int
iwn_hw_prepare(struct iwn_softc *sc)
{
	int ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	/* Check if hardware is ready. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}

	/* Hardware not ready, force into ready state. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
	/* Poll up to 15000 * 10us = 150ms for PREPARE_DONE to clear. */
	for (ntries = 0; ntries < 15000; ntries++) {
		if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_PREPARE_DONE))
			break;
		DELAY(10);
	}
	if (ntries == 15000)
		return ETIMEDOUT;

	/* Hardware should be ready now. */
	IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
	for (ntries = 0; ntries < 5; ntries++) {
		if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
		    IWN_HW_IF_CONFIG_NIC_READY)
			return 0;
		DELAY(10);
	}
	return ETIMEDOUT;
}

/*
 * Full hardware bring-up: power on the adapter, program the RX/TX rings
 * and TX scheduler, enable DMA channels and interrupts, upload the
 * firmware and wait (at most one second, via msleep on the softc) for
 * the firmware "alive" notification before running the chip-specific
 * post_alive hook.  Called with the driver lock held.  Returns 0 or an
 * errno value.
 */
static int
iwn_hw_init(struct iwn_softc *sc)
{
	struct iwn_ops *ops = &sc->ops;
	int error, chnl, qid;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);

	if ((error = iwn_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/* Select VMAIN power source. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
	iwn_nic_unlock(sc);

	/* Perform adapter-specific initialization. */
	if ((error = ops->nic_config(sc)) != 0)
		return error;

	/* Initialize RX ring. */
	if ((error = iwn_nic_lock(sc)) != 0)
		return error;
	IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
	/* Set physical address of RX ring (256-byte aligned). */
	IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
	/* Set physical address of RX status (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
	/* Enable RX. */
	IWN_WRITE(sc, IWN_FH_RX_CONFIG,
	    IWN_FH_RX_CONFIG_ENA           |
	    IWN_FH_RX_CONFIG_IGN_RXF_EMPTY |	/* HW bug workaround */
	    IWN_FH_RX_CONFIG_IRQ_DST_HOST  |
	    IWN_FH_RX_CONFIG_SINGLE_FRAME  |
	    IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
	    IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
	iwn_nic_unlock(sc);
	IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);

	if ((error = iwn_nic_lock(sc)) != 0)
		return error;

	/* Initialize TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++) {
		struct iwn_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
	}
	iwn_nic_unlock(sc);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
		IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
		    IWN_FH_TX_CONFIG_DMA_ENA |
		    IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
	}

	/* Clear "radio off" and "commands blocked" bits. */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	/* Enable interrupt coalescing. */
	IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
	/* Enable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);

	/* _Really_ make sure "radio off" bit is cleared! */
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
	IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);

	/* Enable shadow registers. */
	if (sc->base_params->shadow_reg_enable)
		IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);

	if ((error = ops->load_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware, error %d\n", __func__,
		    error);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
	if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}
	/* Do post-firmware initialization. */

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return ops->post_alive(sc);
}

/*
 * Tear down the hardware: mask and acknowledge interrupts, stop the TX
 * scheduler and DMA channels, reset all rings and finally power the
 * adapter off.  NIC-lock failures are tolerated (best effort) since
 * this runs on the shutdown/error path.
 */
static void
iwn_hw_stop(struct iwn_softc *sc)
{
	int chnl, qid, ntries;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);

	IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);

	/* Disable interrupts. */
	IWN_WRITE(sc, IWN_INT_MASK, 0);
	IWN_WRITE(sc, IWN_INT, 0xffffffff);
	IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Make sure we no longer hold the NIC lock. */
	iwn_nic_unlock(sc);

	/* Stop TX scheduler. */
	iwn_prph_write(sc, sc->sched_txfact_addr, 0);

	/* Stop all DMA channels. */
	if (iwn_nic_lock(sc) == 0) {
		for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
			IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
			/* Poll up to 200 * 10us = 2ms for channel idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				if (IWN_READ(sc, IWN_FH_TX_STATUS) &
				    IWN_FH_TX_STATUS_IDLE(chnl))
					break;
				DELAY(10);
			}
		}
		iwn_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwn_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < sc->ntxqs; qid++)
		iwn_reset_tx_ring(sc, &sc->txq[qid]);

	if (iwn_nic_lock(sc) == 0) {
		iwn_prph_write(sc, IWN_APMG_CLK_DIS,
		    IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
		iwn_nic_unlock(sc);
	}
	DELAY(5);
	/* Power OFF adapter. */
	iwn_apm_stop(sc);
}

/*
 * Taskqueue handler scheduled when the firmware reports a fatal error:
 * restart the whole 802.11 stack for this device.  The #if 0'd branch
 * keeps the older (insufficient, see comment below) local-reinit code
 * for reference.
 */
static void
iwn_panicked(void *arg0, int pending)
{
	struct iwn_softc *sc = arg0;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
#if 0
	int error;
#endif

	if (vap == NULL) {
		printf("%s: null vap\n", __func__);
		return;
	}

	device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
	    "restarting\n", __func__, vap->iv_state);

	/*
	 * This is not enough work. We need to also reinitialise
	 * the correct transmit state for aggregation enabled queues,
	 * which has a very specific requirement of
	 * ring index = 802.11 seqno % 256. If we don't do this (which
	 * we definitely don't!) then the firmware will just panic again.
	 */
#if 1
	ieee80211_restart_all(ic);
#else
	IWN_LOCK(sc);

	iwn_stop_locked(sc);
	if ((error = iwn_init_locked(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not init hardware\n", __func__);
		goto unlock;
	}
	if (vap->iv_state >= IEEE80211_S_AUTH &&
	    (error = iwn_auth(sc, vap)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not move to auth state\n", __func__);
	}
	if (vap->iv_state >= IEEE80211_S_RUN &&
	    (error = iwn_run(sc, vap)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not move to run state\n", __func__);
	}

unlock:
	IWN_UNLOCK(sc);
#endif
}

/*
 * Bring the interface up with the driver lock held: prepare the
 * hardware, load and upload the firmware, configure the adapter and
 * start the watchdog.  Returns 0 on success (or if already running),
 * 1 if the radio is disabled by the hardware kill switch, and -1 on
 * any other failure (after stopping the hardware again).
 */
static int
iwn_init_locked(struct iwn_softc *sc)
{
	int error;

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);

	IWN_LOCK_ASSERT(sc);

	if (sc->sc_flags & IWN_FLAG_RUNNING)
		goto end;

	sc->sc_flags |= IWN_FLAG_RUNNING;

	if ((error = iwn_hw_prepare(sc)) != 0) {
		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Initialize interrupt mask to default value. */
	sc->int_mask = IWN_INT_MASK_DEF;
	sc->sc_flags &= ~IWN_FLAG_USE_ICT;

	/* Check that the radio is not disabled by hardware switch. */
	if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
		iwn_stop_locked(sc);
		DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

		return (1);
	}

	/* Read firmware images from the filesystem. */
	if ((error = iwn_read_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Initialize hardware and upload firmware. */
	error = iwn_hw_init(sc);
	/* The firmware image is no longer needed once uploaded. */
	iwn_unload_firmware(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not initialize hardware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Configure adapter now that it is ready. */
	if ((error = iwn_config(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure device, error %d\n", __func__,
		    error);
		goto fail;
	}

	callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);

end:
	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);

	return (0);

fail:
	iwn_stop_locked(sc);

	DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);

	return (-1);
}

/*
 * Locked wrapper around iwn_init_locked().
 */
static int
iwn_init(struct iwn_softc *sc)
{
	int error;

	IWN_LOCK(sc);
	error = iwn_init_locked(sc);
	IWN_UNLOCK(sc);

	return (error);
}

/*
 * Stop the interface with the driver lock held: cancel timers, clear
 * the RUNNING flag and power the hardware off.  No-op if not running.
 */
static void
iwn_stop_locked(struct iwn_softc *sc)
{

	IWN_LOCK_ASSERT(sc);

	if (!(sc->sc_flags & IWN_FLAG_RUNNING))
		return;

	sc->sc_is_scanning = 0;
	sc->sc_tx_timer = 0;
	callout_stop(&sc->watchdog_to);
	callout_stop(&sc->scan_timeout);
	callout_stop(&sc->calib_to);
	sc->sc_flags &= ~IWN_FLAG_RUNNING;

	/* Power OFF hardware. */
	iwn_hw_stop(sc);
}

/*
 * Locked wrapper around iwn_stop_locked().
 */
static void
iwn_stop(struct iwn_softc *sc)
{
	IWN_LOCK(sc);
	iwn_stop_locked(sc);
	IWN_UNLOCK(sc);
}

/*
 * Callback from net80211 to start a scan.
 */
static void
iwn_scan_start(struct ieee80211com *ic)
{
	struct iwn_softc *sc = ic->ic_softc;

	IWN_LOCK(sc);
	/* make the link LED blink while we're scanning */
	iwn_set_led(sc, IWN_LED_LINK, 20, 2);
	IWN_UNLOCK(sc);
}

/*
 * Callback from net80211 to terminate a scan.
9087 */ 9088static void 9089iwn_scan_end(struct ieee80211com *ic) 9090{ 9091 struct iwn_softc *sc = ic->ic_softc; 9092 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 9093 9094 IWN_LOCK(sc); 9095 if (vap->iv_state == IEEE80211_S_RUN) { 9096 /* Set link LED to ON status if we are associated */ 9097 iwn_set_led(sc, IWN_LED_LINK, 0, 1); 9098 } 9099 IWN_UNLOCK(sc); 9100} 9101 9102/* 9103 * Callback from net80211 to force a channel change. 9104 */ 9105static void 9106iwn_set_channel(struct ieee80211com *ic) 9107{ 9108 struct iwn_softc *sc = ic->ic_softc; 9109 int error; 9110 9111 DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); 9112 9113 IWN_LOCK(sc); 9114 /* 9115 * Only need to set the channel in Monitor mode. AP scanning and auth 9116 * are already taken care of by their respective firmware commands. 9117 */ 9118 if (ic->ic_opmode == IEEE80211_M_MONITOR) { 9119 error = iwn_config(sc); 9120 if (error != 0) 9121 device_printf(sc->sc_dev, 9122 "%s: error %d settting channel\n", __func__, error); 9123 } 9124 IWN_UNLOCK(sc); 9125} 9126 9127/* 9128 * Callback from net80211 to start scanning of the current channel. 9129 */ 9130static void 9131iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 9132{ 9133 struct ieee80211vap *vap = ss->ss_vap; 9134 struct ieee80211com *ic = vap->iv_ic; 9135 struct iwn_softc *sc = ic->ic_softc; 9136 int error; 9137 9138 IWN_LOCK(sc); 9139 error = iwn_scan(sc, vap, ss, ic->ic_curchan); 9140 IWN_UNLOCK(sc); 9141 if (error != 0) 9142 ieee80211_cancel_scan(vap); 9143} 9144 9145/* 9146 * Callback from net80211 to handle the minimum dwell time being met. 9147 * The intent is to terminate the scan but we just let the firmware 9148 * notify us when it's finished as we have no safe way to abort it. 
9149 */ 9150static void 9151iwn_scan_mindwell(struct ieee80211_scan_state *ss) 9152{ 9153 /* NB: don't try to abort scan; wait for firmware to finish */ 9154} 9155#ifdef IWN_DEBUG 9156#define IWN_DESC(x) case x: return #x 9157 9158/* 9159 * Translate CSR code to string 9160 */ 9161static char *iwn_get_csr_string(int csr) 9162{ 9163 switch (csr) { 9164 IWN_DESC(IWN_HW_IF_CONFIG); 9165 IWN_DESC(IWN_INT_COALESCING); 9166 IWN_DESC(IWN_INT); 9167 IWN_DESC(IWN_INT_MASK); 9168 IWN_DESC(IWN_FH_INT); 9169 IWN_DESC(IWN_GPIO_IN); 9170 IWN_DESC(IWN_RESET); 9171 IWN_DESC(IWN_GP_CNTRL); 9172 IWN_DESC(IWN_HW_REV); 9173 IWN_DESC(IWN_EEPROM); 9174 IWN_DESC(IWN_EEPROM_GP); 9175 IWN_DESC(IWN_OTP_GP); 9176 IWN_DESC(IWN_GIO); 9177 IWN_DESC(IWN_GP_UCODE); 9178 IWN_DESC(IWN_GP_DRIVER); 9179 IWN_DESC(IWN_UCODE_GP1); 9180 IWN_DESC(IWN_UCODE_GP2); 9181 IWN_DESC(IWN_LED); 9182 IWN_DESC(IWN_DRAM_INT_TBL); 9183 IWN_DESC(IWN_GIO_CHICKEN); 9184 IWN_DESC(IWN_ANA_PLL); 9185 IWN_DESC(IWN_HW_REV_WA); 9186 IWN_DESC(IWN_DBG_HPET_MEM); 9187 default: 9188 return "UNKNOWN CSR"; 9189 } 9190} 9191 9192/* 9193 * This function print firmware register 9194 */ 9195static void 9196iwn_debug_register(struct iwn_softc *sc) 9197{ 9198 int i; 9199 static const uint32_t csr_tbl[] = { 9200 IWN_HW_IF_CONFIG, 9201 IWN_INT_COALESCING, 9202 IWN_INT, 9203 IWN_INT_MASK, 9204 IWN_FH_INT, 9205 IWN_GPIO_IN, 9206 IWN_RESET, 9207 IWN_GP_CNTRL, 9208 IWN_HW_REV, 9209 IWN_EEPROM, 9210 IWN_EEPROM_GP, 9211 IWN_OTP_GP, 9212 IWN_GIO, 9213 IWN_GP_UCODE, 9214 IWN_GP_DRIVER, 9215 IWN_UCODE_GP1, 9216 IWN_UCODE_GP2, 9217 IWN_LED, 9218 IWN_DRAM_INT_TBL, 9219 IWN_GIO_CHICKEN, 9220 IWN_ANA_PLL, 9221 IWN_HW_REV_WA, 9222 IWN_DBG_HPET_MEM, 9223 }; 9224 DPRINTF(sc, IWN_DEBUG_REGISTER, 9225 "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", 9226 "\n"); 9227 for (i = 0; i < nitems(csr_tbl); i++){ 9228 DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", 9229 iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); 9230 if ((i+1) % 
3 == 0) 9231 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 9232 } 9233 DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); 9234} 9235#endif 9236 9237 9238