if_wpi.c revision 282382
1/*- 2 * Copyright (c) 2006,2007 3 * Damien Bergamini <damien.bergamini@free.fr> 4 * Benjamin Close <Benjamin.Close@clearchain.com> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19#include <sys/cdefs.h> 20__FBSDID("$FreeBSD: head/sys/dev/wpi/if_wpi.c 282382 2015-05-03 22:56:36Z adrian $"); 21 22/* 23 * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters. 24 * 25 * The 3945ABG network adapter doesn't use traditional hardware as 26 * many other adaptors do. Instead at run time the eeprom is set into a known 27 * state and told to load boot firmware. The boot firmware loads an init and a 28 * main binary firmware image into SRAM on the card via DMA. 29 * Once the firmware is loaded, the driver/hw then 30 * communicate by way of circular dma rings via the SRAM to the firmware. 31 * 32 * There is 6 memory rings. 1 command ring, 1 rx data ring & 4 tx data rings. 33 * The 4 tx data rings allow for prioritization QoS. 34 * 35 * The rx data ring consists of 32 dma buffers. Two registers are used to 36 * indicate where in the ring the driver and the firmware are up to. The 37 * driver sets the initial read index (reg1) and the initial write index (reg2), 38 * the firmware updates the read index (reg1) on rx of a packet and fires an 39 * interrupt. 
The driver then processes the buffers starting at reg1 indicating 40 * to the firmware which buffers have been accessed by updating reg2. At the 41 * same time allocating new memory for the processed buffer. 42 * 43 * A similar thing happens with the tx rings. The difference is the firmware 44 * stop processing buffers once the queue is full and until confirmation 45 * of a successful transmition (tx_done) has occurred. 46 * 47 * The command ring operates in the same manner as the tx queues. 48 * 49 * All communication direct to the card (ie eeprom) is classed as Stage1 50 * communication 51 * 52 * All communication via the firmware to the card is classed as State2. 53 * The firmware consists of 2 parts. A bootstrap firmware and a runtime 54 * firmware. The bootstrap firmware and runtime firmware are loaded 55 * from host memory via dma to the card then told to execute. From this point 56 * on the majority of communications between the driver and the card goes 57 * via the firmware. 58 */ 59 60#include "opt_wlan.h" 61#include "opt_wpi.h" 62 63#include <sys/param.h> 64#include <sys/sysctl.h> 65#include <sys/sockio.h> 66#include <sys/mbuf.h> 67#include <sys/kernel.h> 68#include <sys/socket.h> 69#include <sys/systm.h> 70#include <sys/malloc.h> 71#include <sys/queue.h> 72#include <sys/taskqueue.h> 73#include <sys/module.h> 74#include <sys/bus.h> 75#include <sys/endian.h> 76#include <sys/linker.h> 77#include <sys/firmware.h> 78 79#include <machine/bus.h> 80#include <machine/resource.h> 81#include <sys/rman.h> 82 83#include <dev/pci/pcireg.h> 84#include <dev/pci/pcivar.h> 85 86#include <net/bpf.h> 87#include <net/if.h> 88#include <net/if_var.h> 89#include <net/if_arp.h> 90#include <net/ethernet.h> 91#include <net/if_dl.h> 92#include <net/if_media.h> 93#include <net/if_types.h> 94 95#include <netinet/in.h> 96#include <netinet/in_systm.h> 97#include <netinet/in_var.h> 98#include <netinet/if_ether.h> 99#include <netinet/ip.h> 100 101#include <net80211/ieee80211_var.h> 
102#include <net80211/ieee80211_radiotap.h> 103#include <net80211/ieee80211_regdomain.h> 104#include <net80211/ieee80211_ratectl.h> 105 106#include <dev/wpi/if_wpireg.h> 107#include <dev/wpi/if_wpivar.h> 108#include <dev/wpi/if_wpi_debug.h> 109 110struct wpi_ident { 111 uint16_t vendor; 112 uint16_t device; 113 uint16_t subdevice; 114 const char *name; 115}; 116 117static const struct wpi_ident wpi_ident_table[] = { 118 /* The below entries support ABG regardless of the subid */ 119 { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 120 { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, 121 /* The below entries only support BG */ 122 { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" }, 123 { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" }, 124 { 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" }, 125 { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" }, 126 { 0, 0, 0, NULL } 127}; 128 129static int wpi_probe(device_t); 130static int wpi_attach(device_t); 131static void wpi_radiotap_attach(struct wpi_softc *); 132static void wpi_sysctlattach(struct wpi_softc *); 133static void wpi_init_beacon(struct wpi_vap *); 134static struct ieee80211vap *wpi_vap_create(struct ieee80211com *, 135 const char [IFNAMSIZ], int, enum ieee80211_opmode, int, 136 const uint8_t [IEEE80211_ADDR_LEN], 137 const uint8_t [IEEE80211_ADDR_LEN]); 138static void wpi_vap_delete(struct ieee80211vap *); 139static int wpi_detach(device_t); 140static int wpi_shutdown(device_t); 141static int wpi_suspend(device_t); 142static int wpi_resume(device_t); 143static int wpi_nic_lock(struct wpi_softc *); 144static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); 145static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); 146static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, 147 void **, bus_size_t, bus_size_t); 148static void wpi_dma_contig_free(struct wpi_dma_info *); 149static int wpi_alloc_shared(struct 
wpi_softc *); 150static void wpi_free_shared(struct wpi_softc *); 151static int wpi_alloc_fwmem(struct wpi_softc *); 152static void wpi_free_fwmem(struct wpi_softc *); 153static int wpi_alloc_rx_ring(struct wpi_softc *); 154static void wpi_update_rx_ring(struct wpi_softc *); 155static void wpi_reset_rx_ring(struct wpi_softc *); 156static void wpi_free_rx_ring(struct wpi_softc *); 157static int wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *, 158 int); 159static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 160static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 161static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); 162static int wpi_read_eeprom(struct wpi_softc *, 163 uint8_t macaddr[IEEE80211_ADDR_LEN]); 164static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *); 165static void wpi_read_eeprom_band(struct wpi_softc *, int); 166static int wpi_read_eeprom_channels(struct wpi_softc *, int); 167static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *, 168 struct ieee80211_channel *); 169static int wpi_setregdomain(struct ieee80211com *, 170 struct ieee80211_regdomain *, int, 171 struct ieee80211_channel[]); 172static int wpi_read_eeprom_group(struct wpi_softc *, int); 173static int wpi_add_node_entry_adhoc(struct wpi_softc *); 174static void wpi_node_free(struct ieee80211_node *); 175static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, 176 const uint8_t mac[IEEE80211_ADDR_LEN]); 177static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); 178static void wpi_calib_timeout(void *); 179static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *, 180 struct wpi_rx_data *); 181static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *, 182 struct wpi_rx_data *); 183static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *); 184static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *); 185static void 
wpi_notif_intr(struct wpi_softc *); 186static void wpi_wakeup_intr(struct wpi_softc *); 187#ifdef WPI_DEBUG 188static void wpi_debug_registers(struct wpi_softc *); 189#endif 190static void wpi_fatal_intr(struct wpi_softc *); 191static void wpi_intr(void *); 192static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *); 193static int wpi_tx_data(struct wpi_softc *, struct mbuf *, 194 struct ieee80211_node *); 195static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *, 196 struct ieee80211_node *, 197 const struct ieee80211_bpf_params *); 198static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, 199 const struct ieee80211_bpf_params *); 200static void wpi_start(struct ifnet *); 201static void wpi_start_task(void *, int); 202static void wpi_watchdog_rfkill(void *); 203static void wpi_scan_timeout(void *); 204static void wpi_tx_timeout(void *); 205static int wpi_ioctl(struct ifnet *, u_long, caddr_t); 206static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int); 207static int wpi_mrr_setup(struct wpi_softc *); 208static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *); 209static int wpi_add_broadcast_node(struct wpi_softc *, int); 210static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *); 211static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *); 212static int wpi_updateedca(struct ieee80211com *); 213static void wpi_set_promisc(struct wpi_softc *); 214static void wpi_update_promisc(struct ifnet *); 215static void wpi_update_mcast(struct ifnet *); 216static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); 217static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *); 218static void wpi_power_calibration(struct wpi_softc *); 219static int wpi_set_txpower(struct wpi_softc *, int); 220static int wpi_get_power_index(struct wpi_softc *, 221 struct wpi_power_group *, uint8_t, int, int); 222static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int); 223static int 
wpi_send_btcoex(struct wpi_softc *); 224static int wpi_send_rxon(struct wpi_softc *, int, int); 225static int wpi_config(struct wpi_softc *); 226static uint16_t wpi_get_active_dwell_time(struct wpi_softc *, 227 struct ieee80211_channel *, uint8_t); 228static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t); 229static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *, 230 struct ieee80211_channel *); 231static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *); 232static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); 233static int wpi_config_beacon(struct wpi_vap *); 234static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); 235static void wpi_update_beacon(struct ieee80211vap *, int); 236static void wpi_newassoc(struct ieee80211_node *, int); 237static int wpi_run(struct wpi_softc *, struct ieee80211vap *); 238static int wpi_load_key(struct ieee80211_node *, 239 const struct ieee80211_key *); 240static void wpi_load_key_cb(void *, struct ieee80211_node *); 241static int wpi_set_global_keys(struct ieee80211_node *); 242static int wpi_del_key(struct ieee80211_node *, 243 const struct ieee80211_key *); 244static void wpi_del_key_cb(void *, struct ieee80211_node *); 245static int wpi_process_key(struct ieee80211vap *, 246 const struct ieee80211_key *, int); 247static int wpi_key_set(struct ieee80211vap *, 248 const struct ieee80211_key *, 249 const uint8_t mac[IEEE80211_ADDR_LEN]); 250static int wpi_key_delete(struct ieee80211vap *, 251 const struct ieee80211_key *); 252static int wpi_post_alive(struct wpi_softc *); 253static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int); 254static int wpi_load_firmware(struct wpi_softc *); 255static int wpi_read_firmware(struct wpi_softc *); 256static void wpi_unload_firmware(struct wpi_softc *); 257static int wpi_clock_wait(struct wpi_softc *); 258static int wpi_apm_init(struct wpi_softc *); 259static void wpi_apm_stop_master(struct wpi_softc *); 260static void 
wpi_apm_stop(struct wpi_softc *); 261static void wpi_nic_config(struct wpi_softc *); 262static int wpi_hw_init(struct wpi_softc *); 263static void wpi_hw_stop(struct wpi_softc *); 264static void wpi_radio_on(void *, int); 265static void wpi_radio_off(void *, int); 266static void wpi_init(void *); 267static void wpi_stop_locked(struct wpi_softc *); 268static void wpi_stop(struct wpi_softc *); 269static void wpi_scan_start(struct ieee80211com *); 270static void wpi_scan_end(struct ieee80211com *); 271static void wpi_set_channel(struct ieee80211com *); 272static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); 273static void wpi_scan_mindwell(struct ieee80211_scan_state *); 274static void wpi_hw_reset(void *, int); 275 276static device_method_t wpi_methods[] = { 277 /* Device interface */ 278 DEVMETHOD(device_probe, wpi_probe), 279 DEVMETHOD(device_attach, wpi_attach), 280 DEVMETHOD(device_detach, wpi_detach), 281 DEVMETHOD(device_shutdown, wpi_shutdown), 282 DEVMETHOD(device_suspend, wpi_suspend), 283 DEVMETHOD(device_resume, wpi_resume), 284 285 DEVMETHOD_END 286}; 287 288static driver_t wpi_driver = { 289 "wpi", 290 wpi_methods, 291 sizeof (struct wpi_softc) 292}; 293static devclass_t wpi_devclass; 294 295DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL); 296 297MODULE_VERSION(wpi, 1); 298 299MODULE_DEPEND(wpi, pci, 1, 1, 1); 300MODULE_DEPEND(wpi, wlan, 1, 1, 1); 301MODULE_DEPEND(wpi, firmware, 1, 1, 1); 302 303static int 304wpi_probe(device_t dev) 305{ 306 const struct wpi_ident *ident; 307 308 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 309 if (pci_get_vendor(dev) == ident->vendor && 310 pci_get_device(dev) == ident->device) { 311 device_set_desc(dev, ident->name); 312 return (BUS_PROBE_DEFAULT); 313 } 314 } 315 return ENXIO; 316} 317 318static int 319wpi_attach(device_t dev) 320{ 321 struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev); 322 struct ieee80211com *ic; 323 struct ifnet *ifp; 324 int i, 
error, rid; 325#ifdef WPI_DEBUG 326 int supportsa = 1; 327 const struct wpi_ident *ident; 328#endif 329 uint8_t macaddr[IEEE80211_ADDR_LEN]; 330 331 sc->sc_dev = dev; 332 333#ifdef WPI_DEBUG 334 error = resource_int_value(device_get_name(sc->sc_dev), 335 device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); 336 if (error != 0) 337 sc->sc_debug = 0; 338#else 339 sc->sc_debug = 0; 340#endif 341 342 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 343 344 /* 345 * Get the offset of the PCI Express Capability Structure in PCI 346 * Configuration Space. 347 */ 348 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); 349 if (error != 0) { 350 device_printf(dev, "PCIe capability structure not found!\n"); 351 return error; 352 } 353 354 /* 355 * Some card's only support 802.11b/g not a, check to see if 356 * this is one such card. A 0x0 in the subdevice table indicates 357 * the entire subdevice range is to be ignored. 358 */ 359#ifdef WPI_DEBUG 360 for (ident = wpi_ident_table; ident->name != NULL; ident++) { 361 if (ident->subdevice && 362 pci_get_subdevice(dev) == ident->subdevice) { 363 supportsa = 0; 364 break; 365 } 366 } 367#endif 368 369 /* Clear device-specific "PCI retry timeout" register (41h). */ 370 pci_write_config(dev, 0x41, 0, 1); 371 372 /* Enable bus-mastering. */ 373 pci_enable_busmaster(dev); 374 375 rid = PCIR_BAR(0); 376 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 377 RF_ACTIVE); 378 if (sc->mem == NULL) { 379 device_printf(dev, "can't map mem space\n"); 380 return ENOMEM; 381 } 382 sc->sc_st = rman_get_bustag(sc->mem); 383 sc->sc_sh = rman_get_bushandle(sc->mem); 384 385 i = 1; 386 rid = 0; 387 if (pci_alloc_msi(dev, &i) == 0) 388 rid = 1; 389 /* Install interrupt handler. */ 390 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 391 (rid != 0 ? 
0 : RF_SHAREABLE)); 392 if (sc->irq == NULL) { 393 device_printf(dev, "can't map interrupt\n"); 394 error = ENOMEM; 395 goto fail; 396 } 397 398 WPI_LOCK_INIT(sc); 399 WPI_TX_LOCK_INIT(sc); 400 WPI_RXON_LOCK_INIT(sc); 401 WPI_NT_LOCK_INIT(sc); 402 WPI_TXQ_LOCK_INIT(sc); 403 WPI_TXQ_STATE_LOCK_INIT(sc); 404 405 /* Allocate DMA memory for firmware transfers. */ 406 if ((error = wpi_alloc_fwmem(sc)) != 0) { 407 device_printf(dev, 408 "could not allocate memory for firmware, error %d\n", 409 error); 410 goto fail; 411 } 412 413 /* Allocate shared page. */ 414 if ((error = wpi_alloc_shared(sc)) != 0) { 415 device_printf(dev, "could not allocate shared page\n"); 416 goto fail; 417 } 418 419 /* Allocate TX rings - 4 for QoS purposes, 1 for commands. */ 420 for (i = 0; i < WPI_NTXQUEUES; i++) { 421 if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { 422 device_printf(dev, 423 "could not allocate TX ring %d, error %d\n", i, 424 error); 425 goto fail; 426 } 427 } 428 429 /* Allocate RX ring. */ 430 if ((error = wpi_alloc_rx_ring(sc)) != 0) { 431 device_printf(dev, "could not allocate RX ring, error %d\n", 432 error); 433 goto fail; 434 } 435 436 /* Clear pending interrupts. */ 437 WPI_WRITE(sc, WPI_INT, 0xffffffff); 438 439 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); 440 if (ifp == NULL) { 441 device_printf(dev, "can not allocate ifnet structure\n"); 442 goto fail; 443 } 444 445 ic = ifp->if_l2com; 446 ic->ic_ifp = ifp; 447 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 448 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 449 450 /* Set device capabilities. 
*/ 451 ic->ic_caps = 452 IEEE80211_C_STA /* station mode supported */ 453 | IEEE80211_C_IBSS /* IBSS mode supported */ 454 | IEEE80211_C_HOSTAP /* Host access point mode */ 455 | IEEE80211_C_MONITOR /* monitor mode supported */ 456 | IEEE80211_C_AHDEMO /* adhoc demo mode */ 457 | IEEE80211_C_BGSCAN /* capable of bg scanning */ 458 | IEEE80211_C_TXPMGT /* tx power management */ 459 | IEEE80211_C_SHSLOT /* short slot time supported */ 460 | IEEE80211_C_WPA /* 802.11i */ 461 | IEEE80211_C_SHPREAMBLE /* short preamble supported */ 462 | IEEE80211_C_WME /* 802.11e */ 463 | IEEE80211_C_PMGT /* Station-side power mgmt */ 464 ; 465 466 ic->ic_cryptocaps = 467 IEEE80211_CRYPTO_AES_CCM; 468 469 /* 470 * Read in the eeprom and also setup the channels for 471 * net80211. We don't set the rates as net80211 does this for us 472 */ 473 if ((error = wpi_read_eeprom(sc, macaddr)) != 0) { 474 device_printf(dev, "could not read EEPROM, error %d\n", 475 error); 476 goto fail; 477 } 478 479#ifdef WPI_DEBUG 480 if (bootverbose) { 481 device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", 482 sc->domain); 483 device_printf(sc->sc_dev, "Hardware Type: %c\n", 484 sc->type > 1 ? 'B': '?'); 485 device_printf(sc->sc_dev, "Hardware Revision: %c\n", 486 ((sc->rev & 0xf0) == 0xd0) ? 'D': '?'); 487 device_printf(sc->sc_dev, "SKU %s support 802.11a\n", 488 supportsa ? "does" : "does not"); 489 490 /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must 491 check what sc->rev really represents - benjsc 20070615 */ 492 } 493#endif 494 495 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 496 ifp->if_softc = sc; 497 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 498 ifp->if_init = wpi_init; 499 ifp->if_ioctl = wpi_ioctl; 500 ifp->if_start = wpi_start; 501 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 502 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 503 IFQ_SET_READY(&ifp->if_snd); 504 505 ieee80211_ifattach(ic, macaddr); 506 ic->ic_vap_create = wpi_vap_create; 507 ic->ic_vap_delete = wpi_vap_delete; 508 ic->ic_raw_xmit = wpi_raw_xmit; 509 ic->ic_node_alloc = wpi_node_alloc; 510 sc->sc_node_free = ic->ic_node_free; 511 ic->ic_node_free = wpi_node_free; 512 ic->ic_wme.wme_update = wpi_updateedca; 513 ic->ic_update_promisc = wpi_update_promisc; 514 ic->ic_update_mcast = wpi_update_mcast; 515 ic->ic_newassoc = wpi_newassoc; 516 ic->ic_scan_start = wpi_scan_start; 517 ic->ic_scan_end = wpi_scan_end; 518 ic->ic_set_channel = wpi_set_channel; 519 ic->ic_scan_curchan = wpi_scan_curchan; 520 ic->ic_scan_mindwell = wpi_scan_mindwell; 521 ic->ic_setregdomain = wpi_setregdomain; 522 523 wpi_radiotap_attach(sc); 524 525 callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0); 526 callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0); 527 callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0); 528 callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0); 529 TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc); 530 TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc); 531 TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc); 532 TASK_INIT(&sc->sc_start_task, 0, wpi_start_task, sc); 533 534 sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK, 535 taskqueue_thread_enqueue, &sc->sc_tq); 536 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq"); 537 if (error != 0) { 538 device_printf(dev, "can't start threads, error %d\n", error); 539 goto fail; 540 } 541 542 wpi_sysctlattach(sc); 543 544 /* 545 * Hook our 
interrupt after all initialization is complete. 546 */ 547 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 548 NULL, wpi_intr, sc, &sc->sc_ih); 549 if (error != 0) { 550 device_printf(dev, "can't establish interrupt, error %d\n", 551 error); 552 goto fail; 553 } 554 555 if (bootverbose) 556 ieee80211_announce(ic); 557 558#ifdef WPI_DEBUG 559 if (sc->sc_debug & WPI_DEBUG_HW) 560 ieee80211_announce_channels(ic); 561#endif 562 563 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 564 return 0; 565 566fail: wpi_detach(dev); 567 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 568 return error; 569} 570 571/* 572 * Attach the interface to 802.11 radiotap. 573 */ 574static void 575wpi_radiotap_attach(struct wpi_softc *sc) 576{ 577 struct ifnet *ifp = sc->sc_ifp; 578 struct ieee80211com *ic = ifp->if_l2com; 579 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 580 ieee80211_radiotap_attach(ic, 581 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 582 WPI_TX_RADIOTAP_PRESENT, 583 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 584 WPI_RX_RADIOTAP_PRESENT); 585 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 586} 587 588static void 589wpi_sysctlattach(struct wpi_softc *sc) 590{ 591#ifdef WPI_DEBUG 592 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); 593 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); 594 595 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 596 "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, 597 "control debugging printfs"); 598#endif 599} 600 601static void 602wpi_init_beacon(struct wpi_vap *wvp) 603{ 604 struct wpi_buf *bcn = &wvp->wv_bcbuf; 605 struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data; 606 607 cmd->id = WPI_ID_BROADCAST; 608 cmd->ofdm_mask = 0xff; 609 cmd->cck_mask = 0x0f; 610 cmd->lifetime = htole32(WPI_LIFETIME_INFINITE); 611 cmd->flags = htole32(WPI_TX_AUTO_SEQ | WPI_TX_INSERT_TSTAMP); 612 613 bcn->code = WPI_CMD_SET_BEACON; 614 bcn->ac = 
WPI_CMD_QUEUE_NUM;
	bcn->size = sizeof(struct wpi_cmd_beacon);
}

/*
 * net80211 vap_create method: allocate and attach a single wpi vap.
 * Only one vap at a time is supported by this hardware/driver.
 */
static struct ieee80211vap *
wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct wpi_vap *wvp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))		/* only one at a time */
		return NULL;

	wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap),
	    M_80211_VAP, M_NOWAIT | M_ZERO);
	if (wvp == NULL)
		return NULL;
	vap = &wvp->wv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);

	/* Only beacon-transmitting modes need the beacon buffer and lock. */
	if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
		WPI_VAP_LOCK_INIT(wvp);
		wpi_init_beacon(wvp);
	}

	/* Override with driver methods. */
	vap->iv_key_set = wpi_key_set;
	vap->iv_key_delete = wpi_key_delete;
	wvp->wv_newstate = vap->iv_newstate;	/* save for chaining */
	vap->iv_newstate = wpi_newstate;
	vap->iv_update_beacon = wpi_update_beacon;
	vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status);
	ic->ic_opmode = opmode;
	return vap;
}

/*
 * net80211 vap_delete method: detach and free the vap created above.
 */
static void
wpi_vap_delete(struct ieee80211vap *vap)
{
	struct wpi_vap *wvp = WPI_VAP(vap);
	struct wpi_buf *bcn = &wvp->wv_bcbuf;
	enum ieee80211_opmode opmode = vap->iv_opmode;

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);

	/* Release beacon state only if wpi_vap_create() set it up. */
	if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
		if (bcn->m != NULL)
			m_freem(bcn->m);

		WPI_VAP_LOCK_DESTROY(wvp);
	}

	free(wvp, M_80211_VAP);
}

/*
 * Detach the device: stop the hardware, drain deferred work, then release
 * interrupt, DMA and bus resources.  Also used as the error-unwind path by
 * wpi_attach(), so every step is guarded against partially-done setup.
 */
static int
wpi_detach(device_t dev)
{
	struct wpi_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic;
	int qid;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	if (ifp != NULL) {
		ic = ifp->if_l2com;

		ieee80211_draintask(ic, &sc->sc_radioon_task);
		ieee80211_draintask(ic, &sc->sc_start_task);

		wpi_stop(sc);

		taskqueue_drain_all(sc->sc_tq);
		taskqueue_free(sc->sc_tq);

		callout_drain(&sc->watchdog_rfkill);
		callout_drain(&sc->tx_timeout);
		callout_drain(&sc->scan_timeout);
		callout_drain(&sc->calib_to);
		ieee80211_ifdetach(ic);
	}

	/* Uninstall interrupt handler. */
	if (sc->irq != NULL) {
		bus_teardown_intr(dev, sc->irq, sc->sc_ih);
		bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
		    sc->irq);
		pci_release_msi(dev);
	}

	if (sc->txq[0].data_dmat) {
		/* Free DMA resources.
 */
		for (qid = 0; qid < WPI_NTXQUEUES; qid++)
			wpi_free_tx_ring(sc, &sc->txq[qid]);

		wpi_free_rx_ring(sc);
		wpi_free_shared(sc);
	}

	if (sc->fw_dma.tag)
		wpi_free_fwmem(sc);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem), sc->mem);

	if (ifp != NULL)
		if_free(ifp);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
	WPI_TXQ_STATE_LOCK_DESTROY(sc);
	WPI_TXQ_LOCK_DESTROY(sc);
	WPI_NT_LOCK_DESTROY(sc);
	WPI_RXON_LOCK_DESTROY(sc);
	WPI_TX_LOCK_DESTROY(sc);
	WPI_LOCK_DESTROY(sc);
	return 0;
}

/* Quiesce the hardware on system shutdown. */
static int
wpi_shutdown(device_t dev)
{
	struct wpi_softc *sc = device_get_softc(dev);

	wpi_stop(sc);
	return 0;
}

/* Suspend: let net80211 bring all vaps down (which stops the hardware). */
static int
wpi_suspend(device_t dev)
{
	struct wpi_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	ieee80211_suspend_all(ic);
	return 0;
}

/* Resume: redo the config-space fixup, then let net80211 restart vaps. */
static int
wpi_resume(device_t dev)
{
	struct wpi_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	pci_write_config(dev, 0x41, 0, 1);

	ieee80211_resume_all(ic);
	return 0;
}

/*
 * Grab exclusive access to NIC memory.
 */
static int
wpi_nic_lock(struct wpi_softc *sc)
{
	int ntries;

	/* Request exclusive access to NIC. */
	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);

	/* Spin until we actually get the lock. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((WPI_READ(sc, WPI_GP_CNTRL) &
		    (WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) ==
		    WPI_GP_CNTRL_MAC_ACCESS_ENA)
			return 0;
		DELAY(10);
	}

	device_printf(sc->sc_dev, "could not lock memory\n");

	return ETIMEDOUT;
}

/*
 * Release lock on NIC memory.
802 */ 803static __inline void 804wpi_nic_unlock(struct wpi_softc *sc) 805{ 806 WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 807} 808 809static __inline uint32_t 810wpi_prph_read(struct wpi_softc *sc, uint32_t addr) 811{ 812 WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr); 813 WPI_BARRIER_READ_WRITE(sc); 814 return WPI_READ(sc, WPI_PRPH_RDATA); 815} 816 817static __inline void 818wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data) 819{ 820 WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr); 821 WPI_BARRIER_WRITE(sc); 822 WPI_WRITE(sc, WPI_PRPH_WDATA, data); 823} 824 825static __inline void 826wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 827{ 828 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask); 829} 830 831static __inline void 832wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask) 833{ 834 wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask); 835} 836 837static __inline void 838wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr, 839 const uint32_t *data, int count) 840{ 841 for (; count > 0; count--, data++, addr += 4) 842 wpi_prph_write(sc, addr, *data); 843} 844 845static __inline uint32_t 846wpi_mem_read(struct wpi_softc *sc, uint32_t addr) 847{ 848 WPI_WRITE(sc, WPI_MEM_RADDR, addr); 849 WPI_BARRIER_READ_WRITE(sc); 850 return WPI_READ(sc, WPI_MEM_RDATA); 851} 852 853static __inline void 854wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data, 855 int count) 856{ 857 for (; count > 0; count--, addr += 4) 858 *data++ = wpi_mem_read(sc, addr); 859} 860 861static int 862wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count) 863{ 864 uint8_t *out = data; 865 uint32_t val; 866 int error, ntries; 867 868 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 869 870 if ((error = wpi_nic_lock(sc)) != 0) 871 return error; 872 873 for (; count > 0; count -= 2, addr++) { 874 WPI_WRITE(sc, WPI_EEPROM, addr << 2); 875 for (ntries 
= 0; ntries < 10; ntries++) { 876 val = WPI_READ(sc, WPI_EEPROM); 877 if (val & WPI_EEPROM_READ_VALID) 878 break; 879 DELAY(5); 880 } 881 if (ntries == 10) { 882 device_printf(sc->sc_dev, 883 "timeout reading ROM at 0x%x\n", addr); 884 return ETIMEDOUT; 885 } 886 *out++= val >> 16; 887 if (count > 1) 888 *out ++= val >> 24; 889 } 890 891 wpi_nic_unlock(sc); 892 893 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 894 895 return 0; 896} 897 898static void 899wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 900{ 901 if (error != 0) 902 return; 903 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); 904 *(bus_addr_t *)arg = segs[0].ds_addr; 905} 906 907/* 908 * Allocates a contiguous block of dma memory of the requested size and 909 * alignment. 910 */ 911static int 912wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, 913 void **kvap, bus_size_t size, bus_size_t alignment) 914{ 915 int error; 916 917 dma->tag = NULL; 918 dma->size = size; 919 920 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 921 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 922 1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag); 923 if (error != 0) 924 goto fail; 925 926 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, 927 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); 928 if (error != 0) 929 goto fail; 930 931 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, 932 wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); 933 if (error != 0) 934 goto fail; 935 936 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 937 938 if (kvap != NULL) 939 *kvap = dma->vaddr; 940 941 return 0; 942 943fail: wpi_dma_contig_free(dma); 944 return error; 945} 946 947static void 948wpi_dma_contig_free(struct wpi_dma_info *dma) 949{ 950 if (dma->vaddr != NULL) { 951 bus_dmamap_sync(dma->tag, dma->map, 952 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 953 bus_dmamap_unload(dma->tag, 
dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}

/*
 * Allocate a shared page between host and NIC.
 */
static int
wpi_alloc_shared(struct wpi_softc *sc)
{
	/* Shared buffer must be aligned on a 4KB boundary. */
	return wpi_dma_contig_alloc(sc, &sc->shared_dma,
	    (void **)&sc->shared, sizeof (struct wpi_shared), 4096);
}

static void
wpi_free_shared(struct wpi_softc *sc)
{
	wpi_dma_contig_free(&sc->shared_dma);
}

/*
 * Allocate DMA-safe memory for firmware transfer.
 */
static int
wpi_alloc_fwmem(struct wpi_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL,
	    WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16);
}

static void
wpi_free_fwmem(struct wpi_softc *sc)
{
	wpi_dma_contig_free(&sc->fw_dma);
}

/*
 * Allocate the RX descriptor ring and one jumbo-page mbuf per slot.
 * On any failure, wpi_free_rx_ring() unwinds everything allocated so far.
 */
static int
wpi_alloc_rx_ring(struct wpi_softc *sc)
{
	struct wpi_rx_ring *ring = &sc->rxq;
	bus_size_t size;
	int i, error;

	ring->cur = 0;
	ring->update = 0;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	/* Allocate RX descriptors (16KB aligned.) */
	size = WPI_RX_RING_COUNT * sizeof (uint32_t);
	error = wpi_dma_contig_alloc(sc, &ring->desc_dma,
	    (void **)&ring->desc, size, WPI_RING_DMA_ALIGN);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate RX ring DMA memory, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL,
	    &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < WPI_RX_RING_COUNT; i++) {
		struct wpi_rx_data *data = &ring->data[i];
		bus_addr_t paddr;

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}

		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
		if (data->m == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not allocate RX mbuf\n", __func__);
			error = ENOBUFS;
			goto fail;
		}

		/* NOTE(review): EFBIG is tolerated here — confirm intent. */
		error = bus_dmamap_load(ring->data_dmat, data->map,
		    mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
		    &paddr, BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf (error %d)\n", __func__,
			    error);
			goto fail;
		}

		/* Set physical address of RX buffer. */
		ring->desc[i] = htole32(paddr);
	}

	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	return 0;

fail:	wpi_free_rx_ring(sc);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);

	return error;
}

/*
 * Tell the firmware how far the driver has processed the RX ring, waking
 * the MAC first if it is asleep.
 */
static void
wpi_update_rx_ring(struct wpi_softc *sc)
{
	struct wpi_rx_ring *ring = &sc->rxq;

	if (ring->update != 0) {
		/* Wait for INT_WAKEUP event.
*/ 1090 return; 1091 } 1092 1093 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) { 1094 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n", 1095 __func__); 1096 1097 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1098 ring->update = 1; 1099 } else 1100 WPI_WRITE(sc, WPI_FH_RX_WPTR, ring->cur & ~7); 1101} 1102 1103static void 1104wpi_reset_rx_ring(struct wpi_softc *sc) 1105{ 1106 struct wpi_rx_ring *ring = &sc->rxq; 1107 int ntries; 1108 1109 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1110 1111 if (wpi_nic_lock(sc) == 0) { 1112 WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0); 1113 for (ntries = 0; ntries < 1000; ntries++) { 1114 if (WPI_READ(sc, WPI_FH_RX_STATUS) & 1115 WPI_FH_RX_STATUS_IDLE) 1116 break; 1117 DELAY(10); 1118 } 1119 wpi_nic_unlock(sc); 1120 } 1121 1122 ring->cur = 0; 1123 ring->update = 0; 1124} 1125 1126static void 1127wpi_free_rx_ring(struct wpi_softc *sc) 1128{ 1129 struct wpi_rx_ring *ring = &sc->rxq; 1130 int i; 1131 1132 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1133 1134 wpi_dma_contig_free(&ring->desc_dma); 1135 1136 for (i = 0; i < WPI_RX_RING_COUNT; i++) { 1137 struct wpi_rx_data *data = &ring->data[i]; 1138 1139 if (data->m != NULL) { 1140 bus_dmamap_sync(ring->data_dmat, data->map, 1141 BUS_DMASYNC_POSTREAD); 1142 bus_dmamap_unload(ring->data_dmat, data->map); 1143 m_freem(data->m); 1144 data->m = NULL; 1145 } 1146 if (data->map != NULL) 1147 bus_dmamap_destroy(ring->data_dmat, data->map); 1148 } 1149 if (ring->data_dmat != NULL) { 1150 bus_dma_tag_destroy(ring->data_dmat); 1151 ring->data_dmat = NULL; 1152 } 1153} 1154 1155static int 1156wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid) 1157{ 1158 bus_addr_t paddr; 1159 bus_size_t size; 1160 int i, error; 1161 1162 ring->qid = qid; 1163 ring->queued = 0; 1164 ring->cur = 0; 1165 ring->update = 0; 1166 1167 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1168 1169 /* Allocate TX descriptors (16KB aligned.) 
*/ 1170 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc); 1171 error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, 1172 size, WPI_RING_DMA_ALIGN); 1173 if (error != 0) { 1174 device_printf(sc->sc_dev, 1175 "%s: could not allocate TX ring DMA memory, error %d\n", 1176 __func__, error); 1177 goto fail; 1178 } 1179 1180 /* Update shared area with ring physical address. */ 1181 sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); 1182 bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map, 1183 BUS_DMASYNC_PREWRITE); 1184 1185 /* 1186 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need 1187 * to allocate commands space for other rings. 1188 * XXX Do we really need to allocate descriptors for other rings? 1189 */ 1190 if (qid > WPI_CMD_QUEUE_NUM) { 1191 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1192 return 0; 1193 } 1194 1195 size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd); 1196 error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, 1197 size, 4); 1198 if (error != 0) { 1199 device_printf(sc->sc_dev, 1200 "%s: could not allocate TX cmd DMA memory, error %d\n", 1201 __func__, error); 1202 goto fail; 1203 } 1204 1205 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 1206 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1207 WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, 1208 &ring->data_dmat); 1209 if (error != 0) { 1210 device_printf(sc->sc_dev, 1211 "%s: could not create TX buf DMA tag, error %d\n", 1212 __func__, error); 1213 goto fail; 1214 } 1215 1216 paddr = ring->cmd_dma.paddr; 1217 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1218 struct wpi_tx_data *data = &ring->data[i]; 1219 1220 data->cmd_paddr = paddr; 1221 paddr += sizeof (struct wpi_tx_cmd); 1222 1223 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1224 if (error != 0) { 1225 device_printf(sc->sc_dev, 1226 "%s: could not create TX buf DMA map, error %d\n", 1227 __func__, error); 
1228 goto fail; 1229 } 1230 } 1231 1232 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 1233 1234 return 0; 1235 1236fail: wpi_free_tx_ring(sc, ring); 1237 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 1238 return error; 1239} 1240 1241static void 1242wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1243{ 1244 if (ring->update != 0) { 1245 /* Wait for INT_WAKEUP event. */ 1246 return; 1247 } 1248 1249 if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP) { 1250 DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n", 1251 __func__, ring->qid); 1252 1253 WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ); 1254 ring->update = 1; 1255 } else 1256 WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 1257} 1258 1259static void 1260wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1261{ 1262 int i; 1263 1264 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1265 1266 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1267 struct wpi_tx_data *data = &ring->data[i]; 1268 1269 if (data->m != NULL) { 1270 bus_dmamap_sync(ring->data_dmat, data->map, 1271 BUS_DMASYNC_POSTWRITE); 1272 bus_dmamap_unload(ring->data_dmat, data->map); 1273 m_freem(data->m); 1274 data->m = NULL; 1275 } 1276 } 1277 /* Clear TX descriptors. 
*/ 1278 memset(ring->desc, 0, ring->desc_dma.size); 1279 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1280 BUS_DMASYNC_PREWRITE); 1281 sc->qfullmsk &= ~(1 << ring->qid); 1282 ring->queued = 0; 1283 ring->cur = 0; 1284 ring->update = 0; 1285} 1286 1287static void 1288wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) 1289{ 1290 int i; 1291 1292 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 1293 1294 wpi_dma_contig_free(&ring->desc_dma); 1295 wpi_dma_contig_free(&ring->cmd_dma); 1296 1297 for (i = 0; i < WPI_TX_RING_COUNT; i++) { 1298 struct wpi_tx_data *data = &ring->data[i]; 1299 1300 if (data->m != NULL) { 1301 bus_dmamap_sync(ring->data_dmat, data->map, 1302 BUS_DMASYNC_POSTWRITE); 1303 bus_dmamap_unload(ring->data_dmat, data->map); 1304 m_freem(data->m); 1305 } 1306 if (data->map != NULL) 1307 bus_dmamap_destroy(ring->data_dmat, data->map); 1308 } 1309 if (ring->data_dmat != NULL) { 1310 bus_dma_tag_destroy(ring->data_dmat); 1311 ring->data_dmat = NULL; 1312 } 1313} 1314 1315/* 1316 * Extract various information from EEPROM. 1317 */ 1318static int 1319wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) 1320{ 1321#define WPI_CHK(res) do { \ 1322 if ((error = res) != 0) \ 1323 goto fail; \ 1324} while (0) 1325 int error, i; 1326 1327 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 1328 1329 /* Adapter has to be powered on for EEPROM access to work. */ 1330 if ((error = wpi_apm_init(sc)) != 0) { 1331 device_printf(sc->sc_dev, 1332 "%s: could not power ON adapter, error %d\n", __func__, 1333 error); 1334 return error; 1335 } 1336 1337 if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) { 1338 device_printf(sc->sc_dev, "bad EEPROM signature\n"); 1339 error = EIO; 1340 goto fail; 1341 } 1342 /* Clear HW ownership of EEPROM. */ 1343 WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER); 1344 1345 /* Read the hardware capabilities, revision and SKU type. 
*/ 1346 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap, 1347 sizeof(sc->cap))); 1348 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev, 1349 sizeof(sc->rev))); 1350 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1351 sizeof(sc->type))); 1352 1353 sc->rev = le16toh(sc->rev); 1354 DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap, 1355 sc->rev, sc->type); 1356 1357 /* Read the regulatory domain (4 ASCII characters.) */ 1358 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 1359 sizeof(sc->domain))); 1360 1361 /* Read MAC address. */ 1362 WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr, 1363 IEEE80211_ADDR_LEN)); 1364 1365 /* Read the list of authorized channels. */ 1366 for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) 1367 WPI_CHK(wpi_read_eeprom_channels(sc, i)); 1368 1369 /* Read the list of TX power groups. */ 1370 for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) 1371 WPI_CHK(wpi_read_eeprom_group(sc, i)); 1372 1373fail: wpi_apm_stop(sc); /* Power OFF adapter. */ 1374 1375 DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END, 1376 __func__); 1377 1378 return error; 1379#undef WPI_CHK 1380} 1381 1382/* 1383 * Translate EEPROM flags to net80211. 
 */
static uint32_t
wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel)
{
	uint32_t nflags;

	nflags = 0;
	/* Not "active" means we may only listen, not initiate. */
	if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0)
		nflags |= IEEE80211_CHAN_PASSIVE;
	if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0)
		nflags |= IEEE80211_CHAN_NOADHOC;
	if (channel->flags & WPI_EEPROM_CHAN_RADAR) {
		nflags |= IEEE80211_CHAN_DFS;
		/* XXX apparently IBSS may still be marked */
		nflags |= IEEE80211_CHAN_NOADHOC;
	}

	/* XXX HOSTAP uses WPI_MODE_IBSS */
	if (nflags & IEEE80211_CHAN_NOADHOC)
		nflags |= IEEE80211_CHAN_NOHOSTAP;

	return nflags;
}

/*
 * Populate ic_channels[] from the EEPROM channel table of band n.
 * Band 0 is 2GHz (each channel added twice: 11b then 11g); other bands
 * are 5GHz (11a).  Also records per-channel maximum TX power in
 * sc->maxpwr[].
 */
static void
wpi_read_eeprom_band(struct wpi_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct wpi_eeprom_chan *channels = sc->eeprom_channels[n];
	const struct wpi_chan_band *band = &wpi_bands[n];
	struct ieee80211_channel *c;
	uint8_t chan;
	int i, nflags;

	for (i = 0; i < band->nchan; i++) {
		if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) {
			DPRINTF(sc, WPI_DEBUG_EEPROM,
			    "Channel Not Valid: %d, band %d\n",
			    band->chan[i],n);
			continue;
		}

		chan = band->chan[i];
		nflags = wpi_eeprom_channel_flags(&channels[i]);

		c = &ic->ic_channels[ic->ic_nchans++];
		c->ic_ieee = chan;
		c->ic_maxregpower = channels[i].maxpwr;
		c->ic_maxpower = 2*c->ic_maxregpower;

		if (n == 0) {	/* 2GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan,
			    IEEE80211_CHAN_G);

			/* G =>'s B is supported */
			c->ic_flags = IEEE80211_CHAN_B | nflags;
			/* Duplicate the entry for 11g. */
			c = &ic->ic_channels[ic->ic_nchans++];
			c[0] = c[-1];
			c->ic_flags = IEEE80211_CHAN_G | nflags;
		} else {	/* 5GHz band */
			c->ic_freq = ieee80211_ieee2mhz(chan,
			    IEEE80211_CHAN_A);

			c->ic_flags = IEEE80211_CHAN_A | nflags;
		}

		/* Save maximum allowed TX power for this channel. */
		sc->maxpwr[chan] = channels[i].maxpwr;

		DPRINTF(sc, WPI_DEBUG_EEPROM,
		    "adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d,"
		    " offset %d\n", chan, c->ic_freq,
		    channels[i].flags, sc->maxpwr[chan],
		    IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans);
	}
}

/**
 * Read the eeprom to find out what channels are valid for the given
 * band and update net80211 with what we find.
 */
static int
wpi_read_eeprom_channels(struct wpi_softc *sc, int n)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct wpi_chan_band *band = &wpi_bands[n];
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n],
	    band->nchan * sizeof (struct wpi_eeprom_chan));
	if (error != 0) {
		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
		return error;
	}

	wpi_read_eeprom_band(sc, n);

	ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	return 0;
}

/*
 * Map a net80211 channel back to its cached EEPROM channel entry, or
 * NULL if the IEEE channel number is not in any known band.
 */
static struct wpi_eeprom_chan *
wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c)
{
	int i, j;

	for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++)
		for (i = 0; i < wpi_bands[j].nchan; i++)
			if (wpi_bands[j].chan[i] == c->ic_ieee)
				return &sc->eeprom_channels[j][i];

	return NULL;
}

/*
 * Enforce flags read from EEPROM.
 */
static int
wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
    int nchan, struct ieee80211_channel chans[])
{
	struct ifnet *ifp = ic->ic_ifp;
	struct wpi_softc *sc = ifp->if_softc;
	int i;

	/* Re-apply the EEPROM restrictions on every proposed channel;
	 * reject the whole set if any channel is unknown to the EEPROM. */
	for (i = 0; i < nchan; i++) {
		struct ieee80211_channel *c = &chans[i];
		struct wpi_eeprom_chan *channel;

		channel = wpi_find_eeprom_channel(sc, c);
		if (channel == NULL) {
			if_printf(ic->ic_ifp,
			    "%s: invalid channel %u freq %u/0x%x\n",
			    __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
			return EINVAL;
		}
		c->ic_flags |= wpi_eeprom_channel_flags(channel);
	}

	return 0;
}

/*
 * Read TX power group n from the EEPROM into sc->groups[n]: group channel,
 * maximum power, calibration temperature and the power samples.
 */
static int
wpi_read_eeprom_group(struct wpi_softc *sc, int n)
{
	struct wpi_power_group *group = &sc->groups[n];
	struct wpi_eeprom_group rgroup;
	int i, error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32,
	    &rgroup, sizeof rgroup)) != 0) {
		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
		return error;
	}

	/* Save TX power group information. */
	group->chan = rgroup.chan;
	group->maxpwr = rgroup.maxpwr;
	/* Retrieve temperature at which the samples were taken. */
	group->temp = (int16_t)le16toh(rgroup.temp);

	DPRINTF(sc, WPI_DEBUG_EEPROM,
	    "power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan,
	    group->maxpwr, group->temp);

	for (i = 0; i < WPI_SAMPLES_COUNT; i++) {
		group->samples[i].index = rgroup.samples[i].index;
		group->samples[i].power = rgroup.samples[i].power;

		DPRINTF(sc, WPI_DEBUG_EEPROM,
		    "\tsample %d: index=%d power=%d\n", i,
		    group->samples[i].index, group->samples[i].power);
	}

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	return 0;
}

/*
 * Allocate a firmware node id for an IBSS peer: first free bit in
 * nodesmsk within [WPI_ID_IBSS_MIN, WPI_ID_IBSS_MAX], or
 * WPI_ID_UNDEFINED if the table is full.
 * NOTE(review): callers appear to hold WPI_NT_LOCK — confirm.
 */
static int
wpi_add_node_entry_adhoc(struct wpi_softc *sc)
{
	int newid = WPI_ID_IBSS_MIN;

	for (; newid <= WPI_ID_IBSS_MAX; newid++) {
		if ((sc->nodesmsk & (1 << newid)) == 0) {
			sc->nodesmsk |= 1 << newid;
			return newid;
		}
	}

	return WPI_ID_UNDEFINED;
}

/* Mark the fixed BSS node id as in use and return it. */
static __inline int
wpi_add_node_entry_sta(struct wpi_softc *sc)
{
	sc->nodesmsk |= 1 << WPI_ID_BSS;

	return WPI_ID_BSS;
}

/* Return non-zero iff node id is allocated in the node table. */
static __inline int
wpi_check_node_entry(struct wpi_softc *sc, uint8_t id)
{
	if (id == WPI_ID_UNDEFINED)
		return 0;

	return (sc->nodesmsk >> id) & 1;
}

/* Forget all allocated node ids. */
static __inline void
wpi_clear_node_table(struct wpi_softc *sc)
{
	sc->nodesmsk = 0;
}

/* Release a single node id. */
static __inline void
wpi_del_node_entry(struct wpi_softc *sc, uint8_t id)
{
	sc->nodesmsk &= ~(1 << id);
}

/*
 * net80211 node allocation hook: embed ieee80211_node in a wpi_node and
 * start with no firmware id assigned.
 */
static struct ieee80211_node *
wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct wpi_node *wn;

	wn = malloc(sizeof (struct wpi_node), M_80211_NODE,
	    M_NOWAIT | M_ZERO);

	if (wn == NULL)
		return NULL;

	wn->id = WPI_ID_UNDEFINED;

	return &wn->ni;
}

/*
 * net80211 node free hook: drop the firmware node entry (if any) before
 * chaining to the saved original free routine.
 */
static void
wpi_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct wpi_softc *sc = ic->ic_ifp->if_softc;
	struct wpi_node *wn = WPI_NODE(ni);

	if (wn->id != WPI_ID_UNDEFINED) {
		WPI_NT_LOCK(sc);
		if (wpi_check_node_entry(sc, wn->id)) {
			wpi_del_node_entry(sc, wn->id);
			wpi_del_node(sc, ni);
		}
		WPI_NT_UNLOCK(sc);
	}

	sc->sc_node_free(ni);
}

/* Non-zero iff the RXON filter currently has the BSS filter bit set. */
static __inline int
wpi_check_bss_filter(struct wpi_softc *sc)
{
	return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0;
}

/**
 * Called by net80211 when ever there is a change to 80211 state machine
 */
static int
wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct wpi_vap *wvp = WPI_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct wpi_softc *sc = ifp->if_softc;
	int error = 0;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__,
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);

	/* Leaving RUN: disable power saving and switch the link LED off. */
	if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) {
		if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not set power saving level\n",
			    __func__);
			return error;
		}

		wpi_set_led(sc, WPI_LED_LINK, 1, 0);
	}

	switch (nstate) {
	case IEEE80211_S_SCAN:
		WPI_RXON_LOCK(sc);
		if (wpi_check_bss_filter(sc) != 0 &&
		    vap->iv_opmode != IEEE80211_M_STA) {
			sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
			if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: could not send RXON\n", __func__);
			}
		}
		WPI_RXON_UNLOCK(sc);
		break;

	case IEEE80211_S_ASSOC:
		if (vap->iv_state != IEEE80211_S_RUN)
			break;
		/* FALLTHROUGH */
	case IEEE80211_S_AUTH:
		/*
		 * The node must be registered in the firmware before auth.
		 * Also the associd must be cleared on RUN -> ASSOC
		 * transitions.
		 */
		if ((error = wpi_auth(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to AUTH state, error %d\n",
			    __func__, error);
		}
		break;

	case IEEE80211_S_RUN:
		/*
		 * RUN -> RUN transition; Just restart the timers.
		 */
		if (vap->iv_state == IEEE80211_S_RUN) {
			WPI_RXON_LOCK(sc);
			wpi_calib_timeout(sc);
			WPI_RXON_UNLOCK(sc);
			break;
		}

		/*
		 * !RUN -> RUN requires setting the association id
		 * which is done with a firmware cmd.  We also defer
		 * starting the timers until that work is done.
		 */
		if ((error = wpi_run(sc, vap)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to RUN state\n", __func__);
		}
		break;

	default:
		break;
	}
	if (error != 0) {
		DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
		return error;
	}

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	/* Chain to the saved net80211 state handler. */
	return wvp->wv_newstate(vap, nstate, arg);
}

/*
 * Periodic (60s) TX power calibration; only runs while associated
 * (BSS filter set).  Re-arms itself via sc->calib_to.
 */
static void
wpi_calib_timeout(void *arg)
{
	struct wpi_softc *sc = arg;

	if (wpi_check_bss_filter(sc) == 0)
		return;

	wpi_power_calibration(sc);

	callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
}

/*
 * Map a net80211 rate (in 500kbps units) to the hardware PLCP code;
 * returns 0 for unknown rates.
 */
static __inline uint8_t
rate2plcp(const uint8_t rate)
{
	switch (rate) {
	case 12:	return 0xd;
	case 18:	return 0xf;
	case 24:	return 0x5;
	case 36:	return 0x7;
	case 48:	return 0x9;
	case 72:	return 0xb;
	case 96:	return 0x1;
	case 108:	return 0x3;
	case 2:		return 10;
	case 4:		return 20;
	case 11:	return 55;
	case 22:	return 110;
	default:	return 0;
	}
}

/*
 * Inverse of rate2plcp(); returns 0 for unknown PLCP codes.
 */
static __inline uint8_t
plcp2rate(const uint8_t plcp)
{
	switch (plcp) {
	case 0xd:	return 12;
	case 0xf:	return 18;
	case 0x5:	return 24;
	case 0x7:	return 36;
	case 0x9:	return 48;
	case 0xb:	return 72;
	case 0x1:	return 96;
	case 0x3:	return 108;
	case 10:	return 2;
	case 20:	return 4;
	case 55:	return 11;
	case 110:	return 22;
	default:	return 0;
	}
}

/* Quickly determine if a given rate is CCK or OFDM. */
#define WPI_RATE_IS_OFDM(rate)	((rate) >= 12 && (rate) != 22)

/*
 * Handle an RX_DONE notification: validate the frame, swap in a fresh
 * mbuf so the ring slot stays armed, then hand the received frame to
 * net80211 (dropping the driver lock around ieee80211_input*()).
 * On any error the frame is counted as an input error and dropped.
 */
static void
wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc,
    struct wpi_rx_data *data)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct wpi_rx_ring *ring = &sc->rxq;
	struct wpi_rx_stat *stat;
	struct wpi_rx_head *head;
	struct wpi_rx_tail *tail;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct mbuf *m, *m1;
	bus_addr_t paddr;
	uint32_t flags;
	uint16_t len;
	int error;

	stat = (struct wpi_rx_stat *)(desc + 1);

	if (stat->len > WPI_STAT_MAXLEN) {
		device_printf(sc->sc_dev, "invalid RX statistic header\n");
		goto fail1;
	}

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
	/* Buffer layout: stat | head | frame | tail. */
	head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len);
	len = le16toh(head->len);
	tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len);
	flags = le32toh(tail->flags);

	DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d"
	    " rate %x chan %d tstamp %ju\n", __func__, ring->cur,
	    le32toh(desc->len), len, (int8_t)stat->rssi,
	    head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp));

	/* Discard frames with a bad FCS early. */
	if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) {
		DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n",
		    __func__, flags);
		goto fail1;
	}
	/* Discard frames that are too short. */
	if (len < sizeof (*wh)) {
		DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n",
		    __func__, len);
		goto fail1;
	}

	/* Get a replacement buffer before giving this one away. */
	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
	if (m1 == NULL) {
		DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n",
		    __func__);
		goto fail1;
	}
	bus_dmamap_unload(ring->data_dmat, data->map);

	error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
	    MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG) {
		device_printf(sc->sc_dev,
		    "%s: bus_dmamap_load failed, error %d\n", __func__, error);
		m_freem(m1);

		/* Try to reload the old mbuf. */
		error = bus_dmamap_load(ring->data_dmat, data->map,
		    mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
		    &paddr, BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG) {
			panic("%s: could not load old RX mbuf", __func__);
		}
		/* Physical address may have changed. */
		ring->desc[ring->cur] = htole32(paddr);
		bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
		    BUS_DMASYNC_PREWRITE);
		goto fail1;
	}

	m = data->m;
	data->m = m1;
	/* Update RX descriptor. */
	ring->desc[ring->cur] = htole32(paddr);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* Finalize mbuf. */
	m->m_pkthdr.rcvif = ifp;
	m->m_data = (caddr_t)(head + 1);
	m->m_pkthdr.len = m->m_len = len;

	/* Grab a reference to the source node. */
	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
	    (flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) {
		/* Check whether decryption was successful or not. */
		if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) {
			DPRINTF(sc, WPI_DEBUG_RECV,
			    "CCMP decryption failed 0x%x\n", flags);
			goto fail2;
		}
		m->m_flags |= M_WEP;
	}

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);

	if (ieee80211_radiotap_active(ic)) {
		struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET);
		tap->wr_dbm_antnoise = WPI_RSSI_OFFSET;
		tap->wr_tsft = tail->tstamp;
		tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf;
		tap->wr_rate = plcp2rate(head->plcp);
	}

	/* Drop the driver lock while net80211 processes the frame. */
	WPI_UNLOCK(sc);

	/* Send the frame to the 802.11 layer. */
	if (ni != NULL) {
		(void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET);
		/* Node is no longer needed. */
		ieee80211_free_node(ni);
	} else
		(void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET);

	WPI_LOCK(sc);

	return;

fail2:	m_freem(m);

fail1:	if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
}

/* Statistics notifications are currently unused. */
static void
wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc,
    struct wpi_rx_data *data)
{
	/* Ignore */
}

/*
 * Handle a TX_DONE notification: reclaim the transmitted mbuf, feed the
 * result to the rate control module, update counters and, when a full
 * queue drains below the low-water mark, restart transmission.
 */
static void
wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3];
	struct wpi_tx_data *data = &ring->data[desc->idx];
	struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1);
	struct mbuf *m;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;
	struct ieee80211com *ic;
	uint32_t status = le32toh(stat->status);
	/* Normalize hardware retries to per-rate attempts. */
	int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT;

	KASSERT(data->ni != NULL, ("no node"));
	KASSERT(data->m != NULL, ("no mbuf"));

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	DPRINTF(sc, WPI_DEBUG_XMIT, "%s: "
	    "qid %d idx %d retries %d btkillcnt %d rate %x duration %d "
	    "status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt,
	    stat->btkillcnt, stat->rate, le32toh(stat->duration), status);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, data->map);
	m = data->m, data->m = NULL;
	ni = data->ni, data->ni = NULL;
	vap = ni->ni_vap;
	ic = vap->iv_ic;

	/*
	 * Update rate control statistics for the node.
	 */
	if (status & WPI_TX_STATUS_FAIL) {
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
	} else {
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		ieee80211_ratectl_tx_complete(vap, ni,
		    IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
	}

	ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0);

	WPI_TXQ_STATE_LOCK(sc);
	ring->queued -= 1;
	if (ring->queued > 0) {
		/* Frames still pending: keep the watchdog armed. */
		callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc);

		if (sc->qfullmsk != 0 &&
		    ring->queued < WPI_TX_RING_LOMARK) {
			sc->qfullmsk &= ~(1 << ring->qid);
			IF_LOCK(&ifp->if_snd);
			if (sc->qfullmsk == 0 &&
			    (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
				ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				IF_UNLOCK(&ifp->if_snd);
				ieee80211_runtask(ic, &sc->sc_start_task);
			} else
				IF_UNLOCK(&ifp->if_snd);
		}
	} else
		callout_stop(&sc->tx_timeout);
	WPI_TXQ_STATE_UNLOCK(sc);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
}

/*
 * Process a "command done" firmware notification.  This is where we wakeup
 * processes waiting for a synchronous command completion.
 */
static void
wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
{
	struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
	struct wpi_tx_data *data;

	DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x "
	    "type %s len %d\n", desc->qid, desc->idx,
	    desc->flags, wpi_cmd_str(desc->type),
	    le32toh(desc->len));

	if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM)
		return;	/* Not a command ack. */

	KASSERT(ring->queued == 0, ("ring->queued must be 0"));

	data = &ring->data[desc->idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}

	/* Wake the thread sleeping in wpi_cmd() on this slot. */
	wakeup(&ring->cmd[desc->idx]);
}

/*
 * Drain the RX ring up to the firmware's write index (published in the
 * shared page) and dispatch each notification to its handler.  Every 8
 * processed entries the firmware is told how far we have got.
 */
static void
wpi_notif_intr(struct wpi_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint32_t hw;

	bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
	    BUS_DMASYNC_POSTREAD);

	hw = le32toh(sc->shared->next);
	hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1;

	while (sc->rxq.cur != hw) {
		sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT;

		struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct wpi_rx_desc *desc;

		bus_dmamap_sync(sc->rxq.data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		desc = mtod(data->m, struct wpi_rx_desc *);

		DPRINTF(sc, WPI_DEBUG_NOTIFY,
		    "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
		    __func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags,
		    desc->type, wpi_cmd_str(desc->type), le32toh(desc->len));

		if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) {
			/* Reply to a command. */
			wpi_cmd_done(sc, desc);
		}

		switch (desc->type) {
		case WPI_RX_DONE:
			/* An 802.11 frame has been received. */
			wpi_rx_done(sc, desc, data);

			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				/* wpi_stop() was called. */
				return;
			}

			break;

		case WPI_TX_DONE:
			/* An 802.11 frame has been transmitted. */
			wpi_tx_done(sc, desc);
			break;

		case WPI_RX_STATISTICS:
		case WPI_BEACON_STATISTICS:
			wpi_rx_statistics(sc, desc, data);
			break;

		case WPI_BEACON_MISSED:
		{
			struct wpi_beacon_missed *miss =
			    (struct wpi_beacon_missed *)(desc + 1);
			uint32_t misses;

			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			misses = le32toh(miss->consecutive);

			DPRINTF(sc, WPI_DEBUG_STATE,
			    "%s: beacons missed %d/%d\n", __func__, misses,
			    le32toh(miss->total));

			/* Only report to net80211 while associated and
			 * not scanning. */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0 &&
			    misses >= vap->iv_bmissthreshold)
				ieee80211_beacon_miss(ic);

			break;
		}
		case WPI_UC_READY:
		{
			struct wpi_ucode_info *uc =
			    (struct wpi_ucode_info *)(desc + 1);

			/* The microcontroller is ready. */
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			DPRINTF(sc, WPI_DEBUG_RESET,
			    "microcode alive notification version=%d.%d "
			    "subtype=%x alive=%x\n", uc->major, uc->minor,
			    uc->subtype, le32toh(uc->valid));

			if (le32toh(uc->valid) != 1) {
				device_printf(sc->sc_dev,
				    "microcontroller initialization failed\n");
				wpi_stop_locked(sc);
			}
			/* Save the address of the error log in SRAM. */
			sc->errptr = le32toh(uc->errptr);
			break;
		}
		case WPI_STATE_CHANGED:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);

			uint32_t *status = (uint32_t *)(desc + 1);

			DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n",
			    le32toh(*status));

			/* Bit 0 set means RF kill: drop all firmware node
			 * state and switch the radio off. */
			if (le32toh(*status) & 1) {
				WPI_NT_LOCK(sc);
				wpi_clear_node_table(sc);
				WPI_NT_UNLOCK(sc);
				taskqueue_enqueue(sc->sc_tq,
				    &sc->sc_radiooff_task);
				return;
			}
			break;
		}
		case WPI_START_SCAN:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef WPI_DEBUG
			struct wpi_start_scan *scan =
			    (struct wpi_start_scan *)(desc + 1);
			DPRINTF(sc, WPI_DEBUG_SCAN,
			    "%s: scanning channel %d status %x\n",
			    __func__, scan->chan, le32toh(scan->status));
#endif
			break;
		}
		case WPI_STOP_SCAN:
		{
			bus_dmamap_sync(sc->rxq.data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
#ifdef WPI_DEBUG
			struct wpi_stop_scan *scan =
			    (struct wpi_stop_scan *)(desc + 1);
			DPRINTF(sc, WPI_DEBUG_SCAN,
			    "scan finished nchan=%d status=%d chan=%d\n",
			    scan->nchan, scan->status, scan->chan);
#endif
			WPI_RXON_LOCK(sc);
			callout_stop(&sc->scan_timeout);
			WPI_RXON_UNLOCK(sc);
			ieee80211_scan_next(vap);
			break;
		}
		}

		if (sc->rxq.cur % 8 == 0) {
			/* Tell the firmware what we have processed. */
			wpi_update_rx_ring(sc);
		}
	}
}

/*
 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
 * from power-down sleep mode.
 */
static void
wpi_wakeup_intr(struct wpi_softc *sc)
{
	int qid;

	DPRINTF(sc, WPI_DEBUG_PWRSAVE,
	    "%s: ucode wakeup from power-down sleep\n", __func__);

	/* Wakeup RX and TX rings: flush pointer updates deferred while
	 * the MAC was asleep (see wpi_update_{rx,tx}_ring). */
	if (sc->rxq.update) {
		sc->rxq.update = 0;
		wpi_update_rx_ring(sc);
	}
	WPI_TXQ_LOCK(sc);
	for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) {
		struct wpi_tx_ring *ring = &sc->txq[qid];

		if (ring->update) {
			ring->update = 0;
			wpi_update_tx_ring(sc, ring);
		}
	}
	WPI_TXQ_UNLOCK(sc);

	/* Let the MAC go back to sleep. */
	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
}

/*
 * This function prints firmware registers
 */
#ifdef WPI_DEBUG
static void
wpi_debug_registers(struct wpi_softc *sc)
{
#define COUNTOF(array) (sizeof(array) / sizeof(array[0]))
	int i;
	static const uint32_t csr_tbl[] = {
		WPI_HW_IF_CONFIG,
		WPI_INT,
		WPI_INT_MASK,
		WPI_FH_INT,
		WPI_GPIO_IN,
		WPI_RESET,
		WPI_GP_CNTRL,
		WPI_EEPROM,
		WPI_EEPROM_GP,
		WPI_GIO,
		WPI_UCODE_GP1,
		WPI_UCODE_GP2,
		WPI_GIO_CHICKEN,
		WPI_ANA_PLL,
		WPI_DBG_HPET_MEM,
	};
	static const uint32_t prph_tbl[] = {
		WPI_APMG_CLK_CTRL,
		WPI_APMG_PS,
		WPI_APMG_PCI_STT,
		WPI_APMG_RFKILL,
	};

	DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n");

	/* Dump CSRs, two per output line. */
	for (i = 0; i < COUNTOF(csr_tbl); i++) {
		DPRINTF(sc, WPI_DEBUG_REGISTER, "  %-18s: 0x%08x ",
		    wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i]));

		if ((i + 1) % 2 == 0)
			DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
	}
	DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n");

	/* Periphery registers require NIC access; skip if unavailable. */
	if (wpi_nic_lock(sc) == 0) {
		for (i = 0; i < COUNTOF(prph_tbl); i++) {
			DPRINTF(sc, WPI_DEBUG_REGISTER, "  %-18s: 0x%08x ",
			    wpi_get_prph_string(prph_tbl[i]),
			    wpi_prph_read(sc, prph_tbl[i]));

			if ((i + 1) % 2 == 0)
				DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
		}
		DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
		wpi_nic_unlock(sc);
	} else {
		DPRINTF(sc, WPI_DEBUG_REGISTER,
		    "Cannot access internal registers.\n");
	}
#undef COUNTOF
}
#endif

/*
 * Dump
the error log of the firmware when a firmware panic occurs. Although 2312 * we can't debug the firmware because it is neither open source nor free, it 2313 * can help us to identify certain classes of problems. 2314 */ 2315static void 2316wpi_fatal_intr(struct wpi_softc *sc) 2317{ 2318 struct wpi_fw_dump dump; 2319 uint32_t i, offset, count; 2320 const uint32_t size_errmsg = 2321 (sizeof (wpi_fw_errmsg) / sizeof ((wpi_fw_errmsg)[0])); 2322 2323 /* Check that the error log address is valid. */ 2324 if (sc->errptr < WPI_FW_DATA_BASE || 2325 sc->errptr + sizeof (dump) > 2326 WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) { 2327 printf("%s: bad firmware error log address 0x%08x\n", __func__, 2328 sc->errptr); 2329 return; 2330 } 2331 if (wpi_nic_lock(sc) != 0) { 2332 printf("%s: could not read firmware error log\n", __func__); 2333 return; 2334 } 2335 /* Read number of entries in the log. */ 2336 count = wpi_mem_read(sc, sc->errptr); 2337 if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) { 2338 printf("%s: invalid count field (count = %u)\n", __func__, 2339 count); 2340 wpi_nic_unlock(sc); 2341 return; 2342 } 2343 /* Skip "count" field. */ 2344 offset = sc->errptr + sizeof (uint32_t); 2345 printf("firmware error log (count = %u):\n", count); 2346 for (i = 0; i < count; i++) { 2347 wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump, 2348 sizeof (dump) / sizeof (uint32_t)); 2349 2350 printf(" error type = \"%s\" (0x%08X)\n", 2351 (dump.desc < size_errmsg) ? 2352 wpi_fw_errmsg[dump.desc] : "UNKNOWN", 2353 dump.desc); 2354 printf(" error data = 0x%08X\n", 2355 dump.data); 2356 printf(" branch link = 0x%08X%08X\n", 2357 dump.blink[0], dump.blink[1]); 2358 printf(" interrupt link = 0x%08X%08X\n", 2359 dump.ilink[0], dump.ilink[1]); 2360 printf(" time = %u\n", dump.time); 2361 2362 offset += sizeof (dump); 2363 } 2364 wpi_nic_unlock(sc); 2365 /* Dump driver status (TX and RX rings) while we're here. 
*/ 2366 printf("driver status:\n"); 2367 WPI_TXQ_LOCK(sc); 2368 for (i = 0; i < WPI_DRV_NTXQUEUES; i++) { 2369 struct wpi_tx_ring *ring = &sc->txq[i]; 2370 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", 2371 i, ring->qid, ring->cur, ring->queued); 2372 } 2373 WPI_TXQ_UNLOCK(sc); 2374 printf(" rx ring: cur=%d\n", sc->rxq.cur); 2375} 2376 2377static void 2378wpi_intr(void *arg) 2379{ 2380 struct wpi_softc *sc = arg; 2381 struct ifnet *ifp = sc->sc_ifp; 2382 uint32_t r1, r2; 2383 2384 WPI_LOCK(sc); 2385 2386 /* Disable interrupts. */ 2387 WPI_WRITE(sc, WPI_INT_MASK, 0); 2388 2389 r1 = WPI_READ(sc, WPI_INT); 2390 2391 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 2392 goto end; /* Hardware gone! */ 2393 2394 r2 = WPI_READ(sc, WPI_FH_INT); 2395 2396 DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__, 2397 r1, r2); 2398 2399 if (r1 == 0 && r2 == 0) 2400 goto done; /* Interrupt not for us. */ 2401 2402 /* Acknowledge interrupts. */ 2403 WPI_WRITE(sc, WPI_INT, r1); 2404 WPI_WRITE(sc, WPI_FH_INT, r2); 2405 2406 if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) { 2407 device_printf(sc->sc_dev, "fatal firmware error\n"); 2408#ifdef WPI_DEBUG 2409 wpi_debug_registers(sc); 2410#endif 2411 wpi_fatal_intr(sc); 2412 DPRINTF(sc, WPI_DEBUG_HW, 2413 "(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" : 2414 "(Hardware Error)"); 2415 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2416 goto end; 2417 } 2418 2419 if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) || 2420 (r2 & WPI_FH_INT_RX)) 2421 wpi_notif_intr(sc); 2422 2423 if (r1 & WPI_INT_ALIVE) 2424 wakeup(sc); /* Firmware is alive. */ 2425 2426 if (r1 & WPI_INT_WAKEUP) 2427 wpi_wakeup_intr(sc); 2428 2429done: 2430 /* Re-enable interrupts. 
*/ 2431 if (ifp->if_flags & IFF_UP) 2432 WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF); 2433 2434end: WPI_UNLOCK(sc); 2435} 2436 2437static int 2438wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) 2439{ 2440 struct ifnet *ifp = sc->sc_ifp; 2441 struct ieee80211_frame *wh; 2442 struct wpi_tx_cmd *cmd; 2443 struct wpi_tx_data *data; 2444 struct wpi_tx_desc *desc; 2445 struct wpi_tx_ring *ring; 2446 struct mbuf *m1; 2447 bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; 2448 int error, i, hdrlen, nsegs, totlen, pad; 2449 2450 WPI_TXQ_LOCK(sc); 2451 2452 KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow")); 2453 2454 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2455 2456 if (sc->txq_active == 0) { 2457 /* wpi_stop() was called */ 2458 error = ENETDOWN; 2459 goto fail; 2460 } 2461 2462 wh = mtod(buf->m, struct ieee80211_frame *); 2463 hdrlen = ieee80211_anyhdrsize(wh); 2464 totlen = buf->m->m_pkthdr.len; 2465 2466 if (hdrlen & 3) { 2467 /* First segment length must be a multiple of 4. */ 2468 pad = 4 - (hdrlen & 3); 2469 } else 2470 pad = 0; 2471 2472 ring = &sc->txq[buf->ac]; 2473 desc = &ring->desc[ring->cur]; 2474 data = &ring->data[ring->cur]; 2475 2476 /* Prepare TX firmware command. */ 2477 cmd = &ring->cmd[ring->cur]; 2478 cmd->code = buf->code; 2479 cmd->flags = 0; 2480 cmd->qid = ring->qid; 2481 cmd->idx = ring->cur; 2482 2483 memcpy(cmd->data, buf->data, buf->size); 2484 2485 /* Save and trim IEEE802.11 header. */ 2486 memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); 2487 m_adj(buf->m, hdrlen); 2488 2489 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, 2490 segs, &nsegs, BUS_DMA_NOWAIT); 2491 if (error != 0 && error != EFBIG) { 2492 device_printf(sc->sc_dev, 2493 "%s: can't map mbuf (error %d)\n", __func__, error); 2494 goto fail; 2495 } 2496 if (error != 0) { 2497 /* Too many DMA segments, linearize mbuf. 
*/ 2498 m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1); 2499 if (m1 == NULL) { 2500 device_printf(sc->sc_dev, 2501 "%s: could not defrag mbuf\n", __func__); 2502 error = ENOBUFS; 2503 goto fail; 2504 } 2505 buf->m = m1; 2506 2507 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, 2508 buf->m, segs, &nsegs, BUS_DMA_NOWAIT); 2509 if (error != 0) { 2510 device_printf(sc->sc_dev, 2511 "%s: can't map mbuf (error %d)\n", __func__, 2512 error); 2513 goto fail; 2514 } 2515 } 2516 2517 KASSERT(nsegs < WPI_MAX_SCATTER, 2518 ("too many DMA segments, nsegs (%d) should be less than %d", 2519 nsegs, WPI_MAX_SCATTER)); 2520 2521 data->m = buf->m; 2522 data->ni = buf->ni; 2523 2524 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", 2525 __func__, ring->qid, ring->cur, totlen, nsegs); 2526 2527 /* Fill TX descriptor. */ 2528 desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); 2529 /* First DMA segment is used by the TX command. */ 2530 desc->segs[0].addr = htole32(data->cmd_paddr); 2531 desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); 2532 /* Other DMA segments are for data payload. */ 2533 seg = &segs[0]; 2534 for (i = 1; i <= nsegs; i++) { 2535 desc->segs[i].addr = htole32(seg->ds_addr); 2536 desc->segs[i].len = htole32(seg->ds_len); 2537 seg++; 2538 } 2539 2540 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); 2541 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 2542 BUS_DMASYNC_PREWRITE); 2543 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 2544 BUS_DMASYNC_PREWRITE); 2545 2546 /* Kick TX ring. */ 2547 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 2548 wpi_update_tx_ring(sc, ring); 2549 2550 if (ring->qid < WPI_CMD_QUEUE_NUM) { 2551 /* Mark TX ring as full if we reach a certain threshold. 
*/ 2552 WPI_TXQ_STATE_LOCK(sc); 2553 if (++ring->queued > WPI_TX_RING_HIMARK) { 2554 sc->qfullmsk |= 1 << ring->qid; 2555 2556 IF_LOCK(&ifp->if_snd); 2557 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2558 IF_UNLOCK(&ifp->if_snd); 2559 } 2560 2561 callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc); 2562 WPI_TXQ_STATE_UNLOCK(sc); 2563 } 2564 2565 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2566 2567 WPI_TXQ_UNLOCK(sc); 2568 2569 return 0; 2570 2571fail: m_freem(buf->m); 2572 2573 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2574 2575 WPI_TXQ_UNLOCK(sc); 2576 2577 return error; 2578} 2579 2580/* 2581 * Construct the data packet for a transmit buffer. 2582 */ 2583static int 2584wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) 2585{ 2586 const struct ieee80211_txparam *tp; 2587 struct ieee80211vap *vap = ni->ni_vap; 2588 struct ieee80211com *ic = ni->ni_ic; 2589 struct wpi_node *wn = WPI_NODE(ni); 2590 struct ieee80211_channel *chan; 2591 struct ieee80211_frame *wh; 2592 struct ieee80211_key *k = NULL; 2593 struct wpi_buf tx_data; 2594 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2595 uint32_t flags; 2596 uint16_t qos; 2597 uint8_t tid, type; 2598 int ac, error, swcrypt, rate, ismcast, totlen; 2599 2600 wh = mtod(m, struct ieee80211_frame *); 2601 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2602 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); 2603 2604 /* Select EDCA Access Category and TX ring for this frame. */ 2605 if (IEEE80211_QOS_HAS_SEQ(wh)) { 2606 qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; 2607 tid = qos & IEEE80211_QOS_TID; 2608 } else { 2609 qos = 0; 2610 tid = 0; 2611 } 2612 ac = M_WME_GETAC(m); 2613 2614 chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ? 2615 ni->ni_chan : ic->ic_curchan; 2616 tp = &vap->iv_txparms[ieee80211_chan2mode(chan)]; 2617 2618 /* Choose a TX rate index. 
*/ 2619 if (type == IEEE80211_FC0_TYPE_MGT) 2620 rate = tp->mgmtrate; 2621 else if (ismcast) 2622 rate = tp->mcastrate; 2623 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) 2624 rate = tp->ucastrate; 2625 else if (m->m_flags & M_EAPOL) 2626 rate = tp->mgmtrate; 2627 else { 2628 /* XXX pass pktlen */ 2629 (void) ieee80211_ratectl_rate(ni, NULL, 0); 2630 rate = ni->ni_txrate; 2631 } 2632 2633 /* Encrypt the frame if need be. */ 2634 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 2635 /* Retrieve key for TX. */ 2636 k = ieee80211_crypto_encap(ni, m); 2637 if (k == NULL) { 2638 error = ENOBUFS; 2639 goto fail; 2640 } 2641 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2642 2643 /* 802.11 header may have moved. */ 2644 wh = mtod(m, struct ieee80211_frame *); 2645 } 2646 totlen = m->m_pkthdr.len; 2647 2648 if (ieee80211_radiotap_active_vap(vap)) { 2649 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2650 2651 tap->wt_flags = 0; 2652 tap->wt_rate = rate; 2653 if (k != NULL) 2654 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2655 2656 ieee80211_radiotap_tx(vap, m); 2657 } 2658 2659 flags = 0; 2660 if (!ismcast) { 2661 /* Unicast frame, check if an ACK is expected. */ 2662 if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != 2663 IEEE80211_QOS_ACKPOLICY_NOACK) 2664 flags |= WPI_TX_NEED_ACK; 2665 } 2666 2667 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) 2668 flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */ 2669 2670 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ 2671 if (!ismcast) { 2672 /* NB: Group frames are sent using CCK in 802.11b/g. 
*/ 2673 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { 2674 flags |= WPI_TX_NEED_RTS; 2675 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && 2676 WPI_RATE_IS_OFDM(rate)) { 2677 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) 2678 flags |= WPI_TX_NEED_CTS; 2679 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) 2680 flags |= WPI_TX_NEED_RTS; 2681 } 2682 2683 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2684 flags |= WPI_TX_FULL_TXOP; 2685 } 2686 2687 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2688 if (type == IEEE80211_FC0_TYPE_MGT) { 2689 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2690 2691 /* Tell HW to set timestamp in probe responses. */ 2692 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2693 flags |= WPI_TX_INSERT_TSTAMP; 2694 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2695 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2696 tx->timeout = htole16(3); 2697 else 2698 tx->timeout = htole16(2); 2699 } 2700 2701 if (ismcast || type != IEEE80211_FC0_TYPE_DATA) 2702 tx->id = WPI_ID_BROADCAST; 2703 else { 2704 if (wn->id == WPI_ID_UNDEFINED) { 2705 device_printf(sc->sc_dev, 2706 "%s: undefined node id\n", __func__); 2707 error = EINVAL; 2708 goto fail; 2709 } 2710 2711 tx->id = wn->id; 2712 } 2713 2714 if (k != NULL && !swcrypt) { 2715 switch (k->wk_cipher->ic_cipher) { 2716 case IEEE80211_CIPHER_AES_CCM: 2717 tx->security = WPI_CIPHER_CCMP; 2718 break; 2719 2720 default: 2721 break; 2722 } 2723 2724 memcpy(tx->key, k->wk_key, k->wk_keylen); 2725 } 2726 2727 tx->len = htole16(totlen); 2728 tx->flags = htole32(flags); 2729 tx->plcp = rate2plcp(rate); 2730 tx->tid = tid; 2731 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2732 tx->ofdm_mask = 0xff; 2733 tx->cck_mask = 0x0f; 2734 tx->rts_ntries = 7; 2735 tx->data_ntries = tp->maxretry; 2736 2737 tx_data.ni = ni; 2738 tx_data.m = m; 2739 tx_data.size = sizeof(struct wpi_cmd_data); 2740 tx_data.code = WPI_CMD_TX_DATA; 2741 tx_data.ac = ac; 2742 2743 return wpi_cmd2(sc, &tx_data); 
2744 2745fail: m_freem(m); 2746 return error; 2747} 2748 2749static int 2750wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, 2751 struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) 2752{ 2753 struct ieee80211vap *vap = ni->ni_vap; 2754 struct ieee80211_key *k = NULL; 2755 struct ieee80211_frame *wh; 2756 struct wpi_buf tx_data; 2757 struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data; 2758 uint32_t flags; 2759 uint8_t type; 2760 int ac, rate, swcrypt, totlen; 2761 2762 wh = mtod(m, struct ieee80211_frame *); 2763 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 2764 2765 ac = params->ibp_pri & 3; 2766 2767 /* Choose a TX rate index. */ 2768 rate = params->ibp_rate0; 2769 2770 flags = 0; 2771 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) 2772 flags |= WPI_TX_NEED_ACK; 2773 if (params->ibp_flags & IEEE80211_BPF_RTS) 2774 flags |= WPI_TX_NEED_RTS; 2775 if (params->ibp_flags & IEEE80211_BPF_CTS) 2776 flags |= WPI_TX_NEED_CTS; 2777 if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS)) 2778 flags |= WPI_TX_FULL_TXOP; 2779 2780 /* Encrypt the frame if need be. */ 2781 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) { 2782 /* Retrieve key for TX. */ 2783 k = ieee80211_crypto_encap(ni, m); 2784 if (k == NULL) { 2785 m_freem(m); 2786 return ENOBUFS; 2787 } 2788 swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT; 2789 2790 /* 802.11 header may have moved. 
*/ 2791 wh = mtod(m, struct ieee80211_frame *); 2792 } 2793 totlen = m->m_pkthdr.len; 2794 2795 if (ieee80211_radiotap_active_vap(vap)) { 2796 struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; 2797 2798 tap->wt_flags = 0; 2799 tap->wt_rate = rate; 2800 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) 2801 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 2802 2803 ieee80211_radiotap_tx(vap, m); 2804 } 2805 2806 memset(tx, 0, sizeof (struct wpi_cmd_data)); 2807 if (type == IEEE80211_FC0_TYPE_MGT) { 2808 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 2809 2810 /* Tell HW to set timestamp in probe responses. */ 2811 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) 2812 flags |= WPI_TX_INSERT_TSTAMP; 2813 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 2814 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) 2815 tx->timeout = htole16(3); 2816 else 2817 tx->timeout = htole16(2); 2818 } 2819 2820 if (k != NULL && !swcrypt) { 2821 switch (k->wk_cipher->ic_cipher) { 2822 case IEEE80211_CIPHER_AES_CCM: 2823 tx->security = WPI_CIPHER_CCMP; 2824 break; 2825 2826 default: 2827 break; 2828 } 2829 2830 memcpy(tx->key, k->wk_key, k->wk_keylen); 2831 } 2832 2833 tx->len = htole16(totlen); 2834 tx->flags = htole32(flags); 2835 tx->plcp = rate2plcp(rate); 2836 tx->id = WPI_ID_BROADCAST; 2837 tx->lifetime = htole32(WPI_LIFETIME_INFINITE); 2838 tx->rts_ntries = params->ibp_try1; 2839 tx->data_ntries = params->ibp_try0; 2840 2841 tx_data.ni = ni; 2842 tx_data.m = m; 2843 tx_data.size = sizeof(struct wpi_cmd_data); 2844 tx_data.code = WPI_CMD_TX_DATA; 2845 tx_data.ac = ac; 2846 2847 return wpi_cmd2(sc, &tx_data); 2848} 2849 2850static int 2851wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 2852 const struct ieee80211_bpf_params *params) 2853{ 2854 struct ieee80211com *ic = ni->ni_ic; 2855 struct ifnet *ifp = ic->ic_ifp; 2856 struct wpi_softc *sc = ifp->if_softc; 2857 int error = 0; 2858 2859 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 2860 2861 if 
((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2862 ieee80211_free_node(ni); 2863 m_freem(m); 2864 return ENETDOWN; 2865 } 2866 2867 WPI_TX_LOCK(sc); 2868 if (params == NULL) { 2869 /* 2870 * Legacy path; interpret frame contents to decide 2871 * precisely how to send the frame. 2872 */ 2873 error = wpi_tx_data(sc, m, ni); 2874 } else { 2875 /* 2876 * Caller supplied explicit parameters to use in 2877 * sending the frame. 2878 */ 2879 error = wpi_tx_data_raw(sc, m, ni, params); 2880 } 2881 WPI_TX_UNLOCK(sc); 2882 2883 if (error != 0) { 2884 /* NB: m is reclaimed on tx failure */ 2885 ieee80211_free_node(ni); 2886 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2887 2888 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 2889 2890 return error; 2891 } 2892 2893 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 2894 2895 return 0; 2896} 2897 2898/** 2899 * Process data waiting to be sent on the IFNET output queue 2900 */ 2901static void 2902wpi_start(struct ifnet *ifp) 2903{ 2904 struct wpi_softc *sc = ifp->if_softc; 2905 struct ieee80211_node *ni; 2906 struct mbuf *m; 2907 2908 WPI_TX_LOCK(sc); 2909 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__); 2910 2911 for (;;) { 2912 IF_LOCK(&ifp->if_snd); 2913 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 2914 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { 2915 IF_UNLOCK(&ifp->if_snd); 2916 break; 2917 } 2918 IF_UNLOCK(&ifp->if_snd); 2919 2920 IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 2921 if (m == NULL) 2922 break; 2923 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 2924 if (wpi_tx_data(sc, m, ni) != 0) { 2925 ieee80211_free_node(ni); 2926 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2927 } 2928 } 2929 2930 DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__); 2931 WPI_TX_UNLOCK(sc); 2932} 2933 2934static void 2935wpi_start_task(void *arg0, int pending) 2936{ 2937 struct wpi_softc *sc = arg0; 2938 struct ifnet *ifp = sc->sc_ifp; 2939 2940 wpi_start(ifp); 2941} 2942 2943static void 2944wpi_watchdog_rfkill(void *arg) 2945{ 
2946 struct wpi_softc *sc = arg; 2947 struct ifnet *ifp = sc->sc_ifp; 2948 struct ieee80211com *ic = ifp->if_l2com; 2949 2950 DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n"); 2951 2952 /* No need to lock firmware memory. */ 2953 if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) { 2954 /* Radio kill switch is still off. */ 2955 callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, 2956 sc); 2957 } else 2958 ieee80211_runtask(ic, &sc->sc_radioon_task); 2959} 2960 2961static void 2962wpi_scan_timeout(void *arg) 2963{ 2964 struct wpi_softc *sc = arg; 2965 struct ifnet *ifp = sc->sc_ifp; 2966 2967 if_printf(ifp, "scan timeout\n"); 2968 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2969} 2970 2971static void 2972wpi_tx_timeout(void *arg) 2973{ 2974 struct wpi_softc *sc = arg; 2975 struct ifnet *ifp = sc->sc_ifp; 2976 2977 if_printf(ifp, "device timeout\n"); 2978 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2979 taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask); 2980} 2981 2982static int 2983wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2984{ 2985 struct wpi_softc *sc = ifp->if_softc; 2986 struct ieee80211com *ic = ifp->if_l2com; 2987 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 2988 struct ifreq *ifr = (struct ifreq *) data; 2989 int error = 0; 2990 2991 switch (cmd) { 2992 case SIOCGIFADDR: 2993 error = ether_ioctl(ifp, cmd, data); 2994 break; 2995 case SIOCSIFFLAGS: 2996 if (ifp->if_flags & IFF_UP) { 2997 wpi_init(sc); 2998 2999 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 && 3000 vap != NULL) 3001 ieee80211_stop(vap); 3002 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3003 wpi_stop(sc); 3004 break; 3005 case SIOCGIFMEDIA: 3006 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); 3007 break; 3008 default: 3009 error = EINVAL; 3010 break; 3011 } 3012 return error; 3013} 3014 3015/* 3016 * Send a command to the firmware. 
3017 */ 3018static int 3019wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size, 3020 int async) 3021{ 3022 struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM]; 3023 struct wpi_tx_desc *desc; 3024 struct wpi_tx_data *data; 3025 struct wpi_tx_cmd *cmd; 3026 struct mbuf *m; 3027 bus_addr_t paddr; 3028 int totlen, error; 3029 3030 WPI_TXQ_LOCK(sc); 3031 3032 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3033 3034 if (sc->txq_active == 0) { 3035 /* wpi_stop() was called */ 3036 error = 0; 3037 goto fail; 3038 } 3039 3040 if (async == 0) 3041 WPI_LOCK_ASSERT(sc); 3042 3043 DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n", 3044 __func__, wpi_cmd_str(code), size, async); 3045 3046 desc = &ring->desc[ring->cur]; 3047 data = &ring->data[ring->cur]; 3048 totlen = 4 + size; 3049 3050 if (size > sizeof cmd->data) { 3051 /* Command is too large to fit in a descriptor. */ 3052 if (totlen > MCLBYTES) { 3053 error = EINVAL; 3054 goto fail; 3055 } 3056 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); 3057 if (m == NULL) { 3058 error = ENOMEM; 3059 goto fail; 3060 } 3061 cmd = mtod(m, struct wpi_tx_cmd *); 3062 error = bus_dmamap_load(ring->data_dmat, data->map, cmd, 3063 totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); 3064 if (error != 0) { 3065 m_freem(m); 3066 goto fail; 3067 } 3068 data->m = m; 3069 } else { 3070 cmd = &ring->cmd[ring->cur]; 3071 paddr = data->cmd_paddr; 3072 } 3073 3074 cmd->code = code; 3075 cmd->flags = 0; 3076 cmd->qid = ring->qid; 3077 cmd->idx = ring->cur; 3078 memcpy(cmd->data, buf, size); 3079 3080 desc->nsegs = 1 + (WPI_PAD32(size) << 4); 3081 desc->segs[0].addr = htole32(paddr); 3082 desc->segs[0].len = htole32(totlen); 3083 3084 if (size > sizeof cmd->data) { 3085 bus_dmamap_sync(ring->data_dmat, data->map, 3086 BUS_DMASYNC_PREWRITE); 3087 } else { 3088 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map, 3089 BUS_DMASYNC_PREWRITE); 3090 } 3091 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 
3092 BUS_DMASYNC_PREWRITE); 3093 3094 /* Kick command ring. */ 3095 ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; 3096 wpi_update_tx_ring(sc, ring); 3097 3098 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3099 3100 WPI_TXQ_UNLOCK(sc); 3101 3102 if (async) 3103 return 0; 3104 3105 return mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); 3106 3107fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__); 3108 3109 WPI_TXQ_UNLOCK(sc); 3110 3111 return error; 3112} 3113 3114/* 3115 * Configure HW multi-rate retries. 3116 */ 3117static int 3118wpi_mrr_setup(struct wpi_softc *sc) 3119{ 3120 struct ifnet *ifp = sc->sc_ifp; 3121 struct ieee80211com *ic = ifp->if_l2com; 3122 struct wpi_mrr_setup mrr; 3123 int i, error; 3124 3125 /* CCK rates (not used with 802.11a). */ 3126 for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) { 3127 mrr.rates[i].flags = 0; 3128 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3129 /* Fallback to the immediate lower CCK rate (if any.) */ 3130 mrr.rates[i].next = 3131 (i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1; 3132 /* Try twice at this rate before falling back to "next". */ 3133 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3134 } 3135 /* OFDM rates (not used with 802.11b). */ 3136 for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) { 3137 mrr.rates[i].flags = 0; 3138 mrr.rates[i].plcp = wpi_ridx_to_plcp[i]; 3139 /* Fallback to the immediate lower rate (if any.) */ 3140 /* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */ 3141 mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ? 3142 ((ic->ic_curmode == IEEE80211_MODE_11A) ? 3143 WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) : 3144 i - 1; 3145 /* Try twice at this rate before falling back to "next". */ 3146 mrr.rates[i].ntries = WPI_NTRIES_DEFAULT; 3147 } 3148 /* Setup MRR for control frames. 
*/ 3149 mrr.which = htole32(WPI_MRR_CTL); 3150 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3151 if (error != 0) { 3152 device_printf(sc->sc_dev, 3153 "could not setup MRR for control frames\n"); 3154 return error; 3155 } 3156 /* Setup MRR for data frames. */ 3157 mrr.which = htole32(WPI_MRR_DATA); 3158 error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); 3159 if (error != 0) { 3160 device_printf(sc->sc_dev, 3161 "could not setup MRR for data frames\n"); 3162 return error; 3163 } 3164 return 0; 3165} 3166 3167static int 3168wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3169{ 3170 struct ieee80211com *ic = ni->ni_ic; 3171 struct wpi_vap *wvp = WPI_VAP(ni->ni_vap); 3172 struct wpi_node *wn = WPI_NODE(ni); 3173 struct wpi_node_info node; 3174 int error; 3175 3176 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3177 3178 if (wn->id == WPI_ID_UNDEFINED) 3179 return EINVAL; 3180 3181 memset(&node, 0, sizeof node); 3182 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3183 node.id = wn->id; 3184 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3185 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3186 node.action = htole32(WPI_ACTION_SET_RATE); 3187 node.antenna = WPI_ANTENNA_BOTH; 3188 3189 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__, 3190 wn->id, ether_sprintf(ni->ni_macaddr)); 3191 3192 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 3193 if (error != 0) { 3194 device_printf(sc->sc_dev, 3195 "%s: wpi_cmd() call failed with error code %d\n", __func__, 3196 error); 3197 return error; 3198 } 3199 3200 if (wvp->wv_gtk != 0) { 3201 error = wpi_set_global_keys(ni); 3202 if (error != 0) { 3203 device_printf(sc->sc_dev, 3204 "%s: error while setting global keys\n", __func__); 3205 return ENXIO; 3206 } 3207 } 3208 3209 return 0; 3210} 3211 3212/* 3213 * Broadcast node is used to send group-addressed and management frames. 
3214 */ 3215static int 3216wpi_add_broadcast_node(struct wpi_softc *sc, int async) 3217{ 3218 struct ifnet *ifp = sc->sc_ifp; 3219 struct ieee80211com *ic = ifp->if_l2com; 3220 struct wpi_node_info node; 3221 3222 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3223 3224 memset(&node, 0, sizeof node); 3225 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); 3226 node.id = WPI_ID_BROADCAST; 3227 node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? 3228 wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; 3229 node.action = htole32(WPI_ACTION_SET_RATE); 3230 node.antenna = WPI_ANTENNA_BOTH; 3231 3232 DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__); 3233 3234 return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async); 3235} 3236 3237static int 3238wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3239{ 3240 struct wpi_node *wn = WPI_NODE(ni); 3241 int error; 3242 3243 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3244 3245 wn->id = wpi_add_node_entry_sta(sc); 3246 3247 if ((error = wpi_add_node(sc, ni)) != 0) { 3248 wpi_del_node_entry(sc, wn->id); 3249 wn->id = WPI_ID_UNDEFINED; 3250 return error; 3251 } 3252 3253 return 0; 3254} 3255 3256static int 3257wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3258{ 3259 struct wpi_node *wn = WPI_NODE(ni); 3260 int error; 3261 3262 KASSERT(wn->id == WPI_ID_UNDEFINED, 3263 ("the node %d was added before", wn->id)); 3264 3265 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3266 3267 if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) { 3268 device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__); 3269 return ENOMEM; 3270 } 3271 3272 if ((error = wpi_add_node(sc, ni)) != 0) { 3273 wpi_del_node_entry(sc, wn->id); 3274 wn->id = WPI_ID_UNDEFINED; 3275 return error; 3276 } 3277 3278 return 0; 3279} 3280 3281static void 3282wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) 3283{ 3284 struct 
wpi_node *wn = WPI_NODE(ni); 3285 struct wpi_cmd_del_node node; 3286 int error; 3287 3288 KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed")); 3289 3290 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 3291 3292 memset(&node, 0, sizeof node); 3293 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); 3294 node.count = 1; 3295 3296 DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__, 3297 wn->id, ether_sprintf(ni->ni_macaddr)); 3298 3299 error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); 3300 if (error != 0) { 3301 device_printf(sc->sc_dev, 3302 "%s: could not delete node %u, error %d\n", __func__, 3303 wn->id, error); 3304 } 3305} 3306 3307static int 3308wpi_updateedca(struct ieee80211com *ic) 3309{ 3310#define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 3311 struct wpi_softc *sc = ic->ic_ifp->if_softc; 3312 struct wpi_edca_params cmd; 3313 int aci, error; 3314 3315 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3316 3317 memset(&cmd, 0, sizeof cmd); 3318 cmd.flags = htole32(WPI_EDCA_UPDATE); 3319 for (aci = 0; aci < WME_NUM_AC; aci++) { 3320 const struct wmeParams *ac = 3321 &ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 3322 cmd.ac[aci].aifsn = ac->wmep_aifsn; 3323 cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin)); 3324 cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax)); 3325 cmd.ac[aci].txoplimit = 3326 htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); 3327 3328 DPRINTF(sc, WPI_DEBUG_EDCA, 3329 "setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " 3330 "txoplimit=%d\n", aci, cmd.ac[aci].aifsn, 3331 cmd.ac[aci].cwmin, cmd.ac[aci].cwmax, 3332 cmd.ac[aci].txoplimit); 3333 } 3334 error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); 3335 3336 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3337 3338 return error; 3339#undef WPI_EXP2 3340} 3341 3342static void 3343wpi_set_promisc(struct wpi_softc *sc) 3344{ 3345 struct ifnet *ifp = sc->sc_ifp; 3346 struct ieee80211com 
/*
 * Fold the interface's IFF_PROMISC state into the cached RXON filter.
 * Only updates sc->rxon.filter; the caller is responsible for sending
 * the RXON command afterwards.
 */
static void
wpi_set_promisc(struct wpi_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	uint32_t promisc_filter;

	promisc_filter = WPI_FILTER_CTL;
	/* HOSTAP mode already sets WPI_FILTER_PROMISC in wpi_config(). */
	if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP)
		promisc_filter |= WPI_FILTER_PROMISC;

	if (ifp->if_flags & IFF_PROMISC)
		sc->rxon.filter |= htole32(promisc_filter);
	else
		sc->rxon.filter &= ~htole32(promisc_filter);
}

/*
 * ifnet hook: recompute the promiscuous filter bits and push the
 * updated RXON configuration to the firmware.
 */
static void
wpi_update_promisc(struct ifnet *ifp)
{
	struct wpi_softc *sc = ifp->if_softc;

	WPI_RXON_LOCK(sc);
	wpi_set_promisc(sc);

	if (wpi_send_rxon(sc, 1, 1) != 0) {
		device_printf(sc->sc_dev, "%s: could not send RXON\n",
		    __func__);
	}
	WPI_RXON_UNLOCK(sc);
}

/*
 * ifnet multicast-update hook: intentionally a no-op, the RXON filter
 * already passes multicast (WPI_FILTER_MULTICAST).
 */
static void
wpi_update_mcast(struct ifnet *ifp)
{
	/* Ignore */
}

/*
 * Program one of the adapter LEDs with an on/off blink pattern.
 * 'off' and 'on' are expressed in units of 100ms (see led.unit below).
 * Command failure is deliberately ignored - purely cosmetic.
 */
static void
wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
	struct wpi_cmd_led led;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	led.which = which;
	led.unit = htole32(100000);	/* on/off in unit of 100ms */
	led.off = off;
	led.on = on;
	(void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1);
}
/*
 * Send the TIMING command: beacon interval, listen interval and the
 * delay until the next expected beacon, derived from the BSS timestamp
 * carried in 'ni'.  Returns 0 or an errno from wpi_cmd().
 */
static int
wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni)
{
	struct wpi_cmd_timing cmd;
	uint64_t val, mod;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	memset(&cmd, 0, sizeof cmd);
	memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
	cmd.bintval = htole16(ni->ni_intval);
	cmd.lintval = htole16(10);

	/* Compute remaining time until next beacon. */
	val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
	mod = le64toh(cmd.tstamp) % val;
	cmd.binitval = htole32((uint32_t)(val - mod));

	DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
	    ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));

	return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1);
}

/*
 * This function is called periodically (every 60 seconds) to adjust output
 * power to temperature changes.
 */
static void
wpi_power_calibration(struct wpi_softc *sc)
{
	int temp;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	/* Update sensor data. */
	temp = (int)WPI_READ(sc, WPI_UCODE_GP2);
	DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp);

	/* Sanity-check read value. */
	if (temp < -260 || temp > 25) {
		/* This can't be correct, ignore. */
		DPRINTF(sc, WPI_DEBUG_TEMP,
		    "out-of-range temperature reported: %d\n", temp);
		return;
	}

	DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp);

	/* Adjust Tx power if need be - skip changes of 6 or less. */
	if (abs(temp - sc->temp) <= 6)
		return;

	sc->temp = temp;

	if (wpi_set_txpower(sc, 1) != 0) {
		/* just warn, too bad for the automatic calibration... */
		device_printf(sc->sc_dev,"could not adjust Tx power\n");
	}
}
/*
 * Set TX power for current channel.
 *
 * Builds a TXPOWER command containing per-rate RF/DSP gain values taken
 * from the EEPROM-derived gain tables, selected via the power index
 * computed by wpi_get_power_index().  The channel is taken from the
 * cached RXON state, so callers must have a valid RXON configured.
 */
static int
wpi_set_txpower(struct wpi_softc *sc, int async)
{
	struct wpi_power_group *group;
	struct wpi_cmd_txpower cmd;
	uint8_t chan;
	int idx, is_chan_5ghz, i;

	/* Retrieve current channel from last RXON. */
	chan = sc->rxon.chan;
	is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0;

	/*
	 * Find the TX power group to which this channel belongs.
	 * Group 0 is 2GHz; groups 1..4 cover the 5GHz band, ordered by
	 * ascending boundary channel.  If the loop runs off the end,
	 * 'group' points at the last (highest) 5GHz group.
	 */
	if (is_chan_5ghz) {
		for (group = &sc->groups[1]; group < &sc->groups[4]; group++)
			if (chan <= group->chan)
				break;
	} else
		group = &sc->groups[0];

	memset(&cmd, 0, sizeof cmd);
	cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ;
	cmd.chan = htole16(chan);

	/* Set TX power for all OFDM and CCK rates. */
	for (i = 0; i <= WPI_RIDX_MAX ; i++) {
		/* Retrieve TX power for this channel/rate. */
		idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i);

		cmd.rates[i].plcp = wpi_ridx_to_plcp[i];

		if (is_chan_5ghz) {
			cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx];
			cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx];
		} else {
			cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx];
			cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx];
		}
		DPRINTF(sc, WPI_DEBUG_TEMP,
		    "chan %d/ridx %d: power index %d\n", chan, i, idx);
	}

	return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async);
}
/*
 * Determine Tx power index for a given channel/rate combination.
 * This takes into account the regulatory information from EEPROM and the
 * current temperature.
 *
 * The result is clamped to [0, WPI_MAX_PWR_INDEX] and indexes the
 * wpi_{rf,dsp}_gain_* tables (higher index = lower output power).
 */
static int
wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group,
    uint8_t chan, int is_chan_5ghz, int ridx)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n)	\
	((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))

/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n)	\
	((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))

	struct wpi_power_sample *sample;
	int pwr, idx;

	/* Default TX power is group maximum TX power minus 3dB. */
	pwr = group->maxpwr / 2;

	/* Decrease TX power for highest OFDM rates to reduce distortion. */
	switch (ridx) {
	case WPI_RIDX_OFDM36:
		pwr -= is_chan_5ghz ?  5 : 0;
		break;
	case WPI_RIDX_OFDM48:
		pwr -= is_chan_5ghz ? 10 : 7;
		break;
	case WPI_RIDX_OFDM54:
		pwr -= is_chan_5ghz ? 12 : 9;
		break;
	}

	/* Never exceed the channel maximum allowed TX power. */
	pwr = min(pwr, sc->maxpwr[chan]);

	/*
	 * Retrieve TX power index into gain tables from samples.
	 * Select the calibration sample pair bracketing 'pwr'; if none
	 * brackets it, the last pair is used for extrapolation.
	 */
	for (sample = group->samples; sample < &group->samples[3]; sample++)
		if (pwr > sample[1].power)
			break;
	/* Fixed-point linear interpolation using a 19-bit fractional part. */
	idx = interpolate(pwr, sample[0].power, sample[0].index,
	    sample[1].power, sample[1].index, 19);

	/*-
	 * Adjust power index based on current temperature:
	 *	- if cooler than factory-calibrated: decrease output power
	 *	- if warmer than factory-calibrated: increase output power
	 */
	idx -= (sc->temp - group->temp) * 11 / 100;

	/* Decrease TX power for CCK rates (-5dB). */
	if (ridx >= WPI_RIDX_CCK1)
		idx += 10;

	/* Make sure idx stays in a valid range. */
	if (idx < 0)
		return 0;
	if (idx > WPI_MAX_PWR_INDEX)
		return WPI_MAX_PWR_INDEX;
	return idx;

#undef interpolate
#undef fdivround
}
/*
 * Set STA mode power saving level (between 0 and 5).
 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
 *
 * 'dtim' is the DTIM period in beacon intervals (0 = unknown/none);
 * it selects which of the two parameter tables applies and bounds the
 * firmware sleep intervals.  Returns 0 or an errno from wpi_cmd().
 */
static int
wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async)
{
	struct wpi_pmgt_cmd cmd;
	const struct wpi_pmgt *pmgt;
	uint32_t max, skip_dtim;
	uint32_t reg;
	int i;

	DPRINTF(sc, WPI_DEBUG_PWRSAVE,
	    "%s: dtim=%d, level=%d, async=%d\n",
	    __func__, dtim, level, async);

	/* Select which PS parameters to use. */
	if (dtim <= 10)
		pmgt = &wpi_pmgt[0][level];
	else
		pmgt = &wpi_pmgt[1][level];

	memset(&cmd, 0, sizeof cmd);
	if (level != 0)	/* not CAM */
		cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP);
	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	if (!(reg & 0x1))	/* L0s Entry disabled. */
		cmd.flags |= htole16(WPI_PS_PCI_PMGT);

	cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU);
	cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU);

	if (dtim == 0) {
		/* No DTIM known: treat the period as 1 and never skip. */
		dtim = 1;
		skip_dtim = 0;
	} else
		skip_dtim = pmgt->skip_dtim;

	if (skip_dtim != 0) {
		cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM);
		max = pmgt->intval[4];
		if (max == (uint32_t)-1)
			max = dtim * (skip_dtim + 1);
		else if (max > dtim)
			/* Round down to a multiple of the DTIM period. */
			max = (max / dtim) * dtim;
	} else
		max = dtim;

	for (i = 0; i < 5; i++)
		cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));

	return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}

/*
 * Configure bluetooth coexistence (4-wire mode with default lead time
 * and kill mask).  Sent synchronously during device configuration.
 */
static int
wpi_send_btcoex(struct wpi_softc *sc)
{
	struct wpi_bluetooth cmd;

	memset(&cmd, 0, sizeof cmd);
	cmd.flags = WPI_BT_COEX_MODE_4WIRE;
	cmd.lead_time = WPI_BT_LEAD_TIME_DEF;
	cmd.max_kill = WPI_BT_MAX_KILL_DEF;
	DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
	    __func__);
	return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}
rxon_assoc.reserved = 0; 3659 3660 error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc, 3661 sizeof (struct wpi_assoc), async); 3662 if (error != 0) { 3663 device_printf(sc->sc_dev, 3664 "RXON_ASSOC command failed, error %d\n", error); 3665 return error; 3666 } 3667 } else { 3668 if (async) { 3669 WPI_NT_LOCK(sc); 3670 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3671 sizeof (struct wpi_rxon), async); 3672 if (error == 0) 3673 wpi_clear_node_table(sc); 3674 WPI_NT_UNLOCK(sc); 3675 } else { 3676 error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon, 3677 sizeof (struct wpi_rxon), async); 3678 if (error == 0) 3679 wpi_clear_node_table(sc); 3680 } 3681 3682 if (error != 0) { 3683 device_printf(sc->sc_dev, 3684 "RXON command failed, error %d\n", error); 3685 return error; 3686 } 3687 3688 /* Add broadcast node. */ 3689 error = wpi_add_broadcast_node(sc, async); 3690 if (error != 0) { 3691 device_printf(sc->sc_dev, 3692 "could not add broadcast node, error %d\n", error); 3693 return error; 3694 } 3695 } 3696 3697 /* Configuration has changed, set Tx power accordingly. */ 3698 if ((error = wpi_set_txpower(sc, async)) != 0) { 3699 device_printf(sc->sc_dev, 3700 "%s: could not set TX power, error %d\n", __func__, error); 3701 return error; 3702 } 3703 3704 return 0; 3705} 3706 3707/** 3708 * Configure the card to listen to a particular channel, this transisions the 3709 * card in to being able to receive frames from remote devices. 3710 */ 3711static int 3712wpi_config(struct wpi_softc *sc) 3713{ 3714 struct ifnet *ifp = sc->sc_ifp; 3715 struct ieee80211com *ic = ifp->if_l2com; 3716 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3717 struct ieee80211_channel *c = ic->ic_curchan; 3718 uint32_t flags; 3719 int error; 3720 3721 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3722 3723 /* Set power saving level to CAM during initialization. 
/**
 * Configure the card to listen to a particular channel, this transisions the
 * card in to being able to receive frames from remote devices.
 *
 * Performs initial device configuration: power-save off (CAM), BT
 * coexistence, a full RXON for the current opmode/channel, MRR rate
 * tables, and disabling of per-beacon statistics notifications.
 * Returns 0 or an errno on the first failing step.
 */
static int
wpi_config(struct wpi_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct ieee80211_channel *c = ic->ic_curchan;
	uint32_t flags;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	/* Set power saving level to CAM during initialization. */
	if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not set power saving level\n", __func__);
		return error;
	}

	/* Configure bluetooth coexistence. */
	if ((error = wpi_send_btcoex(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "could not configure bluetooth coexistence\n");
		return error;
	}

	/* Configure adapter. */
	memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
	IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);

	/* Set default channel. */
	sc->rxon.chan = ieee80211_chan2ieee(ic, c);
	sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(c))
		sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);

	/* NB: filter bits accumulate in host order, swapped below. */
	sc->rxon.filter = WPI_FILTER_MULTICAST;
	switch (ic->ic_opmode) {
	case IEEE80211_M_STA:
		sc->rxon.mode = WPI_MODE_STA;
		break;
	case IEEE80211_M_IBSS:
		sc->rxon.mode = WPI_MODE_IBSS;
		sc->rxon.filter |= WPI_FILTER_BEACON;
		break;
	case IEEE80211_M_HOSTAP:
		/* XXX workaround for beaconing */
		sc->rxon.mode = WPI_MODE_IBSS;
		sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
		break;
	case IEEE80211_M_AHDEMO:
		/* XXX workaround for passive channels selection */
		sc->rxon.mode = WPI_MODE_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->rxon.mode = WPI_MODE_MONITOR;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n",
		    ic->ic_opmode);
		return EINVAL;
	}
	sc->rxon.filter = htole32(sc->rxon.filter);
	wpi_set_promisc(sc);
	sc->rxon.cck_mask  = 0x0f;	/* not yet negotiated */
	sc->rxon.ofdm_mask = 0xff;	/* not yet negotiated */

	if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
		device_printf(sc->sc_dev, "%s: could not send RXON\n",
		    __func__);
		return error;
	}

	/* Setup rate scalling. */
	if ((error = wpi_mrr_setup(sc)) != 0) {
		device_printf(sc->sc_dev, "could not setup MRR, error %d\n",
		    error);
		return error;
	}

	/* Disable beacon notifications (unused). */
	flags = WPI_STATISTICS_BEACON_DISABLE;
	error = wpi_cmd(sc, WPI_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not disable beacon statistics, error %d\n", error);
		return error;
	}

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	return 0;
}
*/ 3785 if ((error = wpi_mrr_setup(sc)) != 0) { 3786 device_printf(sc->sc_dev, "could not setup MRR, error %d\n", 3787 error); 3788 return error; 3789 } 3790 3791 /* Disable beacon notifications (unused). */ 3792 flags = WPI_STATISTICS_BEACON_DISABLE; 3793 error = wpi_cmd(sc, WPI_CMD_GET_STATISTICS, &flags, sizeof flags, 1); 3794 if (error != 0) { 3795 device_printf(sc->sc_dev, 3796 "could not disable beacon statistics, error %d\n", error); 3797 return error; 3798 } 3799 3800 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__); 3801 3802 return 0; 3803} 3804 3805static uint16_t 3806wpi_get_active_dwell_time(struct wpi_softc *sc, 3807 struct ieee80211_channel *c, uint8_t n_probes) 3808{ 3809 /* No channel? Default to 2GHz settings. */ 3810 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { 3811 return (WPI_ACTIVE_DWELL_TIME_2GHZ + 3812 WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); 3813 } 3814 3815 /* 5GHz dwell time. */ 3816 return (WPI_ACTIVE_DWELL_TIME_5GHZ + 3817 WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); 3818} 3819 3820/* 3821 * Limit the total dwell time. 3822 * 3823 * Returns the dwell time in milliseconds. 3824 */ 3825static uint16_t 3826wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time) 3827{ 3828 struct ieee80211com *ic = sc->sc_ifp->if_l2com; 3829 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3830 int bintval = 0; 3831 3832 /* bintval is in TU (1.024mS) */ 3833 if (vap != NULL) 3834 bintval = vap->iv_bss->ni_intval; 3835 3836 /* 3837 * If it's non-zero, we should calculate the minimum of 3838 * it and the DWELL_BASE. 3839 * 3840 * XXX Yes, the math should take into account that bintval 3841 * is 1.024mS, not 1mS.. 3842 */ 3843 if (bintval > 0) { 3844 DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__, 3845 bintval); 3846 return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2)); 3847 } 3848 3849 /* No association context? Default. 
*/ 3850 return dwell_time; 3851} 3852 3853static uint16_t 3854wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c) 3855{ 3856 uint16_t passive; 3857 3858 if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) 3859 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ; 3860 else 3861 passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ; 3862 3863 /* Clamp to the beacon interval if we're associated. */ 3864 return (wpi_limit_dwell(sc, passive)); 3865} 3866 3867/* 3868 * Send a scan request to the firmware. 3869 */ 3870static int 3871wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) 3872{ 3873 struct ifnet *ifp = sc->sc_ifp; 3874 struct ieee80211com *ic = ifp->if_l2com; 3875 struct ieee80211_scan_state *ss = ic->ic_scan; 3876 struct ieee80211vap *vap = ss->ss_vap; 3877 struct wpi_scan_hdr *hdr; 3878 struct wpi_cmd_data *tx; 3879 struct wpi_scan_essid *essids; 3880 struct wpi_scan_chan *chan; 3881 struct ieee80211_frame *wh; 3882 struct ieee80211_rateset *rs; 3883 uint16_t dwell_active, dwell_passive; 3884 uint8_t *buf, *frm; 3885 int bgscan, bintval, buflen, error, i, nssid; 3886 3887 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); 3888 3889 /* 3890 * We are absolutely not allowed to send a scan command when another 3891 * scan command is pending. 
/*
 * Send a scan request to the firmware.
 *
 * Builds the variable-length SCAN command in a temporary buffer with
 * the layout the firmware requires, in order: scan header, TX command
 * for the probe request, WPI_SCAN_MAX_ESSIDS essid slots, the probe
 * request frame itself, then one wpi_scan_chan entry per channel.
 * Only the single channel 'c' is scanned per invocation (net80211
 * drives the channel list).  Returns 0, EAGAIN if a scan is already in
 * flight, EOPNOTSUPP for a background scan that cannot fit into the
 * beacon interval, or an errno.
 */
static int
wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_scan_state *ss = ic->ic_scan;
	struct ieee80211vap *vap = ss->ss_vap;
	struct wpi_scan_hdr *hdr;
	struct wpi_cmd_data *tx;
	struct wpi_scan_essid *essids;
	struct wpi_scan_chan *chan;
	struct ieee80211_frame *wh;
	struct ieee80211_rateset *rs;
	uint16_t dwell_active, dwell_passive;
	uint8_t *buf, *frm;
	int bgscan, bintval, buflen, error, i, nssid;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	/*
	 * We are absolutely not allowed to send a scan command when another
	 * scan command is pending.
	 */
	if (callout_pending(&sc->scan_timeout)) {
		device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
		    __func__);
		error = EAGAIN;
		goto fail;
	}

	/* A background scan must fit between two beacons. */
	bgscan = wpi_check_bss_filter(sc);
	bintval = vap->iv_bss->ni_intval;
	if (bgscan != 0 &&
	    bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) {
		error = EOPNOTSUPP;
		goto fail;
	}

	buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (buf == NULL) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate buffer for scan command\n",
		    __func__);
		error = ENOMEM;
		goto fail;
	}
	hdr = (struct wpi_scan_hdr *)buf;

	/*
	 * Move to the next channel if no packets are received within 10 msecs
	 * after sending the probe request.
	 */
	hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT);
	hdr->quiet_threshold = htole16(1);
	/*
	 * Max needs to be greater than active and passive and quiet!
	 * It's also in microseconds!
	 */
	hdr->max_svc = htole32(250 * IEEE80211_DUR_TU);
	hdr->pause_svc = htole32((4 << 24) |
	    (100 * IEEE80211_DUR_TU));	/* Hardcode for now */
	hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON);

	tx = (struct wpi_cmd_data *)(hdr + 1);
	tx->flags = htole32(WPI_TX_AUTO_SEQ);
	tx->id = WPI_ID_BROADCAST;
	tx->lifetime = htole32(WPI_LIFETIME_INFINITE);

	if (IEEE80211_IS_CHAN_5GHZ(c)) {
		/* Send probe requests at 6Mbps. */
		tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6];
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
	} else {
		hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO);
		/* Send probe requests at 1Mbps. */
		tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1];
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	}

	essids = (struct wpi_scan_essid *)(tx + 1);
	nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS);
	for (i = 0; i < nssid; i++) {
		essids[i].id = IEEE80211_ELEMID_SSID;
		essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
		memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len);
#ifdef WPI_DEBUG
		if (sc->sc_debug & WPI_DEBUG_SCAN) {
			printf("Scanning Essid: ");
			ieee80211_print_essid(essids[i].data, essids[i].len);
			printf("\n");
		}
#endif
	}

	/*
	 * Build a probe request frame. Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS);
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by h/w */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by h/w */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, NULL, 0);
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);

	/* Set length of probe request. */
	tx->len = htole16(frm - (uint8_t *)wh);

	/*
	 * Construct information about the channel that we
	 * want to scan. The firmware expects this to be directly
	 * after the scan probe request
	 */
	chan = (struct wpi_scan_chan *)frm;
	chan->chan = htole16(ieee80211_chan2ieee(ic, c));
	chan->flags = 0;
	if (nssid) {
		hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT;
		chan->flags |= WPI_CHAN_NPBREQS(nssid);
	} else
		hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER;

	if (!IEEE80211_IS_CHAN_PASSIVE(c))
		chan->flags |= WPI_CHAN_ACTIVE;

	/*
	 * Calculate the active/passive dwell times.
	 */

	dwell_active = wpi_get_active_dwell_time(sc, c, nssid);
	dwell_passive = wpi_get_passive_dwell_time(sc, c);

	/* Make sure they're valid. */
	if (dwell_active > dwell_passive)
		dwell_active = dwell_passive;

	chan->active = htole16(dwell_active);
	chan->passive = htole16(dwell_passive);

	chan->dsp_gain = 0x6e;	/* Default level */

	if (IEEE80211_IS_CHAN_5GHZ(c))
		chan->rf_gain = 0x3b;
	else
		chan->rf_gain = 0x28;

	DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n",
	    chan->chan, IEEE80211_IS_CHAN_PASSIVE(c));

	hdr->nchan++;

	if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) {
		/* XXX Force probe request transmission. */
		memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan));

		chan++;

		/* Reduce unnecessary delay. */
		chan->flags = 0;
		chan->passive = chan->active = hdr->quiet_time;

		hdr->nchan++;
	}

	/* 'chan' now points one past the last entry; compute total length. */
	chan++;

	buflen = (uint8_t *)chan - buf;
	hdr->len = htole16(buflen);

	DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n",
	    hdr->nchan);
	error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1);
	free(buf, M_DEVBUF);

	if (error != 0)
		goto fail;

	/* Watchdog: declare the scan dead if no completion within 5s. */
	callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	return 0;

fail:	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);

	return error;
}
/*
 * AUTH state handler: point the adapter at the target BSS (bssid,
 * channel, slot/preamble flags, rate masks) without the BSS filter so
 * that authentication frames flow.  Sends a full RXON under the RXON
 * lock.  Returns 0 or the wpi_send_rxon() error.
 */
static int
wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_node *ni = vap->iv_bss;
	struct ieee80211_channel *c = ni->ni_chan;
	int error;

	WPI_RXON_LOCK(sc);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	/* Update adapter configuration. */
	sc->rxon.associd = 0;
	sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
	IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
	sc->rxon.chan = ieee80211_chan2ieee(ic, c);
	sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
	if (IEEE80211_IS_CHAN_2GHZ(c))
		sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
	if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
		sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
	if (IEEE80211_IS_CHAN_A(c)) {
		sc->rxon.cck_mask = 0;
		sc->rxon.ofdm_mask = 0x15;
	} else if (IEEE80211_IS_CHAN_B(c)) {
		sc->rxon.cck_mask = 0x03;
		sc->rxon.ofdm_mask = 0;
	} else {
		/* Assume 802.11b/g. */
		sc->rxon.cck_mask = 0x0f;
		sc->rxon.ofdm_mask = 0x15;
	}

	DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
	    sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
	    sc->rxon.ofdm_mask);

	if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
		device_printf(sc->sc_dev, "%s: could not send RXON\n",
		    __func__);
	}

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	WPI_RXON_UNLOCK(sc);

	return error;
}
/*
 * Push the current beacon frame (wvp->wv_bcbuf) to the firmware.
 * Called with the VAP lock held.  The mbuf is temporarily replaced by a
 * duplicate while the command is in flight because
 * ieee80211_beacon_update() may recurse into this path; the original
 * is restored before returning.  Returns 0 or an errno.
 */
static int
wpi_config_beacon(struct wpi_vap *wvp)
{
	struct ieee80211com *ic = wvp->wv_vap.iv_ic;
	struct ieee80211_beacon_offsets *bo = &wvp->wv_boff;
	struct wpi_buf *bcn = &wvp->wv_bcbuf;
	struct wpi_softc *sc = ic->ic_ifp->if_softc;
	struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
	struct ieee80211_tim_ie *tie;
	struct mbuf *m;
	uint8_t *ptr;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	WPI_VAP_LOCK_ASSERT(wvp);

	cmd->len = htole16(bcn->m->m_pkthdr.len);
	/* Beacon rate: OFDM6 when 11a-only, CCK1 otherwise. */
	cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
	    wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];

	/* XXX seems to be unused */
	if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) {
		tie = (struct ieee80211_tim_ie *) bo->bo_tim;
		ptr = mtod(bcn->m, uint8_t *);

		cmd->tim = htole16(bo->bo_tim - ptr);
		cmd->timsz = tie->tim_len;
	}

	/* Necessary for recursion in ieee80211_beacon_update(). */
	m = bcn->m;
	bcn->m = m_dup(m, M_NOWAIT);
	if (bcn->m == NULL) {
		device_printf(sc->sc_dev,
		    "%s: could not copy beacon frame\n", __func__);
		error = ENOMEM;
		goto end;
	}

	if ((error = wpi_cmd2(sc, bcn)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not update beacon frame, error %d", __func__,
		    error);
	}

	/* Restore mbuf. */
end:	bcn->m = m;

	return error;
}
/*
 * Allocate the initial beacon frame for IBSS/HOSTAP operation and hand
 * it to the firmware via wpi_config_beacon().  Any previously cached
 * beacon mbuf is freed.  Returns EINVAL when no channel is configured,
 * ENOMEM on allocation failure, or the wpi_config_beacon() result.
 */
static int
wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni)
{
	struct wpi_vap *wvp = WPI_VAP(ni->ni_vap);
	struct wpi_buf *bcn = &wvp->wv_bcbuf;
	struct ieee80211_beacon_offsets *bo = &wvp->wv_boff;
	struct mbuf *m;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	if (ni->ni_chan == IEEE80211_CHAN_ANYC)
		return EINVAL;

	m = ieee80211_beacon_alloc(ni, bo);
	if (m == NULL) {
		device_printf(sc->sc_dev,
		    "%s: could not allocate beacon frame\n", __func__);
		return ENOMEM;
	}

	WPI_VAP_LOCK(wvp);
	if (bcn->m != NULL)
		m_freem(bcn->m);

	bcn->m = m;

	error = wpi_config_beacon(wvp);
	WPI_VAP_UNLOCK(wvp);

	return error;
}

/*
 * net80211 iv_update_beacon hook: refresh element 'item' (e.g. the TIM)
 * in the cached beacon and re-send it to the firmware.
 *
 * NOTE(review): the VAP lock is dropped between the mbuf null-check and
 * the ieee80211_beacon_update() call that dereferences bcn->m — looks
 * racy against a concurrent wpi_setup_beacon(); confirm upstream.
 */
static void
wpi_update_beacon(struct ieee80211vap *vap, int item)
{
	struct wpi_softc *sc = vap->iv_ic->ic_ifp->if_softc;
	struct wpi_vap *wvp = WPI_VAP(vap);
	struct wpi_buf *bcn = &wvp->wv_bcbuf;
	struct ieee80211_beacon_offsets *bo = &wvp->wv_boff;
	struct ieee80211_node *ni = vap->iv_bss;
	int mcast = 0;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	WPI_VAP_LOCK(wvp);
	if (bcn->m == NULL) {
		bcn->m = ieee80211_beacon_alloc(ni, bo);
		if (bcn->m == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not allocate beacon frame\n", __func__);

			DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR,
			    __func__);

			WPI_VAP_UNLOCK(wvp);
			return;
		}
	}
	WPI_VAP_UNLOCK(wvp);

	if (item == IEEE80211_BEACON_TIM)
		mcast = 1;	/* TODO */

	setbit(bo->bo_flags, item);
	ieee80211_beacon_update(ni, bo, bcn->m, mcast);

	WPI_VAP_LOCK(wvp);
	wpi_config_beacon(wvp);
	WPI_VAP_UNLOCK(wvp);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
}
/*
 * net80211 ic_newassoc hook: in IBSS/HOSTAP modes, lazily add the newly
 * associated peer to the firmware's adhoc station table.  STA-mode
 * peers and already-known nodes are left alone.  'isnew' is unused.
 */
static void
wpi_newassoc(struct ieee80211_node *ni, int isnew)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct wpi_node *wn = WPI_NODE(ni);
	int error;

	WPI_NT_LOCK(sc);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) {
		if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not add IBSS node, error %d\n",
			    __func__, error);
		}
	}
	WPI_NT_UNLOCK(sc);
}
*/ 4297 WPI_RXON_LOCK(sc); 4298 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid); 4299 sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni)); 4300 sc->rxon.chan = ieee80211_chan2ieee(ic, c); 4301 sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF); 4302 if (IEEE80211_IS_CHAN_2GHZ(c)) 4303 sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ); 4304 if (ic->ic_flags & IEEE80211_F_SHSLOT) 4305 sc->rxon.flags |= htole32(WPI_RXON_SHSLOT); 4306 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) 4307 sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE); 4308 if (IEEE80211_IS_CHAN_A(c)) { 4309 sc->rxon.cck_mask = 0; 4310 sc->rxon.ofdm_mask = 0x15; 4311 } else if (IEEE80211_IS_CHAN_B(c)) { 4312 sc->rxon.cck_mask = 0x03; 4313 sc->rxon.ofdm_mask = 0; 4314 } else { 4315 /* Assume 802.11b/g. */ 4316 sc->rxon.cck_mask = 0x0f; 4317 sc->rxon.ofdm_mask = 0x15; 4318 } 4319 sc->rxon.filter |= htole32(WPI_FILTER_BSS); 4320 4321 DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n", 4322 sc->rxon.chan, sc->rxon.flags); 4323 4324 if ((error = wpi_send_rxon(sc, 0, 1)) != 0) { 4325 device_printf(sc->sc_dev, "%s: could not send RXON\n", 4326 __func__); 4327 return error; 4328 } 4329 4330 /* Start periodic calibration timer. */ 4331 callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); 4332 4333 WPI_RXON_UNLOCK(sc); 4334 4335 if (vap->iv_opmode == IEEE80211_M_IBSS || 4336 vap->iv_opmode == IEEE80211_M_HOSTAP) { 4337 if ((error = wpi_setup_beacon(sc, ni)) != 0) { 4338 device_printf(sc->sc_dev, 4339 "%s: could not setup beacon, error %d\n", __func__, 4340 error); 4341 return error; 4342 } 4343 } 4344 4345 if (vap->iv_opmode == IEEE80211_M_STA) { 4346 /* Add BSS node. */ 4347 WPI_NT_LOCK(sc); 4348 error = wpi_add_sta_node(sc, ni); 4349 WPI_NT_UNLOCK(sc); 4350 if (error != 0) { 4351 device_printf(sc->sc_dev, 4352 "%s: could not add BSS node, error %d\n", __func__, 4353 error); 4354 return error; 4355 } 4356 } 4357 4358 /* Link LED always on while associated. 
/*
 * Program a CCMP key for node 'ni' into the firmware via ADD_NODE.
 * Follows the net80211 key-set convention: returns 1 on success and
 * 0 on failure (note the 'return !error' below yields 0 since error is
 * non-zero on that path).  Global (iv_nw_keys) unicast keys are pushed
 * a second time with the multicast flag set (the 'again' loop).
 * Called with the node-table lock held.
 */
static int
wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
{
	const struct ieee80211_cipher *cip = k->wk_cipher;
	struct ieee80211vap *vap = ni->ni_vap;
	struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct wpi_node *wn = WPI_NODE(ni);
	struct wpi_node_info node;
	uint16_t kflags;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	if (wpi_check_node_entry(sc, wn->id) == 0) {
		device_printf(sc->sc_dev, "%s: node does not exist\n",
		    __func__);
		return 0;
	}

	switch (cip->ic_cipher) {
	case IEEE80211_CIPHER_AES_CCM:
		kflags = WPI_KFLAG_CCMP;
		break;

	default:
		/* Only CCMP is offloaded to the hardware. */
		device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__,
		    cip->ic_cipher);
		return 0;
	}

	kflags |= WPI_KFLAG_KID(k->wk_keyix);
	if (k->wk_flags & IEEE80211_KEY_GROUP)
		kflags |= WPI_KFLAG_MULTICAST;

	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = WPI_NODE_UPDATE;
	node.flags = WPI_FLAG_KEY_SET;
	node.kflags = htole16(kflags);
	memcpy(node.key, k->wk_key, k->wk_keylen);
again:
	DPRINTF(sc, WPI_DEBUG_KEY,
	    "%s: setting %s key id %d for node %d (%s)\n", __func__,
	    (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix,
	    node.id, ether_sprintf(ni->ni_macaddr));

	error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "can't update node info, error %d\n",
		    error);
		return !error;	/* error != 0, so this is 0 (failure) */
	}

	/* Global unicast key: install it for multicast reception too. */
	if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
		kflags |= WPI_KFLAG_MULTICAST;
		node.kflags = htole16(kflags);

		goto again;
	}

	return 1;
}
"group" : "ucast", k->wk_keyix, 4415 node.id, ether_sprintf(ni->ni_macaddr)); 4416 4417 error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); 4418 if (error != 0) { 4419 device_printf(sc->sc_dev, "can't update node info, error %d\n", 4420 error); 4421 return !error; 4422 } 4423 4424 if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k && 4425 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { 4426 kflags |= WPI_KFLAG_MULTICAST; 4427 node.kflags = htole16(kflags); 4428 4429 goto again; 4430 } 4431 4432 return 1; 4433} 4434 4435static void 4436wpi_load_key_cb(void *arg, struct ieee80211_node *ni) 4437{ 4438 const struct ieee80211_key *k = arg; 4439 struct ieee80211vap *vap = ni->ni_vap; 4440 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4441 struct wpi_node *wn = WPI_NODE(ni); 4442 int error; 4443 4444 if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED) 4445 return; 4446 4447 WPI_NT_LOCK(sc); 4448 error = wpi_load_key(ni, k); 4449 WPI_NT_UNLOCK(sc); 4450 4451 if (error == 0) { 4452 device_printf(sc->sc_dev, "%s: error while setting key\n", 4453 __func__); 4454 } 4455} 4456 4457static int 4458wpi_set_global_keys(struct ieee80211_node *ni) 4459{ 4460 struct ieee80211vap *vap = ni->ni_vap; 4461 struct ieee80211_key *wk = &vap->iv_nw_keys[0]; 4462 int error = 1; 4463 4464 for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++) 4465 if (wk->wk_keyix != IEEE80211_KEYIX_NONE) 4466 error = wpi_load_key(ni, wk); 4467 4468 return !error; 4469} 4470 4471static int 4472wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k) 4473{ 4474 struct ieee80211vap *vap = ni->ni_vap; 4475 struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc; 4476 struct wpi_node *wn = WPI_NODE(ni); 4477 struct wpi_node_info node; 4478 uint16_t kflags; 4479 int error; 4480 4481 DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__); 4482 4483 if (wpi_check_node_entry(sc, wn->id) == 0) { 4484 DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__); 4485 return 
		    1;	/* Nothing to do. */
	}

	kflags = WPI_KFLAG_KID(k->wk_keyix);
	if (k->wk_flags & IEEE80211_KEY_GROUP)
		kflags |= WPI_KFLAG_MULTICAST;

	/*
	 * Same ADD_NODE update as wpi_load_key(), but the key blob stays
	 * zeroed -- presumably this clears the key slot in firmware.
	 */
	memset(&node, 0, sizeof node);
	node.id = wn->id;
	node.control = WPI_NODE_UPDATE;
	node.flags = WPI_FLAG_KEY_SET;
	node.kflags = htole16(kflags);
again:
	DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n",
	    __func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast",
	    k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr));

	error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
	if (error != 0) {
		device_printf(sc->sc_dev, "can't update node info, error %d\n",
		    error);
		return !error;	/* error != 0 here, so this yields 0. */
	}

	/*
	 * A unicast key taken from the global key slots was installed
	 * twice (see wpi_load_key()); delete the multicast copy too.
	 */
	if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
		kflags |= WPI_KFLAG_MULTICAST;
		node.kflags = htole16(kflags);

		goto again;
	}

	return 1;
}

/*
 * ieee80211_iterate_nodes() callback: delete a group key for one node.
 */
static void
wpi_del_key_cb(void *arg, struct ieee80211_node *ni)
{
	const struct ieee80211_key *k = arg;
	struct ieee80211vap *vap = ni->ni_vap;
	struct wpi_softc *sc = ni->ni_ic->ic_ifp->if_softc;
	struct wpi_node *wn = WPI_NODE(ni);
	int error;

	if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
		return;

	WPI_NT_LOCK(sc);
	error = wpi_del_key(ni, k);
	WPI_NT_UNLOCK(sc);

	/* wpi_del_key() returns 0 on failure. */
	if (error == 0) {
		device_printf(sc->sc_dev, "%s: error while deleting key\n",
		    __func__);
	}
}

/*
 * Common backend for the net80211 key_set/key_delete methods; "set"
 * selects between installing and deleting.  Returns 1 on success and
 * 0 on failure (net80211 convention).
 */
static int
wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k,
    int set)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct wpi_softc *sc = ic->ic_ifp->if_softc;
	struct wpi_vap *wvp = WPI_VAP(vap);
	struct ieee80211_node *ni;
	int error, ni_ref = 0;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		/* Not for us. */
		return 1;
	}

	if (!(k->wk_flags & IEEE80211_KEY_RECV)) {
		/* XMIT keys are handled in wpi_tx_data(). */
		return 1;
	}

	/* Handle group keys. */
	if (&vap->iv_nw_keys[0] <= k &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
		/* Track which global key slots are in use. */
		WPI_NT_LOCK(sc);
		if (set)
			wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix);
		else
			wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix);
		WPI_NT_UNLOCK(sc);

		/* Push the change to every known node while running. */
		if (vap->iv_state == IEEE80211_S_RUN) {
			ieee80211_iterate_nodes(&ic->ic_sta,
			    set ? wpi_load_key_cb : wpi_del_key_cb,
			    __DECONST(void *, k));
		}

		return 1;
	}

	/* Pairwise key: locate the node it belongs to. */
	switch (vap->iv_opmode) {
	case IEEE80211_M_STA:
		ni = vap->iv_bss;
		break;

	case IEEE80211_M_IBSS:
	case IEEE80211_M_AHDEMO:
	case IEEE80211_M_HOSTAP:
		ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr);
		if (ni == NULL)
			return 0;	/* should not happen */

		ni_ref = 1;	/* Reference from find_vap_node; drop below. */
		break;

	default:
		device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__,
		    vap->iv_opmode);
		return 0;
	}

	WPI_NT_LOCK(sc);
	if (set)
		error = wpi_load_key(ni, k);
	else
		error = wpi_del_key(ni, k);
	WPI_NT_UNLOCK(sc);

	if (ni_ref)
		ieee80211_node_decref(ni);

	return error;
}

/* net80211 key_set method: install a receive key. */
static int
wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return wpi_process_key(vap, k, 1);
}

/* net80211 key_delete method: remove a receive key. */
static int
wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
	return wpi_process_key(vap, k, 0);
}

/*
 * This function is called after the runtime firmware notifies us of its
 * readiness (called in a process context).
 */
static int
wpi_post_alive(struct wpi_softc *sc)
{
	int ntries, error;

	/* Check (again) that the radio is not disabled.
 */
	if ((error = wpi_nic_lock(sc)) != 0)
		return error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	/* NB: Runtime firmware must be up and running. */
	if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) {
		device_printf(sc->sc_dev,
		    "RF switch: radio disabled (%s)\n", __func__);
		wpi_nic_unlock(sc);
		return EPERM;	/* :-) */
	}
	wpi_nic_unlock(sc);

	/* Wait for thermal sensor to calibrate. */
	for (ntries = 0; ntries < 1000; ntries++) {
		if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0)
			break;
		DELAY(10);
	}

	if (ntries == 1000) {
		device_printf(sc->sc_dev,
		    "timeout waiting for thermal sensor calibration\n");
		return ETIMEDOUT;
	}

	DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp);
	return 0;
}

/*
 * The firmware boot code is small and is intended to be copied directly into
 * the NIC internal memory (no DMA transfer).
 *
 * "size" is in bytes (it must be dword-aligned; wpi_read_firmware()
 * checks this) and is converted to a dword count below.  Returns 0 on
 * success or an errno on failure.
 */
static int
wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size)
{
	int error, ntries;

	DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size);

	/* The BSM operates on 32-bit words. */
	size /= sizeof (uint32_t);

	if ((error = wpi_nic_lock(sc)) != 0)
		return error;

	/* Copy microcode image into NIC memory. */
	wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE,
	    (const uint32_t *)ucode, size);

	/* Program source, destination and length of the BSM copy. */
	wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0);
	wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE);
	wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size);

	/* Start boot load now. */
	wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START);

	/* Wait for transfer to complete. */
	for (ntries = 0; ntries < 1000; ntries++) {
		uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS);
		DPRINTF(sc, WPI_DEBUG_HW,
		    "firmware status=0x%x, val=0x%x, result=0x%x\n", status,
		    WPI_FH_TX_STATUS_IDLE(6),
		    status & WPI_FH_TX_STATUS_IDLE(6));
		if (status & WPI_FH_TX_STATUS_IDLE(6)) {
			DPRINTF(sc, WPI_DEBUG_HW,
			    "Status Match! - ntries = %d\n", ntries);
			break;
		}
		DELAY(10);
	}
	if (ntries == 1000) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		wpi_nic_unlock(sc);
		return ETIMEDOUT;
	}

	/* Enable boot after power up. */
	wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN);

	wpi_nic_unlock(sc);
	return 0;
}

/*
 * Upload the init and runtime firmware sections to the device, waiting
 * for the init firmware's first "alive" notification in between.
 * Returns 0 on success or an errno on failure.
 */
static int
wpi_load_firmware(struct wpi_softc *sc)
{
	struct wpi_fw_info *fw = &sc->fw;
	struct wpi_dma_info *dma = &sc->fw_dma;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	/* Copy initialization sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find initialization sections. */
	if ((error = wpi_nic_lock(sc)) != 0)
		return error;
	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz);
	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
	    dma->paddr + WPI_FW_DATA_MAXSZ);
	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
	wpi_nic_unlock(sc);

	/* Load firmware boot code.
 */
	error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
		    __func__);
		return error;
	}

	/* Now press "execute". */
	WPI_WRITE(sc, WPI_RESET, 0);

	/* Wait at most one second for first alive notification. */
	if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}

	/* Copy runtime sections into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
	memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz);
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	/* Tell adapter where to find runtime sections. */
	if ((error = wpi_nic_lock(sc)) != 0)
		return error;
	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
	wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz);
	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
	    dma->paddr + WPI_FW_DATA_MAXSZ);
	wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE,
	    WPI_FW_UPDATED | fw->main.textsz);
	wpi_nic_unlock(sc);

	return 0;
}

/*
 * Fetch the firmware image via firmware(9), validate its header and set
 * up pointers to the runtime, init and boot sections.  On success the
 * image stays referenced through sc->fw_fp until wpi_unload_firmware().
 * Returns 0 on success or an errno on failure.
 */
static int
wpi_read_firmware(struct wpi_softc *sc)
{
	const struct firmware *fp;
	struct wpi_fw_info *fw = &sc->fw;
	const struct wpi_firmware_hdr *hdr;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	DPRINTF(sc, WPI_DEBUG_FIRMWARE,
	    "Attempting Loading Firmware from %s module\n", WPI_FW_NAME);

	/* Drop the driver lock while firmware_get() potentially sleeps. */
	WPI_UNLOCK(sc);
	fp = firmware_get(WPI_FW_NAME);
	WPI_LOCK(sc);

	if (fp == NULL) {
		device_printf(sc->sc_dev,
		    "could not load firmware image '%s'\n", WPI_FW_NAME);
		return EINVAL;
	}

	sc->fw_fp = fp;

	if (fp->datasize < sizeof (struct wpi_firmware_hdr)) {
		device_printf(sc->sc_dev,
		    "firmware file too short: %zu bytes\n", fp->datasize);
		error = EINVAL;
		goto fail;
	}

	fw->size = fp->datasize;
	fw->data = (const uint8_t *)fp->data;

	/* Extract firmware header information. */
	hdr = (const struct wpi_firmware_hdr *)fw->data;

	/*     | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW |
	   |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */

	fw->main.textsz = le32toh(hdr->rtextsz);
	fw->main.datasz = le32toh(hdr->rdatasz);
	fw->init.textsz = le32toh(hdr->itextsz);
	fw->init.datasz = le32toh(hdr->idatasz);
	fw->boot.textsz = le32toh(hdr->btextsz);
	fw->boot.datasz = 0;

	/* Sanity-check firmware header. */
	if (fw->main.textsz > WPI_FW_TEXT_MAXSZ ||
	    fw->main.datasz > WPI_FW_DATA_MAXSZ ||
	    fw->init.textsz > WPI_FW_TEXT_MAXSZ ||
	    fw->init.datasz > WPI_FW_DATA_MAXSZ ||
	    fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ ||
	    (fw->boot.textsz & 3) != 0) {
		device_printf(sc->sc_dev, "invalid firmware header\n");
		error = EINVAL;
		goto fail;
	}

	/* Check that all firmware sections fit. */
	if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz +
	    fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
		device_printf(sc->sc_dev,
		    "firmware file too short: %zu bytes\n", fw->size);
		error = EINVAL;
		goto fail;
	}

	/* Get pointers to firmware sections. */
	fw->main.text = (const uint8_t *)(hdr + 1);
	fw->main.data = fw->main.text + fw->main.textsz;
	fw->init.text = fw->main.data + fw->main.datasz;
	fw->init.data = fw->init.text + fw->init.textsz;
	fw->boot.text = fw->init.data + fw->init.datasz;

	DPRINTF(sc, WPI_DEBUG_FIRMWARE,
	    "Firmware Version: Major %d, Minor %d, Driver %d, \n"
	    "runtime (text: %u, data: %u) init (text: %u, data %u) "
	    "boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver),
	    fw->main.textsz, fw->main.datasz,
	    fw->init.textsz, fw->init.datasz, fw->boot.textsz);

	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text);
	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data);
	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text);
	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data);
	DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text);

	return 0;

fail:	wpi_unload_firmware(sc);
	return error;
}

/**
 * Free the referenced firmware image
 */
static void
wpi_unload_firmware(struct wpi_softc *sc)
{
	if (sc->fw_fp != NULL) {
		firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
		sc->fw_fp = NULL;
	}
}

/*
 * Signal "initialization complete" to the device and poll until the
 * MAC clock is reported stable.  Returns 0 on success or ETIMEDOUT.
 */
static int
wpi_clock_wait(struct wpi_softc *sc)
{
	int ntries;

	/* Set "initialization complete" bit. */
	WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);

	/* Wait for clock stabilization.
 */
	for (ntries = 0; ntries < 2500; ntries++) {
		if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY)
			return 0;
		DELAY(100);
	}
	device_printf(sc->sc_dev,
	    "%s: timeout waiting for clock stabilization\n", __func__);

	return ETIMEDOUT;
}

/*
 * Power on the adapter (APMG block): apply the documented hardware
 * workarounds, start the PLL, wait for a stable clock and enable the
 * DMA and BSM clocks.  Returns 0 on success or an errno on failure.
 */
static int
wpi_apm_init(struct wpi_softc *sc)
{
	uint32_t reg;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	/* Disable L0s exit timer (NMI bug workaround). */
	WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER);
	/* Don't wait for ICH L0s (ICH bug workaround). */
	WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX);

	/* Set FH wait threshold to max (HW bug under stress workaround). */
	WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000);

	/* Retrieve PCIe Active State Power Management (ASPM). */
	reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
	/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
	if (reg & 0x02)	/* L1 Entry enabled. */
		WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
	else
		WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);

	WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT);

	/* Wait for clock stabilization before accessing prph. */
	if ((error = wpi_clock_wait(sc)) != 0)
		return error;

	if ((error = wpi_nic_lock(sc)) != 0)
		return error;
	/* Cleanup. */
	wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400);
	wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200);

	/* Enable DMA and BSM (Bootstrap State Machine). */
	wpi_prph_write(sc, WPI_APMG_CLK_EN,
	    WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT);
	DELAY(20);
	/* Disable L1-Active. */
	wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS);
	wpi_nic_unlock(sc);

	return 0;
}

/*
 * Stop busmaster DMA and wait until the device reports the master is
 * disabled (unless it is already asleep).  Best effort: only logs on
 * timeout.
 */
static void
wpi_apm_stop_master(struct wpi_softc *sc)
{
	int ntries;

	/* Stop busmaster DMA activity. */
	WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER);

	if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) ==
	    WPI_GP_CNTRL_MAC_PS)
		return;	/* Already asleep. */

	for (ntries = 0; ntries < 100; ntries++) {
		if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED)
			return;
		DELAY(10);
	}
	device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
	    __func__);
}

/*
 * Power down the adapter: stop busmastering, then issue a software
 * reset.  Counterpart of wpi_apm_init().
 */
static void
wpi_apm_stop(struct wpi_softc *sc)
{
	wpi_apm_stop_master(sc);

	/* Reset the entire device. */
	WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW);
	DELAY(10);
	/* Clear "initialization complete" bit. */
	WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
}

/*
 * Program the HW_IF_CONFIG register according to the PCI revision and
 * the SKU information read from the EEPROM (sc->cap/rev/type).
 */
static void
wpi_nic_config(struct wpi_softc *sc)
{
	uint32_t rev;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	/* voodoo from the Linux "driver"..
 */
	rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1);
	if ((rev & 0xc0) == 0x40)
		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB);
	else if (!(rev & 0x80))
		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM);

	if (sc->cap == 0x80)
		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC);

	if ((sc->rev & 0xf0) == 0xd0)
		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
	else
		WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);

	if (sc->type > 1)
		WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B);
}

/*
 * Full hardware bring-up: power on the adapter, select the power
 * source, program the RX/TX rings and DMA channels, enable interrupts
 * and upload the firmware.  Returns 0 on success or an errno.
 */
static int
wpi_hw_init(struct wpi_softc *sc)
{
	int chnl, ntries, error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	/* Clear pending interrupts. */
	WPI_WRITE(sc, WPI_INT, 0xffffffff);

	if ((error = wpi_apm_init(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not power ON adapter, error %d\n", __func__,
		    error);
		return error;
	}

	/* Select VMAIN power source. */
	if ((error = wpi_nic_lock(sc)) != 0)
		return error;
	wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK);
	wpi_nic_unlock(sc);
	/* Spin until VMAIN gets selected. */
	for (ntries = 0; ntries < 5000; ntries++) {
		if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN)
			break;
		DELAY(10);
	}
	if (ntries == 5000) {
		device_printf(sc->sc_dev, "timeout selecting power source\n");
		return ETIMEDOUT;
	}

	/* Perform adapter initialization. */
	wpi_nic_config(sc);

	/* Initialize RX ring. */
	if ((error = wpi_nic_lock(sc)) != 0)
		return error;
	/* Set physical address of RX ring. */
	WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr);
	/* Set physical address of RX read pointer. */
	WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr +
	    offsetof(struct wpi_shared, next));
	WPI_WRITE(sc, WPI_FH_RX_WPTR, 0);
	/* Enable RX. */
	WPI_WRITE(sc, WPI_FH_RX_CONFIG,
	    WPI_FH_RX_CONFIG_DMA_ENA |
	    WPI_FH_RX_CONFIG_RDRBD_ENA |
	    WPI_FH_RX_CONFIG_WRSTATUS_ENA |
	    WPI_FH_RX_CONFIG_MAXFRAG |
	    WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) |
	    WPI_FH_RX_CONFIG_IRQ_DST_HOST |
	    WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1));
	(void)WPI_READ(sc, WPI_FH_RSSR_TBL);	/* barrier */
	wpi_nic_unlock(sc);
	/* Write pointer rounded down to a multiple of 8 descriptors. */
	WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7);

	/* Initialize TX rings. */
	if ((error = wpi_nic_lock(sc)) != 0)
		return error;
	wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2);	/* bypass mode */
	wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1);	/* enable RA0 */
	/* Enable all 6 TX rings. */
	wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f);
	wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000);
	wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002);
	wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4);
	wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5);
	/* Set physical address of TX rings. */
	WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr);
	WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5);

	/* Enable all DMA channels. */
	for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
		WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0);
		WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0);
		WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008);
	}
	wpi_nic_unlock(sc);
	(void)WPI_READ(sc, WPI_FH_TX_BASE);	/* barrier */

	/* Clear "radio off" and "commands blocked" bits. */
	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED);

	/* Clear pending interrupts. */
	WPI_WRITE(sc, WPI_INT, 0xffffffff);
	/* Enable interrupts. */
	WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);

	/* _Really_ make sure "radio off" bit is cleared! */
	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
	WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);

	if ((error = wpi_load_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not load firmware, error %d\n", __func__,
		    error);
		return error;
	}
	/* Wait at most one second for firmware alive notification. */
	if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: timeout waiting for adapter to initialize, error %d\n",
		    __func__, error);
		return error;
	}

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	/* Do post-firmware initialization. */
	return wpi_post_alive(sc);
}

/*
 * Quiesce the hardware: mask interrupts, stop the TX scheduler and
 * all DMA channels, reset the rings and power the adapter off.
 */
static void
wpi_hw_stop(struct wpi_softc *sc)
{
	int chnl, qid, ntries;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	/* Wake the MAC before resetting it (lock result ignored here). */
	if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP)
		wpi_nic_lock(sc);

	WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO);

	/* Disable interrupts. */
	WPI_WRITE(sc, WPI_INT_MASK, 0);
	WPI_WRITE(sc, WPI_INT, 0xffffffff);
	WPI_WRITE(sc, WPI_FH_INT, 0xffffffff);

	/* Make sure we no longer hold the NIC lock. */
	wpi_nic_unlock(sc);

	if (wpi_nic_lock(sc) == 0) {
		/* Stop TX scheduler. */
		wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0);
		wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0);

		/* Stop all DMA channels. */
		for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
			WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0);
			for (ntries = 0; ntries < 200; ntries++) {
				if (WPI_READ(sc, WPI_FH_TX_STATUS) &
				    WPI_FH_TX_STATUS_IDLE(chnl))
					break;
				DELAY(10);
			}
		}
		wpi_nic_unlock(sc);
	}

	/* Stop RX ring.
 */
	wpi_reset_rx_ring(sc);

	/* Reset all TX rings. */
	for (qid = 0; qid < WPI_NTXQUEUES; qid++)
		wpi_reset_tx_ring(sc, &sc->txq[qid]);

	if (wpi_nic_lock(sc) == 0) {
		wpi_prph_write(sc, WPI_APMG_CLK_DIS,
		    WPI_APMG_CLK_CTRL_DMA_CLK_RQT);
		wpi_nic_unlock(sc);
	}
	DELAY(5);
	/* Power OFF adapter. */
	wpi_apm_stop(sc);
}

/*
 * Taskqueue handler: the RF-kill switch was turned on.  Bring the
 * device back up and stop the rfkill polling callout.  "pending" is
 * the taskqueue callback argument and is unused.
 */
static void
wpi_radio_on(void *arg0, int pending)
{
	struct wpi_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	device_printf(sc->sc_dev, "RF switch: radio enabled\n");

	if (vap != NULL) {
		wpi_init(sc);
		ieee80211_init(vap);
	}

	/* Stop polling only if the radio really is enabled now. */
	if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL) {
		WPI_LOCK(sc);
		callout_stop(&sc->watchdog_rfkill);
		WPI_UNLOCK(sc);
	}
}

/*
 * Taskqueue handler: the RF-kill switch was turned off.  Stop the
 * device and start polling for the switch being turned back on.
 */
static void
wpi_radio_off(void *arg0, int pending)
{
	struct wpi_softc *sc = arg0;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	device_printf(sc->sc_dev, "RF switch: radio disabled\n");

	wpi_stop(sc);
	if (vap != NULL)
		ieee80211_stop(vap);

	WPI_LOCK(sc);
	callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc);
	WPI_UNLOCK(sc);
}

/*
 * Bring the interface up: read and upload the firmware, initialize
 * the hardware, then configure the adapter and mark it running.
 * Also the if_init method (hence the void * argument).
 */
static void
wpi_init(void *arg)
{
	struct wpi_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	int error;

	WPI_LOCK(sc);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		goto end;	/* Already running. */

	/* Check that the radio is not disabled by hardware switch. */
	if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) {
		device_printf(sc->sc_dev,
		    "RF switch: radio disabled (%s)\n", __func__);
		/* Poll until the switch is flipped back. */
		callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
		    sc);
		goto end;
	}

	/* Read firmware images from the filesystem. */
	if ((error = wpi_read_firmware(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not read firmware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Initialize hardware and upload firmware. */
	error = wpi_hw_init(sc);
	wpi_unload_firmware(sc);	/* Image no longer needed either way. */
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not initialize hardware, error %d\n", __func__,
		    error);
		goto fail;
	}

	/* Configure adapter now that it is ready. */
	sc->txq_active = 1;
	if ((error = wpi_config(sc)) != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not configure device, error %d\n", __func__,
		    error);
		goto fail;
	}

	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	IF_UNLOCK(&ifp->if_snd);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);

	WPI_UNLOCK(sc);

	ieee80211_start_all(ic);

	return;

fail:	wpi_stop_locked(sc);
end:	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
	WPI_UNLOCK(sc);
}

/*
 * Bring the interface down: stop the TX queues and callouts, clear the
 * running flags and power off the hardware.  Caller holds the driver
 * lock (asserted below).
 */
static void
wpi_stop_locked(struct wpi_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	WPI_LOCK_ASSERT(sc);

	WPI_TXQ_LOCK(sc);
	sc->txq_active = 0;
	WPI_TXQ_UNLOCK(sc);

	WPI_TXQ_STATE_LOCK(sc);
	callout_stop(&sc->tx_timeout);
	WPI_TXQ_STATE_UNLOCK(sc);

	WPI_RXON_LOCK(sc);
	callout_stop(&sc->scan_timeout);
	callout_stop(&sc->calib_to);
	WPI_RXON_UNLOCK(sc);

	IF_LOCK(&ifp->if_snd);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	IF_UNLOCK(&ifp->if_snd);

	/* Power OFF hardware. */
	wpi_hw_stop(sc);
}

/* Locked wrapper around wpi_stop_locked(). */
static void
wpi_stop(struct wpi_softc *sc)
{
	WPI_LOCK(sc);
	wpi_stop_locked(sc);
	WPI_UNLOCK(sc);
}

/*
 * Callback from net80211 to start a scan.
 */
static void
wpi_scan_start(struct ieee80211com *ic)
{
	struct wpi_softc *sc = ic->ic_ifp->if_softc;

	/* Blink the link LED while scanning. */
	wpi_set_led(sc, WPI_LED_LINK, 20, 2);
}

/*
 * Callback from net80211 to terminate a scan.
 */
static void
wpi_scan_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct wpi_softc *sc = ifp->if_softc;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	/* Back to solid LED if we are still associated. */
	if (vap->iv_state == IEEE80211_S_RUN)
		wpi_set_led(sc, WPI_LED_LINK, 0, 1);
}

/**
 * Called by the net80211 framework to indicate to the driver
 * that the channel should be changed
 */
static void
wpi_set_channel(struct ieee80211com *ic)
{
	const struct ieee80211_channel *c = ic->ic_curchan;
	struct ifnet *ifp = ic->ic_ifp;
	struct wpi_softc *sc = ifp->if_softc;
	int error;

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	/* Keep the radiotap headers in sync with the current channel. */
	WPI_LOCK(sc);
	sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
	sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
	WPI_UNLOCK(sc);
	WPI_TX_LOCK(sc);
	sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
	sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
	WPI_TX_UNLOCK(sc);

	/*
	 * Only need to set the channel in Monitor mode. AP scanning and auth
	 * are already taken care of by their respective firmware commands.
	 */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		WPI_RXON_LOCK(sc);
		sc->rxon.chan = ieee80211_chan2ieee(ic, c);
		if (IEEE80211_IS_CHAN_2GHZ(c)) {
			sc->rxon.flags |= htole32(WPI_RXON_AUTO |
			    WPI_RXON_24GHZ);
		} else {
			sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
			    WPI_RXON_24GHZ);
		}
		if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
			device_printf(sc->sc_dev,
			    "%s: error %d setting channel\n", __func__,
			    error);
		WPI_RXON_UNLOCK(sc);
	}
}

/**
 * Called by net80211 to indicate that we need to scan the current
 * channel. The channel is previously be set via the wpi_set_channel
 * callback.
 */
static void
wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
	struct ieee80211vap *vap = ss->ss_vap;
	struct ieee80211com *ic = vap->iv_ic;
	struct wpi_softc *sc = ic->ic_ifp->if_softc;
	int error;

	WPI_RXON_LOCK(sc);
	error = wpi_scan(sc, ic->ic_curchan);
	WPI_RXON_UNLOCK(sc);
	/* If the firmware scan command failed, abort the net80211 scan. */
	if (error != 0)
		ieee80211_cancel_scan(vap);
}

/**
 * Called by the net80211 framework to indicate
 * the minimum dwell time has been met, terminate the scan.
 * We don't actually terminate the scan as the firmware will notify
 * us when it's finished and we have no way to interrupt it.
 */
static void
wpi_scan_mindwell(struct ieee80211_scan_state *ss)
{
	/* NB: don't try to abort scan; wait for firmware to finish */
}

/*
 * Taskqueue handler: perform a full restart of the device (stop, then
 * re-init), cancelling any scan in progress first.  "pending" is the
 * taskqueue callback argument and is unused.
 */
static void
wpi_hw_reset(void *arg, int pending)
{
	struct wpi_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

	DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);

	if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN))
		ieee80211_cancel_scan(vap);

	wpi_stop(sc);
	if (vap != NULL)
		ieee80211_stop(vap);
	wpi_init(sc);
	if (vap != NULL)
		ieee80211_init(vap);
}