1/* $OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $ */ 2 3/* 4 * Copyright (c) 2014 genua mbh <info@genua.de> 5 * Copyright (c) 2014 Fixup Software Ltd. 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20/*- 21 * Based on BSD-licensed source modules in the Linux iwlwifi driver, 22 * which were used as the reference documentation for this implementation. 23 * 24 * Driver version we are currently based off of is 25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd) 26 * 27 *********************************************************************** 28 * 29 * This file is provided under a dual BSD/GPLv2 license. When using or 30 * redistributing this file, you may do so under either license. 31 * 32 * GPL LICENSE SUMMARY 33 * 34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved. 35 * 36 * This program is free software; you can redistribute it and/or modify 37 * it under the terms of version 2 of the GNU General Public License as 38 * published by the Free Software Foundation. 39 * 40 * This program is distributed in the hope that it will be useful, but 41 * WITHOUT ANY WARRANTY; without even the implied warranty of 42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 43 * General Public License for more details. 44 * 45 * You should have received a copy of the GNU General Public License 46 * along with this program; if not, write to the Free Software 47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 48 * USA 49 * 50 * The full GNU General Public License is included in this distribution 51 * in the file called COPYING. 52 * 53 * Contact Information: 54 * Intel Linux Wireless <ilw@linux.intel.com> 55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 56 * 57 * 58 * BSD LICENSE 59 * 60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved. 61 * All rights reserved. 62 * 63 * Redistribution and use in source and binary forms, with or without 64 * modification, are permitted provided that the following conditions 65 * are met: 66 * 67 * * Redistributions of source code must retain the above copyright 68 * notice, this list of conditions and the following disclaimer. 69 * * Redistributions in binary form must reproduce the above copyright 70 * notice, this list of conditions and the following disclaimer in 71 * the documentation and/or other materials provided with the 72 * distribution. 73 * * Neither the name Intel Corporation nor the names of its 74 * contributors may be used to endorse or promote products derived 75 * from this software without specific prior written permission. 76 * 77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 88 */ 89 90/*- 91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr> 92 * 93 * Permission to use, copy, modify, and distribute this software for any 94 * purpose with or without fee is hereby granted, provided that the above 95 * copyright notice and this permission notice appear in all copies. 96 * 97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 104 */ 105#include <sys/cdefs.h> 106__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 346035 2019-04-08 17:55:04Z kevans $"); 107 108#include "opt_wlan.h" 109#include "opt_iwm.h" 110 111#include <sys/param.h> 112#include <sys/bus.h> 113#include <sys/conf.h> 114#include <sys/endian.h> 115#include <sys/firmware.h> 116#include <sys/kernel.h> 117#include <sys/malloc.h> 118#include <sys/mbuf.h> 119#include <sys/mutex.h> 120#include <sys/module.h> 121#include <sys/proc.h> 122#include <sys/rman.h> 123#include <sys/socket.h> 124#include <sys/sockio.h> 125#include <sys/sysctl.h> 126#include <sys/linker.h> 127 128#include <machine/bus.h> 129#include <machine/endian.h> 130#include <machine/resource.h> 131 132#include <dev/pci/pcivar.h> 133#include <dev/pci/pcireg.h> 134 135#include <net/bpf.h> 136 137#include <net/if.h> 138#include <net/if_var.h> 139#include <net/if_arp.h> 140#include <net/if_dl.h> 141#include <net/if_media.h> 142#include <net/if_types.h> 143 144#include <netinet/in.h> 145#include <netinet/in_systm.h> 146#include <netinet/if_ether.h> 147#include <netinet/ip.h> 148 149#include <net80211/ieee80211_var.h> 150#include <net80211/ieee80211_regdomain.h> 151#include <net80211/ieee80211_ratectl.h> 152#include <net80211/ieee80211_radiotap.h> 153 154#include <dev/iwm/if_iwmreg.h> 155#include <dev/iwm/if_iwmvar.h> 156#include <dev/iwm/if_iwm_config.h> 157#include <dev/iwm/if_iwm_debug.h> 158#include <dev/iwm/if_iwm_notif_wait.h> 159#include <dev/iwm/if_iwm_util.h> 160#include <dev/iwm/if_iwm_binding.h> 161#include <dev/iwm/if_iwm_phy_db.h> 162#include <dev/iwm/if_iwm_mac_ctxt.h> 163#include <dev/iwm/if_iwm_phy_ctxt.h> 164#include <dev/iwm/if_iwm_time_event.h> 165#include <dev/iwm/if_iwm_power.h> 166#include <dev/iwm/if_iwm_scan.h> 167#include <dev/iwm/if_iwm_sta.h> 168 169#include <dev/iwm/if_iwm_pcie_trans.h> 170#include <dev/iwm/if_iwm_led.h> 171#include <dev/iwm/if_iwm_fw.h> 172 173/* From DragonflyBSD */ 174#define mtodoff(m, t, off) ((t)((m)->m_data + (off))) 175 176const uint8_t iwm_nvm_channels[] = { 177 /* 2.4 GHz */ 178 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 179 /* 5 GHz */ 180 36, 40, 44, 48, 52, 56, 60, 64, 181 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 182 149, 153, 157, 161, 165 183}; 184_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS, 185 "IWM_NUM_CHANNELS is too small"); 186 187const uint8_t iwm_nvm_channels_8000[] = { 188 /* 2.4 GHz */ 189 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 190 /* 5 GHz */ 191 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 192 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 193 149, 153, 157, 161, 165, 169, 173, 177, 181 194}; 195_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000, 196 "IWM_NUM_CHANNELS_8000 is too small"); 197 198#define IWM_NUM_2GHZ_CHANNELS 14 199#define IWM_N_HW_ADDR_MASK 0xF 200 201/* 202 * XXX For now, there's simply a fixed set of rate table entries 203 * that are populated. 204 */ 205const struct iwm_rate { 206 uint8_t rate; 207 uint8_t plcp; 208} iwm_rates[] = { 209 { 2, IWM_RATE_1M_PLCP }, 210 { 4, IWM_RATE_2M_PLCP }, 211 { 11, IWM_RATE_5M_PLCP }, 212 { 22, IWM_RATE_11M_PLCP }, 213 { 12, IWM_RATE_6M_PLCP }, 214 { 18, IWM_RATE_9M_PLCP }, 215 { 24, IWM_RATE_12M_PLCP }, 216 { 36, IWM_RATE_18M_PLCP }, 217 { 48, IWM_RATE_24M_PLCP }, 218 { 72, IWM_RATE_36M_PLCP }, 219 { 96, IWM_RATE_48M_PLCP }, 220 { 108, IWM_RATE_54M_PLCP }, 221}; 222#define IWM_RIDX_CCK 0 223#define IWM_RIDX_OFDM 4 224#define IWM_RIDX_MAX (nitems(iwm_rates)-1) 225#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM) 226#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM) 227 228struct iwm_nvm_section { 229 uint16_t length; 230 uint8_t *data; 231}; 232 233#define IWM_MVM_UCODE_ALIVE_TIMEOUT hz 234#define IWM_MVM_UCODE_CALIB_TIMEOUT (2*hz) 235 236struct iwm_mvm_alive_data { 237 int valid; 238 uint32_t scd_base_addr; 239}; 240 241static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t); 242static int iwm_firmware_store_section(struct iwm_softc *, 243 enum iwm_ucode_type, 244 const uint8_t *, size_t); 245static int iwm_set_default_calib(struct iwm_softc *, const void *); 246static void iwm_fw_info_free(struct iwm_fw_info *); 247static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type); 248static int iwm_alloc_fwmem(struct iwm_softc *); 249static int iwm_alloc_sched(struct iwm_softc *); 250static int iwm_alloc_kw(struct iwm_softc *); 251static int iwm_alloc_ict(struct iwm_softc *); 252static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); 253static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); 254static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *); 255static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, 256 int); 257static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *); 258static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *); 259static void iwm_enable_interrupts(struct iwm_softc *); 260static void iwm_restore_interrupts(struct iwm_softc *); 261static void iwm_disable_interrupts(struct iwm_softc *); 262static void iwm_ict_reset(struct iwm_softc *); 263static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *); 264static void iwm_stop_device(struct iwm_softc *); 265static void iwm_mvm_nic_config(struct iwm_softc *); 266static int iwm_nic_rx_init(struct iwm_softc *); 267static int iwm_nic_tx_init(struct iwm_softc *); 268static int iwm_nic_init(struct iwm_softc *); 269static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t); 270static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, 271 uint16_t, uint8_t *, uint16_t *); 272static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *, 273 uint16_t *, uint32_t); 274static uint32_t iwm_eeprom_channel_flags(uint16_t); 275static void iwm_add_channel_band(struct iwm_softc *, 276 struct ieee80211_channel[], int, int *, int, size_t, 277 const uint8_t[]); 278static void iwm_init_channel_map(struct ieee80211com *, int, int *, 279 struct ieee80211_channel[]); 280static struct iwm_nvm_data * 281 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *, 282 const uint16_t *, const uint16_t *, 283 const uint16_t *, const uint16_t *, 284 const uint16_t *); 285static void iwm_free_nvm_data(struct iwm_nvm_data *); 286static void iwm_set_hw_address_family_8000(struct iwm_softc *, 287 struct iwm_nvm_data *, 288 const uint16_t *, 289 const uint16_t *); 290static int iwm_get_sku(const struct iwm_softc *, const uint16_t *, 291 const uint16_t *); 292static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *); 293static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *, 294 const uint16_t *); 295static int iwm_get_n_hw_addrs(const struct iwm_softc *, 296 const uint16_t *); 297static void iwm_set_radio_cfg(const struct iwm_softc *, 298 struct iwm_nvm_data *, uint32_t); 299static struct iwm_nvm_data * 300 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *); 301static int iwm_nvm_init(struct iwm_softc *); 302static int iwm_pcie_load_section(struct iwm_softc *, uint8_t, 303 const struct iwm_fw_desc *); 304static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t, 305 bus_addr_t, uint32_t); 306static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc, 307 const struct iwm_fw_sects *, 308 int, int *); 309static int iwm_pcie_load_cpu_sections(struct iwm_softc *, 310 const struct iwm_fw_sects *, 311 int, int *); 312static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *, 313 const struct iwm_fw_sects *); 314static int iwm_pcie_load_given_ucode(struct iwm_softc *, 315 const struct iwm_fw_sects *); 316static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *); 317static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t); 318static int iwm_send_phy_cfg_cmd(struct iwm_softc *); 319static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *, 320 enum iwm_ucode_type); 321static int iwm_run_init_mvm_ucode(struct iwm_softc *, int); 322static int iwm_rx_addbuf(struct iwm_softc *, int, int); 323static int iwm_mvm_get_signal_strength(struct iwm_softc *, 324 struct iwm_rx_phy_info *); 325static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *, 326 struct iwm_rx_packet *); 327static int iwm_get_noise(struct iwm_softc *, 328 const struct iwm_mvm_statistics_rx_non_phy *); 329static void iwm_mvm_handle_rx_statistics(struct iwm_softc *, 330 struct iwm_rx_packet *); 331static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *, 332 uint32_t, boolean_t); 333static int iwm_mvm_rx_tx_cmd_single(struct iwm_softc *, 334 struct iwm_rx_packet *, 335 struct iwm_node *); 336static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *); 337static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *); 338#if 0 339static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t, 340 uint16_t); 341#endif 342static const struct iwm_rate * 343 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *, 344 struct mbuf *, struct iwm_tx_cmd *); 345static int iwm_tx(struct iwm_softc *, struct mbuf *, 346 struct ieee80211_node *, int); 347static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *, 348 const struct ieee80211_bpf_params *); 349static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *); 350static int iwm_auth(struct ieee80211vap *, struct iwm_softc *); 351static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *); 352static int iwm_release(struct iwm_softc *, struct iwm_node *); 353static struct ieee80211_node * 354 iwm_node_alloc(struct ieee80211vap *, 355 const uint8_t[IEEE80211_ADDR_LEN]); 356static void iwm_setrates(struct iwm_softc *, struct iwm_node *); 357static int iwm_media_change(struct ifnet *); 358static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int); 359static void iwm_endscan_cb(void *, int); 360static void iwm_mvm_fill_sf_command(struct iwm_softc *, 361 struct iwm_sf_cfg_cmd *, 362 struct ieee80211_node *); 363static int iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state); 364static int iwm_send_bt_init_conf(struct iwm_softc *); 365static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *); 366static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *); 367static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *); 368static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t); 369static int iwm_init_hw(struct iwm_softc *); 370static void iwm_init(struct iwm_softc *); 371static void iwm_start(struct iwm_softc *); 372static void iwm_stop(struct iwm_softc *); 373static void iwm_watchdog(void *); 374static void iwm_parent(struct ieee80211com *); 375#ifdef IWM_DEBUG 376static const char * 377 iwm_desc_lookup(uint32_t); 378static void iwm_nic_error(struct iwm_softc *); 379static void iwm_nic_umac_error(struct iwm_softc *); 380#endif 381static void iwm_handle_rxb(struct iwm_softc *, struct mbuf *); 382static void iwm_notif_intr(struct iwm_softc *); 383static void iwm_intr(void *); 384static int iwm_attach(device_t); 385static int iwm_is_valid_ether_addr(uint8_t *); 386static void iwm_preinit(void *); 387static int iwm_detach_local(struct iwm_softc *sc, int); 388static void iwm_init_task(void *); 389static void iwm_radiotap_attach(struct iwm_softc *); 390static struct ieee80211vap * 391 iwm_vap_create(struct ieee80211com *, 392 const char [IFNAMSIZ], int, 393 enum ieee80211_opmode, int, 394 const uint8_t [IEEE80211_ADDR_LEN], 395 const uint8_t [IEEE80211_ADDR_LEN]); 396static void iwm_vap_delete(struct ieee80211vap *); 397static void iwm_xmit_queue_drain(struct iwm_softc *); 398static void iwm_scan_start(struct ieee80211com *); 399static void iwm_scan_end(struct ieee80211com *); 400static void iwm_update_mcast(struct ieee80211com *); 401static void iwm_set_channel(struct ieee80211com *); 402static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long); 403static void iwm_scan_mindwell(struct ieee80211_scan_state *); 404static int iwm_detach(device_t); 405 406static int iwm_lar_disable = 0; 407TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable); 408 409/* 410 * Firmware parser. 411 */ 412 413static int 414iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen) 415{ 416 const struct iwm_fw_cscheme_list *l = (const void *)data; 417 418 if (dlen < sizeof(*l) || 419 dlen < sizeof(l->size) + l->size * sizeof(*l->cs)) 420 return EINVAL; 421 422 /* we don't actually store anything for now, always use s/w crypto */ 423 424 return 0; 425} 426 427static int 428iwm_firmware_store_section(struct iwm_softc *sc, 429 enum iwm_ucode_type type, const uint8_t *data, size_t dlen) 430{ 431 struct iwm_fw_sects *fws; 432 struct iwm_fw_desc *fwone; 433 434 if (type >= IWM_UCODE_TYPE_MAX) 435 return EINVAL; 436 if (dlen < sizeof(uint32_t)) 437 return EINVAL; 438 439 fws = &sc->sc_fw.fw_sects[type]; 440 if (fws->fw_count >= IWM_UCODE_SECTION_MAX) 441 return EINVAL; 442 443 fwone = &fws->fw_sect[fws->fw_count]; 444 445 /* first 32bit are device load offset */ 446 memcpy(&fwone->offset, data, sizeof(uint32_t)); 447 448 /* rest is data */ 449 fwone->data = data + sizeof(uint32_t); 450 fwone->len = dlen - sizeof(uint32_t); 451 452 fws->fw_count++; 453 454 return 0; 455} 456 457#define IWM_DEFAULT_SCAN_CHANNELS 40 458 459/* iwlwifi: iwl-drv.c */ 460struct iwm_tlv_calib_data { 461 uint32_t ucode_type; 462 struct iwm_tlv_calib_ctrl calib; 463} __packed; 464 465static int 466iwm_set_default_calib(struct iwm_softc *sc, const void *data) 467{ 468 const struct iwm_tlv_calib_data *def_calib = data; 469 uint32_t ucode_type = le32toh(def_calib->ucode_type); 470 471 if (ucode_type >= IWM_UCODE_TYPE_MAX) { 472 device_printf(sc->sc_dev, 473 "Wrong ucode_type %u for default " 474 "calibration.\n", ucode_type); 475 return EINVAL; 476 } 477 478 sc->sc_default_calib[ucode_type].flow_trigger = 479 def_calib->calib.flow_trigger; 480 sc->sc_default_calib[ucode_type].event_trigger = 481 def_calib->calib.event_trigger; 482 483 return 0; 484} 485 486static int 487iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data, 488 struct iwm_ucode_capabilities *capa) 489{ 490 const struct iwm_ucode_api *ucode_api = (const void *)data; 491 uint32_t api_index = le32toh(ucode_api->api_index); 492 uint32_t api_flags = le32toh(ucode_api->api_flags); 493 int i; 494 495 if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) { 496 device_printf(sc->sc_dev, 497 "api flags index %d larger than supported by driver\n", 498 api_index); 499 /* don't return an error so we can load FW that has more bits */ 500 return 0; 501 } 502 503 for (i = 0; i < 32; i++) { 504 if (api_flags & (1U << i)) 505 setbit(capa->enabled_api, i + 32 * api_index); 506 } 507 508 return 0; 509} 510 511static int 512iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data, 513 struct iwm_ucode_capabilities *capa) 514{ 515 const struct iwm_ucode_capa *ucode_capa = (const void *)data; 516 uint32_t api_index = le32toh(ucode_capa->api_index); 517 uint32_t api_flags = le32toh(ucode_capa->api_capa); 518 int i; 519 520 if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) { 521 device_printf(sc->sc_dev, 522 "capa flags index %d larger than supported by driver\n", 523 api_index); 524 /* don't return an error so we can load FW that has more bits */ 525 return 0; 526 } 527 528 for (i = 0; i < 32; i++) { 529 if (api_flags & (1U << i)) 530 setbit(capa->enabled_capa, i + 32 * api_index); 531 } 532 533 return 0; 534} 535 536static void 537iwm_fw_info_free(struct iwm_fw_info *fw) 538{ 539 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD); 540 fw->fw_fp = NULL; 541 /* don't touch fw->fw_status */ 542 memset(fw->fw_sects, 0, sizeof(fw->fw_sects)); 543} 544 545static int 546iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type) 547{ 548 struct iwm_fw_info *fw = &sc->sc_fw; 549 const struct iwm_tlv_ucode_header *uhdr; 550 struct iwm_ucode_tlv tlv; 551 struct iwm_ucode_capabilities *capa = &sc->ucode_capa; 552 enum iwm_ucode_tlv_type tlv_type; 553 const struct firmware *fwp; 554 const uint8_t *data; 555 uint32_t usniffer_img; 556 uint32_t paging_mem_size; 557 int num_of_cpus; 558 int error = 0; 559 size_t len; 560 561 if (fw->fw_status == IWM_FW_STATUS_DONE && 562 ucode_type != IWM_UCODE_INIT) 563 return 0; 564 565 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS) 566 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0); 567 fw->fw_status = IWM_FW_STATUS_INPROGRESS; 568 569 if (fw->fw_fp != NULL) 570 iwm_fw_info_free(fw); 571 572 /* 573 * Load firmware into driver memory. 574 * fw_fp will be set. 575 */ 576 IWM_UNLOCK(sc); 577 fwp = firmware_get(sc->cfg->fw_name); 578 IWM_LOCK(sc); 579 if (fwp == NULL) { 580 device_printf(sc->sc_dev, 581 "could not read firmware %s (error %d)\n", 582 sc->cfg->fw_name, error); 583 goto out; 584 } 585 fw->fw_fp = fwp; 586 587 /* (Re-)Initialize default values. */ 588 capa->flags = 0; 589 capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH; 590 capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS; 591 memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa)); 592 memset(capa->enabled_api, 0, sizeof(capa->enabled_api)); 593 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc)); 594 595 /* 596 * Parse firmware contents 597 */ 598 599 uhdr = (const void *)fw->fw_fp->data; 600 if (*(const uint32_t *)fw->fw_fp->data != 0 601 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) { 602 device_printf(sc->sc_dev, "invalid firmware %s\n", 603 sc->cfg->fw_name); 604 error = EINVAL; 605 goto out; 606 } 607 608 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)", 609 IWM_UCODE_MAJOR(le32toh(uhdr->ver)), 610 IWM_UCODE_MINOR(le32toh(uhdr->ver)), 611 IWM_UCODE_API(le32toh(uhdr->ver))); 612 data = uhdr->data; 613 len = fw->fw_fp->datasize - sizeof(*uhdr); 614 615 while (len >= sizeof(tlv)) { 616 size_t tlv_len; 617 const void *tlv_data; 618 619 memcpy(&tlv, data, sizeof(tlv)); 620 tlv_len = le32toh(tlv.length); 621 tlv_type = le32toh(tlv.type); 622 623 len -= sizeof(tlv); 624 data += sizeof(tlv); 625 tlv_data = data; 626 627 if (len < tlv_len) { 628 device_printf(sc->sc_dev, 629 "firmware too short: %zu bytes\n", 630 len); 631 error = EINVAL; 632 goto parse_out; 633 } 634 635 switch ((int)tlv_type) { 636 case IWM_UCODE_TLV_PROBE_MAX_LEN: 637 if (tlv_len < sizeof(uint32_t)) { 638 device_printf(sc->sc_dev, 639 "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n", 640 __func__, 641 (int) tlv_len); 642 error = EINVAL; 643 goto parse_out; 644 } 645 capa->max_probe_length = 646 le32toh(*(const uint32_t *)tlv_data); 647 /* limit it to something sensible */ 648 if (capa->max_probe_length > 649 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) { 650 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV, 651 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN " 652 "ridiculous\n", __func__); 653 error = EINVAL; 654 goto parse_out; 655 } 656 break; 657 case IWM_UCODE_TLV_PAN: 658 if (tlv_len) { 659 device_printf(sc->sc_dev, 660 "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n", 661 __func__, 662 (int) tlv_len); 663 error = EINVAL; 664 goto parse_out; 665 } 666 capa->flags |= IWM_UCODE_TLV_FLAGS_PAN; 667 break; 668 case IWM_UCODE_TLV_FLAGS: 669 if (tlv_len < sizeof(uint32_t)) { 670 device_printf(sc->sc_dev, 671 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n", 672 __func__, 673 (int) tlv_len); 674 error = EINVAL; 675 goto parse_out; 676 } 677 /* 678 * Apparently there can be many flags, but Linux driver 679 * parses only the first one, and so do we. 680 * 681 * XXX: why does this override IWM_UCODE_TLV_PAN? 682 * Intentional or a bug? Observations from 683 * current firmware file: 684 * 1) TLV_PAN is parsed first 685 * 2) TLV_FLAGS contains TLV_FLAGS_PAN 686 * ==> this resets TLV_PAN to itself... hnnnk 687 */ 688 capa->flags = le32toh(*(const uint32_t *)tlv_data); 689 break; 690 case IWM_UCODE_TLV_CSCHEME: 691 if ((error = iwm_store_cscheme(sc, 692 tlv_data, tlv_len)) != 0) { 693 device_printf(sc->sc_dev, 694 "%s: iwm_store_cscheme(): returned %d\n", 695 __func__, 696 error); 697 goto parse_out; 698 } 699 break; 700 case IWM_UCODE_TLV_NUM_OF_CPU: 701 if (tlv_len != sizeof(uint32_t)) { 702 device_printf(sc->sc_dev, 703 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n", 704 __func__, 705 (int) tlv_len); 706 error = EINVAL; 707 goto parse_out; 708 } 709 num_of_cpus = le32toh(*(const uint32_t *)tlv_data); 710 if (num_of_cpus == 2) { 711 fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus = 712 TRUE; 713 fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus = 714 TRUE; 715 fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus = 716 TRUE; 717 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) { 718 device_printf(sc->sc_dev, 719 "%s: Driver supports only 1 or 2 CPUs\n", 720 __func__); 721 error = EINVAL; 722 goto parse_out; 723 } 724 break; 725 case IWM_UCODE_TLV_SEC_RT: 726 if ((error = iwm_firmware_store_section(sc, 727 IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) { 728 device_printf(sc->sc_dev, 729 "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n", 730 __func__, 731 error); 732 goto parse_out; 733 } 734 break; 735 case IWM_UCODE_TLV_SEC_INIT: 736 if ((error = iwm_firmware_store_section(sc, 737 IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) { 738 device_printf(sc->sc_dev, 739 "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n", 740 __func__, 741 error); 742 goto parse_out; 743 } 744 break; 745 case IWM_UCODE_TLV_SEC_WOWLAN: 746 if ((error = iwm_firmware_store_section(sc, 747 IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) { 748 device_printf(sc->sc_dev, 749 "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n", 750 __func__, 751 error); 752 goto parse_out; 753 } 754 break; 755 case IWM_UCODE_TLV_DEF_CALIB: 756 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) { 757 device_printf(sc->sc_dev, 758 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n", 759 __func__, 760 (int) tlv_len, 761 (int) sizeof(struct iwm_tlv_calib_data)); 762 error = EINVAL; 763 goto parse_out; 764 } 765 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) { 766 device_printf(sc->sc_dev, 767 "%s: iwm_set_default_calib() failed: %d\n", 768 __func__, 769 error); 770 goto parse_out; 771 } 772 break; 773 case IWM_UCODE_TLV_PHY_SKU: 774 if (tlv_len != sizeof(uint32_t)) { 775 error = EINVAL; 776 device_printf(sc->sc_dev, 777 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n", 778 __func__, 779 (int) tlv_len); 780 goto parse_out; 781 } 782 sc->sc_fw.phy_config = 783 le32toh(*(const uint32_t *)tlv_data); 784 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config & 785 IWM_FW_PHY_CFG_TX_CHAIN) >> 786 IWM_FW_PHY_CFG_TX_CHAIN_POS; 787 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config & 788 IWM_FW_PHY_CFG_RX_CHAIN) >> 789 IWM_FW_PHY_CFG_RX_CHAIN_POS; 790 break; 791 792 case IWM_UCODE_TLV_API_CHANGES_SET: { 793 if (tlv_len != sizeof(struct iwm_ucode_api)) { 794 error = EINVAL; 795 goto parse_out; 796 } 797 if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) { 798 error = EINVAL; 799 goto parse_out; 800 } 801 break; 802 } 803 804 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: { 805 if (tlv_len != sizeof(struct iwm_ucode_capa)) { 806 error = EINVAL; 807 goto parse_out; 808 } 809 if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) { 810 error = EINVAL; 811 goto parse_out; 812 } 813 break; 814 } 815 816 case 48: /* undocumented TLV */ 817 case IWM_UCODE_TLV_SDIO_ADMA_ADDR: 818 case IWM_UCODE_TLV_FW_GSCAN_CAPA: 819 /* ignore, not used by current driver */ 820 break; 821 822 case IWM_UCODE_TLV_SEC_RT_USNIFFER: 823 if ((error = iwm_firmware_store_section(sc, 824 IWM_UCODE_REGULAR_USNIFFER, tlv_data, 825 tlv_len)) != 0) 826 goto parse_out; 827 break; 828 829 case IWM_UCODE_TLV_PAGING: 830 if (tlv_len != sizeof(uint32_t)) { 831 error = EINVAL; 832 goto parse_out; 833 } 834 paging_mem_size = le32toh(*(const uint32_t *)tlv_data); 835 836 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV, 837 "%s: Paging: paging enabled (size = %u bytes)\n", 838 __func__, paging_mem_size); 839 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) { 840 device_printf(sc->sc_dev, 841 "%s: Paging: driver supports up to %u bytes for paging image\n", 842 __func__, IWM_MAX_PAGING_IMAGE_SIZE); 843 error = EINVAL; 844 goto out; 845 } 846 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) { 847 device_printf(sc->sc_dev, 848 "%s: Paging: image isn't multiple %u\n", 849 __func__, IWM_FW_PAGING_SIZE); 850 error = EINVAL; 851 goto out; 852 } 853 854 sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size = 855 paging_mem_size; 856 usniffer_img = IWM_UCODE_REGULAR_USNIFFER; 857 sc->sc_fw.fw_sects[usniffer_img].paging_mem_size = 858 paging_mem_size; 859 break; 860 861 case IWM_UCODE_TLV_N_SCAN_CHANNELS: 862 if (tlv_len != sizeof(uint32_t)) { 863 error = EINVAL; 864 goto parse_out; 865 } 866 capa->n_scan_channels = 867 le32toh(*(const uint32_t *)tlv_data); 868 break; 869 870 case IWM_UCODE_TLV_FW_VERSION: 871 if (tlv_len != sizeof(uint32_t) * 3) { 872 error = EINVAL; 873 goto parse_out; 874 } 875 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), 876 "%d.%d.%d", 877 le32toh(((const uint32_t *)tlv_data)[0]), 878 le32toh(((const uint32_t *)tlv_data)[1]), 879 le32toh(((const uint32_t *)tlv_data)[2])); 880 break; 881 882 case IWM_UCODE_TLV_FW_MEM_SEG: 883 break; 884 885 default: 886 device_printf(sc->sc_dev, 887 "%s: unknown firmware section %d, abort\n", 888 __func__, tlv_type); 889 error = EINVAL; 890 goto parse_out; 891 } 892 893 len -= roundup(tlv_len, 4); 894 data += roundup(tlv_len, 4); 895 } 896 897 KASSERT(error == 0, ("unhandled error")); 898 899 parse_out: 900 if (error) { 901 device_printf(sc->sc_dev, "firmware parse error %d, " 902 "section type %d\n", error, tlv_type); 903 } 904 905 out: 906 if (error) { 907 fw->fw_status = IWM_FW_STATUS_NONE; 908 if (fw->fw_fp != NULL) 909 iwm_fw_info_free(fw); 910 } else 911 fw->fw_status = IWM_FW_STATUS_DONE; 912 wakeup(&sc->sc_fw); 913 914 return error; 915} 916 917/* 918 * DMA resource routines 919 */ 920 921/* fwmem is used to load firmware onto the card */ 922static int 923iwm_alloc_fwmem(struct iwm_softc *sc) 924{ 925 /* Must be aligned on a 16-byte boundary. */ 926 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, 927 IWM_FH_MEM_TB_MAX_LENGTH, 16); 928} 929 930/* tx scheduler rings. not used? */ 931static int 932iwm_alloc_sched(struct iwm_softc *sc) 933{ 934 /* TX scheduler rings must be aligned on a 1KB boundary. */ 935 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma, 936 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024); 937} 938 939/* keep-warm page is used internally by the card. see iwl-fh.h for more info */ 940static int 941iwm_alloc_kw(struct iwm_softc *sc) 942{ 943 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096); 944} 945 946/* interrupt cause table */ 947static int 948iwm_alloc_ict(struct iwm_softc *sc) 949{ 950 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, 951 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT); 952} 953 954static int 955iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) 956{ 957 bus_size_t size; 958 int i, error; 959 960 ring->cur = 0; 961 962 /* Allocate RX descriptors (256-byte aligned). */ 963 size = IWM_RX_RING_COUNT * sizeof(uint32_t); 964 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256); 965 if (error != 0) { 966 device_printf(sc->sc_dev, 967 "could not allocate RX ring DMA memory\n"); 968 goto fail; 969 } 970 ring->desc = ring->desc_dma.vaddr; 971 972 /* Allocate RX status area (16-byte aligned). */ 973 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, 974 sizeof(*ring->stat), 16); 975 if (error != 0) { 976 device_printf(sc->sc_dev, 977 "could not allocate RX status DMA memory\n"); 978 goto fail; 979 } 980 ring->stat = ring->stat_dma.vaddr; 981 982 /* Create RX buffer DMA tag. */ 983 error = bus_dma_tag_create(sc->sc_dmat, 1, 0, 984 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 985 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat); 986 if (error != 0) { 987 device_printf(sc->sc_dev, 988 "%s: could not create RX buf DMA tag, error %d\n", 989 __func__, error); 990 goto fail; 991 } 992 993 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */ 994 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map); 995 if (error != 0) { 996 device_printf(sc->sc_dev, 997 "%s: could not create RX buf DMA map, error %d\n", 998 __func__, error); 999 goto fail; 1000 } 1001 /* 1002 * Allocate and map RX buffers. 1003 */ 1004 for (i = 0; i < IWM_RX_RING_COUNT; i++) { 1005 struct iwm_rx_data *data = &ring->data[i]; 1006 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1007 if (error != 0) { 1008 device_printf(sc->sc_dev, 1009 "%s: could not create RX buf DMA map, error %d\n", 1010 __func__, error); 1011 goto fail; 1012 } 1013 data->m = NULL; 1014 1015 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) { 1016 goto fail; 1017 } 1018 } 1019 return 0; 1020 1021fail: iwm_free_rx_ring(sc, ring); 1022 return error; 1023} 1024 1025static void 1026iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) 1027{ 1028 /* Reset the ring state */ 1029 ring->cur = 0; 1030 1031 /* 1032 * The hw rx ring index in shared memory must also be cleared, 1033 * otherwise the discrepancy can cause reprocessing chaos. 1034 */ 1035 if (sc->rxq.stat) 1036 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat)); 1037} 1038 1039static void 1040iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring) 1041{ 1042 int i; 1043 1044 iwm_dma_contig_free(&ring->desc_dma); 1045 iwm_dma_contig_free(&ring->stat_dma); 1046 1047 for (i = 0; i < IWM_RX_RING_COUNT; i++) { 1048 struct iwm_rx_data *data = &ring->data[i]; 1049 1050 if (data->m != NULL) { 1051 bus_dmamap_sync(ring->data_dmat, data->map, 1052 BUS_DMASYNC_POSTREAD); 1053 bus_dmamap_unload(ring->data_dmat, data->map); 1054 m_freem(data->m); 1055 data->m = NULL; 1056 } 1057 if (data->map != NULL) { 1058 bus_dmamap_destroy(ring->data_dmat, data->map); 1059 data->map = NULL; 1060 } 1061 } 1062 if (ring->spare_map != NULL) { 1063 bus_dmamap_destroy(ring->data_dmat, ring->spare_map); 1064 ring->spare_map = NULL; 1065 } 1066 if (ring->data_dmat != NULL) { 1067 bus_dma_tag_destroy(ring->data_dmat); 1068 ring->data_dmat = NULL; 1069 } 1070} 1071 1072static int 1073iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid) 1074{ 1075 bus_addr_t paddr; 1076 bus_size_t size; 1077 size_t maxsize; 1078 int nsegments; 1079 int i, error; 1080 1081 ring->qid = qid; 1082 ring->queued = 0; 1083 ring->cur = 0; 1084 1085 /* Allocate TX descriptors (256-byte aligned). */ 1086 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd); 1087 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256); 1088 if (error != 0) { 1089 device_printf(sc->sc_dev, 1090 "could not allocate TX ring DMA memory\n"); 1091 goto fail; 1092 } 1093 ring->desc = ring->desc_dma.vaddr; 1094 1095 /* 1096 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need 1097 * to allocate commands space for other rings. 1098 */ 1099 if (qid > IWM_MVM_CMD_QUEUE) 1100 return 0; 1101 1102 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd); 1103 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4); 1104 if (error != 0) { 1105 device_printf(sc->sc_dev, 1106 "could not allocate TX cmd DMA memory\n"); 1107 goto fail; 1108 } 1109 ring->cmd = ring->cmd_dma.vaddr; 1110 1111 /* FW commands may require more mapped space than packets. */ 1112 if (qid == IWM_MVM_CMD_QUEUE) { 1113 maxsize = IWM_RBUF_SIZE; 1114 nsegments = 1; 1115 } else { 1116 maxsize = MCLBYTES; 1117 nsegments = IWM_MAX_SCATTER - 2; 1118 } 1119 1120 error = bus_dma_tag_create(sc->sc_dmat, 1, 0, 1121 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1122 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat); 1123 if (error != 0) { 1124 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n"); 1125 goto fail; 1126 } 1127 1128 paddr = ring->cmd_dma.paddr; 1129 for (i = 0; i < IWM_TX_RING_COUNT; i++) { 1130 struct iwm_tx_data *data = &ring->data[i]; 1131 1132 data->cmd_paddr = paddr; 1133 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header) 1134 + offsetof(struct iwm_tx_cmd, scratch); 1135 paddr += sizeof(struct iwm_device_cmd); 1136 1137 error = bus_dmamap_create(ring->data_dmat, 0, &data->map); 1138 if (error != 0) { 1139 device_printf(sc->sc_dev, 1140 "could not create TX buf DMA map\n"); 1141 goto fail; 1142 } 1143 } 1144 KASSERT(paddr == ring->cmd_dma.paddr + size, 1145 ("invalid physical address")); 1146 return 0; 1147 1148fail: iwm_free_tx_ring(sc, ring); 1149 return error; 1150} 1151 1152static void 1153iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring) 1154{ 1155 int i; 1156 1157 for (i = 0; i < IWM_TX_RING_COUNT; i++) { 1158 struct iwm_tx_data *data = &ring->data[i]; 1159 1160 if (data->m != NULL) { 1161 bus_dmamap_sync(ring->data_dmat, data->map, 1162 BUS_DMASYNC_POSTWRITE); 1163 bus_dmamap_unload(ring->data_dmat, data->map); 1164 m_freem(data->m); 1165 data->m = NULL; 1166 } 1167 } 1168 /* Clear TX descriptors. */ 1169 memset(ring->desc, 0, ring->desc_dma.size); 1170 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 1171 BUS_DMASYNC_PREWRITE); 1172 sc->qfullmsk &= ~(1 << ring->qid); 1173 ring->queued = 0; 1174 ring->cur = 0; 1175 1176 if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake) 1177 iwm_pcie_clear_cmd_in_flight(sc); 1178} 1179 1180static void 1181iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring) 1182{ 1183 int i; 1184 1185 iwm_dma_contig_free(&ring->desc_dma); 1186 iwm_dma_contig_free(&ring->cmd_dma); 1187 1188 for (i = 0; i < IWM_TX_RING_COUNT; i++) { 1189 struct iwm_tx_data *data = &ring->data[i]; 1190 1191 if (data->m != NULL) { 1192 bus_dmamap_sync(ring->data_dmat, data->map, 1193 BUS_DMASYNC_POSTWRITE); 1194 bus_dmamap_unload(ring->data_dmat, data->map); 1195 m_freem(data->m); 1196 data->m = NULL; 1197 } 1198 if (data->map != NULL) { 1199 bus_dmamap_destroy(ring->data_dmat, data->map); 1200 data->map = NULL; 1201 } 1202 } 1203 if (ring->data_dmat != NULL) { 1204 bus_dma_tag_destroy(ring->data_dmat); 1205 ring->data_dmat = NULL; 1206 } 1207} 1208 1209/* 1210 * High-level hardware frobbing routines 1211 */ 1212 1213static void 1214iwm_enable_interrupts(struct iwm_softc *sc) 1215{ 1216 sc->sc_intmask = IWM_CSR_INI_SET_MASK; 1217 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 1218} 1219 1220static void 1221iwm_restore_interrupts(struct iwm_softc *sc) 1222{ 1223 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 1224} 1225 1226static void 1227iwm_disable_interrupts(struct iwm_softc *sc) 1228{ 1229 /* disable interrupts */ 1230 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0); 1231 1232 /* acknowledge all interrupts */ 1233 IWM_WRITE(sc, IWM_CSR_INT, ~0); 1234 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0); 1235} 1236 1237static void 1238iwm_ict_reset(struct iwm_softc *sc) 1239{ 1240 iwm_disable_interrupts(sc); 1241 1242 /* Reset ICT table. */ 1243 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE); 1244 sc->ict_cur = 0; 1245 1246 /* Set physical address of ICT table (4KB aligned). */ 1247 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG, 1248 IWM_CSR_DRAM_INT_TBL_ENABLE 1249 | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER 1250 | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK 1251 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT); 1252 1253 /* Switch to ICT interrupt mode in driver. */ 1254 sc->sc_flags |= IWM_FLAG_USE_ICT; 1255 1256 /* Re-enable interrupts. */ 1257 IWM_WRITE(sc, IWM_CSR_INT, ~0); 1258 iwm_enable_interrupts(sc); 1259} 1260 1261/* iwlwifi pcie/trans.c */ 1262 1263/* 1264 * Since this .. hard-resets things, it's time to actually 1265 * mark the first vap (if any) as having no mac context. 1266 * It's annoying, but since the driver is potentially being 1267 * stop/start'ed whilst active (thanks openbsd port!) we 1268 * have to correctly track this. 1269 */ 1270static void 1271iwm_stop_device(struct iwm_softc *sc) 1272{ 1273 struct ieee80211com *ic = &sc->sc_ic; 1274 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 1275 int chnl, qid; 1276 uint32_t mask = 0; 1277 1278 /* tell the device to stop sending interrupts */ 1279 iwm_disable_interrupts(sc); 1280 1281 /* 1282 * FreeBSD-local: mark the first vap as not-uploaded, 1283 * so the next transition through auth/assoc 1284 * will correctly populate the MAC context. 1285 */ 1286 if (vap) { 1287 struct iwm_vap *iv = IWM_VAP(vap); 1288 iv->phy_ctxt = NULL; 1289 iv->is_uploaded = 0; 1290 } 1291 1292 /* device going down, Stop using ICT table */ 1293 sc->sc_flags &= ~IWM_FLAG_USE_ICT; 1294 1295 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */ 1296 1297 if (iwm_nic_lock(sc)) { 1298 iwm_write_prph(sc, IWM_SCD_TXFACT, 0); 1299 1300 /* Stop each Tx DMA channel */ 1301 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) { 1302 IWM_WRITE(sc, 1303 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0); 1304 mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl); 1305 } 1306 1307 /* Wait for DMA channels to be idle */ 1308 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask, 1309 5000)) { 1310 device_printf(sc->sc_dev, 1311 "Failing on timeout while stopping DMA channel: [0x%08x]\n", 1312 IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG)); 1313 } 1314 iwm_nic_unlock(sc); 1315 } 1316 iwm_pcie_rx_stop(sc); 1317 1318 /* Stop RX ring. */ 1319 iwm_reset_rx_ring(sc, &sc->rxq); 1320 1321 /* Reset all TX rings. */ 1322 for (qid = 0; qid < nitems(sc->txq); qid++) 1323 iwm_reset_tx_ring(sc, &sc->txq[qid]); 1324 1325 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 1326 /* Power-down device's busmaster DMA clocks */ 1327 if (iwm_nic_lock(sc)) { 1328 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, 1329 IWM_APMG_CLK_VAL_DMA_CLK_RQT); 1330 iwm_nic_unlock(sc); 1331 } 1332 DELAY(5); 1333 } 1334 1335 /* Make sure (redundant) we've released our request to stay awake */ 1336 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL, 1337 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1338 1339 /* Stop the device, and put it in low power state */ 1340 iwm_apm_stop(sc); 1341 1342 /* Upon stop, the APM issues an interrupt if HW RF kill is set. 1343 * Clean again the interrupt here 1344 */ 1345 iwm_disable_interrupts(sc); 1346 /* stop and reset the on-board processor */ 1347 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET); 1348 1349 /* 1350 * Even if we stop the HW, we still want the RF kill 1351 * interrupt 1352 */ 1353 iwm_enable_rfkill_int(sc); 1354 iwm_check_rfkill(sc); 1355} 1356 1357/* iwlwifi: mvm/ops.c */ 1358static void 1359iwm_mvm_nic_config(struct iwm_softc *sc) 1360{ 1361 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash; 1362 uint32_t reg_val = 0; 1363 uint32_t phy_config = iwm_mvm_get_phy_config(sc); 1364 1365 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >> 1366 IWM_FW_PHY_CFG_RADIO_TYPE_POS; 1367 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >> 1368 IWM_FW_PHY_CFG_RADIO_STEP_POS; 1369 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >> 1370 IWM_FW_PHY_CFG_RADIO_DASH_POS; 1371 1372 /* SKU control */ 1373 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) << 1374 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; 1375 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) << 1376 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; 1377 1378 /* radio configuration */ 1379 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; 1380 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP; 1381 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; 1382 1383 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val); 1384 1385 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 1386 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, 1387 radio_cfg_step, radio_cfg_dash); 1388 1389 /* 1390 * W/A : NIC is stuck in a reset state after Early PCIe power off 1391 * (PCIe power is lost before PERST# is asserted), causing ME FW 1392 * to lose ownership and not being able to obtain it back. 1393 */ 1394 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 1395 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG, 1396 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, 1397 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); 1398 } 1399} 1400 1401static int 1402iwm_nic_rx_init(struct iwm_softc *sc) 1403{ 1404 /* 1405 * Initialize RX ring. This is from the iwn driver. 1406 */ 1407 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat)); 1408 1409 /* Stop Rx DMA */ 1410 iwm_pcie_rx_stop(sc); 1411 1412 if (!iwm_nic_lock(sc)) 1413 return EBUSY; 1414 1415 /* reset and flush pointers */ 1416 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); 1417 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); 1418 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0); 1419 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 1420 1421 /* Set physical address of RX ring (256-byte aligned). */ 1422 IWM_WRITE(sc, 1423 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8); 1424 1425 /* Set physical address of RX status (16-byte aligned). */ 1426 IWM_WRITE(sc, 1427 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4); 1428 1429 /* Enable Rx DMA 1430 * XXX 5000 HW isn't supported by the iwm(4) driver. 1431 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in 1432 * the credit mechanism in 5000 HW RX FIFO 1433 * Direct rx interrupts to hosts 1434 * Rx buffer size 4 or 8k or 12k 1435 * RB timeout 0x10 1436 * 256 RBDs 1437 */ 1438 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 1439 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 1440 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */ 1441 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 1442 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K | 1443 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | 1444 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS); 1445 1446 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF); 1447 1448 /* W/A for interrupt coalescing bug in 7260 and 3160 */ 1449 if (sc->cfg->host_interrupt_operation_mode) 1450 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE); 1451 1452 /* 1453 * Thus sayeth el jefe (iwlwifi) via a comment: 1454 * 1455 * This value should initially be 0 (before preparing any 1456 * RBs), should be 8 after preparing the first 8 RBs (for example) 1457 */ 1458 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8); 1459 1460 iwm_nic_unlock(sc); 1461 1462 return 0; 1463} 1464 1465static int 1466iwm_nic_tx_init(struct iwm_softc *sc) 1467{ 1468 int qid; 1469 1470 if (!iwm_nic_lock(sc)) 1471 return EBUSY; 1472 1473 /* Deactivate TX scheduler. */ 1474 iwm_write_prph(sc, IWM_SCD_TXFACT, 0); 1475 1476 /* Set physical address of "keep warm" page (16-byte aligned). */ 1477 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4); 1478 1479 /* Initialize TX rings. */ 1480 for (qid = 0; qid < nitems(sc->txq); qid++) { 1481 struct iwm_tx_ring *txq = &sc->txq[qid]; 1482 1483 /* Set physical address of TX ring (256-byte aligned). */ 1484 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid), 1485 txq->desc_dma.paddr >> 8); 1486 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 1487 "%s: loading ring %d descriptors (%p) at %lx\n", 1488 __func__, 1489 qid, txq->desc, 1490 (unsigned long) (txq->desc_dma.paddr >> 8)); 1491 } 1492 1493 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE); 1494 1495 iwm_nic_unlock(sc); 1496 1497 return 0; 1498} 1499 1500static int 1501iwm_nic_init(struct iwm_softc *sc) 1502{ 1503 int error; 1504 1505 iwm_apm_init(sc); 1506 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 1507 iwm_set_pwr(sc); 1508 1509 iwm_mvm_nic_config(sc); 1510 1511 if ((error = iwm_nic_rx_init(sc)) != 0) 1512 return error; 1513 1514 /* 1515 * Ditto for TX, from iwn 1516 */ 1517 if ((error = iwm_nic_tx_init(sc)) != 0) 1518 return error; 1519 1520 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 1521 "%s: shadow registers enabled\n", __func__); 1522 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff); 1523 1524 return 0; 1525} 1526 1527int 1528iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo) 1529{ 1530 if (!iwm_nic_lock(sc)) { 1531 device_printf(sc->sc_dev, 1532 "%s: cannot enable txq %d\n", 1533 __func__, 1534 qid); 1535 return EBUSY; 1536 } 1537 1538 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0); 1539 1540 if (qid == IWM_MVM_CMD_QUEUE) { 1541 /* unactivate before configuration */ 1542 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid), 1543 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) 1544 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 1545 1546 iwm_nic_unlock(sc); 1547 1548 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid)); 1549 1550 if (!iwm_nic_lock(sc)) { 1551 device_printf(sc->sc_dev, 1552 "%s: cannot enable txq %d\n", __func__, qid); 1553 return EBUSY; 1554 } 1555 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0); 1556 iwm_nic_unlock(sc); 1557 1558 iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0); 1559 /* Set scheduler window size and frame limit. */ 1560 iwm_write_mem32(sc, 1561 sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) + 1562 sizeof(uint32_t), 1563 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 1564 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 1565 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 1566 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 1567 1568 if (!iwm_nic_lock(sc)) { 1569 device_printf(sc->sc_dev, 1570 "%s: cannot enable txq %d\n", __func__, qid); 1571 return EBUSY; 1572 } 1573 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid), 1574 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1575 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) | 1576 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) | 1577 IWM_SCD_QUEUE_STTS_REG_MSK); 1578 } else { 1579 struct iwm_scd_txq_cfg_cmd cmd; 1580 int error; 1581 1582 iwm_nic_unlock(sc); 1583 1584 memset(&cmd, 0, sizeof(cmd)); 1585 cmd.scd_queue = qid; 1586 cmd.enable = 1; 1587 cmd.sta_id = sta_id; 1588 cmd.tx_fifo = fifo; 1589 cmd.aggregate = 0; 1590 cmd.window = IWM_FRAME_LIMIT; 1591 1592 error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC, 1593 sizeof(cmd), &cmd); 1594 if (error) { 1595 device_printf(sc->sc_dev, 1596 "cannot enable txq %d\n", qid); 1597 return error; 1598 } 1599 1600 if (!iwm_nic_lock(sc)) 1601 return EBUSY; 1602 } 1603 1604 iwm_write_prph(sc, IWM_SCD_EN_CTRL, 1605 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid); 1606 1607 iwm_nic_unlock(sc); 1608 1609 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n", 1610 __func__, qid, fifo); 1611 1612 return 0; 1613} 1614 1615static int 1616iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr) 1617{ 1618 int error, chnl; 1619 1620 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND - 1621 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t); 1622 1623 if (!iwm_nic_lock(sc)) 1624 return EBUSY; 1625 1626 iwm_ict_reset(sc); 1627 1628 sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR); 1629 if (scd_base_addr != 0 && 1630 scd_base_addr != sc->scd_base_addr) { 1631 device_printf(sc->sc_dev, 1632 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n", 1633 __func__, sc->scd_base_addr, scd_base_addr); 1634 } 1635 1636 iwm_nic_unlock(sc); 1637 1638 /* reset context data, TX status and translation data */ 1639 error = iwm_write_mem(sc, 1640 sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, 1641 NULL, clear_dwords); 1642 if (error) 1643 return EBUSY; 1644 1645 if (!iwm_nic_lock(sc)) 1646 return EBUSY; 1647 1648 /* Set physical address of TX scheduler rings (1KB aligned). */ 1649 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10); 1650 1651 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0); 1652 1653 iwm_nic_unlock(sc); 1654 1655 /* enable command channel */ 1656 error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7); 1657 if (error) 1658 return error; 1659 1660 if (!iwm_nic_lock(sc)) 1661 return EBUSY; 1662 1663 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff); 1664 1665 /* Enable DMA channels. */ 1666 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) { 1667 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 1668 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 1669 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 1670 } 1671 1672 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG, 1673 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 1674 1675 iwm_nic_unlock(sc); 1676 1677 /* Enable L1-Active */ 1678 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 1679 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG, 1680 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1681 } 1682 1683 return error; 1684} 1685 1686/* 1687 * NVM read access and content parsing. We do not support 1688 * external NVM or writing NVM. 1689 * iwlwifi/mvm/nvm.c 1690 */ 1691 1692/* Default NVM size to read */ 1693#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024) 1694 1695#define IWM_NVM_WRITE_OPCODE 1 1696#define IWM_NVM_READ_OPCODE 0 1697 1698/* load nvm chunk response */ 1699enum { 1700 IWM_READ_NVM_CHUNK_SUCCEED = 0, 1701 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1 1702}; 1703 1704static int 1705iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, 1706 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len) 1707{ 1708 struct iwm_nvm_access_cmd nvm_access_cmd = { 1709 .offset = htole16(offset), 1710 .length = htole16(length), 1711 .type = htole16(section), 1712 .op_code = IWM_NVM_READ_OPCODE, 1713 }; 1714 struct iwm_nvm_access_resp *nvm_resp; 1715 struct iwm_rx_packet *pkt; 1716 struct iwm_host_cmd cmd = { 1717 .id = IWM_NVM_ACCESS_CMD, 1718 .flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL, 1719 .data = { &nvm_access_cmd, }, 1720 }; 1721 int ret, bytes_read, offset_read; 1722 uint8_t *resp_data; 1723 1724 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd); 1725 1726 ret = iwm_send_cmd(sc, &cmd); 1727 if (ret) { 1728 device_printf(sc->sc_dev, 1729 "Could not send NVM_ACCESS command (error=%d)\n", ret); 1730 return ret; 1731 } 1732 1733 pkt = cmd.resp_pkt; 1734 1735 /* Extract NVM response */ 1736 nvm_resp = (void *)pkt->data; 1737 ret = le16toh(nvm_resp->status); 1738 bytes_read = le16toh(nvm_resp->length); 1739 offset_read = le16toh(nvm_resp->offset); 1740 resp_data = nvm_resp->data; 1741 if (ret) { 1742 if ((offset != 0) && 1743 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) { 1744 /* 1745 * meaning of NOT_VALID_ADDRESS: 1746 * driver try to read chunk from address that is 1747 * multiple of 2K and got an error since addr is empty. 1748 * meaning of (offset != 0): driver already 1749 * read valid data from another chunk so this case 1750 * is not an error. 1751 */ 1752 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1753 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n", 1754 offset); 1755 *len = 0; 1756 ret = 0; 1757 } else { 1758 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1759 "NVM access command failed with status %d\n", ret); 1760 ret = EIO; 1761 } 1762 goto exit; 1763 } 1764 1765 if (offset_read != offset) { 1766 device_printf(sc->sc_dev, 1767 "NVM ACCESS response with invalid offset %d\n", 1768 offset_read); 1769 ret = EINVAL; 1770 goto exit; 1771 } 1772 1773 if (bytes_read > length) { 1774 device_printf(sc->sc_dev, 1775 "NVM ACCESS response with too much data " 1776 "(%d bytes requested, %d bytes received)\n", 1777 length, bytes_read); 1778 ret = EINVAL; 1779 goto exit; 1780 } 1781 1782 /* Write data to NVM */ 1783 memcpy(data + offset, resp_data, bytes_read); 1784 *len = bytes_read; 1785 1786 exit: 1787 iwm_free_resp(sc, &cmd); 1788 return ret; 1789} 1790 1791/* 1792 * Reads an NVM section completely. 1793 * NICs prior to 7000 family don't have a real NVM, but just read 1794 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited 1795 * by uCode, we need to manually check in this case that we don't 1796 * overflow and try to read more than the EEPROM size. 1797 * For 7000 family NICs, we supply the maximal size we can read, and 1798 * the uCode fills the response with as much data as we can, 1799 * without overflowing, so no check is needed. 1800 */ 1801static int 1802iwm_nvm_read_section(struct iwm_softc *sc, 1803 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read) 1804{ 1805 uint16_t seglen, length, offset = 0; 1806 int ret; 1807 1808 /* Set nvm section read length */ 1809 length = IWM_NVM_DEFAULT_CHUNK_SIZE; 1810 1811 seglen = length; 1812 1813 /* Read the NVM until exhausted (reading less than requested) */ 1814 while (seglen == length) { 1815 /* Check no memory assumptions fail and cause an overflow */ 1816 if ((size_read + offset + length) > 1817 sc->cfg->eeprom_size) { 1818 device_printf(sc->sc_dev, 1819 "EEPROM size is too small for NVM\n"); 1820 return ENOBUFS; 1821 } 1822 1823 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen); 1824 if (ret) { 1825 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1826 "Cannot read NVM from section %d offset %d, length %d\n", 1827 section, offset, length); 1828 return ret; 1829 } 1830 offset += seglen; 1831 } 1832 1833 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 1834 "NVM section %d read completed\n", section); 1835 *len = offset; 1836 return 0; 1837} 1838 1839/* 1840 * BEGIN IWM_NVM_PARSE 1841 */ 1842 1843/* iwlwifi/iwl-nvm-parse.c */ 1844 1845/* NVM offsets (in words) definitions */ 1846enum iwm_nvm_offsets { 1847 /* NVM HW-Section offset (in words) definitions */ 1848 IWM_HW_ADDR = 0x15, 1849 1850/* NVM SW-Section offset (in words) definitions */ 1851 IWM_NVM_SW_SECTION = 0x1C0, 1852 IWM_NVM_VERSION = 0, 1853 IWM_RADIO_CFG = 1, 1854 IWM_SKU = 2, 1855 IWM_N_HW_ADDRS = 3, 1856 IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION, 1857 1858/* NVM calibration section offset (in words) definitions */ 1859 IWM_NVM_CALIB_SECTION = 0x2B8, 1860 IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION 1861}; 1862 1863enum iwm_8000_nvm_offsets { 1864 /* NVM HW-Section offset (in words) definitions */ 1865 IWM_HW_ADDR0_WFPM_8000 = 0x12, 1866 IWM_HW_ADDR1_WFPM_8000 = 0x16, 1867 IWM_HW_ADDR0_PCIE_8000 = 0x8A, 1868 IWM_HW_ADDR1_PCIE_8000 = 0x8E, 1869 IWM_MAC_ADDRESS_OVERRIDE_8000 = 1, 1870 1871 /* NVM SW-Section offset (in words) definitions */ 1872 IWM_NVM_SW_SECTION_8000 = 0x1C0, 1873 IWM_NVM_VERSION_8000 = 0, 1874 IWM_RADIO_CFG_8000 = 0, 1875 IWM_SKU_8000 = 2, 1876 IWM_N_HW_ADDRS_8000 = 3, 1877 1878 /* NVM REGULATORY -Section offset (in words) definitions */ 1879 IWM_NVM_CHANNELS_8000 = 0, 1880 IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7, 1881 IWM_NVM_LAR_OFFSET_8000 = 0x507, 1882 IWM_NVM_LAR_ENABLED_8000 = 0x7, 1883 1884 /* NVM calibration section offset (in words) definitions */ 1885 IWM_NVM_CALIB_SECTION_8000 = 0x2B8, 1886 IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000 1887}; 1888 1889/* SKU Capabilities (actual values from NVM definition) */ 1890enum nvm_sku_bits { 1891 IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0), 1892 IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1), 1893 IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2), 1894 IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3), 1895}; 1896 1897/* radio config bits (actual values from NVM definition) */ 1898#define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */ 1899#define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ 1900#define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ 1901#define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ 1902#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ 1903#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ 1904 1905#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF) 1906#define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF) 1907#define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF) 1908#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF) 1909#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF) 1910#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF) 1911 1912#define DEFAULT_MAX_TX_POWER 16 1913 1914/** 1915 * enum iwm_nvm_channel_flags - channel flags in NVM 1916 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo 1917 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel 1918 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed 1919 * @IWM_NVM_CHANNEL_RADAR: radar detection required 1920 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c 1921 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate 1922 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?) 1923 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) 1924 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?) 1925 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?) 1926 */ 1927enum iwm_nvm_channel_flags { 1928 IWM_NVM_CHANNEL_VALID = (1 << 0), 1929 IWM_NVM_CHANNEL_IBSS = (1 << 1), 1930 IWM_NVM_CHANNEL_ACTIVE = (1 << 3), 1931 IWM_NVM_CHANNEL_RADAR = (1 << 4), 1932 IWM_NVM_CHANNEL_DFS = (1 << 7), 1933 IWM_NVM_CHANNEL_WIDE = (1 << 8), 1934 IWM_NVM_CHANNEL_40MHZ = (1 << 9), 1935 IWM_NVM_CHANNEL_80MHZ = (1 << 10), 1936 IWM_NVM_CHANNEL_160MHZ = (1 << 11), 1937}; 1938 1939/* 1940 * Translate EEPROM flags to net80211. 1941 */ 1942static uint32_t 1943iwm_eeprom_channel_flags(uint16_t ch_flags) 1944{ 1945 uint32_t nflags; 1946 1947 nflags = 0; 1948 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0) 1949 nflags |= IEEE80211_CHAN_PASSIVE; 1950 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0) 1951 nflags |= IEEE80211_CHAN_NOADHOC; 1952 if (ch_flags & IWM_NVM_CHANNEL_RADAR) { 1953 nflags |= IEEE80211_CHAN_DFS; 1954 /* Just in case. */ 1955 nflags |= IEEE80211_CHAN_NOADHOC; 1956 } 1957 1958 return (nflags); 1959} 1960 1961static void 1962iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[], 1963 int maxchans, int *nchans, int ch_idx, size_t ch_num, 1964 const uint8_t bands[]) 1965{ 1966 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags; 1967 uint32_t nflags; 1968 uint16_t ch_flags; 1969 uint8_t ieee; 1970 int error; 1971 1972 for (; ch_idx < ch_num; ch_idx++) { 1973 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx); 1974 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 1975 ieee = iwm_nvm_channels[ch_idx]; 1976 else 1977 ieee = iwm_nvm_channels_8000[ch_idx]; 1978 1979 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) { 1980 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, 1981 "Ch. %d Flags %x [%sGHz] - No traffic\n", 1982 ieee, ch_flags, 1983 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? 1984 "5.2" : "2.4"); 1985 continue; 1986 } 1987 1988 nflags = iwm_eeprom_channel_flags(ch_flags); 1989 error = ieee80211_add_channel(chans, maxchans, nchans, 1990 ieee, 0, 0, nflags, bands); 1991 if (error != 0) 1992 break; 1993 1994 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, 1995 "Ch. %d Flags %x [%sGHz] - Added\n", 1996 ieee, ch_flags, 1997 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? 1998 "5.2" : "2.4"); 1999 } 2000} 2001 2002static void 2003iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans, 2004 struct ieee80211_channel chans[]) 2005{ 2006 struct iwm_softc *sc = ic->ic_softc; 2007 struct iwm_nvm_data *data = sc->nvm_data; 2008 uint8_t bands[IEEE80211_MODE_BYTES]; 2009 size_t ch_num; 2010 2011 memset(bands, 0, sizeof(bands)); 2012 /* 1-13: 11b/g channels. */ 2013 setbit(bands, IEEE80211_MODE_11B); 2014 setbit(bands, IEEE80211_MODE_11G); 2015 iwm_add_channel_band(sc, chans, maxchans, nchans, 0, 2016 IWM_NUM_2GHZ_CHANNELS - 1, bands); 2017 2018 /* 14: 11b channel only. */ 2019 clrbit(bands, IEEE80211_MODE_11G); 2020 iwm_add_channel_band(sc, chans, maxchans, nchans, 2021 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands); 2022 2023 if (data->sku_cap_band_52GHz_enable) { 2024 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 2025 ch_num = nitems(iwm_nvm_channels); 2026 else 2027 ch_num = nitems(iwm_nvm_channels_8000); 2028 memset(bands, 0, sizeof(bands)); 2029 setbit(bands, IEEE80211_MODE_11A); 2030 iwm_add_channel_band(sc, chans, maxchans, nchans, 2031 IWM_NUM_2GHZ_CHANNELS, ch_num, bands); 2032 } 2033} 2034 2035static void 2036iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data, 2037 const uint16_t *mac_override, const uint16_t *nvm_hw) 2038{ 2039 const uint8_t *hw_addr; 2040 2041 if (mac_override) { 2042 static const uint8_t reserved_mac[] = { 2043 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 2044 }; 2045 2046 hw_addr = (const uint8_t *)(mac_override + 2047 IWM_MAC_ADDRESS_OVERRIDE_8000); 2048 2049 /* 2050 * Store the MAC address from MAO section. 2051 * No byte swapping is required in MAO section 2052 */ 2053 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr); 2054 2055 /* 2056 * Force the use of the OTP MAC address in case of reserved MAC 2057 * address in the NVM, or if address is given but invalid. 2058 */ 2059 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) && 2060 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) && 2061 iwm_is_valid_ether_addr(data->hw_addr) && 2062 !IEEE80211_IS_MULTICAST(data->hw_addr)) 2063 return; 2064 2065 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2066 "%s: mac address from nvm override section invalid\n", 2067 __func__); 2068 } 2069 2070 if (nvm_hw) { 2071 /* read the mac address from WFMP registers */ 2072 uint32_t mac_addr0 = 2073 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0)); 2074 uint32_t mac_addr1 = 2075 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1)); 2076 2077 hw_addr = (const uint8_t *)&mac_addr0; 2078 data->hw_addr[0] = hw_addr[3]; 2079 data->hw_addr[1] = hw_addr[2]; 2080 data->hw_addr[2] = hw_addr[1]; 2081 data->hw_addr[3] = hw_addr[0]; 2082 2083 hw_addr = (const uint8_t *)&mac_addr1; 2084 data->hw_addr[4] = hw_addr[1]; 2085 data->hw_addr[5] = hw_addr[0]; 2086 2087 return; 2088 } 2089 2090 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__); 2091 memset(data->hw_addr, 0, sizeof(data->hw_addr)); 2092} 2093 2094static int 2095iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw, 2096 const uint16_t *phy_sku) 2097{ 2098 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2099 return le16_to_cpup(nvm_sw + IWM_SKU); 2100 2101 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000)); 2102} 2103 2104static int 2105iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw) 2106{ 2107 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2108 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION); 2109 else 2110 return le32_to_cpup((const uint32_t *)(nvm_sw + 2111 IWM_NVM_VERSION_8000)); 2112} 2113 2114static int 2115iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw, 2116 const uint16_t *phy_sku) 2117{ 2118 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2119 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG); 2120 2121 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000)); 2122} 2123 2124static int 2125iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw) 2126{ 2127 int n_hw_addr; 2128 2129 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) 2130 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS); 2131 2132 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000)); 2133 2134 return n_hw_addr & IWM_N_HW_ADDR_MASK; 2135} 2136 2137static void 2138iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data, 2139 uint32_t radio_cfg) 2140{ 2141 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2142 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg); 2143 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg); 2144 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg); 2145 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg); 2146 return; 2147 } 2148 2149 /* set the radio configuration for family 8000 */ 2150 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg); 2151 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg); 2152 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg); 2153 data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg); 2154 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg); 2155 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg); 2156} 2157 2158static int 2159iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data, 2160 const uint16_t *nvm_hw, const uint16_t *mac_override) 2161{ 2162#ifdef notyet /* for FAMILY 9000 */ 2163 if (cfg->mac_addr_from_csr) { 2164 iwm_set_hw_address_from_csr(sc, data); 2165 } else 2166#endif 2167 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2168 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR); 2169 2170 /* The byte order is little endian 16 bit, meaning 214365 */ 2171 data->hw_addr[0] = hw_addr[1]; 2172 data->hw_addr[1] = hw_addr[0]; 2173 data->hw_addr[2] = hw_addr[3]; 2174 data->hw_addr[3] = hw_addr[2]; 2175 data->hw_addr[4] = hw_addr[5]; 2176 data->hw_addr[5] = hw_addr[4]; 2177 } else { 2178 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw); 2179 } 2180 2181 if (!iwm_is_valid_ether_addr(data->hw_addr)) { 2182 device_printf(sc->sc_dev, "no valid mac address was found\n"); 2183 return EINVAL; 2184 } 2185 2186 return 0; 2187} 2188 2189static struct iwm_nvm_data * 2190iwm_parse_nvm_data(struct iwm_softc *sc, 2191 const uint16_t *nvm_hw, const uint16_t *nvm_sw, 2192 const uint16_t *nvm_calib, const uint16_t *mac_override, 2193 const uint16_t *phy_sku, const uint16_t *regulatory) 2194{ 2195 struct iwm_nvm_data *data; 2196 uint32_t sku, radio_cfg; 2197 uint16_t lar_config; 2198 2199 if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) { 2200 data = malloc(sizeof(*data) + 2201 IWM_NUM_CHANNELS * sizeof(uint16_t), 2202 M_DEVBUF, M_NOWAIT | M_ZERO); 2203 } else { 2204 data = malloc(sizeof(*data) + 2205 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t), 2206 M_DEVBUF, M_NOWAIT | M_ZERO); 2207 } 2208 if (!data) 2209 return NULL; 2210 2211 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw); 2212 2213 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku); 2214 iwm_set_radio_cfg(sc, data, radio_cfg); 2215 2216 sku = iwm_get_sku(sc, nvm_sw, phy_sku); 2217 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ; 2218 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ; 2219 data->sku_cap_11n_enable = 0; 2220 2221 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw); 2222 2223 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2224 uint16_t lar_offset = data->nvm_version < 0xE39 ? 2225 IWM_NVM_LAR_OFFSET_8000_OLD : 2226 IWM_NVM_LAR_OFFSET_8000; 2227 2228 lar_config = le16_to_cpup(regulatory + lar_offset); 2229 data->lar_enabled = !!(lar_config & 2230 IWM_NVM_LAR_ENABLED_8000); 2231 } 2232 2233 /* If no valid mac address was found - bail out */ 2234 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) { 2235 free(data, M_DEVBUF); 2236 return NULL; 2237 } 2238 2239 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 2240 memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS], 2241 IWM_NUM_CHANNELS * sizeof(uint16_t)); 2242 } else { 2243 memcpy(data->nvm_ch_flags, ®ulatory[IWM_NVM_CHANNELS_8000], 2244 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t)); 2245 } 2246 2247 return data; 2248} 2249 2250static void 2251iwm_free_nvm_data(struct iwm_nvm_data *data) 2252{ 2253 if (data != NULL) 2254 free(data, M_DEVBUF); 2255} 2256 2257static struct iwm_nvm_data * 2258iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections) 2259{ 2260 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; 2261 2262 /* Checking for required sections */ 2263 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) { 2264 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2265 !sections[sc->cfg->nvm_hw_section_num].data) { 2266 device_printf(sc->sc_dev, 2267 "Can't parse empty OTP/NVM sections\n"); 2268 return NULL; 2269 } 2270 } else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2271 /* SW and REGULATORY sections are mandatory */ 2272 if (!sections[IWM_NVM_SECTION_TYPE_SW].data || 2273 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) { 2274 device_printf(sc->sc_dev, 2275 "Can't parse empty OTP/NVM sections\n"); 2276 return NULL; 2277 } 2278 /* MAC_OVERRIDE or at least HW section must exist */ 2279 if (!sections[sc->cfg->nvm_hw_section_num].data && 2280 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) { 2281 device_printf(sc->sc_dev, 2282 "Can't parse mac_address, empty sections\n"); 2283 return NULL; 2284 } 2285 2286 /* PHY_SKU section is mandatory in B0 */ 2287 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) { 2288 device_printf(sc->sc_dev, 2289 "Can't parse phy_sku in B0, empty sections\n"); 2290 return NULL; 2291 } 2292 } else { 2293 panic("unknown device family %d\n", sc->cfg->device_family); 2294 } 2295 2296 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data; 2297 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data; 2298 calib = (const uint16_t *) 2299 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data; 2300 regulatory = (const uint16_t *) 2301 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data; 2302 mac_override = (const uint16_t *) 2303 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data; 2304 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data; 2305 2306 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override, 2307 phy_sku, regulatory); 2308} 2309 2310static int 2311iwm_nvm_init(struct iwm_softc *sc) 2312{ 2313 struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS]; 2314 int i, ret, section; 2315 uint32_t size_read = 0; 2316 uint8_t *nvm_buffer, *temp; 2317 uint16_t len; 2318 2319 memset(nvm_sections, 0, sizeof(nvm_sections)); 2320 2321 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS) 2322 return EINVAL; 2323 2324 /* load NVM values from nic */ 2325 /* Read From FW NVM */ 2326 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n"); 2327 2328 nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO); 2329 if (!nvm_buffer) 2330 return ENOMEM; 2331 for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) { 2332 /* we override the constness for initial read */ 2333 ret = iwm_nvm_read_section(sc, section, nvm_buffer, 2334 &len, size_read); 2335 if (ret) 2336 continue; 2337 size_read += len; 2338 temp = malloc(len, M_DEVBUF, M_NOWAIT); 2339 if (!temp) { 2340 ret = ENOMEM; 2341 break; 2342 } 2343 memcpy(temp, nvm_buffer, len); 2344 2345 nvm_sections[section].data = temp; 2346 nvm_sections[section].length = len; 2347 } 2348 if (!size_read) 2349 device_printf(sc->sc_dev, "OTP is blank\n"); 2350 free(nvm_buffer, M_DEVBUF); 2351 2352 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections); 2353 if (!sc->nvm_data) 2354 return EINVAL; 2355 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET, 2356 "nvm version = %x\n", sc->nvm_data->nvm_version); 2357 2358 for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) { 2359 if (nvm_sections[i].data != NULL) 2360 free(nvm_sections[i].data, M_DEVBUF); 2361 } 2362 2363 return 0; 2364} 2365 2366static int 2367iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num, 2368 const struct iwm_fw_desc *section) 2369{ 2370 struct iwm_dma_info *dma = &sc->fw_dma; 2371 uint8_t *v_addr; 2372 bus_addr_t p_addr; 2373 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len); 2374 int ret = 0; 2375 2376 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2377 "%s: [%d] uCode section being loaded...\n", 2378 __func__, section_num); 2379 2380 v_addr = dma->vaddr; 2381 p_addr = dma->paddr; 2382 2383 for (offset = 0; offset < section->len; offset += chunk_sz) { 2384 uint32_t copy_size, dst_addr; 2385 int extended_addr = FALSE; 2386 2387 copy_size = MIN(chunk_sz, section->len - offset); 2388 dst_addr = section->offset + offset; 2389 2390 if (dst_addr >= IWM_FW_MEM_EXTENDED_START && 2391 dst_addr <= IWM_FW_MEM_EXTENDED_END) 2392 extended_addr = TRUE; 2393 2394 if (extended_addr) 2395 iwm_set_bits_prph(sc, IWM_LMPM_CHICK, 2396 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); 2397 2398 memcpy(v_addr, (const uint8_t *)section->data + offset, 2399 copy_size); 2400 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); 2401 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr, 2402 copy_size); 2403 2404 if (extended_addr) 2405 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK, 2406 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE); 2407 2408 if (ret) { 2409 device_printf(sc->sc_dev, 2410 "%s: Could not load the [%d] uCode section\n", 2411 __func__, section_num); 2412 break; 2413 } 2414 } 2415 2416 return ret; 2417} 2418 2419/* 2420 * ucode 2421 */ 2422static int 2423iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr, 2424 bus_addr_t phy_addr, uint32_t byte_cnt) 2425{ 2426 int ret; 2427 2428 sc->sc_fw_chunk_done = 0; 2429 2430 if (!iwm_nic_lock(sc)) 2431 return EBUSY; 2432 2433 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), 2434 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 2435 2436 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL), 2437 dst_addr); 2438 2439 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL), 2440 phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 2441 2442 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL), 2443 (iwm_get_dma_hi_addr(phy_addr) 2444 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 2445 2446 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL), 2447 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 2448 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | 2449 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 2450 2451 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL), 2452 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 2453 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 2454 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 2455 2456 iwm_nic_unlock(sc); 2457 2458 /* wait up to 5s for this segment to load */ 2459 ret = 0; 2460 while (!sc->sc_fw_chunk_done) { 2461 ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz); 2462 if (ret) 2463 break; 2464 } 2465 2466 if (ret != 0) { 2467 device_printf(sc->sc_dev, 2468 "fw chunk addr 0x%x len %d failed to load\n", 2469 dst_addr, byte_cnt); 2470 return ETIMEDOUT; 2471 } 2472 2473 return 0; 2474} 2475 2476static int 2477iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc, 2478 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section) 2479{ 2480 int shift_param; 2481 int i, ret = 0, sec_num = 0x1; 2482 uint32_t val, last_read_idx = 0; 2483 2484 if (cpu == 1) { 2485 shift_param = 0; 2486 *first_ucode_section = 0; 2487 } else { 2488 shift_param = 16; 2489 (*first_ucode_section)++; 2490 } 2491 2492 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2493 last_read_idx = i; 2494 2495 /* 2496 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2497 * CPU1 to CPU2. 2498 * PAGING_SEPARATOR_SECTION delimiter - separate between 2499 * CPU2 non paged to CPU2 paging sec. 2500 */ 2501 if (!image->fw_sect[i].data || 2502 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2503 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2504 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2505 "Break since Data not valid or Empty section, sec = %d\n", 2506 i); 2507 break; 2508 } 2509 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]); 2510 if (ret) 2511 return ret; 2512 2513 /* Notify the ucode of the loaded section number and status */ 2514 if (iwm_nic_lock(sc)) { 2515 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS); 2516 val = val | (sec_num << shift_param); 2517 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val); 2518 sec_num = (sec_num << 1) | 0x1; 2519 iwm_nic_unlock(sc); 2520 } 2521 } 2522 2523 *first_ucode_section = last_read_idx; 2524 2525 iwm_enable_interrupts(sc); 2526 2527 if (iwm_nic_lock(sc)) { 2528 if (cpu == 1) 2529 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF); 2530 else 2531 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF); 2532 iwm_nic_unlock(sc); 2533 } 2534 2535 return 0; 2536} 2537 2538static int 2539iwm_pcie_load_cpu_sections(struct iwm_softc *sc, 2540 const struct iwm_fw_sects *image, int cpu, int *first_ucode_section) 2541{ 2542 int shift_param; 2543 int i, ret = 0; 2544 uint32_t last_read_idx = 0; 2545 2546 if (cpu == 1) { 2547 shift_param = 0; 2548 *first_ucode_section = 0; 2549 } else { 2550 shift_param = 16; 2551 (*first_ucode_section)++; 2552 } 2553 2554 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) { 2555 last_read_idx = i; 2556 2557 /* 2558 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 2559 * CPU1 to CPU2. 2560 * PAGING_SEPARATOR_SECTION delimiter - separate between 2561 * CPU2 non paged to CPU2 paging sec. 2562 */ 2563 if (!image->fw_sect[i].data || 2564 image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION || 2565 image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) { 2566 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2567 "Break since Data not valid or Empty section, sec = %d\n", 2568 i); 2569 break; 2570 } 2571 2572 ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]); 2573 if (ret) 2574 return ret; 2575 } 2576 2577 *first_ucode_section = last_read_idx; 2578 2579 return 0; 2580 2581} 2582 2583static int 2584iwm_pcie_load_given_ucode(struct iwm_softc *sc, 2585 const struct iwm_fw_sects *image) 2586{ 2587 int ret = 0; 2588 int first_ucode_section; 2589 2590 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2591 image->is_dual_cpus ? "Dual" : "Single"); 2592 2593 /* load to FW the binary non secured sections of CPU1 */ 2594 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section); 2595 if (ret) 2596 return ret; 2597 2598 if (image->is_dual_cpus) { 2599 /* set CPU2 header address */ 2600 if (iwm_nic_lock(sc)) { 2601 iwm_write_prph(sc, 2602 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR, 2603 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE); 2604 iwm_nic_unlock(sc); 2605 } 2606 2607 /* load to FW the binary sections of CPU2 */ 2608 ret = iwm_pcie_load_cpu_sections(sc, image, 2, 2609 &first_ucode_section); 2610 if (ret) 2611 return ret; 2612 } 2613 2614 iwm_enable_interrupts(sc); 2615 2616 /* release CPU reset */ 2617 IWM_WRITE(sc, IWM_CSR_RESET, 0); 2618 2619 return 0; 2620} 2621 2622int 2623iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc, 2624 const struct iwm_fw_sects *image) 2625{ 2626 int ret = 0; 2627 int first_ucode_section; 2628 2629 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n", 2630 image->is_dual_cpus ? "Dual" : "Single"); 2631 2632 /* configure the ucode to be ready to get the secured image */ 2633 /* release CPU reset */ 2634 if (iwm_nic_lock(sc)) { 2635 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, 2636 IWM_RELEASE_CPU_RESET_BIT); 2637 iwm_nic_unlock(sc); 2638 } 2639 2640 /* load to FW the binary Secured sections of CPU1 */ 2641 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1, 2642 &first_ucode_section); 2643 if (ret) 2644 return ret; 2645 2646 /* load to FW the binary sections of CPU2 */ 2647 return iwm_pcie_load_cpu_sections_8000(sc, image, 2, 2648 &first_ucode_section); 2649} 2650 2651/* XXX Get rid of this definition */ 2652static inline void 2653iwm_enable_fw_load_int(struct iwm_softc *sc) 2654{ 2655 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n"); 2656 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX; 2657 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask); 2658} 2659 2660/* XXX Add proper rfkill support code */ 2661static int 2662iwm_start_fw(struct iwm_softc *sc, 2663 const struct iwm_fw_sects *fw) 2664{ 2665 int ret; 2666 2667 /* This may fail if AMT took ownership of the device */ 2668 if (iwm_prepare_card_hw(sc)) { 2669 device_printf(sc->sc_dev, 2670 "%s: Exit HW not ready\n", __func__); 2671 ret = EIO; 2672 goto out; 2673 } 2674 2675 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2676 2677 iwm_disable_interrupts(sc); 2678 2679 /* make sure rfkill handshake bits are cleared */ 2680 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2681 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, 2682 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 2683 2684 /* clear (again), then enable host interrupts */ 2685 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF); 2686 2687 ret = iwm_nic_init(sc); 2688 if (ret) { 2689 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__); 2690 goto out; 2691 } 2692 2693 /* 2694 * Now, we load the firmware and don't want to be interrupted, even 2695 * by the RF-Kill interrupt (hence mask all the interrupt besides the 2696 * FH_TX interrupt which is needed to load the firmware). If the 2697 * RF-Kill switch is toggled, we will find out after having loaded 2698 * the firmware and return the proper value to the caller. 2699 */ 2700 iwm_enable_fw_load_int(sc); 2701 2702 /* really make sure rfkill handshake bits are cleared */ 2703 /* maybe we should write a few times more? just to make sure */ 2704 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2705 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL); 2706 2707 /* Load the given image to the HW */ 2708 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) 2709 ret = iwm_pcie_load_given_ucode_8000(sc, fw); 2710 else 2711 ret = iwm_pcie_load_given_ucode(sc, fw); 2712 2713 /* XXX re-check RF-Kill state */ 2714 2715out: 2716 return ret; 2717} 2718 2719static int 2720iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant) 2721{ 2722 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = { 2723 .valid = htole32(valid_tx_ant), 2724 }; 2725 2726 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 2727 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd); 2728} 2729 2730/* iwlwifi: mvm/fw.c */ 2731static int 2732iwm_send_phy_cfg_cmd(struct iwm_softc *sc) 2733{ 2734 struct iwm_phy_cfg_cmd phy_cfg_cmd; 2735 enum iwm_ucode_type ucode_type = sc->cur_ucode; 2736 2737 /* Set parameters */ 2738 phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc)); 2739 phy_cfg_cmd.calib_control.event_trigger = 2740 sc->sc_default_calib[ucode_type].event_trigger; 2741 phy_cfg_cmd.calib_control.flow_trigger = 2742 sc->sc_default_calib[ucode_type].flow_trigger; 2743 2744 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET, 2745 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg); 2746 return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC, 2747 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 2748} 2749 2750static int 2751iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data) 2752{ 2753 struct iwm_mvm_alive_data *alive_data = data; 2754 struct iwm_mvm_alive_resp_ver1 *palive1; 2755 struct iwm_mvm_alive_resp_ver2 *palive2; 2756 struct iwm_mvm_alive_resp *palive; 2757 2758 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) { 2759 palive1 = (void *)pkt->data; 2760 2761 sc->support_umac_log = FALSE; 2762 sc->error_event_table = 2763 le32toh(palive1->error_event_table_ptr); 2764 sc->log_event_table = 2765 le32toh(palive1->log_event_table_ptr); 2766 alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr); 2767 2768 alive_data->valid = le16toh(palive1->status) == 2769 IWM_ALIVE_STATUS_OK; 2770 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2771 "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", 2772 le16toh(palive1->status), palive1->ver_type, 2773 palive1->ver_subtype, palive1->flags); 2774 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) { 2775 palive2 = (void *)pkt->data; 2776 sc->error_event_table = 2777 le32toh(palive2->error_event_table_ptr); 2778 sc->log_event_table = 2779 le32toh(palive2->log_event_table_ptr); 2780 alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr); 2781 sc->umac_error_event_table = 2782 le32toh(palive2->error_info_addr); 2783 2784 alive_data->valid = le16toh(palive2->status) == 2785 IWM_ALIVE_STATUS_OK; 2786 if (sc->umac_error_event_table) 2787 sc->support_umac_log = TRUE; 2788 2789 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2790 "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", 2791 le16toh(palive2->status), palive2->ver_type, 2792 palive2->ver_subtype, palive2->flags); 2793 2794 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2795 "UMAC version: Major - 0x%x, Minor - 0x%x\n", 2796 palive2->umac_major, palive2->umac_minor); 2797 } else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) { 2798 palive = (void *)pkt->data; 2799 2800 sc->error_event_table = 2801 le32toh(palive->error_event_table_ptr); 2802 sc->log_event_table = 2803 le32toh(palive->log_event_table_ptr); 2804 alive_data->scd_base_addr = le32toh(palive->scd_base_ptr); 2805 sc->umac_error_event_table = 2806 le32toh(palive->error_info_addr); 2807 2808 alive_data->valid = le16toh(palive->status) == 2809 IWM_ALIVE_STATUS_OK; 2810 if (sc->umac_error_event_table) 2811 sc->support_umac_log = TRUE; 2812 2813 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2814 "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n", 2815 le16toh(palive->status), palive->ver_type, 2816 palive->ver_subtype, palive->flags); 2817 2818 IWM_DPRINTF(sc, IWM_DEBUG_RESET, 2819 "UMAC version: Major - 0x%x, Minor - 0x%x\n", 2820 le32toh(palive->umac_major), 2821 le32toh(palive->umac_minor)); 2822 } 2823 2824 return TRUE; 2825} 2826 2827static int 2828iwm_wait_phy_db_entry(struct iwm_softc *sc, 2829 struct iwm_rx_packet *pkt, void *data) 2830{ 2831 struct iwm_phy_db *phy_db = data; 2832 2833 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) { 2834 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) { 2835 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n", 2836 __func__, pkt->hdr.code); 2837 } 2838 return TRUE; 2839 } 2840 2841 if (iwm_phy_db_set_section(phy_db, pkt)) { 2842 device_printf(sc->sc_dev, 2843 "%s: iwm_phy_db_set_section failed\n", __func__); 2844 } 2845 2846 return FALSE; 2847} 2848 2849static int 2850iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc, 2851 enum iwm_ucode_type ucode_type) 2852{ 2853 struct iwm_notification_wait alive_wait; 2854 struct iwm_mvm_alive_data alive_data; 2855 const struct iwm_fw_sects *fw; 2856 enum iwm_ucode_type old_type = sc->cur_ucode; 2857 int error; 2858 static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE }; 2859 2860 if ((error = iwm_read_firmware(sc, ucode_type)) != 0) { 2861 device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n", 2862 error); 2863 return error; 2864 } 2865 fw = &sc->sc_fw.fw_sects[ucode_type]; 2866 sc->cur_ucode = ucode_type; 2867 sc->ucode_loaded = FALSE; 2868 2869 memset(&alive_data, 0, sizeof(alive_data)); 2870 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait, 2871 alive_cmd, nitems(alive_cmd), 2872 iwm_alive_fn, &alive_data); 2873 2874 error = iwm_start_fw(sc, fw); 2875 if (error) { 2876 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error); 2877 sc->cur_ucode = old_type; 2878 iwm_remove_notification(sc->sc_notif_wait, &alive_wait); 2879 return error; 2880 } 2881 2882 /* 2883 * Some things may run in the background now, but we 2884 * just wait for the ALIVE notification here. 2885 */ 2886 IWM_UNLOCK(sc); 2887 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait, 2888 IWM_MVM_UCODE_ALIVE_TIMEOUT); 2889 IWM_LOCK(sc); 2890 if (error) { 2891 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 2892 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a; 2893 if (iwm_nic_lock(sc)) { 2894 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS); 2895 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS); 2896 iwm_nic_unlock(sc); 2897 } 2898 device_printf(sc->sc_dev, 2899 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", 2900 a, b); 2901 } 2902 sc->cur_ucode = old_type; 2903 return error; 2904 } 2905 2906 if (!alive_data.valid) { 2907 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n", 2908 __func__); 2909 sc->cur_ucode = old_type; 2910 return EIO; 2911 } 2912 2913 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr); 2914 2915 /* 2916 * configure and operate fw paging mechanism. 2917 * driver configures the paging flow only once, CPU2 paging image 2918 * included in the IWM_UCODE_INIT image. 2919 */ 2920 if (fw->paging_mem_size) { 2921 error = iwm_save_fw_paging(sc, fw); 2922 if (error) { 2923 device_printf(sc->sc_dev, 2924 "%s: failed to save the FW paging image\n", 2925 __func__); 2926 return error; 2927 } 2928 2929 error = iwm_send_paging_cmd(sc, fw); 2930 if (error) { 2931 device_printf(sc->sc_dev, 2932 "%s: failed to send the paging cmd\n", __func__); 2933 iwm_free_fw_paging(sc); 2934 return error; 2935 } 2936 } 2937 2938 if (!error) 2939 sc->ucode_loaded = TRUE; 2940 return error; 2941} 2942 2943/* 2944 * mvm misc bits 2945 */ 2946 2947/* 2948 * follows iwlwifi/fw.c 2949 */ 2950static int 2951iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm) 2952{ 2953 struct iwm_notification_wait calib_wait; 2954 static const uint16_t init_complete[] = { 2955 IWM_INIT_COMPLETE_NOTIF, 2956 IWM_CALIB_RES_NOTIF_PHY_DB 2957 }; 2958 int ret; 2959 2960 /* do not operate with rfkill switch turned on */ 2961 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) { 2962 device_printf(sc->sc_dev, 2963 "radio is disabled by hardware switch\n"); 2964 return EPERM; 2965 } 2966 2967 iwm_init_notification_wait(sc->sc_notif_wait, 2968 &calib_wait, 2969 init_complete, 2970 nitems(init_complete), 2971 iwm_wait_phy_db_entry, 2972 sc->sc_phy_db); 2973 2974 /* Will also start the device */ 2975 ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT); 2976 if (ret) { 2977 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n", 2978 ret); 2979 goto error; 2980 } 2981 2982 if (justnvm) { 2983 /* Read nvm */ 2984 ret = iwm_nvm_init(sc); 2985 if (ret) { 2986 device_printf(sc->sc_dev, "failed to read nvm\n"); 2987 goto error; 2988 } 2989 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr); 2990 goto error; 2991 } 2992 2993 ret = iwm_send_bt_init_conf(sc); 2994 if (ret) { 2995 device_printf(sc->sc_dev, 2996 "failed to send bt coex configuration: %d\n", ret); 2997 goto error; 2998 } 2999 3000 /* Init Smart FIFO. */ 3001 ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF); 3002 if (ret) 3003 goto error; 3004 3005 /* Send TX valid antennas before triggering calibrations */ 3006 ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc)); 3007 if (ret) { 3008 device_printf(sc->sc_dev, 3009 "failed to send antennas before calibration: %d\n", ret); 3010 goto error; 3011 } 3012 3013 /* 3014 * Send phy configurations command to init uCode 3015 * to start the 16.0 uCode init image internal calibrations. 3016 */ 3017 ret = iwm_send_phy_cfg_cmd(sc); 3018 if (ret) { 3019 device_printf(sc->sc_dev, 3020 "%s: Failed to run INIT calibrations: %d\n", 3021 __func__, ret); 3022 goto error; 3023 } 3024 3025 /* 3026 * Nothing to do but wait for the init complete notification 3027 * from the firmware. 3028 */ 3029 IWM_UNLOCK(sc); 3030 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait, 3031 IWM_MVM_UCODE_CALIB_TIMEOUT); 3032 IWM_LOCK(sc); 3033 3034 3035 goto out; 3036 3037error: 3038 iwm_remove_notification(sc->sc_notif_wait, &calib_wait); 3039out: 3040 return ret; 3041} 3042 3043/* 3044 * receive side 3045 */ 3046 3047/* (re)stock rx ring, called at init-time and at runtime */ 3048static int 3049iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx) 3050{ 3051 struct iwm_rx_ring *ring = &sc->rxq; 3052 struct iwm_rx_data *data = &ring->data[idx]; 3053 struct mbuf *m; 3054 bus_dmamap_t dmamap; 3055 bus_dma_segment_t seg; 3056 int nsegs, error; 3057 3058 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE); 3059 if (m == NULL) 3060 return ENOBUFS; 3061 3062 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 3063 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m, 3064 &seg, &nsegs, BUS_DMA_NOWAIT); 3065 if (error != 0) { 3066 device_printf(sc->sc_dev, 3067 "%s: can't map mbuf, error %d\n", __func__, error); 3068 m_freem(m); 3069 return error; 3070 } 3071 3072 if (data->m != NULL) 3073 bus_dmamap_unload(ring->data_dmat, data->map); 3074 3075 /* Swap ring->spare_map with data->map */ 3076 dmamap = data->map; 3077 data->map = ring->spare_map; 3078 ring->spare_map = dmamap; 3079 3080 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); 3081 data->m = m; 3082 3083 /* Update RX descriptor. */ 3084 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned")); 3085 ring->desc[idx] = htole32(seg.ds_addr >> 8); 3086 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3087 BUS_DMASYNC_PREWRITE); 3088 3089 return 0; 3090} 3091 3092/* iwlwifi: mvm/rx.c */ 3093/* 3094 * iwm_mvm_get_signal_strength - use new rx PHY INFO API 3095 * values are reported by the fw as positive values - need to negate 3096 * to obtain their dBM. Account for missing antennas by replacing 0 3097 * values by -256dBm: practically 0 power and a non-feasible 8 bit value. 3098 */ 3099static int 3100iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info) 3101{ 3102 int energy_a, energy_b, energy_c, max_energy; 3103 uint32_t val; 3104 3105 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]); 3106 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >> 3107 IWM_RX_INFO_ENERGY_ANT_A_POS; 3108 energy_a = energy_a ? -energy_a : -256; 3109 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >> 3110 IWM_RX_INFO_ENERGY_ANT_B_POS; 3111 energy_b = energy_b ? -energy_b : -256; 3112 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >> 3113 IWM_RX_INFO_ENERGY_ANT_C_POS; 3114 energy_c = energy_c ? -energy_c : -256; 3115 max_energy = MAX(energy_a, energy_b); 3116 max_energy = MAX(max_energy, energy_c); 3117 3118 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3119 "energy In A %d B %d C %d , and max %d\n", 3120 energy_a, energy_b, energy_c, max_energy); 3121 3122 return max_energy; 3123} 3124 3125static void 3126iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3127{ 3128 struct iwm_rx_phy_info *phy_info = (void *)pkt->data; 3129 3130 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n"); 3131 3132 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info)); 3133} 3134 3135/* 3136 * Retrieve the average noise (in dBm) among receivers. 3137 */ 3138static int 3139iwm_get_noise(struct iwm_softc *sc, 3140 const struct iwm_mvm_statistics_rx_non_phy *stats) 3141{ 3142 int i, total, nbant, noise; 3143 3144 total = nbant = noise = 0; 3145 for (i = 0; i < 3; i++) { 3146 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff; 3147 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n", 3148 __func__, 3149 i, 3150 noise); 3151 3152 if (noise) { 3153 total += noise; 3154 nbant++; 3155 } 3156 } 3157 3158 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n", 3159 __func__, nbant, total); 3160#if 0 3161 /* There should be at least one antenna but check anyway. */ 3162 return (nbant == 0) ? -127 : (total / nbant) - 107; 3163#else 3164 /* For now, just hard-code it to -96 to be safe */ 3165 return (-96); 3166#endif 3167} 3168 3169static void 3170iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3171{ 3172 struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data; 3173 3174 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats)); 3175 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general); 3176} 3177 3178/* 3179 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler 3180 * 3181 * Handles the actual data of the Rx packet from the fw 3182 */ 3183static boolean_t 3184iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset, 3185 boolean_t stolen) 3186{ 3187 struct ieee80211com *ic = &sc->sc_ic; 3188 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3189 struct ieee80211_frame *wh; 3190 struct ieee80211_node *ni; 3191 struct ieee80211_rx_stats rxs; 3192 struct iwm_rx_phy_info *phy_info; 3193 struct iwm_rx_mpdu_res_start *rx_res; 3194 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset); 3195 uint32_t len; 3196 uint32_t rx_pkt_status; 3197 int rssi; 3198 3199 phy_info = &sc->sc_last_phy_info; 3200 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data; 3201 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res)); 3202 len = le16toh(rx_res->byte_count); 3203 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len)); 3204 3205 if (__predict_false(phy_info->cfg_phy_cnt > 20)) { 3206 device_printf(sc->sc_dev, 3207 "dsp size out of range [0,20]: %d\n", 3208 phy_info->cfg_phy_cnt); 3209 goto fail; 3210 } 3211 3212 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) || 3213 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) { 3214 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3215 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status); 3216 goto fail; 3217 } 3218 3219 rssi = iwm_mvm_get_signal_strength(sc, phy_info); 3220 3221 /* Map it to relative value */ 3222 rssi = rssi - sc->sc_noise; 3223 3224 /* replenish ring for the buffer we're going to feed to the sharks */ 3225 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) { 3226 device_printf(sc->sc_dev, "%s: unable to add more buffers\n", 3227 __func__); 3228 goto fail; 3229 } 3230 3231 m->m_data = pkt->data + sizeof(*rx_res); 3232 m->m_pkthdr.len = m->m_len = len; 3233 3234 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3235 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise); 3236 3237 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); 3238 3239 IWM_DPRINTF(sc, IWM_DEBUG_RECV, 3240 "%s: phy_info: channel=%d, flags=0x%08x\n", 3241 __func__, 3242 le16toh(phy_info->channel), 3243 le16toh(phy_info->phy_flags)); 3244 3245 /* 3246 * Populate an RX state struct with the provided information. 3247 */ 3248 bzero(&rxs, sizeof(rxs)); 3249 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; 3250 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; 3251 rxs.c_ieee = le16toh(phy_info->channel); 3252 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) { 3253 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ); 3254 } else { 3255 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ); 3256 } 3257 3258 /* rssi is in 1/2db units */ 3259 rxs.rssi = rssi * 2; 3260 rxs.nf = sc->sc_noise; 3261 3262 if (ieee80211_radiotap_active_vap(vap)) { 3263 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap; 3264 3265 tap->wr_flags = 0; 3266 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE)) 3267 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; 3268 tap->wr_chan_freq = htole16(rxs.c_freq); 3269 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */ 3270 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); 3271 tap->wr_dbm_antsignal = (int8_t)rssi; 3272 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise; 3273 tap->wr_tsft = phy_info->system_timestamp; 3274 switch (phy_info->rate) { 3275 /* CCK rates. */ 3276 case 10: tap->wr_rate = 2; break; 3277 case 20: tap->wr_rate = 4; break; 3278 case 55: tap->wr_rate = 11; break; 3279 case 110: tap->wr_rate = 22; break; 3280 /* OFDM rates. */ 3281 case 0xd: tap->wr_rate = 12; break; 3282 case 0xf: tap->wr_rate = 18; break; 3283 case 0x5: tap->wr_rate = 24; break; 3284 case 0x7: tap->wr_rate = 36; break; 3285 case 0x9: tap->wr_rate = 48; break; 3286 case 0xb: tap->wr_rate = 72; break; 3287 case 0x1: tap->wr_rate = 96; break; 3288 case 0x3: tap->wr_rate = 108; break; 3289 /* Unknown rate: should not happen. */ 3290 default: tap->wr_rate = 0; 3291 } 3292 } 3293 3294 IWM_UNLOCK(sc); 3295 if (ni != NULL) { 3296 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m); 3297 ieee80211_input_mimo(ni, m, &rxs); 3298 ieee80211_free_node(ni); 3299 } else { 3300 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m); 3301 ieee80211_input_mimo_all(ic, m, &rxs); 3302 } 3303 IWM_LOCK(sc); 3304 3305 return TRUE; 3306 3307fail: counter_u64_add(ic->ic_ierrors, 1); 3308 return FALSE; 3309} 3310 3311static int 3312iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt, 3313 struct iwm_node *in) 3314{ 3315 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data; 3316 struct ieee80211_node *ni = &in->in_ni; 3317 struct ieee80211vap *vap = ni->ni_vap; 3318 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK; 3319 int failack = tx_resp->failure_frame; 3320 3321 KASSERT(tx_resp->frame_count == 1, ("too many frames")); 3322 3323 /* Update rate control statistics. */ 3324 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n", 3325 __func__, 3326 (int) le16toh(tx_resp->status.status), 3327 (int) le16toh(tx_resp->status.sequence), 3328 tx_resp->frame_count, 3329 tx_resp->bt_kill_count, 3330 tx_resp->failure_rts, 3331 tx_resp->failure_frame, 3332 le32toh(tx_resp->initial_rate), 3333 (int) le16toh(tx_resp->wireless_media_time)); 3334 3335 if (status != IWM_TX_STATUS_SUCCESS && 3336 status != IWM_TX_STATUS_DIRECT_DONE) { 3337 ieee80211_ratectl_tx_complete(vap, ni, 3338 IEEE80211_RATECTL_TX_FAILURE, &failack, NULL); 3339 return (1); 3340 } else { 3341 ieee80211_ratectl_tx_complete(vap, ni, 3342 IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL); 3343 return (0); 3344 } 3345} 3346 3347static void 3348iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3349{ 3350 struct iwm_cmd_header *cmd_hdr = &pkt->hdr; 3351 int idx = cmd_hdr->idx; 3352 int qid = cmd_hdr->qid; 3353 struct iwm_tx_ring *ring = &sc->txq[qid]; 3354 struct iwm_tx_data *txd = &ring->data[idx]; 3355 struct iwm_node *in = txd->in; 3356 struct mbuf *m = txd->m; 3357 int status; 3358 3359 KASSERT(txd->done == 0, ("txd not done")); 3360 KASSERT(txd->in != NULL, ("txd without node")); 3361 KASSERT(txd->m != NULL, ("txd without mbuf")); 3362 3363 sc->sc_tx_timer = 0; 3364 3365 status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in); 3366 3367 /* Unmap and free mbuf. */ 3368 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE); 3369 bus_dmamap_unload(ring->data_dmat, txd->map); 3370 3371 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3372 "free txd %p, in %p\n", txd, txd->in); 3373 txd->done = 1; 3374 txd->m = NULL; 3375 txd->in = NULL; 3376 3377 ieee80211_tx_complete(&in->in_ni, m, status); 3378 3379 if (--ring->queued < IWM_TX_RING_LOMARK) { 3380 sc->qfullmsk &= ~(1 << ring->qid); 3381 if (sc->qfullmsk == 0) { 3382 iwm_start(sc); 3383 } 3384 } 3385} 3386 3387/* 3388 * transmit side 3389 */ 3390 3391/* 3392 * Process a "command done" firmware notification. This is where we wakeup 3393 * processes waiting for a synchronous command completion. 3394 * from if_iwn 3395 */ 3396static void 3397iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt) 3398{ 3399 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE]; 3400 struct iwm_tx_data *data; 3401 3402 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) { 3403 return; /* Not a command ack. */ 3404 } 3405 3406 /* XXX wide commands? */ 3407 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 3408 "cmd notification type 0x%x qid %d idx %d\n", 3409 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx); 3410 3411 data = &ring->data[pkt->hdr.idx]; 3412 3413 /* If the command was mapped in an mbuf, free it. */ 3414 if (data->m != NULL) { 3415 bus_dmamap_sync(ring->data_dmat, data->map, 3416 BUS_DMASYNC_POSTWRITE); 3417 bus_dmamap_unload(ring->data_dmat, data->map); 3418 m_freem(data->m); 3419 data->m = NULL; 3420 } 3421 wakeup(&ring->desc[pkt->hdr.idx]); 3422 3423 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) { 3424 device_printf(sc->sc_dev, 3425 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n", 3426 __func__, pkt->hdr.idx, ring->queued, ring->cur); 3427 /* XXX call iwm_force_nmi() */ 3428 } 3429 3430 KASSERT(ring->queued > 0, ("ring->queued is empty?")); 3431 ring->queued--; 3432 if (ring->queued == 0) 3433 iwm_pcie_clear_cmd_in_flight(sc); 3434} 3435 3436#if 0 3437/* 3438 * necessary only for block ack mode 3439 */ 3440void 3441iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id, 3442 uint16_t len) 3443{ 3444 struct iwm_agn_scd_bc_tbl *scd_bc_tbl; 3445 uint16_t w_val; 3446 3447 scd_bc_tbl = sc->sched_dma.vaddr; 3448 3449 len += 8; /* magic numbers came naturally from paris */ 3450 len = roundup(len, 4) / 4; 3451 3452 w_val = htole16(sta_id << 12 | len); 3453 3454 /* Update TX scheduler. */ 3455 scd_bc_tbl[qid].tfd_offset[idx] = w_val; 3456 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3457 BUS_DMASYNC_PREWRITE); 3458 3459 /* I really wonder what this is ?!? */ 3460 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) { 3461 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val; 3462 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, 3463 BUS_DMASYNC_PREWRITE); 3464 } 3465} 3466#endif 3467 3468/* 3469 * Take an 802.11 (non-n) rate, find the relevant rate 3470 * table entry. return the index into in_ridx[]. 3471 * 3472 * The caller then uses that index back into in_ridx 3473 * to figure out the rate index programmed /into/ 3474 * the firmware for this given node. 3475 */ 3476static int 3477iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in, 3478 uint8_t rate) 3479{ 3480 int i; 3481 uint8_t r; 3482 3483 for (i = 0; i < nitems(in->in_ridx); i++) { 3484 r = iwm_rates[in->in_ridx[i]].rate; 3485 if (rate == r) 3486 return (i); 3487 } 3488 3489 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3490 "%s: couldn't find an entry for rate=%d\n", 3491 __func__, 3492 rate); 3493 3494 /* XXX Return the first */ 3495 /* XXX TODO: have it return the /lowest/ */ 3496 return (0); 3497} 3498 3499static int 3500iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate) 3501{ 3502 int i; 3503 3504 for (i = 0; i < nitems(iwm_rates); i++) { 3505 if (iwm_rates[i].rate == rate) 3506 return (i); 3507 } 3508 /* XXX error? */ 3509 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3510 "%s: couldn't find an entry for rate=%d\n", 3511 __func__, 3512 rate); 3513 return (0); 3514} 3515 3516/* 3517 * Fill in the rate related information for a transmit command. 3518 */ 3519static const struct iwm_rate * 3520iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in, 3521 struct mbuf *m, struct iwm_tx_cmd *tx) 3522{ 3523 struct ieee80211_node *ni = &in->in_ni; 3524 struct ieee80211_frame *wh; 3525 const struct ieee80211_txparam *tp = ni->ni_txparms; 3526 const struct iwm_rate *rinfo; 3527 int type; 3528 int ridx, rate_flags; 3529 3530 wh = mtod(m, struct ieee80211_frame *); 3531 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3532 3533 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT; 3534 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY; 3535 3536 if (type == IEEE80211_FC0_TYPE_MGT) { 3537 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate); 3538 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3539 "%s: MGT (%d)\n", __func__, tp->mgmtrate); 3540 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3541 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate); 3542 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3543 "%s: MCAST (%d)\n", __func__, tp->mcastrate); 3544 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { 3545 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate); 3546 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3547 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate); 3548 } else if (m->m_flags & M_EAPOL) { 3549 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate); 3550 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 3551 "%s: EAPOL\n", __func__); 3552 } else if (type == IEEE80211_FC0_TYPE_DATA) { 3553 int i; 3554 3555 /* for data frames, use RS table */ 3556 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__); 3557 /* XXX pass pktlen */ 3558 (void) ieee80211_ratectl_rate(ni, NULL, 0); 3559 i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate); 3560 ridx = in->in_ridx[i]; 3561 3562 /* This is the index into the programmed table */ 3563 tx->initial_rate_index = i; 3564 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE); 3565 3566 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3567 "%s: start with i=%d, txrate %d\n", 3568 __func__, i, iwm_rates[ridx].rate); 3569 } else { 3570 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate); 3571 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n", 3572 __func__, tp->mgmtrate); 3573 } 3574 3575 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE, 3576 "%s: frame type=%d txrate %d\n", 3577 __func__, type, iwm_rates[ridx].rate); 3578 3579 rinfo = &iwm_rates[ridx]; 3580 3581 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n", 3582 __func__, ridx, 3583 rinfo->rate, 3584 !! (IWM_RIDX_IS_CCK(ridx)) 3585 ); 3586 3587 /* XXX TODO: hard-coded TX antenna? */ 3588 rate_flags = 1 << IWM_RATE_MCS_ANT_POS; 3589 if (IWM_RIDX_IS_CCK(ridx)) 3590 rate_flags |= IWM_RATE_MCS_CCK_MSK; 3591 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp); 3592 3593 return rinfo; 3594} 3595 3596#define TB0_SIZE 16 3597static int 3598iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac) 3599{ 3600 struct ieee80211com *ic = &sc->sc_ic; 3601 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 3602 struct iwm_node *in = IWM_NODE(ni); 3603 struct iwm_tx_ring *ring; 3604 struct iwm_tx_data *data; 3605 struct iwm_tfd *desc; 3606 struct iwm_device_cmd *cmd; 3607 struct iwm_tx_cmd *tx; 3608 struct ieee80211_frame *wh; 3609 struct ieee80211_key *k = NULL; 3610 struct mbuf *m1; 3611 const struct iwm_rate *rinfo; 3612 uint32_t flags; 3613 u_int hdrlen; 3614 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER]; 3615 int nsegs; 3616 uint8_t tid, type; 3617 int i, totlen, error, pad; 3618 3619 wh = mtod(m, struct ieee80211_frame *); 3620 hdrlen = ieee80211_anyhdrsize(wh); 3621 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; 3622 tid = 0; 3623 ring = &sc->txq[ac]; 3624 desc = &ring->desc[ring->cur]; 3625 memset(desc, 0, sizeof(*desc)); 3626 data = &ring->data[ring->cur]; 3627 3628 /* Fill out iwm_tx_cmd to send to the firmware */ 3629 cmd = &ring->cmd[ring->cur]; 3630 cmd->hdr.code = IWM_TX_CMD; 3631 cmd->hdr.flags = 0; 3632 cmd->hdr.qid = ring->qid; 3633 cmd->hdr.idx = ring->cur; 3634 3635 tx = (void *)cmd->data; 3636 memset(tx, 0, sizeof(*tx)); 3637 3638 rinfo = iwm_tx_fill_cmd(sc, in, m, tx); 3639 3640 /* Encrypt the frame if need be. */ 3641 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { 3642 /* Retrieve key for TX && do software encryption. */ 3643 k = ieee80211_crypto_encap(ni, m); 3644 if (k == NULL) { 3645 m_freem(m); 3646 return (ENOBUFS); 3647 } 3648 /* 802.11 header may have moved. */ 3649 wh = mtod(m, struct ieee80211_frame *); 3650 } 3651 3652 if (ieee80211_radiotap_active_vap(vap)) { 3653 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap; 3654 3655 tap->wt_flags = 0; 3656 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); 3657 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); 3658 tap->wt_rate = rinfo->rate; 3659 if (k != NULL) 3660 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; 3661 ieee80211_radiotap_tx(vap, m); 3662 } 3663 3664 3665 totlen = m->m_pkthdr.len; 3666 3667 flags = 0; 3668 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3669 flags |= IWM_TX_CMD_FLG_ACK; 3670 } 3671 3672 if (type == IEEE80211_FC0_TYPE_DATA 3673 && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) 3674 && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { 3675 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE; 3676 } 3677 3678 if (IEEE80211_IS_MULTICAST(wh->i_addr1) || 3679 type != IEEE80211_FC0_TYPE_DATA) 3680 tx->sta_id = sc->sc_aux_sta.sta_id; 3681 else 3682 tx->sta_id = IWM_STATION_ID; 3683 3684 if (type == IEEE80211_FC0_TYPE_MGT) { 3685 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; 3686 3687 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || 3688 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) { 3689 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC); 3690 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) { 3691 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3692 } else { 3693 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT); 3694 } 3695 } else { 3696 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE); 3697 } 3698 3699 if (hdrlen & 3) { 3700 /* First segment length must be a multiple of 4. */ 3701 flags |= IWM_TX_CMD_FLG_MH_PAD; 3702 pad = 4 - (hdrlen & 3); 3703 } else 3704 pad = 0; 3705 3706 tx->driver_txop = 0; 3707 tx->next_frame_len = 0; 3708 3709 tx->len = htole16(totlen); 3710 tx->tid_tspec = tid; 3711 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE); 3712 3713 /* Set physical address of "scratch area". */ 3714 tx->dram_lsb_ptr = htole32(data->scratch_paddr); 3715 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr); 3716 3717 /* Copy 802.11 header in TX command. */ 3718 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen); 3719 3720 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL; 3721 3722 tx->sec_ctl = 0; 3723 tx->tx_flags |= htole32(flags); 3724 3725 /* Trim 802.11 header. */ 3726 m_adj(m, hdrlen); 3727 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3728 segs, &nsegs, BUS_DMA_NOWAIT); 3729 if (error != 0) { 3730 if (error != EFBIG) { 3731 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3732 error); 3733 m_freem(m); 3734 return error; 3735 } 3736 /* Too many DMA segments, linearize mbuf. */ 3737 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2); 3738 if (m1 == NULL) { 3739 device_printf(sc->sc_dev, 3740 "%s: could not defrag mbuf\n", __func__); 3741 m_freem(m); 3742 return (ENOBUFS); 3743 } 3744 m = m1; 3745 3746 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, 3747 segs, &nsegs, BUS_DMA_NOWAIT); 3748 if (error != 0) { 3749 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n", 3750 error); 3751 m_freem(m); 3752 return error; 3753 } 3754 } 3755 data->m = m; 3756 data->in = in; 3757 data->done = 0; 3758 3759 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3760 "sending txd %p, in %p\n", data, data->in); 3761 KASSERT(data->in != NULL, ("node is NULL")); 3762 3763 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3764 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n", 3765 ring->qid, ring->cur, totlen, nsegs, 3766 le32toh(tx->tx_flags), 3767 le32toh(tx->rate_n_flags), 3768 tx->initial_rate_index 3769 ); 3770 3771 /* Fill TX descriptor. */ 3772 desc->num_tbs = 2 + nsegs; 3773 3774 desc->tbs[0].lo = htole32(data->cmd_paddr); 3775 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) | 3776 (TB0_SIZE << 4); 3777 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE); 3778 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) | 3779 ((sizeof(struct iwm_cmd_header) + sizeof(*tx) 3780 + hdrlen + pad - TB0_SIZE) << 4); 3781 3782 /* Other DMA segments are for data payload. */ 3783 for (i = 0; i < nsegs; i++) { 3784 seg = &segs[i]; 3785 desc->tbs[i+2].lo = htole32(seg->ds_addr); 3786 desc->tbs[i+2].hi_n_len = \ 3787 htole16(iwm_get_dma_hi_addr(seg->ds_addr)) 3788 | ((seg->ds_len) << 4); 3789 } 3790 3791 bus_dmamap_sync(ring->data_dmat, data->map, 3792 BUS_DMASYNC_PREWRITE); 3793 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, 3794 BUS_DMASYNC_PREWRITE); 3795 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, 3796 BUS_DMASYNC_PREWRITE); 3797 3798#if 0 3799 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len)); 3800#endif 3801 3802 /* Kick TX ring. */ 3803 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT; 3804 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); 3805 3806 /* Mark TX ring as full if we reach a certain threshold. */ 3807 if (++ring->queued > IWM_TX_RING_HIMARK) { 3808 sc->qfullmsk |= 1 << ring->qid; 3809 } 3810 3811 return 0; 3812} 3813 3814static int 3815iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, 3816 const struct ieee80211_bpf_params *params) 3817{ 3818 struct ieee80211com *ic = ni->ni_ic; 3819 struct iwm_softc *sc = ic->ic_softc; 3820 int error = 0; 3821 3822 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3823 "->%s begin\n", __func__); 3824 3825 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) { 3826 m_freem(m); 3827 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, 3828 "<-%s not RUNNING\n", __func__); 3829 return (ENETDOWN); 3830 } 3831 3832 IWM_LOCK(sc); 3833 /* XXX fix this */ 3834 if (params == NULL) { 3835 error = iwm_tx(sc, m, ni, 0); 3836 } else { 3837 error = iwm_tx(sc, m, ni, 0); 3838 } 3839 sc->sc_tx_timer = 5; 3840 IWM_UNLOCK(sc); 3841 3842 return (error); 3843} 3844 3845/* 3846 * mvm/tx.c 3847 */ 3848 3849/* 3850 * Note that there are transports that buffer frames before they reach 3851 * the firmware. This means that after flush_tx_path is called, the 3852 * queue might not be empty. The race-free way to handle this is to: 3853 * 1) set the station as draining 3854 * 2) flush the Tx path 3855 * 3) wait for the transport queues to be empty 3856 */ 3857int 3858iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags) 3859{ 3860 int ret; 3861 struct iwm_tx_path_flush_cmd flush_cmd = { 3862 .queues_ctl = htole32(tfd_msk), 3863 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH), 3864 }; 3865 3866 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags, 3867 sizeof(flush_cmd), &flush_cmd); 3868 if (ret) 3869 device_printf(sc->sc_dev, 3870 "Flushing tx queue failed: %d\n", ret); 3871 return ret; 3872} 3873 3874/* 3875 * BEGIN mvm/quota.c 3876 */ 3877 3878static int 3879iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp) 3880{ 3881 struct iwm_time_quota_cmd cmd; 3882 int i, idx, ret, num_active_macs, quota, quota_rem; 3883 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, }; 3884 int n_ifs[IWM_MAX_BINDINGS] = {0, }; 3885 uint16_t id; 3886 3887 memset(&cmd, 0, sizeof(cmd)); 3888 3889 /* currently, PHY ID == binding ID */ 3890 if (ivp) { 3891 id = ivp->phy_ctxt->id; 3892 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id")); 3893 colors[id] = ivp->phy_ctxt->color; 3894 3895 if (1) 3896 n_ifs[id] = 1; 3897 } 3898 3899 /* 3900 * The FW's scheduling session consists of 3901 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments 3902 * equally between all the bindings that require quota 3903 */ 3904 num_active_macs = 0; 3905 for (i = 0; i < IWM_MAX_BINDINGS; i++) { 3906 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID); 3907 num_active_macs += n_ifs[i]; 3908 } 3909 3910 quota = 0; 3911 quota_rem = 0; 3912 if (num_active_macs) { 3913 quota = IWM_MVM_MAX_QUOTA / num_active_macs; 3914 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs; 3915 } 3916 3917 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) { 3918 if (colors[i] < 0) 3919 continue; 3920 3921 cmd.quotas[idx].id_and_color = 3922 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i])); 3923 3924 if (n_ifs[i] <= 0) { 3925 cmd.quotas[idx].quota = htole32(0); 3926 cmd.quotas[idx].max_duration = htole32(0); 3927 } else { 3928 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]); 3929 cmd.quotas[idx].max_duration = htole32(0); 3930 } 3931 idx++; 3932 } 3933 3934 /* Give the remainder of the session to the first binding */ 3935 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem); 3936 3937 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC, 3938 sizeof(cmd), &cmd); 3939 if (ret) 3940 device_printf(sc->sc_dev, 3941 "%s: Failed to send quota: %d\n", __func__, ret); 3942 return ret; 3943} 3944 3945/* 3946 * END mvm/quota.c 3947 */ 3948 3949/* 3950 * ieee80211 routines 3951 */ 3952 3953/* 3954 * Change to AUTH state in 80211 state machine. Roughly matches what 3955 * Linux does in bss_info_changed(). 3956 */ 3957static int 3958iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc) 3959{ 3960 struct ieee80211_node *ni; 3961 struct iwm_node *in; 3962 struct iwm_vap *iv = IWM_VAP(vap); 3963 uint32_t duration; 3964 int error; 3965 3966 /* 3967 * XXX i have a feeling that the vap node is being 3968 * freed from underneath us. Grr. 3969 */ 3970 ni = ieee80211_ref_node(vap->iv_bss); 3971 in = IWM_NODE(ni); 3972 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE, 3973 "%s: called; vap=%p, bss ni=%p\n", 3974 __func__, 3975 vap, 3976 ni); 3977 3978 in->in_assoc = 0; 3979 3980 /* 3981 * Firmware bug - it'll crash if the beacon interval is less 3982 * than 16. We can't avoid connecting at all, so refuse the 3983 * station state change, this will cause net80211 to abandon 3984 * attempts to connect to this AP, and eventually wpa_s will 3985 * blacklist the AP... 3986 */ 3987 if (ni->ni_intval < 16) { 3988 device_printf(sc->sc_dev, 3989 "AP %s beacon interval is %d, refusing due to firmware bug!\n", 3990 ether_sprintf(ni->ni_bssid), ni->ni_intval); 3991 error = EINVAL; 3992 goto out; 3993 } 3994 3995 error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON); 3996 if (error != 0) 3997 return error; 3998 3999 error = iwm_allow_mcast(vap, sc); 4000 if (error) { 4001 device_printf(sc->sc_dev, 4002 "%s: failed to set multicast\n", __func__); 4003 goto out; 4004 } 4005 4006 /* 4007 * This is where it deviates from what Linux does. 4008 * 4009 * Linux iwlwifi doesn't reset the nic each time, nor does it 4010 * call ctxt_add() here. Instead, it adds it during vap creation, 4011 * and always does a mac_ctx_changed(). 4012 * 4013 * The openbsd port doesn't attempt to do that - it reset things 4014 * at odd states and does the add here. 4015 * 4016 * So, until the state handling is fixed (ie, we never reset 4017 * the NIC except for a firmware failure, which should drag 4018 * the NIC back to IDLE, re-setup and re-add all the mac/phy 4019 * contexts that are required), let's do a dirty hack here. 4020 */ 4021 if (iv->is_uploaded) { 4022 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 4023 device_printf(sc->sc_dev, 4024 "%s: failed to update MAC\n", __func__); 4025 goto out; 4026 } 4027 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0], 4028 in->in_ni.ni_chan, 1, 1)) != 0) { 4029 device_printf(sc->sc_dev, 4030 "%s: failed update phy ctxt\n", __func__); 4031 goto out; 4032 } 4033 iv->phy_ctxt = &sc->sc_phyctxt[0]; 4034 4035 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) { 4036 device_printf(sc->sc_dev, 4037 "%s: binding update cmd\n", __func__); 4038 goto out; 4039 } 4040 if ((error = iwm_mvm_update_sta(sc, in)) != 0) { 4041 device_printf(sc->sc_dev, 4042 "%s: failed to update sta\n", __func__); 4043 goto out; 4044 } 4045 } else { 4046 if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) { 4047 device_printf(sc->sc_dev, 4048 "%s: failed to add MAC\n", __func__); 4049 goto out; 4050 } 4051 if ((error = iwm_mvm_power_update_mac(sc)) != 0) { 4052 device_printf(sc->sc_dev, 4053 "%s: failed to update power management\n", 4054 __func__); 4055 goto out; 4056 } 4057 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0], 4058 in->in_ni.ni_chan, 1, 1)) != 0) { 4059 device_printf(sc->sc_dev, 4060 "%s: failed add phy ctxt!\n", __func__); 4061 error = ETIMEDOUT; 4062 goto out; 4063 } 4064 iv->phy_ctxt = &sc->sc_phyctxt[0]; 4065 4066 if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) { 4067 device_printf(sc->sc_dev, 4068 "%s: binding add cmd\n", __func__); 4069 goto out; 4070 } 4071 if ((error = iwm_mvm_add_sta(sc, in)) != 0) { 4072 device_printf(sc->sc_dev, 4073 "%s: failed to add sta\n", __func__); 4074 goto out; 4075 } 4076 } 4077 4078 /* 4079 * Prevent the FW from wandering off channel during association 4080 * by "protecting" the session with a time event. 4081 */ 4082 /* XXX duration is in units of TU, not MS */ 4083 duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 4084 iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */); 4085 DELAY(100); 4086 4087 error = 0; 4088out: 4089 ieee80211_free_node(ni); 4090 return (error); 4091} 4092 4093static int 4094iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc) 4095{ 4096 struct iwm_node *in = IWM_NODE(vap->iv_bss); 4097 int error; 4098 4099 if ((error = iwm_mvm_update_sta(sc, in)) != 0) { 4100 device_printf(sc->sc_dev, 4101 "%s: failed to update STA\n", __func__); 4102 return error; 4103 } 4104 4105 in->in_assoc = 1; 4106 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 4107 device_printf(sc->sc_dev, 4108 "%s: failed to update MAC\n", __func__); 4109 return error; 4110 } 4111 4112 return 0; 4113} 4114 4115static int 4116iwm_release(struct iwm_softc *sc, struct iwm_node *in) 4117{ 4118 uint32_t tfd_msk; 4119 4120 /* 4121 * Ok, so *technically* the proper set of calls for going 4122 * from RUN back to SCAN is: 4123 * 4124 * iwm_mvm_power_mac_disable(sc, in); 4125 * iwm_mvm_mac_ctxt_changed(sc, vap); 4126 * iwm_mvm_rm_sta(sc, in); 4127 * iwm_mvm_update_quotas(sc, NULL); 4128 * iwm_mvm_mac_ctxt_changed(sc, in); 4129 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap)); 4130 * iwm_mvm_mac_ctxt_remove(sc, in); 4131 * 4132 * However, that freezes the device not matter which permutations 4133 * and modifications are attempted. Obviously, this driver is missing 4134 * something since it works in the Linux driver, but figuring out what 4135 * is missing is a little more complicated. Now, since we're going 4136 * back to nothing anyway, we'll just do a complete device reset. 4137 * Up your's, device! 4138 */ 4139 /* 4140 * Just using 0xf for the queues mask is fine as long as we only 4141 * get here from RUN state. 4142 */ 4143 tfd_msk = 0xf; 4144 iwm_xmit_queue_drain(sc); 4145 iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC); 4146 /* 4147 * We seem to get away with just synchronously sending the 4148 * IWM_TXPATH_FLUSH command. 4149 */ 4150// iwm_trans_wait_tx_queue_empty(sc, tfd_msk); 4151 iwm_stop_device(sc); 4152 iwm_init_hw(sc); 4153 if (in) 4154 in->in_assoc = 0; 4155 return 0; 4156 4157#if 0 4158 int error; 4159 4160 iwm_mvm_power_mac_disable(sc, in); 4161 4162 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 4163 device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error); 4164 return error; 4165 } 4166 4167 if ((error = iwm_mvm_rm_sta(sc, in)) != 0) { 4168 device_printf(sc->sc_dev, "sta remove fail %d\n", error); 4169 return error; 4170 } 4171 error = iwm_mvm_rm_sta(sc, in); 4172 in->in_assoc = 0; 4173 iwm_mvm_update_quotas(sc, NULL); 4174 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 4175 device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error); 4176 return error; 4177 } 4178 iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap)); 4179 4180 iwm_mvm_mac_ctxt_remove(sc, in); 4181 4182 return error; 4183#endif 4184} 4185 4186static struct ieee80211_node * 4187iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) 4188{ 4189 return malloc(sizeof (struct iwm_node), M_80211_NODE, 4190 M_NOWAIT | M_ZERO); 4191} 4192 4193uint8_t 4194iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx) 4195{ 4196 int i; 4197 uint8_t rval; 4198 4199 for (i = 0; i < rs->rs_nrates; i++) { 4200 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL); 4201 if (rval == iwm_rates[ridx].rate) 4202 return rs->rs_rates[i]; 4203 } 4204 4205 return 0; 4206} 4207 4208static void 4209iwm_setrates(struct iwm_softc *sc, struct iwm_node *in) 4210{ 4211 struct ieee80211_node *ni = &in->in_ni; 4212 struct iwm_lq_cmd *lq = &in->in_lq; 4213 int nrates = ni->ni_rates.rs_nrates; 4214 int i, ridx, tab = 0; 4215// int txant = 0; 4216 4217 if (nrates > nitems(lq->rs_table)) { 4218 device_printf(sc->sc_dev, 4219 "%s: node supports %d rates, driver handles " 4220 "only %zu\n", __func__, nrates, nitems(lq->rs_table)); 4221 return; 4222 } 4223 if (nrates == 0) { 4224 device_printf(sc->sc_dev, 4225 "%s: node supports 0 rates, odd!\n", __func__); 4226 return; 4227 } 4228 4229 /* 4230 * XXX .. and most of iwm_node is not initialised explicitly; 4231 * it's all just 0x0 passed to the firmware. 4232 */ 4233 4234 /* first figure out which rates we should support */ 4235 /* XXX TODO: this isn't 11n aware /at all/ */ 4236 memset(&in->in_ridx, -1, sizeof(in->in_ridx)); 4237 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4238 "%s: nrates=%d\n", __func__, nrates); 4239 4240 /* 4241 * Loop over nrates and populate in_ridx from the highest 4242 * rate to the lowest rate. Remember, in_ridx[] has 4243 * IEEE80211_RATE_MAXSIZE entries! 4244 */ 4245 for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) { 4246 int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL; 4247 4248 /* Map 802.11 rate to HW rate index. */ 4249 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++) 4250 if (iwm_rates[ridx].rate == rate) 4251 break; 4252 if (ridx > IWM_RIDX_MAX) { 4253 device_printf(sc->sc_dev, 4254 "%s: WARNING: device rate for %d not found!\n", 4255 __func__, rate); 4256 } else { 4257 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4258 "%s: rate: i: %d, rate=%d, ridx=%d\n", 4259 __func__, 4260 i, 4261 rate, 4262 ridx); 4263 in->in_ridx[i] = ridx; 4264 } 4265 } 4266 4267 /* then construct a lq_cmd based on those */ 4268 memset(lq, 0, sizeof(*lq)); 4269 lq->sta_id = IWM_STATION_ID; 4270 4271 /* For HT, always enable RTS/CTS to avoid excessive retries. */ 4272 if (ni->ni_flags & IEEE80211_NODE_HT) 4273 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK; 4274 4275 /* 4276 * are these used? (we don't do SISO or MIMO) 4277 * need to set them to non-zero, though, or we get an error. 4278 */ 4279 lq->single_stream_ant_msk = 1; 4280 lq->dual_stream_ant_msk = 1; 4281 4282 /* 4283 * Build the actual rate selection table. 4284 * The lowest bits are the rates. Additionally, 4285 * CCK needs bit 9 to be set. The rest of the bits 4286 * we add to the table select the tx antenna 4287 * Note that we add the rates in the highest rate first 4288 * (opposite of ni_rates). 4289 */ 4290 /* 4291 * XXX TODO: this should be looping over the min of nrates 4292 * and LQ_MAX_RETRY_NUM. Sigh. 4293 */ 4294 for (i = 0; i < nrates; i++) { 4295 int nextant; 4296 4297#if 0 4298 if (txant == 0) 4299 txant = iwm_mvm_get_valid_tx_ant(sc); 4300 nextant = 1<<(ffs(txant)-1); 4301 txant &= ~nextant; 4302#else 4303 nextant = iwm_mvm_get_valid_tx_ant(sc); 4304#endif 4305 /* 4306 * Map the rate id into a rate index into 4307 * our hardware table containing the 4308 * configuration to use for this rate. 4309 */ 4310 ridx = in->in_ridx[i]; 4311 tab = iwm_rates[ridx].plcp; 4312 tab |= nextant << IWM_RATE_MCS_ANT_POS; 4313 if (IWM_RIDX_IS_CCK(ridx)) 4314 tab |= IWM_RATE_MCS_CCK_MSK; 4315 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, 4316 "station rate i=%d, rate=%d, hw=%x\n", 4317 i, iwm_rates[ridx].rate, tab); 4318 lq->rs_table[i] = htole32(tab); 4319 } 4320 /* then fill the rest with the lowest possible rate */ 4321 for (i = nrates; i < nitems(lq->rs_table); i++) { 4322 KASSERT(tab != 0, ("invalid tab")); 4323 lq->rs_table[i] = htole32(tab); 4324 } 4325} 4326 4327static int 4328iwm_media_change(struct ifnet *ifp) 4329{ 4330 struct ieee80211vap *vap = ifp->if_softc; 4331 struct ieee80211com *ic = vap->iv_ic; 4332 struct iwm_softc *sc = ic->ic_softc; 4333 int error; 4334 4335 error = ieee80211_media_change(ifp); 4336 if (error != ENETRESET) 4337 return error; 4338 4339 IWM_LOCK(sc); 4340 if (ic->ic_nrunning > 0) { 4341 iwm_stop(sc); 4342 iwm_init(sc); 4343 } 4344 IWM_UNLOCK(sc); 4345 return error; 4346} 4347 4348 4349static int 4350iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) 4351{ 4352 struct iwm_vap *ivp = IWM_VAP(vap); 4353 struct ieee80211com *ic = vap->iv_ic; 4354 struct iwm_softc *sc = ic->ic_softc; 4355 struct iwm_node *in; 4356 int error; 4357 4358 IWM_DPRINTF(sc, IWM_DEBUG_STATE, 4359 "switching state %s -> %s\n", 4360 ieee80211_state_name[vap->iv_state], 4361 ieee80211_state_name[nstate]); 4362 IEEE80211_UNLOCK(ic); 4363 IWM_LOCK(sc); 4364 4365 if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state) 4366 iwm_led_blink_stop(sc); 4367 4368 /* disable beacon filtering if we're hopping out of RUN */ 4369 if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) { 4370 iwm_mvm_disable_beacon_filter(sc); 4371 4372 if (((in = IWM_NODE(vap->iv_bss)) != NULL)) 4373 in->in_assoc = 0; 4374 4375 if (nstate == IEEE80211_S_INIT) { 4376 IWM_UNLOCK(sc); 4377 IEEE80211_LOCK(ic); 4378 error = ivp->iv_newstate(vap, nstate, arg); 4379 IEEE80211_UNLOCK(ic); 4380 IWM_LOCK(sc); 4381 iwm_release(sc, NULL); 4382 IWM_UNLOCK(sc); 4383 IEEE80211_LOCK(ic); 4384 return error; 4385 } 4386 4387 /* 4388 * It's impossible to directly go RUN->SCAN. If we iwm_release() 4389 * above then the card will be completely reinitialized, 4390 * so the driver must do everything necessary to bring the card 4391 * from INIT to SCAN. 4392 * 4393 * Additionally, upon receiving deauth frame from AP, 4394 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH 4395 * state. This will also fail with this driver, so bring the FSM 4396 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well. 4397 * 4398 * XXX TODO: fix this for FreeBSD! 4399 */ 4400 if (nstate == IEEE80211_S_SCAN || 4401 nstate == IEEE80211_S_AUTH || 4402 nstate == IEEE80211_S_ASSOC) { 4403 IWM_DPRINTF(sc, IWM_DEBUG_STATE, 4404 "Force transition to INIT; MGT=%d\n", arg); 4405 IWM_UNLOCK(sc); 4406 IEEE80211_LOCK(ic); 4407 /* Always pass arg as -1 since we can't Tx right now. */ 4408 /* 4409 * XXX arg is just ignored anyway when transitioning 4410 * to IEEE80211_S_INIT. 4411 */ 4412 vap->iv_newstate(vap, IEEE80211_S_INIT, -1); 4413 IWM_DPRINTF(sc, IWM_DEBUG_STATE, 4414 "Going INIT->SCAN\n"); 4415 nstate = IEEE80211_S_SCAN; 4416 IEEE80211_UNLOCK(ic); 4417 IWM_LOCK(sc); 4418 } 4419 } 4420 4421 switch (nstate) { 4422 case IEEE80211_S_INIT: 4423 case IEEE80211_S_SCAN: 4424 if (vap->iv_state == IEEE80211_S_AUTH || 4425 vap->iv_state == IEEE80211_S_ASSOC) { 4426 int myerr; 4427 IWM_UNLOCK(sc); 4428 IEEE80211_LOCK(ic); 4429 myerr = ivp->iv_newstate(vap, nstate, arg); 4430 IEEE80211_UNLOCK(ic); 4431 IWM_LOCK(sc); 4432 error = iwm_mvm_rm_sta(sc, vap, FALSE); 4433 if (error) { 4434 device_printf(sc->sc_dev, 4435 "%s: Failed to remove station: %d\n", 4436 __func__, error); 4437 } 4438 error = iwm_mvm_mac_ctxt_changed(sc, vap); 4439 if (error) { 4440 device_printf(sc->sc_dev, 4441 "%s: Failed to change mac context: %d\n", 4442 __func__, error); 4443 } 4444 error = iwm_mvm_binding_remove_vif(sc, ivp); 4445 if (error) { 4446 device_printf(sc->sc_dev, 4447 "%s: Failed to remove channel ctx: %d\n", 4448 __func__, error); 4449 } 4450 ivp->phy_ctxt = NULL; 4451 IWM_UNLOCK(sc); 4452 IEEE80211_LOCK(ic); 4453 return myerr; 4454 } 4455 break; 4456 4457 case IEEE80211_S_AUTH: 4458 if ((error = iwm_auth(vap, sc)) != 0) { 4459 device_printf(sc->sc_dev, 4460 "%s: could not move to auth state: %d\n", 4461 __func__, error); 4462 break; 4463 } 4464 break; 4465 4466 case IEEE80211_S_ASSOC: 4467 if ((error = iwm_assoc(vap, sc)) != 0) { 4468 device_printf(sc->sc_dev, 4469 "%s: failed to associate: %d\n", __func__, 4470 error); 4471 break; 4472 } 4473 break; 4474 4475 case IEEE80211_S_RUN: 4476 /* Update the association state, now we have it all */ 4477 /* (eg associd comes in at this point */ 4478 error = iwm_assoc(vap, sc); 4479 if (error != 0) { 4480 device_printf(sc->sc_dev, 4481 "%s: failed to update association state: %d\n", 4482 __func__, 4483 error); 4484 break; 4485 } 4486 4487 in = IWM_NODE(vap->iv_bss); 4488 iwm_mvm_enable_beacon_filter(sc, in); 4489 iwm_mvm_power_update_mac(sc); 4490 iwm_mvm_update_quotas(sc, ivp); 4491 iwm_setrates(sc, in); 4492 4493 if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) { 4494 device_printf(sc->sc_dev, 4495 "%s: IWM_LQ_CMD failed: %d\n", __func__, error); 4496 } 4497 4498 iwm_mvm_led_enable(sc); 4499 break; 4500 4501 default: 4502 break; 4503 } 4504 IWM_UNLOCK(sc); 4505 IEEE80211_LOCK(ic); 4506 4507 return (ivp->iv_newstate(vap, nstate, arg)); 4508} 4509 4510void 4511iwm_endscan_cb(void *arg, int pending) 4512{ 4513 struct iwm_softc *sc = arg; 4514 struct ieee80211com *ic = &sc->sc_ic; 4515 4516 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE, 4517 "%s: scan ended\n", 4518 __func__); 4519 4520 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps)); 4521} 4522 4523/* 4524 * Aging and idle timeouts for the different possible scenarios 4525 * in default configuration 4526 */ 4527static const uint32_t 4528iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = { 4529 { 4530 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF), 4531 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF) 4532 }, 4533 { 4534 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF), 4535 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF) 4536 }, 4537 { 4538 htole32(IWM_SF_MCAST_AGING_TIMER_DEF), 4539 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF) 4540 }, 4541 { 4542 htole32(IWM_SF_BA_AGING_TIMER_DEF), 4543 htole32(IWM_SF_BA_IDLE_TIMER_DEF) 4544 }, 4545 { 4546 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF), 4547 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF) 4548 }, 4549}; 4550 4551/* 4552 * Aging and idle timeouts for the different possible scenarios 4553 * in single BSS MAC configuration. 4554 */ 4555static const uint32_t 4556iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = { 4557 { 4558 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER), 4559 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER) 4560 }, 4561 { 4562 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER), 4563 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER) 4564 }, 4565 { 4566 htole32(IWM_SF_MCAST_AGING_TIMER), 4567 htole32(IWM_SF_MCAST_IDLE_TIMER) 4568 }, 4569 { 4570 htole32(IWM_SF_BA_AGING_TIMER), 4571 htole32(IWM_SF_BA_IDLE_TIMER) 4572 }, 4573 { 4574 htole32(IWM_SF_TX_RE_AGING_TIMER), 4575 htole32(IWM_SF_TX_RE_IDLE_TIMER) 4576 }, 4577}; 4578 4579static void 4580iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd, 4581 struct ieee80211_node *ni) 4582{ 4583 int i, j, watermark; 4584 4585 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN); 4586 4587 /* 4588 * If we are in association flow - check antenna configuration 4589 * capabilities of the AP station, and choose the watermark accordingly. 4590 */ 4591 if (ni) { 4592 if (ni->ni_flags & IEEE80211_NODE_HT) { 4593#ifdef notyet 4594 if (ni->ni_rxmcs[2] != 0) 4595 watermark = IWM_SF_W_MARK_MIMO3; 4596 else if (ni->ni_rxmcs[1] != 0) 4597 watermark = IWM_SF_W_MARK_MIMO2; 4598 else 4599#endif 4600 watermark = IWM_SF_W_MARK_SISO; 4601 } else { 4602 watermark = IWM_SF_W_MARK_LEGACY; 4603 } 4604 /* default watermark value for unassociated mode. */ 4605 } else { 4606 watermark = IWM_SF_W_MARK_MIMO2; 4607 } 4608 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark); 4609 4610 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) { 4611 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) { 4612 sf_cmd->long_delay_timeouts[i][j] = 4613 htole32(IWM_SF_LONG_DELAY_AGING_TIMER); 4614 } 4615 } 4616 4617 if (ni) { 4618 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout, 4619 sizeof(iwm_sf_full_timeout)); 4620 } else { 4621 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def, 4622 sizeof(iwm_sf_full_timeout_def)); 4623 } 4624} 4625 4626static int 4627iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state) 4628{ 4629 struct ieee80211com *ic = &sc->sc_ic; 4630 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 4631 struct iwm_sf_cfg_cmd sf_cmd = { 4632 .state = htole32(IWM_SF_FULL_ON), 4633 }; 4634 int ret = 0; 4635 4636 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) 4637 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF); 4638 4639 switch (new_state) { 4640 case IWM_SF_UNINIT: 4641 case IWM_SF_INIT_OFF: 4642 iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL); 4643 break; 4644 case IWM_SF_FULL_ON: 4645 iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss); 4646 break; 4647 default: 4648 IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE, 4649 "Invalid state: %d. not sending Smart Fifo cmd\n", 4650 new_state); 4651 return EINVAL; 4652 } 4653 4654 ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC, 4655 sizeof(sf_cmd), &sf_cmd); 4656 return ret; 4657} 4658 4659static int 4660iwm_send_bt_init_conf(struct iwm_softc *sc) 4661{ 4662 struct iwm_bt_coex_cmd bt_cmd; 4663 4664 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI); 4665 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET); 4666 4667 return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), 4668 &bt_cmd); 4669} 4670 4671static boolean_t 4672iwm_mvm_is_lar_supported(struct iwm_softc *sc) 4673{ 4674 boolean_t nvm_lar = sc->nvm_data->lar_enabled; 4675 boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa, 4676 IWM_UCODE_TLV_CAPA_LAR_SUPPORT); 4677 4678 if (iwm_lar_disable) 4679 return FALSE; 4680 4681 /* 4682 * Enable LAR only if it is supported by the FW (TLV) && 4683 * enabled in the NVM 4684 */ 4685 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) 4686 return nvm_lar && tlv_lar; 4687 else 4688 return tlv_lar; 4689} 4690 4691static boolean_t 4692iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc) 4693{ 4694 return fw_has_api(&sc->ucode_capa, 4695 IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) || 4696 fw_has_capa(&sc->ucode_capa, 4697 IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC); 4698} 4699 4700static int 4701iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2) 4702{ 4703 struct iwm_mcc_update_cmd mcc_cmd; 4704 struct iwm_host_cmd hcmd = { 4705 .id = IWM_MCC_UPDATE_CMD, 4706 .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB), 4707 .data = { &mcc_cmd }, 4708 }; 4709 int ret; 4710#ifdef IWM_DEBUG 4711 struct iwm_rx_packet *pkt; 4712 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL; 4713 struct iwm_mcc_update_resp *mcc_resp; 4714 int n_channels; 4715 uint16_t mcc; 4716#endif 4717 int resp_v2 = fw_has_capa(&sc->ucode_capa, 4718 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2); 4719 4720 if (!iwm_mvm_is_lar_supported(sc)) { 4721 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n", 4722 __func__); 4723 return 0; 4724 } 4725 4726 memset(&mcc_cmd, 0, sizeof(mcc_cmd)); 4727 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]); 4728 if (iwm_mvm_is_wifi_mcc_supported(sc)) 4729 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT; 4730 else 4731 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW; 4732 4733 if (resp_v2) 4734 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd); 4735 else 4736 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1); 4737 4738 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 4739 "send MCC update to FW with '%c%c' src = %d\n", 4740 alpha2[0], alpha2[1], mcc_cmd.source_id); 4741 4742 ret = iwm_send_cmd(sc, &hcmd); 4743 if (ret) 4744 return ret; 4745 4746#ifdef IWM_DEBUG 4747 pkt = hcmd.resp_pkt; 4748 4749 /* Extract MCC response */ 4750 if (resp_v2) { 4751 mcc_resp = (void *)pkt->data; 4752 mcc = mcc_resp->mcc; 4753 n_channels = le32toh(mcc_resp->n_channels); 4754 } else { 4755 mcc_resp_v1 = (void *)pkt->data; 4756 mcc = mcc_resp_v1->mcc; 4757 n_channels = le32toh(mcc_resp_v1->n_channels); 4758 } 4759 4760 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */ 4761 if (mcc == 0) 4762 mcc = 0x3030; /* "00" - world */ 4763 4764 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 4765 "regulatory domain '%c%c' (%d channels available)\n", 4766 mcc >> 8, mcc & 0xff, n_channels); 4767#endif 4768 iwm_free_resp(sc, &hcmd); 4769 4770 return 0; 4771} 4772 4773static void 4774iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff) 4775{ 4776 struct iwm_host_cmd cmd = { 4777 .id = IWM_REPLY_THERMAL_MNG_BACKOFF, 4778 .len = { sizeof(uint32_t), }, 4779 .data = { &backoff, }, 4780 }; 4781 4782 if (iwm_send_cmd(sc, &cmd) != 0) { 4783 device_printf(sc->sc_dev, 4784 "failed to change thermal tx backoff\n"); 4785 } 4786} 4787 4788static int 4789iwm_init_hw(struct iwm_softc *sc) 4790{ 4791 struct ieee80211com *ic = &sc->sc_ic; 4792 int error, i, ac; 4793 4794 if ((error = iwm_start_hw(sc)) != 0) { 4795 printf("iwm_start_hw: failed %d\n", error); 4796 return error; 4797 } 4798 4799 if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) { 4800 printf("iwm_run_init_mvm_ucode: failed %d\n", error); 4801 return error; 4802 } 4803 4804 /* 4805 * should stop and start HW since that INIT 4806 * image just loaded 4807 */ 4808 iwm_stop_device(sc); 4809 sc->sc_ps_disabled = FALSE; 4810 if ((error = iwm_start_hw(sc)) != 0) { 4811 device_printf(sc->sc_dev, "could not initialize hardware\n"); 4812 return error; 4813 } 4814 4815 /* omstart, this time with the regular firmware */ 4816 error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR); 4817 if (error) { 4818 device_printf(sc->sc_dev, "could not load firmware\n"); 4819 goto error; 4820 } 4821 4822 if ((error = iwm_send_bt_init_conf(sc)) != 0) { 4823 device_printf(sc->sc_dev, "bt init conf failed\n"); 4824 goto error; 4825 } 4826 4827 error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc)); 4828 if (error != 0) { 4829 device_printf(sc->sc_dev, "antenna config failed\n"); 4830 goto error; 4831 } 4832 4833 /* Send phy db control command and then phy db calibration */ 4834 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0) 4835 goto error; 4836 4837 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) { 4838 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n"); 4839 goto error; 4840 } 4841 4842 /* Add auxiliary station for scanning */ 4843 if ((error = iwm_mvm_add_aux_sta(sc)) != 0) { 4844 device_printf(sc->sc_dev, "add_aux_sta failed\n"); 4845 goto error; 4846 } 4847 4848 for (i = 0; i < IWM_NUM_PHY_CTX; i++) { 4849 /* 4850 * The channel used here isn't relevant as it's 4851 * going to be overwritten in the other flows. 4852 * For now use the first channel we have. 4853 */ 4854 if ((error = iwm_mvm_phy_ctxt_add(sc, 4855 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0) 4856 goto error; 4857 } 4858 4859 /* Initialize tx backoffs to the minimum. */ 4860 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) 4861 iwm_mvm_tt_tx_backoff(sc, 0); 4862 4863 error = iwm_mvm_power_update_device(sc); 4864 if (error) 4865 goto error; 4866 4867 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0) 4868 goto error; 4869 4870 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) { 4871 if ((error = iwm_mvm_config_umac_scan(sc)) != 0) 4872 goto error; 4873 } 4874 4875 /* Enable Tx queues. */ 4876 for (ac = 0; ac < WME_NUM_AC; ac++) { 4877 error = iwm_enable_txq(sc, IWM_STATION_ID, ac, 4878 iwm_mvm_ac_to_tx_fifo[ac]); 4879 if (error) 4880 goto error; 4881 } 4882 4883 if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) { 4884 device_printf(sc->sc_dev, "failed to disable beacon filter\n"); 4885 goto error; 4886 } 4887 4888 return 0; 4889 4890 error: 4891 iwm_stop_device(sc); 4892 return error; 4893} 4894 4895/* Allow multicast from our BSSID. */ 4896static int 4897iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc) 4898{ 4899 struct ieee80211_node *ni = vap->iv_bss; 4900 struct iwm_mcast_filter_cmd *cmd; 4901 size_t size; 4902 int error; 4903 4904 size = roundup(sizeof(*cmd), 4); 4905 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); 4906 if (cmd == NULL) 4907 return ENOMEM; 4908 cmd->filter_own = 1; 4909 cmd->port_id = 0; 4910 cmd->count = 0; 4911 cmd->pass_all = 1; 4912 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid); 4913 4914 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 4915 IWM_CMD_SYNC, size, cmd); 4916 free(cmd, M_DEVBUF); 4917 4918 return (error); 4919} 4920 4921/* 4922 * ifnet interfaces 4923 */ 4924 4925static void 4926iwm_init(struct iwm_softc *sc) 4927{ 4928 int error; 4929 4930 if (sc->sc_flags & IWM_FLAG_HW_INITED) { 4931 return; 4932 } 4933 sc->sc_generation++; 4934 sc->sc_flags &= ~IWM_FLAG_STOPPED; 4935 4936 if ((error = iwm_init_hw(sc)) != 0) { 4937 printf("iwm_init_hw failed %d\n", error); 4938 iwm_stop(sc); 4939 return; 4940 } 4941 4942 /* 4943 * Ok, firmware loaded and we are jogging 4944 */ 4945 sc->sc_flags |= IWM_FLAG_HW_INITED; 4946 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc); 4947} 4948 4949static int 4950iwm_transmit(struct ieee80211com *ic, struct mbuf *m) 4951{ 4952 struct iwm_softc *sc; 4953 int error; 4954 4955 sc = ic->ic_softc; 4956 4957 IWM_LOCK(sc); 4958 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) { 4959 IWM_UNLOCK(sc); 4960 return (ENXIO); 4961 } 4962 error = mbufq_enqueue(&sc->sc_snd, m); 4963 if (error) { 4964 IWM_UNLOCK(sc); 4965 return (error); 4966 } 4967 iwm_start(sc); 4968 IWM_UNLOCK(sc); 4969 return (0); 4970} 4971 4972/* 4973 * Dequeue packets from sendq and call send. 4974 */ 4975static void 4976iwm_start(struct iwm_softc *sc) 4977{ 4978 struct ieee80211_node *ni; 4979 struct mbuf *m; 4980 int ac = 0; 4981 4982 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__); 4983 while (sc->qfullmsk == 0 && 4984 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 4985 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 4986 if (iwm_tx(sc, m, ni, ac) != 0) { 4987 if_inc_counter(ni->ni_vap->iv_ifp, 4988 IFCOUNTER_OERRORS, 1); 4989 ieee80211_free_node(ni); 4990 continue; 4991 } 4992 sc->sc_tx_timer = 15; 4993 } 4994 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__); 4995} 4996 4997static void 4998iwm_stop(struct iwm_softc *sc) 4999{ 5000 5001 sc->sc_flags &= ~IWM_FLAG_HW_INITED; 5002 sc->sc_flags |= IWM_FLAG_STOPPED; 5003 sc->sc_generation++; 5004 iwm_led_blink_stop(sc); 5005 sc->sc_tx_timer = 0; 5006 iwm_stop_device(sc); 5007 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5008} 5009 5010static void 5011iwm_watchdog(void *arg) 5012{ 5013 struct iwm_softc *sc = arg; 5014 struct ieee80211com *ic = &sc->sc_ic; 5015 5016 if (sc->sc_tx_timer > 0) { 5017 if (--sc->sc_tx_timer == 0) { 5018 device_printf(sc->sc_dev, "device timeout\n"); 5019#ifdef IWM_DEBUG 5020 iwm_nic_error(sc); 5021#endif 5022 ieee80211_restart_all(ic); 5023 counter_u64_add(sc->sc_ic.ic_oerrors, 1); 5024 return; 5025 } 5026 } 5027 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc); 5028} 5029 5030static void 5031iwm_parent(struct ieee80211com *ic) 5032{ 5033 struct iwm_softc *sc = ic->ic_softc; 5034 int startall = 0; 5035 5036 IWM_LOCK(sc); 5037 if (ic->ic_nrunning > 0) { 5038 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) { 5039 iwm_init(sc); 5040 startall = 1; 5041 } 5042 } else if (sc->sc_flags & IWM_FLAG_HW_INITED) 5043 iwm_stop(sc); 5044 IWM_UNLOCK(sc); 5045 if (startall) 5046 ieee80211_start_all(ic); 5047} 5048 5049/* 5050 * The interrupt side of things 5051 */ 5052 5053/* 5054 * error dumping routines are from iwlwifi/mvm/utils.c 5055 */ 5056 5057/* 5058 * Note: This structure is read from the device with IO accesses, 5059 * and the reading already does the endian conversion. As it is 5060 * read with uint32_t-sized accesses, any members with a different size 5061 * need to be ordered correctly though! 5062 */ 5063struct iwm_error_event_table { 5064 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 5065 uint32_t error_id; /* type of error */ 5066 uint32_t trm_hw_status0; /* TRM HW status */ 5067 uint32_t trm_hw_status1; /* TRM HW status */ 5068 uint32_t blink2; /* branch link */ 5069 uint32_t ilink1; /* interrupt link */ 5070 uint32_t ilink2; /* interrupt link */ 5071 uint32_t data1; /* error-specific data */ 5072 uint32_t data2; /* error-specific data */ 5073 uint32_t data3; /* error-specific data */ 5074 uint32_t bcon_time; /* beacon timer */ 5075 uint32_t tsf_low; /* network timestamp function timer */ 5076 uint32_t tsf_hi; /* network timestamp function timer */ 5077 uint32_t gp1; /* GP1 timer register */ 5078 uint32_t gp2; /* GP2 timer register */ 5079 uint32_t fw_rev_type; /* firmware revision type */ 5080 uint32_t major; /* uCode version major */ 5081 uint32_t minor; /* uCode version minor */ 5082 uint32_t hw_ver; /* HW Silicon version */ 5083 uint32_t brd_ver; /* HW board version */ 5084 uint32_t log_pc; /* log program counter */ 5085 uint32_t frame_ptr; /* frame pointer */ 5086 uint32_t stack_ptr; /* stack pointer */ 5087 uint32_t hcmd; /* last host command header */ 5088 uint32_t isr0; /* isr status register LMPM_NIC_ISR0: 5089 * rxtx_flag */ 5090 uint32_t isr1; /* isr status register LMPM_NIC_ISR1: 5091 * host_flag */ 5092 uint32_t isr2; /* isr status register LMPM_NIC_ISR2: 5093 * enc_flag */ 5094 uint32_t isr3; /* isr status register LMPM_NIC_ISR3: 5095 * time_flag */ 5096 uint32_t isr4; /* isr status register LMPM_NIC_ISR4: 5097 * wico interrupt */ 5098 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */ 5099 uint32_t wait_event; /* wait event() caller address */ 5100 uint32_t l2p_control; /* L2pControlField */ 5101 uint32_t l2p_duration; /* L2pDurationField */ 5102 uint32_t l2p_mhvalid; /* L2pMhValidBits */ 5103 uint32_t l2p_addr_match; /* L2pAddrMatchStat */ 5104 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on 5105 * (LMPM_PMG_SEL) */ 5106 uint32_t u_timestamp; /* indicate when the date and time of the 5107 * compilation */ 5108 uint32_t flow_handler; /* FH read/write pointers, RX credit */ 5109} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; 5110 5111/* 5112 * UMAC error struct - relevant starting from family 8000 chip. 5113 * Note: This structure is read from the device with IO accesses, 5114 * and the reading already does the endian conversion. As it is 5115 * read with u32-sized accesses, any members with a different size 5116 * need to be ordered correctly though! 5117 */ 5118struct iwm_umac_error_event_table { 5119 uint32_t valid; /* (nonzero) valid, (0) log is empty */ 5120 uint32_t error_id; /* type of error */ 5121 uint32_t blink1; /* branch link */ 5122 uint32_t blink2; /* branch link */ 5123 uint32_t ilink1; /* interrupt link */ 5124 uint32_t ilink2; /* interrupt link */ 5125 uint32_t data1; /* error-specific data */ 5126 uint32_t data2; /* error-specific data */ 5127 uint32_t data3; /* error-specific data */ 5128 uint32_t umac_major; 5129 uint32_t umac_minor; 5130 uint32_t frame_pointer; /* core register 27*/ 5131 uint32_t stack_pointer; /* core register 28 */ 5132 uint32_t cmd_header; /* latest host cmd sent to UMAC */ 5133 uint32_t nic_isr_pref; /* ISR status register */ 5134} __packed; 5135 5136#define ERROR_START_OFFSET (1 * sizeof(uint32_t)) 5137#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t)) 5138 5139#ifdef IWM_DEBUG 5140struct { 5141 const char *name; 5142 uint8_t num; 5143} advanced_lookup[] = { 5144 { "NMI_INTERRUPT_WDG", 0x34 }, 5145 { "SYSASSERT", 0x35 }, 5146 { "UCODE_VERSION_MISMATCH", 0x37 }, 5147 { "BAD_COMMAND", 0x38 }, 5148 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, 5149 { "FATAL_ERROR", 0x3D }, 5150 { "NMI_TRM_HW_ERR", 0x46 }, 5151 { "NMI_INTERRUPT_TRM", 0x4C }, 5152 { "NMI_INTERRUPT_BREAK_POINT", 0x54 }, 5153 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, 5154 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, 5155 { "NMI_INTERRUPT_HOST", 0x66 }, 5156 { "NMI_INTERRUPT_ACTION_PT", 0x7C }, 5157 { "NMI_INTERRUPT_UNKNOWN", 0x84 }, 5158 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, 5159 { "ADVANCED_SYSASSERT", 0 }, 5160}; 5161 5162static const char * 5163iwm_desc_lookup(uint32_t num) 5164{ 5165 int i; 5166 5167 for (i = 0; i < nitems(advanced_lookup) - 1; i++) 5168 if (advanced_lookup[i].num == num) 5169 return advanced_lookup[i].name; 5170 5171 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ 5172 return advanced_lookup[i].name; 5173} 5174 5175static void 5176iwm_nic_umac_error(struct iwm_softc *sc) 5177{ 5178 struct iwm_umac_error_event_table table; 5179 uint32_t base; 5180 5181 base = sc->umac_error_event_table; 5182 5183 if (base < 0x800000) { 5184 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n", 5185 base); 5186 return; 5187 } 5188 5189 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5190 device_printf(sc->sc_dev, "reading errlog failed\n"); 5191 return; 5192 } 5193 5194 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5195 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n"); 5196 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5197 sc->sc_flags, table.valid); 5198 } 5199 5200 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id, 5201 iwm_desc_lookup(table.error_id)); 5202 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1); 5203 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2); 5204 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n", 5205 table.ilink1); 5206 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n", 5207 table.ilink2); 5208 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1); 5209 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2); 5210 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3); 5211 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major); 5212 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor); 5213 device_printf(sc->sc_dev, "0x%08X | frame pointer\n", 5214 table.frame_pointer); 5215 device_printf(sc->sc_dev, "0x%08X | stack pointer\n", 5216 table.stack_pointer); 5217 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header); 5218 device_printf(sc->sc_dev, "0x%08X | isr status reg\n", 5219 table.nic_isr_pref); 5220} 5221 5222/* 5223 * Support for dumping the error log seemed like a good idea ... 5224 * but it's mostly hex junk and the only sensible thing is the 5225 * hw/ucode revision (which we know anyway). Since it's here, 5226 * I'll just leave it in, just in case e.g. the Intel guys want to 5227 * help us decipher some "ADVANCED_SYSASSERT" later. 5228 */ 5229static void 5230iwm_nic_error(struct iwm_softc *sc) 5231{ 5232 struct iwm_error_event_table table; 5233 uint32_t base; 5234 5235 device_printf(sc->sc_dev, "dumping device error log\n"); 5236 base = sc->error_event_table; 5237 if (base < 0x800000) { 5238 device_printf(sc->sc_dev, 5239 "Invalid error log pointer 0x%08x\n", base); 5240 return; 5241 } 5242 5243 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) { 5244 device_printf(sc->sc_dev, "reading errlog failed\n"); 5245 return; 5246 } 5247 5248 if (!table.valid) { 5249 device_printf(sc->sc_dev, "errlog not found, skipping\n"); 5250 return; 5251 } 5252 5253 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { 5254 device_printf(sc->sc_dev, "Start Error Log Dump:\n"); 5255 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n", 5256 sc->sc_flags, table.valid); 5257 } 5258 5259 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id, 5260 iwm_desc_lookup(table.error_id)); 5261 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n", 5262 table.trm_hw_status0); 5263 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n", 5264 table.trm_hw_status1); 5265 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2); 5266 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1); 5267 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2); 5268 device_printf(sc->sc_dev, "%08X | data1\n", table.data1); 5269 device_printf(sc->sc_dev, "%08X | data2\n", table.data2); 5270 device_printf(sc->sc_dev, "%08X | data3\n", table.data3); 5271 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time); 5272 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low); 5273 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi); 5274 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1); 5275 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2); 5276 device_printf(sc->sc_dev, "%08X | uCode revision type\n", 5277 table.fw_rev_type); 5278 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major); 5279 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor); 5280 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver); 5281 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver); 5282 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd); 5283 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0); 5284 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1); 5285 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2); 5286 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3); 5287 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4); 5288 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id); 5289 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event); 5290 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control); 5291 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration); 5292 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid); 5293 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match); 5294 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel); 5295 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp); 5296 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler); 5297 5298 if (sc->umac_error_event_table) 5299 iwm_nic_umac_error(sc); 5300} 5301#endif 5302 5303static void 5304iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m) 5305{ 5306 struct ieee80211com *ic = &sc->sc_ic; 5307 struct iwm_cmd_response *cresp; 5308 struct mbuf *m1; 5309 uint32_t offset = 0; 5310 uint32_t maxoff = IWM_RBUF_SIZE; 5311 uint32_t nextoff; 5312 boolean_t stolen = FALSE; 5313 5314#define HAVEROOM(a) \ 5315 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff) 5316 5317 while (HAVEROOM(offset)) { 5318 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, 5319 offset); 5320 int qid, idx, code, len; 5321 5322 qid = pkt->hdr.qid; 5323 idx = pkt->hdr.idx; 5324 5325 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code); 5326 5327 /* 5328 * randomly get these from the firmware, no idea why. 5329 * they at least seem harmless, so just ignore them for now 5330 */ 5331 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) || 5332 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) { 5333 break; 5334 } 5335 5336 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5337 "rx packet qid=%d idx=%d type=%x\n", 5338 qid & ~0x80, pkt->hdr.idx, code); 5339 5340 len = iwm_rx_packet_len(pkt); 5341 len += sizeof(uint32_t); /* account for status word */ 5342 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN); 5343 5344 iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt); 5345 5346 switch (code) { 5347 case IWM_REPLY_RX_PHY_CMD: 5348 iwm_mvm_rx_rx_phy_cmd(sc, pkt); 5349 break; 5350 5351 case IWM_REPLY_RX_MPDU_CMD: { 5352 /* 5353 * If this is the last frame in the RX buffer, we 5354 * can directly feed the mbuf to the sharks here. 5355 */ 5356 struct iwm_rx_packet *nextpkt = mtodoff(m, 5357 struct iwm_rx_packet *, nextoff); 5358 if (!HAVEROOM(nextoff) || 5359 (nextpkt->hdr.code == 0 && 5360 (nextpkt->hdr.qid & ~0x80) == 0 && 5361 nextpkt->hdr.idx == 0) || 5362 (nextpkt->len_n_flags == 5363 htole32(IWM_FH_RSCSR_FRAME_INVALID))) { 5364 if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) { 5365 stolen = FALSE; 5366 /* Make sure we abort the loop */ 5367 nextoff = maxoff; 5368 } 5369 break; 5370 } 5371 5372 /* 5373 * Use m_copym instead of m_split, because that 5374 * makes it easier to keep a valid rx buffer in 5375 * the ring, when iwm_mvm_rx_rx_mpdu() fails. 5376 * 5377 * We need to start m_copym() at offset 0, to get the 5378 * M_PKTHDR flag preserved. 5379 */ 5380 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT); 5381 if (m1) { 5382 if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen)) 5383 stolen = TRUE; 5384 else 5385 m_freem(m1); 5386 } 5387 break; 5388 } 5389 5390 case IWM_TX_CMD: 5391 iwm_mvm_rx_tx_cmd(sc, pkt); 5392 break; 5393 5394 case IWM_MISSED_BEACONS_NOTIFICATION: { 5395 struct iwm_missed_beacons_notif *resp; 5396 int missed; 5397 5398 /* XXX look at mac_id to determine interface ID */ 5399 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5400 5401 resp = (void *)pkt->data; 5402 missed = le32toh(resp->consec_missed_beacons); 5403 5404 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE, 5405 "%s: MISSED_BEACON: mac_id=%d, " 5406 "consec_since_last_rx=%d, consec=%d, num_expect=%d " 5407 "num_rx=%d\n", 5408 __func__, 5409 le32toh(resp->mac_id), 5410 le32toh(resp->consec_missed_beacons_since_last_rx), 5411 le32toh(resp->consec_missed_beacons), 5412 le32toh(resp->num_expected_beacons), 5413 le32toh(resp->num_recvd_beacons)); 5414 5415 /* Be paranoid */ 5416 if (vap == NULL) 5417 break; 5418 5419 /* XXX no net80211 locking? */ 5420 if (vap->iv_state == IEEE80211_S_RUN && 5421 (ic->ic_flags & IEEE80211_F_SCAN) == 0) { 5422 if (missed > vap->iv_bmissthreshold) { 5423 /* XXX bad locking; turn into task */ 5424 IWM_UNLOCK(sc); 5425 ieee80211_beacon_miss(ic); 5426 IWM_LOCK(sc); 5427 } 5428 } 5429 5430 break; 5431 } 5432 5433 case IWM_MFUART_LOAD_NOTIFICATION: 5434 break; 5435 5436 case IWM_MVM_ALIVE: 5437 break; 5438 5439 case IWM_CALIB_RES_NOTIF_PHY_DB: 5440 break; 5441 5442 case IWM_STATISTICS_NOTIFICATION: 5443 iwm_mvm_handle_rx_statistics(sc, pkt); 5444 break; 5445 5446 case IWM_NVM_ACCESS_CMD: 5447 case IWM_MCC_UPDATE_CMD: 5448 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5449 memcpy(sc->sc_cmd_resp, 5450 pkt, sizeof(sc->sc_cmd_resp)); 5451 } 5452 break; 5453 5454 case IWM_MCC_CHUB_UPDATE_CMD: { 5455 struct iwm_mcc_chub_notif *notif; 5456 notif = (void *)pkt->data; 5457 5458 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8; 5459 sc->sc_fw_mcc[1] = notif->mcc & 0xff; 5460 sc->sc_fw_mcc[2] = '\0'; 5461 IWM_DPRINTF(sc, IWM_DEBUG_LAR, 5462 "fw source %d sent CC '%s'\n", 5463 notif->source_id, sc->sc_fw_mcc); 5464 break; 5465 } 5466 5467 case IWM_DTS_MEASUREMENT_NOTIFICATION: 5468 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP, 5469 IWM_DTS_MEASUREMENT_NOTIF_WIDE): { 5470 struct iwm_dts_measurement_notif_v1 *notif; 5471 5472 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) { 5473 device_printf(sc->sc_dev, 5474 "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); 5475 break; 5476 } 5477 notif = (void *)pkt->data; 5478 IWM_DPRINTF(sc, IWM_DEBUG_TEMP, 5479 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n", 5480 notif->temp); 5481 break; 5482 } 5483 5484 case IWM_PHY_CONFIGURATION_CMD: 5485 case IWM_TX_ANT_CONFIGURATION_CMD: 5486 case IWM_ADD_STA: 5487 case IWM_MAC_CONTEXT_CMD: 5488 case IWM_REPLY_SF_CFG_CMD: 5489 case IWM_POWER_TABLE_CMD: 5490 case IWM_PHY_CONTEXT_CMD: 5491 case IWM_BINDING_CONTEXT_CMD: 5492 case IWM_TIME_EVENT_CMD: 5493 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD): 5494 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC): 5495 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC): 5496 case IWM_SCAN_OFFLOAD_REQUEST_CMD: 5497 case IWM_SCAN_OFFLOAD_ABORT_CMD: 5498 case IWM_REPLY_BEACON_FILTERING_CMD: 5499 case IWM_MAC_PM_POWER_TABLE: 5500 case IWM_TIME_QUOTA_CMD: 5501 case IWM_REMOVE_STA: 5502 case IWM_TXPATH_FLUSH: 5503 case IWM_LQ_CMD: 5504 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, 5505 IWM_FW_PAGING_BLOCK_CMD): 5506 case IWM_BT_CONFIG: 5507 case IWM_REPLY_THERMAL_MNG_BACKOFF: 5508 cresp = (void *)pkt->data; 5509 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) { 5510 memcpy(sc->sc_cmd_resp, 5511 pkt, sizeof(*pkt)+sizeof(*cresp)); 5512 } 5513 break; 5514 5515 /* ignore */ 5516 case IWM_PHY_DB_CMD: 5517 break; 5518 5519 case IWM_INIT_COMPLETE_NOTIF: 5520 break; 5521 5522 case IWM_SCAN_OFFLOAD_COMPLETE: { 5523 struct iwm_periodic_scan_complete *notif; 5524 notif = (void *)pkt->data; 5525 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5526 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5527 ieee80211_runtask(ic, &sc->sc_es_task); 5528 } 5529 break; 5530 } 5531 5532 case IWM_SCAN_ITERATION_COMPLETE: { 5533 struct iwm_lmac_scan_complete_notif *notif; 5534 notif = (void *)pkt->data; 5535 ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task); 5536 break; 5537 } 5538 5539 case IWM_SCAN_COMPLETE_UMAC: { 5540 struct iwm_umac_scan_complete *notif; 5541 notif = (void *)pkt->data; 5542 5543 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, 5544 "UMAC scan complete, status=0x%x\n", 5545 notif->status); 5546 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 5547 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 5548 ieee80211_runtask(ic, &sc->sc_es_task); 5549 } 5550 break; 5551 } 5552 5553 case IWM_SCAN_ITERATION_COMPLETE_UMAC: { 5554 struct iwm_umac_scan_iter_complete_notif *notif; 5555 notif = (void *)pkt->data; 5556 5557 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration " 5558 "complete, status=0x%x, %d channels scanned\n", 5559 notif->status, notif->scanned_channels); 5560 ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task); 5561 break; 5562 } 5563 5564 case IWM_REPLY_ERROR: { 5565 struct iwm_error_resp *resp; 5566 resp = (void *)pkt->data; 5567 5568 device_printf(sc->sc_dev, 5569 "firmware error 0x%x, cmd 0x%x\n", 5570 le32toh(resp->error_type), 5571 resp->cmd_id); 5572 break; 5573 } 5574 5575 case IWM_TIME_EVENT_NOTIFICATION: { 5576 struct iwm_time_event_notif *notif; 5577 notif = (void *)pkt->data; 5578 5579 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5580 "TE notif status = 0x%x action = 0x%x\n", 5581 notif->status, notif->action); 5582 break; 5583 } 5584 5585 /* 5586 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG 5587 * messages. Just ignore them for now. 5588 */ 5589 case IWM_DEBUG_LOG_MSG: 5590 break; 5591 5592 case IWM_MCAST_FILTER_CMD: 5593 break; 5594 5595 case IWM_SCD_QUEUE_CFG: { 5596 struct iwm_scd_txq_cfg_rsp *rsp; 5597 rsp = (void *)pkt->data; 5598 5599 IWM_DPRINTF(sc, IWM_DEBUG_CMD, 5600 "queue cfg token=0x%x sta_id=%d " 5601 "tid=%d scd_queue=%d\n", 5602 rsp->token, rsp->sta_id, rsp->tid, 5603 rsp->scd_queue); 5604 break; 5605 } 5606 5607 default: 5608 device_printf(sc->sc_dev, 5609 "frame %d/%d %x UNHANDLED (this should " 5610 "not happen)\n", qid & ~0x80, idx, 5611 pkt->len_n_flags); 5612 break; 5613 } 5614 5615 /* 5616 * Why test bit 0x80? The Linux driver: 5617 * 5618 * There is one exception: uCode sets bit 15 when it 5619 * originates the response/notification, i.e. when the 5620 * response/notification is not a direct response to a 5621 * command sent by the driver. For example, uCode issues 5622 * IWM_REPLY_RX when it sends a received frame to the driver; 5623 * it is not a direct response to any driver command. 5624 * 5625 * Ok, so since when is 7 == 15? Well, the Linux driver 5626 * uses a slightly different format for pkt->hdr, and "qid" 5627 * is actually the upper byte of a two-byte field. 5628 */ 5629 if (!(qid & (1 << 7))) 5630 iwm_cmd_done(sc, pkt); 5631 5632 offset = nextoff; 5633 } 5634 if (stolen) 5635 m_freem(m); 5636#undef HAVEROOM 5637} 5638 5639/* 5640 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt. 5641 * Basic structure from if_iwn 5642 */ 5643static void 5644iwm_notif_intr(struct iwm_softc *sc) 5645{ 5646 uint16_t hw; 5647 5648 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, 5649 BUS_DMASYNC_POSTREAD); 5650 5651 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff; 5652 5653 /* 5654 * Process responses 5655 */ 5656 while (sc->rxq.cur != hw) { 5657 struct iwm_rx_ring *ring = &sc->rxq; 5658 struct iwm_rx_data *data = &ring->data[ring->cur]; 5659 5660 bus_dmamap_sync(ring->data_dmat, data->map, 5661 BUS_DMASYNC_POSTREAD); 5662 5663 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5664 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur); 5665 iwm_handle_rxb(sc, data->m); 5666 5667 ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT; 5668 } 5669 5670 /* 5671 * Tell the firmware that it can reuse the ring entries that 5672 * we have just processed. 5673 * Seems like the hardware gets upset unless we align 5674 * the write by 8?? 5675 */ 5676 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1; 5677 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8)); 5678} 5679 5680static void 5681iwm_intr(void *arg) 5682{ 5683 struct iwm_softc *sc = arg; 5684 int handled = 0; 5685 int r1, r2, rv = 0; 5686 int isperiodic = 0; 5687 5688 IWM_LOCK(sc); 5689 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0); 5690 5691 if (sc->sc_flags & IWM_FLAG_USE_ICT) { 5692 uint32_t *ict = sc->ict_dma.vaddr; 5693 int tmp; 5694 5695 tmp = htole32(ict[sc->ict_cur]); 5696 if (!tmp) 5697 goto out_ena; 5698 5699 /* 5700 * ok, there was something. keep plowing until we have all. 5701 */ 5702 r1 = r2 = 0; 5703 while (tmp) { 5704 r1 |= tmp; 5705 ict[sc->ict_cur] = 0; 5706 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT; 5707 tmp = htole32(ict[sc->ict_cur]); 5708 } 5709 5710 /* this is where the fun begins. don't ask */ 5711 if (r1 == 0xffffffff) 5712 r1 = 0; 5713 5714 /* i am not expected to understand this */ 5715 if (r1 & 0xc0000) 5716 r1 |= 0x8000; 5717 r1 = (0xff & r1) | ((0xff00 & r1) << 16); 5718 } else { 5719 r1 = IWM_READ(sc, IWM_CSR_INT); 5720 /* "hardware gone" (where, fishing?) */ 5721 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) 5722 goto out; 5723 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS); 5724 } 5725 if (r1 == 0 && r2 == 0) { 5726 goto out_ena; 5727 } 5728 5729 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask); 5730 5731 /* Safely ignore these bits for debug checks below */ 5732 r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD); 5733 5734 if (r1 & IWM_CSR_INT_BIT_SW_ERR) { 5735 int i; 5736 struct ieee80211com *ic = &sc->sc_ic; 5737 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 5738 5739#ifdef IWM_DEBUG 5740 iwm_nic_error(sc); 5741#endif 5742 /* Dump driver status (TX and RX rings) while we're here. */ 5743 device_printf(sc->sc_dev, "driver status:\n"); 5744 for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) { 5745 struct iwm_tx_ring *ring = &sc->txq[i]; 5746 device_printf(sc->sc_dev, 5747 " tx ring %2d: qid=%-2d cur=%-3d " 5748 "queued=%-3d\n", 5749 i, ring->qid, ring->cur, ring->queued); 5750 } 5751 device_printf(sc->sc_dev, 5752 " rx ring: cur=%d\n", sc->rxq.cur); 5753 device_printf(sc->sc_dev, 5754 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state); 5755 5756 /* Don't stop the device; just do a VAP restart */ 5757 IWM_UNLOCK(sc); 5758 5759 if (vap == NULL) { 5760 printf("%s: null vap\n", __func__); 5761 return; 5762 } 5763 5764 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; " 5765 "restarting\n", __func__, vap->iv_state); 5766 5767 ieee80211_restart_all(ic); 5768 return; 5769 } 5770 5771 if (r1 & IWM_CSR_INT_BIT_HW_ERR) { 5772 handled |= IWM_CSR_INT_BIT_HW_ERR; 5773 device_printf(sc->sc_dev, "hardware error, stopping device\n"); 5774 iwm_stop(sc); 5775 rv = 1; 5776 goto out; 5777 } 5778 5779 /* firmware chunk loaded */ 5780 if (r1 & IWM_CSR_INT_BIT_FH_TX) { 5781 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK); 5782 handled |= IWM_CSR_INT_BIT_FH_TX; 5783 sc->sc_fw_chunk_done = 1; 5784 wakeup(&sc->sc_fw); 5785 } 5786 5787 if (r1 & IWM_CSR_INT_BIT_RF_KILL) { 5788 handled |= IWM_CSR_INT_BIT_RF_KILL; 5789 if (iwm_check_rfkill(sc)) { 5790 device_printf(sc->sc_dev, 5791 "%s: rfkill switch, disabling interface\n", 5792 __func__); 5793 iwm_stop(sc); 5794 } 5795 } 5796 5797 /* 5798 * The Linux driver uses periodic interrupts to avoid races. 5799 * We cargo-cult like it's going out of fashion. 5800 */ 5801 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) { 5802 handled |= IWM_CSR_INT_BIT_RX_PERIODIC; 5803 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC); 5804 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0) 5805 IWM_WRITE_1(sc, 5806 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS); 5807 isperiodic = 1; 5808 } 5809 5810 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) { 5811 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX); 5812 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK); 5813 5814 iwm_notif_intr(sc); 5815 5816 /* enable periodic interrupt, see above */ 5817 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic) 5818 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, 5819 IWM_CSR_INT_PERIODIC_ENA); 5820 } 5821 5822 if (__predict_false(r1 & ~handled)) 5823 IWM_DPRINTF(sc, IWM_DEBUG_INTR, 5824 "%s: unhandled interrupts: %x\n", __func__, r1); 5825 rv = 1; 5826 5827 out_ena: 5828 iwm_restore_interrupts(sc); 5829 out: 5830 IWM_UNLOCK(sc); 5831 return; 5832} 5833 5834/* 5835 * Autoconf glue-sniffing 5836 */ 5837#define PCI_VENDOR_INTEL 0x8086 5838#define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3 5839#define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4 5840#define PCI_PRODUCT_INTEL_WL_3165_1 0x3165 5841#define PCI_PRODUCT_INTEL_WL_3165_2 0x3166 5842#define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1 5843#define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2 5844#define PCI_PRODUCT_INTEL_WL_7265_1 0x095a 5845#define PCI_PRODUCT_INTEL_WL_7265_2 0x095b 5846#define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3 5847#define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4 5848#define PCI_PRODUCT_INTEL_WL_8265_1 0x24fd 5849 5850static const struct iwm_devices { 5851 uint16_t device; 5852 const struct iwm_cfg *cfg; 5853} iwm_devices[] = { 5854 { PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg }, 5855 { PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg }, 5856 { PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg }, 5857 { PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg }, 5858 { PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg }, 5859 { PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg }, 5860 { PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg }, 5861 { PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg }, 5862 { PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg }, 5863 { PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg }, 5864 { PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg }, 5865}; 5866 5867static int 5868iwm_probe(device_t dev) 5869{ 5870 int i; 5871 5872 for (i = 0; i < nitems(iwm_devices); i++) { 5873 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL && 5874 pci_get_device(dev) == iwm_devices[i].device) { 5875 device_set_desc(dev, iwm_devices[i].cfg->name); 5876 return (BUS_PROBE_DEFAULT); 5877 } 5878 } 5879 5880 return (ENXIO); 5881} 5882 5883static int 5884iwm_dev_check(device_t dev) 5885{ 5886 struct iwm_softc *sc; 5887 uint16_t devid; 5888 int i; 5889 5890 sc = device_get_softc(dev); 5891 5892 devid = pci_get_device(dev); 5893 for (i = 0; i < nitems(iwm_devices); i++) { 5894 if (iwm_devices[i].device == devid) { 5895 sc->cfg = iwm_devices[i].cfg; 5896 return (0); 5897 } 5898 } 5899 device_printf(dev, "unknown adapter type\n"); 5900 return ENXIO; 5901} 5902 5903/* PCI registers */ 5904#define PCI_CFG_RETRY_TIMEOUT 0x041 5905 5906static int 5907iwm_pci_attach(device_t dev) 5908{ 5909 struct iwm_softc *sc; 5910 int count, error, rid; 5911 uint16_t reg; 5912 5913 sc = device_get_softc(dev); 5914 5915 /* We disable the RETRY_TIMEOUT register (0x41) to keep 5916 * PCI Tx retries from interfering with C3 CPU state */ 5917 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1); 5918 5919 /* Enable bus-mastering and hardware bug workaround. */ 5920 pci_enable_busmaster(dev); 5921 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg)); 5922 /* if !MSI */ 5923 if (reg & PCIM_STATUS_INTxSTATE) { 5924 reg &= ~PCIM_STATUS_INTxSTATE; 5925 } 5926 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg)); 5927 5928 rid = PCIR_BAR(0); 5929 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 5930 RF_ACTIVE); 5931 if (sc->sc_mem == NULL) { 5932 device_printf(sc->sc_dev, "can't map mem space\n"); 5933 return (ENXIO); 5934 } 5935 sc->sc_st = rman_get_bustag(sc->sc_mem); 5936 sc->sc_sh = rman_get_bushandle(sc->sc_mem); 5937 5938 /* Install interrupt handler. */ 5939 count = 1; 5940 rid = 0; 5941 if (pci_alloc_msi(dev, &count) == 0) 5942 rid = 1; 5943 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | 5944 (rid != 0 ? 0 : RF_SHAREABLE)); 5945 if (sc->sc_irq == NULL) { 5946 device_printf(dev, "can't map interrupt\n"); 5947 return (ENXIO); 5948 } 5949 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, 5950 NULL, iwm_intr, sc, &sc->sc_ih); 5951 if (sc->sc_ih == NULL) { 5952 device_printf(dev, "can't establish interrupt"); 5953 return (ENXIO); 5954 } 5955 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev); 5956 5957 return (0); 5958} 5959 5960static void 5961iwm_pci_detach(device_t dev) 5962{ 5963 struct iwm_softc *sc = device_get_softc(dev); 5964 5965 if (sc->sc_irq != NULL) { 5966 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 5967 bus_release_resource(dev, SYS_RES_IRQ, 5968 rman_get_rid(sc->sc_irq), sc->sc_irq); 5969 pci_release_msi(dev); 5970 } 5971 if (sc->sc_mem != NULL) 5972 bus_release_resource(dev, SYS_RES_MEMORY, 5973 rman_get_rid(sc->sc_mem), sc->sc_mem); 5974} 5975 5976 5977 5978static int 5979iwm_attach(device_t dev) 5980{ 5981 struct iwm_softc *sc = device_get_softc(dev); 5982 struct ieee80211com *ic = &sc->sc_ic; 5983 int error; 5984 int txq_i, i; 5985 5986 sc->sc_dev = dev; 5987 sc->sc_attached = 1; 5988 IWM_LOCK_INIT(sc); 5989 mbufq_init(&sc->sc_snd, ifqmaxlen); 5990 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0); 5991 callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0); 5992 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc); 5993 5994 sc->sc_notif_wait = iwm_notification_wait_init(sc); 5995 if (sc->sc_notif_wait == NULL) { 5996 device_printf(dev, "failed to init notification wait struct\n"); 5997 goto fail; 5998 } 5999 6000 /* Init phy db */ 6001 sc->sc_phy_db = iwm_phy_db_init(sc); 6002 if (!sc->sc_phy_db) { 6003 device_printf(dev, "Cannot init phy_db\n"); 6004 goto fail; 6005 } 6006 6007 /* PCI attach */ 6008 error = iwm_pci_attach(dev); 6009 if (error != 0) 6010 goto fail; 6011 6012 sc->sc_wantresp = -1; 6013 6014 /* Check device type */ 6015 error = iwm_dev_check(dev); 6016 if (error != 0) 6017 goto fail; 6018 6019 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV); 6020 /* 6021 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 6022 * changed, and now the revision step also includes bit 0-1 (no more 6023 * "dash" value). To keep hw_rev backwards compatible - we'll store it 6024 * in the old format. 6025 */ 6026 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) 6027 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) | 6028 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2); 6029 6030 if (iwm_prepare_card_hw(sc) != 0) { 6031 device_printf(dev, "could not initialize hardware\n"); 6032 goto fail; 6033 } 6034 6035 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) { 6036 int ret; 6037 uint32_t hw_step; 6038 6039 /* 6040 * In order to recognize C step the driver should read the 6041 * chip version id located at the AUX bus MISC address. 6042 */ 6043 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, 6044 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 6045 DELAY(2); 6046 6047 ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL, 6048 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 6049 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 6050 25000); 6051 if (!ret) { 6052 device_printf(sc->sc_dev, 6053 "Failed to wake up the nic\n"); 6054 goto fail; 6055 } 6056 6057 if (iwm_nic_lock(sc)) { 6058 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG); 6059 hw_step |= IWM_ENABLE_WFPM; 6060 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step); 6061 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG); 6062 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF; 6063 if (hw_step == 0x3) 6064 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) | 6065 (IWM_SILICON_C_STEP << 2); 6066 iwm_nic_unlock(sc); 6067 } else { 6068 device_printf(sc->sc_dev, "Failed to lock the nic\n"); 6069 goto fail; 6070 } 6071 } 6072 6073 /* special-case 7265D, it has the same PCI IDs. */ 6074 if (sc->cfg == &iwm7265_cfg && 6075 (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) { 6076 sc->cfg = &iwm7265d_cfg; 6077 } 6078 6079 /* Allocate DMA memory for firmware transfers. */ 6080 if ((error = iwm_alloc_fwmem(sc)) != 0) { 6081 device_printf(dev, "could not allocate memory for firmware\n"); 6082 goto fail; 6083 } 6084 6085 /* Allocate "Keep Warm" page. */ 6086 if ((error = iwm_alloc_kw(sc)) != 0) { 6087 device_printf(dev, "could not allocate keep warm page\n"); 6088 goto fail; 6089 } 6090 6091 /* We use ICT interrupts */ 6092 if ((error = iwm_alloc_ict(sc)) != 0) { 6093 device_printf(dev, "could not allocate ICT table\n"); 6094 goto fail; 6095 } 6096 6097 /* Allocate TX scheduler "rings". */ 6098 if ((error = iwm_alloc_sched(sc)) != 0) { 6099 device_printf(dev, "could not allocate TX scheduler rings\n"); 6100 goto fail; 6101 } 6102 6103 /* Allocate TX rings */ 6104 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) { 6105 if ((error = iwm_alloc_tx_ring(sc, 6106 &sc->txq[txq_i], txq_i)) != 0) { 6107 device_printf(dev, 6108 "could not allocate TX ring %d\n", 6109 txq_i); 6110 goto fail; 6111 } 6112 } 6113 6114 /* Allocate RX ring. */ 6115 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) { 6116 device_printf(dev, "could not allocate RX ring\n"); 6117 goto fail; 6118 } 6119 6120 /* Clear pending interrupts. */ 6121 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff); 6122 6123 ic->ic_softc = sc; 6124 ic->ic_name = device_get_nameunit(sc->sc_dev); 6125 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ 6126 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ 6127 6128 /* Set device capabilities. */ 6129 ic->ic_caps = 6130 IEEE80211_C_STA | 6131 IEEE80211_C_WPA | /* WPA/RSN */ 6132 IEEE80211_C_WME | 6133 IEEE80211_C_PMGT | 6134 IEEE80211_C_SHSLOT | /* short slot time supported */ 6135 IEEE80211_C_SHPREAMBLE /* short preamble supported */ 6136// IEEE80211_C_BGSCAN /* capable of bg scanning */ 6137 ; 6138 /* Advertise full-offload scanning */ 6139 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD; 6140 for (i = 0; i < nitems(sc->sc_phyctxt); i++) { 6141 sc->sc_phyctxt[i].id = i; 6142 sc->sc_phyctxt[i].color = 0; 6143 sc->sc_phyctxt[i].ref = 0; 6144 sc->sc_phyctxt[i].channel = NULL; 6145 } 6146 6147 /* Default noise floor */ 6148 sc->sc_noise = -96; 6149 6150 /* Max RSSI */ 6151 sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM; 6152 6153 sc->sc_preinit_hook.ich_func = iwm_preinit; 6154 sc->sc_preinit_hook.ich_arg = sc; 6155 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) { 6156 device_printf(dev, "config_intrhook_establish failed\n"); 6157 goto fail; 6158 } 6159 6160#ifdef IWM_DEBUG 6161 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 6162 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", 6163 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging"); 6164#endif 6165 6166 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6167 "<-%s\n", __func__); 6168 6169 return 0; 6170 6171 /* Free allocated memory if something failed during attachment. */ 6172fail: 6173 iwm_detach_local(sc, 0); 6174 6175 return ENXIO; 6176} 6177 6178static int 6179iwm_is_valid_ether_addr(uint8_t *addr) 6180{ 6181 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 6182 6183 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr)) 6184 return (FALSE); 6185 6186 return (TRUE); 6187} 6188 6189static int 6190iwm_wme_update(struct ieee80211com *ic) 6191{ 6192#define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ 6193 struct iwm_softc *sc = ic->ic_softc; 6194 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6195 struct iwm_vap *ivp = IWM_VAP(vap); 6196 struct iwm_node *in; 6197 struct wmeParams tmp[WME_NUM_AC]; 6198 int aci, error; 6199 6200 if (vap == NULL) 6201 return (0); 6202 6203 IEEE80211_LOCK(ic); 6204 for (aci = 0; aci < WME_NUM_AC; aci++) 6205 tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci]; 6206 IEEE80211_UNLOCK(ic); 6207 6208 IWM_LOCK(sc); 6209 for (aci = 0; aci < WME_NUM_AC; aci++) { 6210 const struct wmeParams *ac = &tmp[aci]; 6211 ivp->queue_params[aci].aifsn = ac->wmep_aifsn; 6212 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin); 6213 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax); 6214 ivp->queue_params[aci].edca_txop = 6215 IEEE80211_TXOP_TO_US(ac->wmep_txopLimit); 6216 } 6217 ivp->have_wme = TRUE; 6218 if (ivp->is_uploaded && vap->iv_bss != NULL) { 6219 in = IWM_NODE(vap->iv_bss); 6220 if (in->in_assoc) { 6221 if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) { 6222 device_printf(sc->sc_dev, 6223 "%s: failed to update MAC\n", __func__); 6224 } 6225 } 6226 } 6227 IWM_UNLOCK(sc); 6228 6229 return (0); 6230#undef IWM_EXP2 6231} 6232 6233static void 6234iwm_preinit(void *arg) 6235{ 6236 struct iwm_softc *sc = arg; 6237 device_t dev = sc->sc_dev; 6238 struct ieee80211com *ic = &sc->sc_ic; 6239 int error; 6240 6241 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6242 "->%s\n", __func__); 6243 6244 IWM_LOCK(sc); 6245 if ((error = iwm_start_hw(sc)) != 0) { 6246 device_printf(dev, "could not initialize hardware\n"); 6247 IWM_UNLOCK(sc); 6248 goto fail; 6249 } 6250 6251 error = iwm_run_init_mvm_ucode(sc, 1); 6252 iwm_stop_device(sc); 6253 if (error) { 6254 IWM_UNLOCK(sc); 6255 goto fail; 6256 } 6257 device_printf(dev, 6258 "hw rev 0x%x, fw ver %s, address %s\n", 6259 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, 6260 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr)); 6261 6262 /* not all hardware can do 5GHz band */ 6263 if (!sc->nvm_data->sku_cap_band_52GHz_enable) 6264 memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0, 6265 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A])); 6266 IWM_UNLOCK(sc); 6267 6268 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, 6269 ic->ic_channels); 6270 6271 /* 6272 * At this point we've committed - if we fail to do setup, 6273 * we now also have to tear down the net80211 state. 6274 */ 6275 ieee80211_ifattach(ic); 6276 ic->ic_vap_create = iwm_vap_create; 6277 ic->ic_vap_delete = iwm_vap_delete; 6278 ic->ic_raw_xmit = iwm_raw_xmit; 6279 ic->ic_node_alloc = iwm_node_alloc; 6280 ic->ic_scan_start = iwm_scan_start; 6281 ic->ic_scan_end = iwm_scan_end; 6282 ic->ic_update_mcast = iwm_update_mcast; 6283 ic->ic_getradiocaps = iwm_init_channel_map; 6284 ic->ic_set_channel = iwm_set_channel; 6285 ic->ic_scan_curchan = iwm_scan_curchan; 6286 ic->ic_scan_mindwell = iwm_scan_mindwell; 6287 ic->ic_wme.wme_update = iwm_wme_update; 6288 ic->ic_parent = iwm_parent; 6289 ic->ic_transmit = iwm_transmit; 6290 iwm_radiotap_attach(sc); 6291 if (bootverbose) 6292 ieee80211_announce(ic); 6293 6294 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6295 "<-%s\n", __func__); 6296 config_intrhook_disestablish(&sc->sc_preinit_hook); 6297 6298 return; 6299fail: 6300 config_intrhook_disestablish(&sc->sc_preinit_hook); 6301 iwm_detach_local(sc, 0); 6302} 6303 6304/* 6305 * Attach the interface to 802.11 radiotap. 6306 */ 6307static void 6308iwm_radiotap_attach(struct iwm_softc *sc) 6309{ 6310 struct ieee80211com *ic = &sc->sc_ic; 6311 6312 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6313 "->%s begin\n", __func__); 6314 ieee80211_radiotap_attach(ic, 6315 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), 6316 IWM_TX_RADIOTAP_PRESENT, 6317 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), 6318 IWM_RX_RADIOTAP_PRESENT); 6319 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE, 6320 "->%s end\n", __func__); 6321} 6322 6323static struct ieee80211vap * 6324iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, 6325 enum ieee80211_opmode opmode, int flags, 6326 const uint8_t bssid[IEEE80211_ADDR_LEN], 6327 const uint8_t mac[IEEE80211_ADDR_LEN]) 6328{ 6329 struct iwm_vap *ivp; 6330 struct ieee80211vap *vap; 6331 6332 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ 6333 return NULL; 6334 ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO); 6335 vap = &ivp->iv_vap; 6336 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); 6337 vap->iv_bmissthreshold = 10; /* override default */ 6338 /* Override with driver methods. */ 6339 ivp->iv_newstate = vap->iv_newstate; 6340 vap->iv_newstate = iwm_newstate; 6341 6342 ivp->id = IWM_DEFAULT_MACID; 6343 ivp->color = IWM_DEFAULT_COLOR; 6344 6345 ivp->have_wme = FALSE; 6346 6347 ieee80211_ratectl_init(vap); 6348 /* Complete setup. */ 6349 ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status, 6350 mac); 6351 ic->ic_opmode = opmode; 6352 6353 return vap; 6354} 6355 6356static void 6357iwm_vap_delete(struct ieee80211vap *vap) 6358{ 6359 struct iwm_vap *ivp = IWM_VAP(vap); 6360 6361 ieee80211_ratectl_deinit(vap); 6362 ieee80211_vap_detach(vap); 6363 free(ivp, M_80211_VAP); 6364} 6365 6366static void 6367iwm_xmit_queue_drain(struct iwm_softc *sc) 6368{ 6369 struct mbuf *m; 6370 struct ieee80211_node *ni; 6371 6372 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { 6373 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; 6374 ieee80211_free_node(ni); 6375 m_freem(m); 6376 } 6377} 6378 6379static void 6380iwm_scan_start(struct ieee80211com *ic) 6381{ 6382 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6383 struct iwm_softc *sc = ic->ic_softc; 6384 int error; 6385 6386 IWM_LOCK(sc); 6387 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 6388 /* This should not be possible */ 6389 device_printf(sc->sc_dev, 6390 "%s: Previous scan not completed yet\n", __func__); 6391 } 6392 if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) 6393 error = iwm_mvm_umac_scan(sc); 6394 else 6395 error = iwm_mvm_lmac_scan(sc); 6396 if (error != 0) { 6397 device_printf(sc->sc_dev, "could not initiate scan\n"); 6398 IWM_UNLOCK(sc); 6399 ieee80211_cancel_scan(vap); 6400 } else { 6401 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING; 6402 iwm_led_blink_start(sc); 6403 IWM_UNLOCK(sc); 6404 } 6405} 6406 6407static void 6408iwm_scan_end(struct ieee80211com *ic) 6409{ 6410 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); 6411 struct iwm_softc *sc = ic->ic_softc; 6412 6413 IWM_LOCK(sc); 6414 iwm_led_blink_stop(sc); 6415 if (vap->iv_state == IEEE80211_S_RUN) 6416 iwm_mvm_led_enable(sc); 6417 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) { 6418 /* 6419 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because 6420 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq 6421 * taskqueue. 6422 */ 6423 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING; 6424 iwm_mvm_scan_stop_wait(sc); 6425 } 6426 IWM_UNLOCK(sc); 6427 6428 /* 6429 * Make sure we don't race, if sc_es_task is still enqueued here. 6430 * This is to make sure that it won't call ieee80211_scan_done 6431 * when we have already started the next scan. 6432 */ 6433 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL); 6434} 6435 6436static void 6437iwm_update_mcast(struct ieee80211com *ic) 6438{ 6439} 6440 6441static void 6442iwm_set_channel(struct ieee80211com *ic) 6443{ 6444} 6445 6446static void 6447iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) 6448{ 6449} 6450 6451static void 6452iwm_scan_mindwell(struct ieee80211_scan_state *ss) 6453{ 6454 return; 6455} 6456 6457void 6458iwm_init_task(void *arg1) 6459{ 6460 struct iwm_softc *sc = arg1; 6461 6462 IWM_LOCK(sc); 6463 while (sc->sc_flags & IWM_FLAG_BUSY) 6464 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0); 6465 sc->sc_flags |= IWM_FLAG_BUSY; 6466 iwm_stop(sc); 6467 if (sc->sc_ic.ic_nrunning > 0) 6468 iwm_init(sc); 6469 sc->sc_flags &= ~IWM_FLAG_BUSY; 6470 wakeup(&sc->sc_flags); 6471 IWM_UNLOCK(sc); 6472} 6473 6474static int 6475iwm_resume(device_t dev) 6476{ 6477 struct iwm_softc *sc = device_get_softc(dev); 6478 int do_reinit = 0; 6479 6480 /* 6481 * We disable the RETRY_TIMEOUT register (0x41) to keep 6482 * PCI Tx retries from interfering with C3 CPU state. 6483 */ 6484 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1); 6485 iwm_init_task(device_get_softc(dev)); 6486 6487 IWM_LOCK(sc); 6488 if (sc->sc_flags & IWM_FLAG_SCANNING) { 6489 sc->sc_flags &= ~IWM_FLAG_SCANNING; 6490 do_reinit = 1; 6491 } 6492 IWM_UNLOCK(sc); 6493 6494 if (do_reinit) 6495 ieee80211_resume_all(&sc->sc_ic); 6496 6497 return 0; 6498} 6499 6500static int 6501iwm_suspend(device_t dev) 6502{ 6503 int do_stop = 0; 6504 struct iwm_softc *sc = device_get_softc(dev); 6505 6506 do_stop = !! (sc->sc_ic.ic_nrunning > 0); 6507 6508 ieee80211_suspend_all(&sc->sc_ic); 6509 6510 if (do_stop) { 6511 IWM_LOCK(sc); 6512 iwm_stop(sc); 6513 sc->sc_flags |= IWM_FLAG_SCANNING; 6514 IWM_UNLOCK(sc); 6515 } 6516 6517 return (0); 6518} 6519 6520static int 6521iwm_detach_local(struct iwm_softc *sc, int do_net80211) 6522{ 6523 struct iwm_fw_info *fw = &sc->sc_fw; 6524 device_t dev = sc->sc_dev; 6525 int i; 6526 6527 if (!sc->sc_attached) 6528 return 0; 6529 sc->sc_attached = 0; 6530 6531 if (do_net80211) 6532 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task); 6533 6534 callout_drain(&sc->sc_led_blink_to); 6535 callout_drain(&sc->sc_watchdog_to); 6536 iwm_stop_device(sc); 6537 if (do_net80211) { 6538 IWM_LOCK(sc); 6539 iwm_xmit_queue_drain(sc); 6540 IWM_UNLOCK(sc); 6541 ieee80211_ifdetach(&sc->sc_ic); 6542 } 6543 6544 iwm_phy_db_free(sc->sc_phy_db); 6545 sc->sc_phy_db = NULL; 6546 6547 iwm_free_nvm_data(sc->nvm_data); 6548 6549 /* Free descriptor rings */ 6550 iwm_free_rx_ring(sc, &sc->rxq); 6551 for (i = 0; i < nitems(sc->txq); i++) 6552 iwm_free_tx_ring(sc, &sc->txq[i]); 6553 6554 /* Free firmware */ 6555 if (fw->fw_fp != NULL) 6556 iwm_fw_info_free(fw); 6557 6558 /* Free scheduler */ 6559 iwm_dma_contig_free(&sc->sched_dma); 6560 iwm_dma_contig_free(&sc->ict_dma); 6561 iwm_dma_contig_free(&sc->kw_dma); 6562 iwm_dma_contig_free(&sc->fw_dma); 6563 6564 iwm_free_fw_paging(sc); 6565 6566 /* Finished with the hardware - detach things */ 6567 iwm_pci_detach(dev); 6568 6569 if (sc->sc_notif_wait != NULL) { 6570 iwm_notification_wait_free(sc->sc_notif_wait); 6571 sc->sc_notif_wait = NULL; 6572 } 6573 6574 IWM_LOCK_DESTROY(sc); 6575 6576 return (0); 6577} 6578 6579static int 6580iwm_detach(device_t dev) 6581{ 6582 struct iwm_softc *sc = device_get_softc(dev); 6583 6584 return (iwm_detach_local(sc, 1)); 6585} 6586 6587static device_method_t iwm_pci_methods[] = { 6588 /* Device interface */ 6589 DEVMETHOD(device_probe, iwm_probe), 6590 DEVMETHOD(device_attach, iwm_attach), 6591 DEVMETHOD(device_detach, iwm_detach), 6592 DEVMETHOD(device_suspend, iwm_suspend), 6593 DEVMETHOD(device_resume, iwm_resume), 6594 6595 DEVMETHOD_END 6596}; 6597 6598static driver_t iwm_pci_driver = { 6599 "iwm", 6600 iwm_pci_methods, 6601 sizeof (struct iwm_softc) 6602}; 6603 6604static devclass_t iwm_devclass; 6605 6606DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL); 6607MODULE_DEPEND(iwm, firmware, 1, 1, 1); 6608MODULE_DEPEND(iwm, pci, 1, 1, 1); 6609MODULE_DEPEND(iwm, wlan, 1, 1, 1); 6610