Lines Matching defs:sc

156 #define SC_DEV_FOR_PCI sc->sc_dev
471 int iwm_drain_sta(struct iwm_softc *sc, struct iwm_node *, int);
583 iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
588 for (i = 0; i < sc->n_cmd_versions; i++) {
589 entry = &sc->cmd_versions[i];
613 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
627 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
638 fws = &sc->sc_fw.fw_sects[type];
667 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
675 sc->sc_default_calib[ucode_type].flow_trigger =
677 sc->sc_default_calib[ucode_type].event_trigger =
708 iwm_read_firmware(struct iwm_softc *sc)
710 struct iwm_fw_info *fw = &sc->sc_fw;
724 tsleep_nsec(&sc->sc_fw, 0, "iwmfwp", INFSLP);
730 err = loadfirmware(sc->sc_fwname,
734 DEVNAME(sc), sc->sc_fwname, err);
738 sc->sc_capaflags = 0;
739 sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
740 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
741 memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
742 sc->n_cmd_versions = 0;
748 DEVNAME(sc), sc->sc_fwname);
753 iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
775 DEVNAME(sc), len);
786 sc->sc_capa_max_probe_len
788 if (sc->sc_capa_max_probe_len >
799 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
817 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
820 err = iwm_store_cscheme(sc, tlv_data, tlv_len);
838 err = iwm_firmware_store_section(sc,
844 err = iwm_firmware_store_section(sc,
850 err = iwm_firmware_store_section(sc,
860 err = iwm_set_default_calib(sc, tlv_data);
869 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
888 setbit(sc->sc_ucode_api, i + (32 * idx));
908 setbit(sc->sc_enabled_capa, i + (32 * idx));
918 if (sc->n_cmd_versions != 0) {
922 if (tlv_len > sizeof(sc->cmd_versions)) {
926 memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
927 sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
936 err = iwm_firmware_store_section(sc,
951 DEVNAME(sc), paging_mem_size));
955 DEVNAME(sc), IWM_MAX_PAGING_IMAGE_SIZE,
962 DEVNAME(sc), IWM_FW_PAGING_SIZE);
979 sc->sc_capa_n_scan_channels =
981 if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
993 iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1042 "section type %d\n", DEVNAME(sc), err, tlv_type);
1052 wakeup(&sc->sc_fw);
1058 iwm_read_prph_unlocked(struct iwm_softc *sc, uint32_t addr)
1060 IWM_WRITE(sc,
1062 IWM_BARRIER_READ_WRITE(sc);
1063 return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
1067 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
1069 iwm_nic_assert_locked(sc);
1070 return iwm_read_prph_unlocked(sc, addr);
1074 iwm_write_prph_unlocked(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1076 IWM_WRITE(sc,
1078 IWM_BARRIER_WRITE(sc);
1079 IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
1083 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1085 iwm_nic_assert_locked(sc);
1086 iwm_write_prph_unlocked(sc, addr, val);
1090 iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
1092 iwm_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1093 iwm_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1097 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
1102 if (iwm_nic_lock(sc)) {
1103 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
1105 vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
1106 iwm_nic_unlock(sc);
1114 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
1119 if (iwm_nic_lock(sc)) {
1120 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
1124 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
1126 iwm_nic_unlock(sc);
1134 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
1136 return iwm_write_mem(sc, addr, &val, 1);
1140 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
1144 if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1156 iwm_nic_lock(struct iwm_softc *sc)
1158 if (sc->sc_nic_locks > 0) {
1159 iwm_nic_assert_locked(sc);
1160 sc->sc_nic_locks++;
1164 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1167 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
1170 if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1174 sc->sc_nic_locks++;
1178 printf("%s: acquiring device failed\n", DEVNAME(sc));
1183 iwm_nic_assert_locked(struct iwm_softc *sc)
1185 if (sc->sc_nic_locks <= 0)
1186 panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1190 iwm_nic_unlock(struct iwm_softc *sc)
1192 if (sc->sc_nic_locks > 0) {
1193 if (--sc->sc_nic_locks == 0)
1194 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1197 printf("%s: NIC already unlocked\n", DEVNAME(sc));
1201 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1206 if (iwm_nic_lock(sc)) {
1207 val = iwm_read_prph(sc, reg) & mask;
1209 iwm_write_prph(sc, reg, val);
1210 iwm_nic_unlock(sc);
1217 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1219 return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
1223 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
1225 return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
1287 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1295 if (sc->sc_mqrx_supported) {
1305 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size, 256);
1308 DEVNAME(sc));
1314 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1318 DEVNAME(sc));
1323 if (sc->sc_mqrx_supported) {
1325 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1329 DEVNAME(sc));
1338 err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1343 DEVNAME(sc));
1347 err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1353 fail: iwm_free_rx_ring(sc, ring);
1358 iwm_disable_rx_dma(struct iwm_softc *sc)
1362 if (iwm_nic_lock(sc)) {
1363 if (sc->sc_mqrx_supported) {
1364 iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
1366 if (iwm_read_prph(sc, IWM_RFH_GEN_STATUS) &
1372 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1374 if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1380 iwm_nic_unlock(sc);
1385 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1388 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1391 bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1397 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1405 if (sc->sc_mqrx_supported)
1414 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1416 bus_dmamap_unload(sc->sc_dmat, data->map);
1421 bus_dmamap_destroy(sc->sc_dmat, data->map);
1426 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1439 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1442 DEVNAME(sc));
1453 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
1454 are sc->txq[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
1457 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
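
A minimal standalone sketch of the Tx queue-index arithmetic this comment describes. The constant values and names below are assumptions inferred from the txq[5:8] and txq[10:17] ranges in the comment, not copied from the driver headers.

/*
 * Sketch of the DQA Tx queue mapping: command queue, one EDCA
 * mgmt/data queue per access category, one aggregation queue per TID.
 * Constants are illustrative assumptions, not driver definitions.
 */
#include <stdio.h>

#define DQA_CMD_QUEUE		0	/* command queue: sc->txq[0] */
#define DQA_MIN_MGMT_QUEUE	5	/* first EDCA mgmt/data queue */
#define FIRST_AGG_TX_QUEUE	10	/* first per-TID aggregation queue */

int
main(void)
{
	int ac, tid;

	/* One mgmt/data queue per EDCA access category (AC 0-3). */
	for (ac = 0; ac < 4; ac++)
		printf("AC %d -> txq[%d]\n", ac, DQA_MIN_MGMT_QUEUE + ac);

	/* One aggregation queue per TID (0-7) with a block-ack session. */
	for (tid = 0; tid < 8; tid++)
		printf("TID %d -> txq[%d]\n", tid, FIRST_AGG_TX_QUEUE + tid);

	return 0;
}
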
1470 err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1472 printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
1493 err = bus_dmamap_create(sc->sc_dmat, mapsize,
1498 DEVNAME(sc));
1505 fail: iwm_free_tx_ring(sc, ring);
1510 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1518 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1520 bus_dmamap_unload(sc->sc_dmat, data->map);
1527 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1529 sc->qfullmsk &= ~(1 << ring->qid);
1530 sc->qenablemsk &= ~(1 << ring->qid);
1532 if (ring->qid == sc->cmdqid && ring->queued > 0) {
1533 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1534 iwm_nic_unlock(sc);
1542 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1553 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1555 bus_dmamap_unload(sc->sc_dmat, data->map);
1560 bus_dmamap_destroy(sc->sc_dmat, data->map);
1565 iwm_enable_rfkill_int(struct iwm_softc *sc)
1567 if (!sc->sc_msix) {
1568 sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1569 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1571 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1572 sc->sc_fh_init_mask);
1573 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1575 sc->sc_hw_mask = IWM_MSIX_HW_INT_CAUSES_REG_RF_KILL;
1578 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_9000)
1579 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1584 iwm_check_rfkill(struct iwm_softc *sc)
1596 v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1599 sc->sc_flags |= IWM_FLAG_RFKILL;
1601 sc->sc_flags &= ~IWM_FLAG_RFKILL;
1608 iwm_enable_interrupts(struct iwm_softc *sc)
1610 if (!sc->sc_msix) {
1611 sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1612 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1618 sc->sc_hw_mask = sc->sc_hw_init_mask;
1619 sc->sc_fh_mask = sc->sc_fh_init_mask;
1620 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1621 ~sc->sc_fh_mask);
1622 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1623 ~sc->sc_hw_mask);
1628 iwm_enable_fwload_interrupt(struct iwm_softc *sc)
1630 if (!sc->sc_msix) {
1631 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
1632 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1634 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1635 sc->sc_hw_init_mask);
1636 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1638 sc->sc_fh_mask = IWM_MSIX_FH_INT_CAUSES_D2S_CH0_NUM;
1643 iwm_restore_interrupts(struct iwm_softc *sc)
1645 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1649 iwm_disable_interrupts(struct iwm_softc *sc)
1651 if (!sc->sc_msix) {
1652 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1655 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1656 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1658 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1659 sc->sc_fh_init_mask);
1660 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
1661 sc->sc_hw_init_mask);
1666 iwm_ict_reset(struct iwm_softc *sc)
1668 iwm_disable_interrupts(sc);
1670 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1671 sc->ict_cur = 0;
1674 IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1678 | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1681 sc->sc_flags |= IWM_FLAG_USE_ICT;
1683 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1684 iwm_enable_interrupts(sc);
1689 iwm_set_hw_ready(struct iwm_softc *sc)
1693 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1696 ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1701 IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1709 iwm_prepare_card_hw(struct iwm_softc *sc)
1714 if (iwm_set_hw_ready(sc))
1717 IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1723 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1727 if (iwm_set_hw_ready(sc))
1739 iwm_apm_config(struct iwm_softc *sc)
1751 lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1752 sc->sc_cap_off + PCI_PCIE_LCSR);
1754 IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1757 IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1761 cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1762 sc->sc_cap_off + PCI_PCIE_DCSR2);
1763 sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
1765 DEVNAME(sc),
1767 sc->sc_ltr_enabled ? "En" : "Dis"));
1776 iwm_apm_init(struct iwm_softc *sc)
1781 if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000)
1782 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1789 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1793 IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1799 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1802 iwm_apm_config(sc);
1815 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1822 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1826 DEVNAME(sc));
1831 if (sc->host_interrupt_operation_mode) {
1846 if (iwm_nic_lock(sc)) {
1847 iwm_read_prph(sc, IWM_OSC_CLK);
1848 iwm_read_prph(sc, IWM_OSC_CLK);
1849 iwm_nic_unlock(sc);
1851 err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
1855 if (iwm_nic_lock(sc)) {
1856 iwm_read_prph(sc, IWM_OSC_CLK);
1857 iwm_read_prph(sc, IWM_OSC_CLK);
1858 iwm_nic_unlock(sc);
1869 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1870 if (iwm_nic_lock(sc)) {
1871 iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1873 iwm_nic_unlock(sc);
1878 err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1884 if (iwm_nic_lock(sc)) {
1885 iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1887 iwm_nic_unlock(sc);
1892 printf("%s: apm init error %d\n", DEVNAME(sc), err);
1897 iwm_apm_stop(struct iwm_softc *sc)
1899 IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1901 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1905 IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
1910 IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1912 if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1915 printf("%s: timeout waiting for master\n", DEVNAME(sc));
1921 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1926 iwm_init_msix_hw(struct iwm_softc *sc)
1928 iwm_conf_msix_hw(sc, 0);
1930 if (!sc->sc_msix)
1933 sc->sc_fh_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_FH_INT_MASK_AD);
1934 sc->sc_fh_mask = sc->sc_fh_init_mask;
1935 sc->sc_hw_init_mask = ~IWM_READ(sc, IWM_CSR_MSIX_HW_INT_MASK_AD);
1936 sc->sc_hw_mask = sc->sc_hw_init_mask;
1940 iwm_conf_msix_hw(struct iwm_softc *sc, int stopped)
1944 if (!sc->sc_msix) {
1946 if (sc->sc_mqrx_supported && !stopped && iwm_nic_lock(sc)) {
1947 iwm_write_prph(sc, IWM_UREG_CHICK,
1949 iwm_nic_unlock(sc);
1954 if (!stopped && iwm_nic_lock(sc)) {
1955 iwm_write_prph(sc, IWM_UREG_CHICK, IWM_UREG_CHICK_MSIX_ENABLE);
1956 iwm_nic_unlock(sc);
1960 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_MASK_AD, ~0);
1961 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_MASK_AD, ~0);
1964 IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(0),
1967 IWM_WRITE_1(sc, IWM_CSR_MSIX_RX_IVAR(1),
1971 IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
1975 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
1977 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
1979 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_S2D),
1981 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_FH_ERR),
1983 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_ALIVE),
1985 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_WAKEUP),
1987 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_IML),
1989 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_CT_KILL),
1991 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_RF_KILL),
1993 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_PERIODIC),
1995 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SW_ERR),
1997 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_SCD),
1999 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_FH_TX),
2001 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HW_ERR),
2003 IWM_WRITE_1(sc, IWM_CSR_MSIX_IVAR(IWM_MSIX_IVAR_CAUSE_REG_HAP),
2007 IWM_CLRBITS(sc, IWM_CSR_MSIX_FH_INT_MASK_AD,
2012 IWM_CLRBITS(sc, IWM_CSR_MSIX_HW_INT_MASK_AD,
2027 iwm_clear_persistence_bit(struct iwm_softc *sc)
2031 hpm = iwm_read_prph_unlocked(sc, IWM_HPM_DEBUG);
2033 wprot = iwm_read_prph_unlocked(sc, IWM_PREG_PRPH_WPROT_9000);
2036 DEVNAME(sc));
2039 iwm_write_prph_unlocked(sc, IWM_HPM_DEBUG,
2047 iwm_start_hw(struct iwm_softc *sc)
2051 err = iwm_prepare_card_hw(sc);
2055 if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000) {
2056 err = iwm_clear_persistence_bit(sc);
2062 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2065 err = iwm_apm_init(sc);
2069 iwm_init_msix_hw(sc);
2071 iwm_enable_rfkill_int(sc);
2072 iwm_check_rfkill(sc);
2079 iwm_stop_device(struct iwm_softc *sc)
2084 iwm_disable_interrupts(sc);
2085 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
2088 if (iwm_nic_lock(sc)) {
2090 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2093 IWM_WRITE(sc,
2098 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
2105 iwm_nic_unlock(sc);
2107 iwm_disable_rx_dma(sc);
2109 iwm_reset_rx_ring(sc, &sc->rxq);
2111 for (qid = 0; qid < nitems(sc->txq); qid++)
2112 iwm_reset_tx_ring(sc, &sc->txq[qid]);
2114 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2115 if (iwm_nic_lock(sc)) {
2117 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
2119 iwm_nic_unlock(sc);
2125 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
2127 if (sc->sc_nic_locks > 0)
2129 DEVNAME(sc), sc->sc_nic_locks);
2130 sc->sc_nic_locks = 0;
2133 iwm_apm_stop(sc);
2136 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
2146 iwm_conf_msix_hw(sc, 1);
2152 iwm_disable_interrupts(sc);
2155 iwm_enable_rfkill_int(sc);
2156 iwm_check_rfkill(sc);
2158 iwm_prepare_card_hw(sc);
2162 iwm_nic_config(struct iwm_softc *sc)
2167 radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
2169 radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
2171 radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
2174 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2176 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2192 val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
2195 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);
2202 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2203 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2209 iwm_nic_rx_init(struct iwm_softc *sc)
2211 if (sc->sc_mqrx_supported)
2212 return iwm_nic_rx_mq_init(sc);
2214 return iwm_nic_rx_legacy_init(sc);
2218 iwm_nic_rx_mq_init(struct iwm_softc *sc)
2222 if (!iwm_nic_lock(sc))
2226 iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG, 0);
2228 iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, 0);
2230 iwm_write_prph64(sc, IWM_RFH_Q0_FRBDCB_BA_LSB,
2231 sc->rxq.free_desc_dma.paddr);
2232 iwm_write_prph64(sc, IWM_RFH_Q0_URBDCB_BA_LSB,
2233 sc->rxq.used_desc_dma.paddr);
2234 iwm_write_prph64(sc, IWM_RFH_Q0_URBD_STTS_WPTR_LSB,
2235 sc->rxq.stat_dma.paddr);
2236 iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_WIDX, 0);
2237 iwm_write_prph(sc, IWM_RFH_Q0_FRBDCB_RIDX, 0);
2238 iwm_write_prph(sc, IWM_RFH_Q0_URBDCB_WIDX, 0);
2244 iwm_write_prph(sc, IWM_RFH_RXF_DMA_CFG,
2252 iwm_write_prph(sc, IWM_RFH_GEN_CFG,
2255 (sc->sc_integrated ? IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
2259 iwm_write_prph(sc, IWM_RFH_RXF_RXQ_ACTIVE, enabled);
2261 iwm_nic_unlock(sc);
2263 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2265 IWM_WRITE(sc, IWM_RFH_Q0_FRBDCB_WIDX_TRG, 8);
2271 iwm_nic_rx_legacy_init(struct iwm_softc *sc)
2273 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
2275 iwm_disable_rx_dma(sc);
2277 if (!iwm_nic_lock(sc))
2281 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
2282 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
2283 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
2284 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
2287 IWM_WRITE(sc,
2288 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.free_desc_dma.paddr >> 8);
2291 IWM_WRITE(sc,
2292 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
2295 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
2303 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
2306 if (sc->host_interrupt_operation_mode)
2307 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
2309 iwm_nic_unlock(sc);
2315 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
2321 iwm_nic_tx_init(struct iwm_softc *sc)
2325 if (!iwm_nic_lock(sc))
2329 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
2332 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
2334 for (qid = 0; qid < nitems(sc->txq); qid++) {
2335 struct iwm_tx_ring *txq = &sc->txq[qid];
2338 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
2342 err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
2346 iwm_nic_unlock(sc);
2352 iwm_nic_init(struct iwm_softc *sc)
2356 iwm_apm_init(sc);
2357 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2358 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2362 iwm_nic_config(sc);
2364 err = iwm_nic_rx_init(sc);
2368 err = iwm_nic_tx_init(sc);
2372 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2398 iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
2401 iwm_nic_assert_locked(sc);
2403 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2405 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2409 err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2414 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2416 iwm_write_mem32(sc,
2417 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2420 iwm_write_mem32(sc,
2421 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2429 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2435 if (qid == sc->cmdqid)
2436 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2437 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2443 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
2446 struct iwm_tx_ring *ring = &sc->txq[qid];
2450 iwm_nic_assert_locked(sc);
2459 scd_bug = !sc->sc_mqrx_supported &&
2466 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2480 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
2485 sc->qenablemsk |= (1 << qid);
2490 iwm_disable_txq(struct iwm_softc *sc, int sta_id, int qid, uint8_t tid)
2501 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
2505 sc->qenablemsk &= ~(1 << qid);
2510 iwm_post_alive(struct iwm_softc *sc)
2516 if (!iwm_nic_lock(sc))
2519 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2521 iwm_ict_reset(sc);
2523 iwm_nic_unlock(sc);
2529 err = iwm_write_mem(sc,
2530 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
2535 if (!iwm_nic_lock(sc))
2539 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2541 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2544 err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
2546 iwm_nic_unlock(sc);
2551 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2555 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2560 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2563 iwm_nic_unlock(sc);
2566 if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
2567 err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2575 iwm_phy_db_get_section(struct iwm_softc *sc, uint16_t type, uint16_t chg_id)
2577 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2602 iwm_phy_db_set_section(struct iwm_softc *sc,
2614 entry = iwm_phy_db_get_section(sc, type, chg_id);
2674 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2676 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2699 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2708 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2710 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2721 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2738 return iwm_send_cmd(sc, &cmd);
2742 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc, uint16_t type,
2750 entry = iwm_phy_db_get_section(sc, type, i);
2757 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2768 iwm_send_phy_db_data(struct iwm_softc *sc)
2774 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2778 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2782 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2787 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2791 err = iwm_phy_db_send_all_channel_groups(sc,
2796 err = iwm_phy_db_send_all_channel_groups(sc,
2812 iwm_send_time_event_cmd(struct iwm_softc *sc,
2827 err = iwm_send_cmd(sc, &hcmd);
2845 sc->sc_time_event_uid = le32toh(resp->unique_id);
2849 iwm_free_resp(sc, &hcmd);
2854 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2860 if (sc->sc_flags & IWM_FLAG_TE_ACTIVE)
2883 if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2884 sc->sc_flags |= IWM_FLAG_TE_ACTIVE;
2890 iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
2895 if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
2903 time_cmd.id = htole32(sc->sc_time_event_uid);
2905 if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
2906 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
2935 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2959 err = iwm_send_cmd(sc, &cmd);
2997 iwm_free_resp(sc, &cmd);
3009 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
3020 err = iwm_nvm_read_chunk(sc,
3032 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
3036 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
3039 if (sc->sc_nvm.valid_tx_ant)
3040 tx_ant &= sc->sc_nvm.valid_tx_ant;
3046 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
3050 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
3053 if (sc->sc_nvm.valid_rx_ant)
3054 rx_ant &= sc->sc_nvm.valid_rx_ant;
3060 iwm_valid_siso_ant_rate_mask(struct iwm_softc *sc)
3062 uint8_t valid_tx_ant = iwm_fw_valid_tx_ant(sc);
3069 if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
3077 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
3080 struct ieee80211com *ic = &sc->sc_ic;
3081 struct iwm_nvm_data *data = &sc->sc_nvm;
3134 iwm_mimo_enabled(struct iwm_softc *sc)
3136 struct ieee80211com *ic = &sc->sc_ic;
3138 return !sc->sc_nvm.sku_cap_mimo_disable &&
3143 iwm_setup_ht_rates(struct iwm_softc *sc)
3145 struct ieee80211com *ic = &sc->sc_ic;
3154 if (!iwm_mimo_enabled(sc))
3157 rx_ant = iwm_fw_valid_rx_ant(sc);
3164 iwm_setup_vht_rates(struct iwm_softc *sc)
3166 struct ieee80211com *ic = &sc->sc_ic;
3167 uint8_t rx_ant = iwm_fw_valid_rx_ant(sc);
3173 if (iwm_mimo_enabled(sc) &&
3208 iwm_clear_reorder_buffer(struct iwm_softc *sc, struct iwm_rxba_data *rxba)
3233 struct iwm_softc *sc = rxba->sc;
3234 struct ieee80211com *ic = &sc->sc_ic;
3240 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0 &&
3264 struct iwm_softc *sc = rxba->sc;
3265 struct ieee80211com *ic = &sc->sc_ic;
3303 iwm_release_frames(sc, ni, rxba, buf, sn, &ml);
3304 if_input(&sc->sc_ic.ic_if, &ml);
3322 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3325 struct ieee80211com *ic = &sc->sc_ic;
3336 if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
3360 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3364 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
3375 if (sc->sc_mqrx_supported) {
3386 baid >= nitems(sc->sc_rxba_data)) {
3391 rxba = &sc->sc_rxba_data[baid];
3414 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3415 rxba = &sc->sc_rxba_data[i];
3421 iwm_clear_reorder_buffer(sc, rxba);
3428 sc->sc_rx_ba_sessions++;
3430 } else if (sc->sc_rx_ba_sessions > 0)
3431 sc->sc_rx_ba_sessions--;
3440 struct iwm_softc *sc = arg;
3441 struct ieee80211com *ic = &sc->sc_ic;
3445 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3447 refcnt_rele_wake(&sc->task_refs);
3452 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
3454 printf("%s: failed to update MAC\n", DEVNAME(sc));
3456 iwm_unprotect_session(sc, in);
3458 refcnt_rele_wake(&sc->task_refs);
3465 struct iwm_softc *sc = ic->ic_softc;
3468 !task_pending(&sc->newstate_task))
3469 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3475 struct iwm_softc *sc = ic->ic_softc;
3478 !task_pending(&sc->newstate_task))
3479 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3485 struct iwm_softc *sc = ic->ic_softc;
3488 !task_pending(&sc->newstate_task))
3489 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3495 struct iwm_softc *sc = arg;
3496 struct ieee80211com *ic = &sc->sc_ic;
3502 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3505 refcnt_rele_wake(&sc->task_refs);
3510 chains = iwm_mimo_enabled(sc) ? 2 : 1;
3525 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
3529 printf("%s: failed to update PHY\n", DEVNAME(sc));
3533 refcnt_rele_wake(&sc->task_refs);
3540 struct iwm_softc *sc = ic->ic_softc;
3543 !task_pending(&sc->newstate_task))
3544 iwm_add_task(sc, systq, &sc->phy_ctxt_task);
3550 struct iwm_softc *sc = ic->ic_softc;
3553 !task_pending(&sc->newstate_task))
3554 iwm_add_task(sc, systq, &sc->mac_ctxt_task);
3558 iwm_sta_tx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3562 struct ieee80211com *ic = &sc->sc_ic;
3577 if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3580 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3584 ring = &sc->txq[qid];
3605 err = iwm_flush_sta(sc, in);
3615 if (start && (sc->qenablemsk & (1 << qid)) == 0) {
3616 if (!iwm_nic_lock(sc)) {
3622 err = iwm_enable_txq(sc, IWM_STATION_ID, qid, fifo, 1, tid,
3624 iwm_nic_unlock(sc);
3627 DEVNAME(sc), qid, err);
3645 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
3651 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd, &status);
3656 DEVNAME(sc), err);
3664 sc->tx_ba_queue_mask |= (1 << qid);
3667 sc->tx_ba_queue_mask &= ~(1 << qid);
3673 iwm_txq_advance(sc, ring, ring->cur);
3674 iwm_clear_oactive(sc, ring);
3683 struct iwm_softc *sc = arg;
3684 struct ieee80211com *ic = &sc->sc_ic;
3689 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
3691 refcnt_rele_wake(&sc->task_refs);
3697 if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3699 if (sc->ba_rx.start_tidmask & (1 << tid)) {
3701 err = iwm_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3703 sc->ba_rx.start_tidmask &= ~(1 << tid);
3704 } else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3705 err = iwm_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3706 sc->ba_rx.stop_tidmask &= ~(1 << tid);
3711 if (sc->sc_flags & IWM_FLAG_SHUTDOWN)
3713 if (sc->ba_tx.start_tidmask & (1 << tid)) {
3715 err = iwm_sta_tx_agg(sc, ni, tid, ba->ba_winstart,
3717 sc->ba_tx.start_tidmask &= ~(1 << tid);
3718 } else if (sc->ba_tx.stop_tidmask & (1 << tid)) {
3719 err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
3720 sc->ba_tx.stop_tidmask &= ~(1 << tid);
3728 if (err && (sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
3729 task_add(systq, &sc->init_task);
3731 refcnt_rele_wake(&sc->task_refs);
3743 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3745 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS ||
3749 if (sc->ba_rx.start_tidmask & (1 << tid))
3752 sc->ba_rx.start_tidmask |= (1 << tid);
3753 iwm_add_task(sc, systq, &sc->ba_task);
3766 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3768 if (tid > IWM_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3771 sc->ba_rx.stop_tidmask |= (1 << tid);
3772 iwm_add_task(sc, systq, &sc->ba_task);
3779 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3784 if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
3796 if ((sc->tx_ba_queue_mask & (1 << qid)) != 0)
3800 if (sc->ba_tx.start_tidmask & (1 << tid))
3803 sc->ba_tx.start_tidmask |= (1 << tid);
3804 iwm_add_task(sc, systq, &sc->ba_task);
3813 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
3816 if (tid > IWM_MAX_TID_COUNT || sc->ba_tx.stop_tidmask & (1 << tid))
3820 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
3823 sc->ba_tx.stop_tidmask |= (1 << tid);
3824 iwm_add_task(sc, systq, &sc->ba_task);
3828 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3864 if (!iwm_nic_lock(sc))
3866 mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3867 mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3868 iwm_nic_unlock(sc);
3883 printf("%s: mac address not found\n", DEVNAME(sc));
3888 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3893 struct iwm_nvm_data *data = &sc->sc_nvm;
3900 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3927 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
3941 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3950 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3952 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3953 if (sc->nvm_type == IWM_NVM_SDP) {
3954 iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
3957 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3961 iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3975 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3982 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3990 if (sc->nvm_type == IWM_NVM_SDP) {
3998 } else if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
4026 panic("unknown device family %d", sc->sc_device_family);
4034 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
4039 iwm_nvm_init(struct iwm_softc *sc)
4045 const size_t bufsz = sc->sc_nvm_max_section_size;
4057 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
4072 err = iwm_parse_nvm_sections(sc, nvm_sections);
4084 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
4100 err = iwm_firmware_load_chunk(sc, addr, data, len);
4109 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
4112 struct iwm_dma_info *dma = &sc->fw_dma;
4117 bus_dmamap_sync(sc->sc_dmat,
4122 err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
4128 sc->sc_fw_chunk_done = 0;
4130 if (!iwm_nic_lock(sc))
4133 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4135 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
4137 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
4139 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
4142 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
4146 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
4151 iwm_nic_unlock(sc);
4155 while (!sc->sc_fw_chunk_done) {
4156 err = tsleep_nsec(&sc->sc_fw, 0, "iwmfw", SEC_TO_NSEC(1));
4161 if (!sc->sc_fw_chunk_done)
4163 DEVNAME(sc), dst_addr, byte_cnt);
4167 int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
4177 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4185 fws = &sc->sc_fw.fw_sects[ucode_type];
4190 if (dlen > sc->sc_fwdmasegsz) {
4193 err = iwm_firmware_load_sect(sc, offset, data, dlen);
4196 DEVNAME(sc), i, fws->fw_count);
4201 iwm_enable_interrupts(sc);
4203 IWM_WRITE(sc, IWM_CSR_RESET, 0);
4209 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
4243 if (dlen > sc->sc_fwdmasegsz) {
4246 err = iwm_firmware_load_sect(sc, offset, data, dlen);
4249 "(error %d)\n", DEVNAME(sc), i, err);
4254 if (iwm_nic_lock(sc)) {
4255 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
4257 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
4259 iwm_nic_unlock(sc);
4263 "(error %d)\n", DEVNAME(sc), i, err);
4270 if (iwm_nic_lock(sc)) {
4272 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
4274 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
4275 iwm_nic_unlock(sc);
4279 DEVNAME(sc), err);
4287 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4293 fws = &sc->sc_fw.fw_sects[ucode_type];
4297 if (iwm_nic_lock(sc)) {
4298 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
4300 iwm_nic_unlock(sc);
4304 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
4309 err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
4313 iwm_enable_interrupts(sc);
4318 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4324 sc->sc_uc.uc_intr = 0;
4325 sc->sc_uc.uc_ok = 0;
4327 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
4328 err = iwm_load_firmware_8000(sc, ucode_type);
4330 err = iwm_load_firmware_7000(sc, ucode_type);
4336 err = tsleep_nsec(&sc->sc_uc, 0, "iwmuc", SEC_TO_NSEC(1));
4337 if (err || !sc->sc_uc.uc_ok)
4338 printf("%s: could not load firmware\n", DEVNAME(sc));
4344 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
4348 IWM_WRITE(sc, IWM_CSR_INT, ~0);
4350 err = iwm_nic_init(sc);
4352 printf("%s: unable to init nic\n", DEVNAME(sc));
4357 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4358 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
4362 IWM_WRITE(sc, IWM_CSR_INT, ~0);
4363 iwm_enable_fwload_interrupt(sc);
4367 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4368 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
4370 return iwm_load_firmware(sc, ucode_type);
4374 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
4380 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
4385 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
4388 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
4390 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config |
4391 sc->sc_extra_phy_config);
4393 sc->sc_default_calib[ucode_type].event_trigger;
4395 sc->sc_default_calib[ucode_type].flow_trigger;
4397 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
4402 iwm_send_dqa_cmd(struct iwm_softc *sc)
4410 return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4414 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
4417 enum iwm_ucode_type old_type = sc->sc_uc_current;
4418 struct iwm_fw_sects *fw = &sc->sc_fw.fw_sects[ucode_type];
4421 err = iwm_read_firmware(sc);
4425 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
4426 sc->cmdqid = IWM_DQA_CMD_QUEUE;
4428 sc->cmdqid = IWM_CMD_QUEUE;
4430 sc->sc_uc_current = ucode_type;
4431 err = iwm_start_fw(sc, ucode_type);
4433 sc->sc_uc_current = old_type;
4437 err = iwm_post_alive(sc);
4447 err = iwm_save_fw_paging(sc, fw);
4450 DEVNAME(sc));
4454 err = iwm_send_paging_cmd(sc, fw);
4457 DEVNAME(sc));
4458 iwm_free_fw_paging(sc);
4467 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
4472 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
4474 DEVNAME(sc));
4479 sc->sc_init_complete = 0;
4480 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
4482 printf("%s: failed to load init firmware\n", DEVNAME(sc));
4487 if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
4488 err = iwm_send_bt_init_conf(sc);
4491 DEVNAME(sc), err);
4498 err = iwm_nvm_init(sc);
4500 printf("%s: failed to read nvm\n", DEVNAME(sc));
4505 if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4506 IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4507 sc->sc_nvm.hw_addr);
4513 err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
4520 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
4530 err = iwm_send_phy_cfg_cmd(sc);
4540 while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4541 err = tsleep_nsec(&sc->sc_init_complete, 0, "iwminit",
4552 iwm_config_ltr(struct iwm_softc *sc)
4558 if (!sc->sc_ltr_enabled)
4561 return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4565 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4567 struct iwm_rx_ring *ring = &sc->rxq;
4588 bus_dmamap_unload(sc->sc_dmat, data->map);
4593 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4603 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4606 if (sc->sc_mqrx_supported) {
4609 bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4615 bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4629 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
4651 iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
4664 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4669 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4672 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4697 iwm_ccmp_decap(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4700 struct ieee80211com *ic = &sc->sc_ic;
4750 iwm_rx_hwdecrypt(struct iwm_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4753 struct ieee80211com *ic = &sc->sc_ic;
4804 iwm_rx_frame(struct iwm_softc *sc, struct mbuf *m, int chanidx,
4809 struct ieee80211com *ic = &sc->sc_ic;
4820 iwm_ccmp_decap(sc, m, ni, rxi) != 0) {
4828 if (sc->sc_drvbpf != NULL) {
4829 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4847 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4877 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4886 iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
4889 struct ieee80211com *ic = &sc->sc_ic;
4901 phy_info = &sc->sc_last_phy_info;
4939 if (iwm_rx_hwdecrypt(sc, m, rx_pkt_status, &rxi)) {
4949 rssi = iwm_get_signal_strength(sc, phy_info);
4957 iwm_rx_frame(sc, m, chanidx, rx_pkt_status,
4980 iwm_detect_duplicate(struct iwm_softc *sc, struct mbuf *m,
4983 struct ieee80211com *ic = &sc->sc_ic;
5044 iwm_release_frames(struct iwm_softc *sc, struct ieee80211_node *ni,
5076 iwm_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
5102 iwm_oldsn_workaround(struct iwm_softc *sc, struct ieee80211_node *ni, int tid,
5105 struct ieee80211com *ic = &sc->sc_ic;
5155 iwm_rx_reorder(struct iwm_softc *sc, struct mbuf *m, int chanidx,
5160 struct ieee80211com *ic = &sc->sc_ic;
5202 baid >= nitems(sc->sc_rxba_data))
5205 rxba = &sc->sc_rxba_data[baid];
5232 iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5248 iwm_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5251 if (iwm_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5339 iwm_release_frames(sc, ni, rxba, buffer, nssn, ml);
5351 iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, void *pktdata,
5354 struct ieee80211com *ic = &sc->sc_ic;
5465 if (iwm_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5470 if (iwm_detect_duplicate(sc, m, desc, &rxi)) {
5480 rssi = iwm_rxmq_get_signal_strength(sc, desc);
5488 if (iwm_rx_reorder(sc, m, chanidx, desc,
5493 iwm_rx_frame(sc, m, chanidx, le16toh(desc->status),
5499 iwm_ra_choose(struct iwm_softc *sc, struct ieee80211_node *ni)
5501 struct ieee80211com *ic = &sc->sc_ic;
5520 iwm_ht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5523 struct ieee80211com *ic = &sc->sc_ic;
5560 iwm_ra_choose(sc, ni);
5565 iwm_vht_single_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5568 struct ieee80211com *ic = &sc->sc_ic;
5624 iwm_ra_choose(sc, ni);
5629 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5632 struct ieee80211com *ic = &sc->sc_ic;
5675 iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5681 iwm_ht_single_rate_control(sc, ni, txmcs,
5690 iwm_txd_done(struct iwm_softc *sc, struct iwm_tx_data *txd)
5692 struct ieee80211com *ic = &sc->sc_ic;
5694 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5696 bus_dmamap_unload(sc->sc_dmat, txd->map);
5709 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5716 iwm_reset_sched(sc, ring->qid, ring->tail, IWM_STATION_ID);
5717 iwm_txd_done(sc, txd);
5727 iwm_ampdu_tx_done(struct iwm_softc *sc, struct iwm_cmd_header *cmd_hdr,
5732 struct ieee80211com *ic = &sc->sc_ic;
5825 iwm_vht_single_rate_control(sc, ni, txmcs, nss,
5831 iwm_ht_single_rate_control(sc, ni, txmcs,
5845 iwm_txq_advance(sc, txq, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5846 iwm_clear_oactive(sc, txq);
5850 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
5856 struct iwm_tx_ring *ring = &sc->txq[qid];
5862 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
5876 sc->sc_tx_timer[qid] = 0;
5887 iwm_ampdu_tx_done(sc, cmd_hdr, txd->in, ring,
5895 iwm_rx_tx_cmd_single(sc, pkt, txd->in, txd->txmcs, txd->txrate);
5896 iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
5897 iwm_clear_oactive(sc, ring);
5902 iwm_clear_oactive(struct iwm_softc *sc, struct iwm_tx_ring *ring)
5904 struct ieee80211com *ic = &sc->sc_ic;
5908 sc->qfullmsk &= ~(1 << ring->qid);
5909 if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5922 iwm_ampdu_rate_control(struct iwm_softc *sc, struct ieee80211_node *ni,
5925 struct ieee80211com *ic = &sc->sc_ic;
5958 iwm_ra_choose(sc, ni);
5962 iwm_rx_compressed_ba(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
5965 struct ieee80211com *ic = &sc->sc_ic;
5991 sc->sc_tx_timer[qid] = 0;
5997 ring = &sc->txq[qid];
6022 iwm_ampdu_rate_control(sc, ni, ring, ban->tid,
6031 iwm_txq_advance(sc, ring, IWM_AGG_SSN_TO_TXQ_IDX(ssn));
6032 iwm_clear_oactive(sc, ring);
6036 iwm_rx_bmiss(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
6039 struct ieee80211com *ic = &sc->sc_ic;
6047 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
6055 DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
6069 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
6074 int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
6098 !isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
6103 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
6108 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, len, &cmd,
6117 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6129 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
6133 struct ieee80211com *ic = &sc->sc_ic;
6164 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6170 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6206 iwm_phy_ctxt_cmd_uhb(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6210 struct ieee80211com *ic = &sc->sc_ic;
6247 cmd.rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
6252 cmd.txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
6254 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
6258 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
6271 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS))
6272 return iwm_phy_ctxt_cmd_uhb(sc, ctxt, chains_static,
6275 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
6277 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
6280 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
6285 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6287 struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6298 int generation = sc->sc_generation;
6315 if (sc->sc_cmd_resp_pkt[idx] != NULL)
6321 sc->sc_cmd_resp_pkt[idx] = resp_buf;
6322 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6324 sc->sc_cmd_resp_pkt[idx] = NULL;
6346 DEVNAME(sc), totlen);
6353 DEVNAME(sc), totlen);
6358 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
6362 DEVNAME(sc), totlen);
6405 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
6408 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6412 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6421 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
6422 if (ring->queued == 0 && !iwm_nic_lock(sc)) {
6428 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
6433 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
6439 if (generation != sc->sc_generation) {
6445 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6446 sc->sc_cmd_resp_pkt[idx] = NULL;
6447 } else if (generation == sc->sc_generation) {
6448 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6449 sc->sc_cmd_resp_len[idx]);
6450 sc->sc_cmd_resp_pkt[idx] = NULL;
6460 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
6470 return iwm_send_cmd(sc, &cmd);
6474 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
6485 err = iwm_send_cmd(sc, cmd);
6495 iwm_free_resp(sc, cmd);
6501 iwm_free_resp(sc, cmd);
6506 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
6515 return iwm_send_cmd_status(sc, &cmd, status);
6519 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
6527 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6529 struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
6532 if (qid != sc->cmdqid) {
6539 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6541 bus_dmamap_unload(sc->sc_dmat, data->map);
6549 DEVNAME(sc), code));
6555 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6556 iwm_nic_unlock(sc);
6561 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6567 scd_bc_tbl = sc->sched_dma.vaddr;
6570 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
6575 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6576 0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6582 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6583 0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6587 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6592 scd_bc_tbl = sc->sched_dma.vaddr;
6596 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6597 0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
6604 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
6605 0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
6614 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
6617 struct ieee80211com *ic = &sc->sc_ic;
6637 ridx = sc->sc_fixed_ridx;
6639 ridx = sc->sc_fixed_ridx;
6664 rate_flags = iwm_valid_siso_ant_rate_mask(sc);
6715 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
6717 struct ieee80211com *ic = &sc->sc_ic;
6758 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6762 } else if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
6778 (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
6786 ring = &sc->txq[qid];
6800 rate = iwm_tx_fill_cmd(sc, in, wh, tx);
6803 if (sc->sc_drvbpf != NULL) {
6804 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
6828 bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6940 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6943 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6952 device_printf(sc->sc_dev,
6965 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6968 printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
7001 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
7003 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
7006 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
7010 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);
7014 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
7018 sc->qfullmsk |= 1 << ring->qid;
7022 sc->sc_tx_timer[ring->qid] = 15;
7028 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_queue_msk)
7036 err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, 0,
7039 printf("%s: Flushing tx queue failed: %d\n", DEVNAME(sc), err);
7046 iwm_wait_tx_queues_empty(struct iwm_softc *sc)
7051 struct iwm_tx_ring *ring = &sc->txq[i];
7053 if (i == sc->cmdqid)
7068 iwm_led_enable(struct iwm_softc *sc)
7070 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
7074 iwm_led_disable(struct iwm_softc *sc)
7076 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
7080 iwm_led_is_enabled(struct iwm_softc *sc)
7082 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
7090 struct iwm_softc *sc = arg;
7092 if (iwm_led_is_enabled(sc))
7093 iwm_led_disable(sc);
7095 iwm_led_enable(sc);
7097 timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7101 iwm_led_blink_start(struct iwm_softc *sc)
7103 timeout_add_msec(&sc->sc_led_blink_to, IWM_LED_BLINK_TIMEOUT_MSEC);
7104 iwm_led_enable(sc);
7108 iwm_led_blink_stop(struct iwm_softc *sc)
7110 timeout_del(&sc->sc_led_blink_to);
7111 iwm_led_disable(sc);
7117 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
7120 return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
7125 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
7128 cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
7132 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
7140 if (!sc->sc_bf.bf_enabled)
7143 sc->sc_bf.ba_enabled = enable;
7144 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7145 return iwm_beacon_filter_send_cmd(sc, &cmd);
7149 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
7152 struct ieee80211com *ic = &sc->sc_ic;
7179 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
7187 iwm_power_build_cmd(sc, in, &cmd);
7189 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
7196 return iwm_update_beacon_abort(sc, in, ba_enable);
7200 iwm_power_update_device(struct iwm_softc *sc)
7203 struct ieee80211com *ic = &sc->sc_ic;
7208 return iwm_send_cmd_pdu(sc,
7213 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
7221 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
7222 err = iwm_beacon_filter_send_cmd(sc, &cmd);
7225 sc->sc_bf.bf_enabled = 1;
7231 iwm_disable_beacon_filter(struct iwm_softc *sc)
7238 err = iwm_beacon_filter_send_cmd(sc, &cmd);
7240 sc->sc_bf.bf_enabled = 0;
7246 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
7254 struct ieee80211com *ic = &sc->sc_ic;
7256 if (!update && (sc->sc_flags & IWM_FLAG_STA_ACTIVE))
7265 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
7276 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
7285 if (isset(sc->sc_enabled_capa,
7314 if (iwm_mimo_enabled(sc)) {
7383 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7387 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
7396 iwm_add_aux_sta(struct iwm_softc *sc)
7403 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
7405 err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
7409 err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
7416 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7424 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7428 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
7437 iwm_drain_sta(struct iwm_softc *sc, struct iwm_node* in, int drain)
7452 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
7458 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA,
7462 DEVNAME(sc), err);
7472 DEVNAME(sc), drain ? "enable" : "disable");
7480 iwm_flush_sta(struct iwm_softc *sc, struct iwm_node *in)
7484 sc->sc_flags |= IWM_FLAG_TXFLUSH;
7486 err = iwm_drain_sta(sc, in, 1);
7490 err = iwm_flush_tx_path(sc, in->tfd_queue_msk);
7493 DEVNAME(sc), err);
7502 err = iwm_wait_tx_queues_empty(sc);
7506 err = iwm_drain_sta(sc, in, 0);
7508 sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
7513 iwm_rm_sta_cmd(struct iwm_softc *sc, struct iwm_node *in)
7515 struct ieee80211com *ic = &sc->sc_ic;
7519 if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
7528 err = iwm_send_cmd_pdu(sc, IWM_REMOVE_STA, 0, sizeof(rm_sta_cmd),
7535 iwm_scan_rx_chain(struct iwm_softc *sc)
7540 rx_ant = iwm_fw_valid_rx_ant(sc);
7549 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
7554 for (i = 0, ind = sc->sc_scan_last_antenna;
7557 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
7558 sc->sc_scan_last_antenna = ind;
7562 tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
7572 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
7575 struct ieee80211com *ic = &sc->sc_ic;
7581 nchan < sc->sc_capa_n_scan_channels;
7600 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
7603 struct ieee80211com *ic = &sc->sc_ic;
7609 nchan < sc->sc_capa_n_scan_channels;
7627 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
7632 err = iwm_fill_probe_req(sc, &preq2);
7645 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
7647 struct ieee80211com *ic = &sc->sc_ic;
7696 if (isset(sc->sc_enabled_capa,
7707 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
7748 iwm_lmac_scan(struct iwm_softc *sc, int bgscan)
7750 struct ieee80211com *ic = &sc->sc_ic;
7764 sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
7789 req->rx_chain_select = iwm_scan_rx_chain(sc);
7801 if (isset(sc->sc_enabled_capa,
7803 isset(sc->sc_enabled_capa,
7808 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7817 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
7824 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
7835 req->n_channels = iwm_lmac_scan_fill_channels(sc,
7841 sc->sc_capa_n_scan_channels));
7842 err = iwm_fill_probe_req_v1(sc, preq);
7856 err = iwm_send_cmd(sc, &hcmd);
7862 iwm_config_umac_scan(struct iwm_softc *sc)
7864 struct ieee80211com *ic = &sc->sc_ic;
7881 cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
7887 scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
7888 scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
7900 IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
7907 nchan < sc->sc_capa_n_scan_channels; c++) {
7929 err = iwm_send_cmd(sc, &hcmd);
7935 iwm_umac_scan_size(struct iwm_softc *sc)
7940 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7942 else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7944 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
7950 sc->sc_capa_n_scan_channels + tail_size;
7954 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
7957 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7960 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7967 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
7969 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
7972 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
7991 iwm_umac_scan(struct iwm_softc *sc, int bgscan)
7993 struct ieee80211com *ic = &sc->sc_ic;
8008 req_len = iwm_umac_scan_size(sc);
8022 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8039 if (isset(sc->sc_ucode_api,
8060 if (isset(sc->sc_ucode_api,
8064 } else if (isset(sc->sc_ucode_api,
8076 cmd_data = iwm_get_scan_req_umac_data(sc, req);
8077 chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
8078 chanparam->count = iwm_umac_scan_fill_channels(sc,
8084 sc->sc_capa_n_scan_channels;
8091 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
8097 if (isset(sc->sc_ucode_api,
8114 if (isset(sc->sc_enabled_capa,
8116 isset(sc->sc_enabled_capa,
8121 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
8129 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
8130 err = iwm_fill_probe_req(sc, &tail->preq);
8132 err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
8142 err = iwm_send_cmd(sc, &hcmd);
8148 iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
8150 struct ieee80211com *ic = &sc->sc_ic;
8159 "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
8196 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
8282 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
8286 struct ieee80211com *ic = &sc->sc_ic;
8310 iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
8369 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
8393 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
8396 struct ieee80211com *ic = &sc->sc_ic;
8399 int active = (sc->sc_flags & IWM_FLAG_MAC_ACTIVE);
8408 iwm_mac_ctxt_cmd_common(sc, in, &cmd, action);
8424 iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
8426 return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
8430 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in, int running)
8487 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_QUOTA_LOW_LATENCY)) {
8498 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0,
8502 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
8506 iwm_add_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8510 if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
8515 refcnt_take(&sc->task_refs);
8517 refcnt_rele_wake(&sc->task_refs);
8522 iwm_del_task(struct iwm_softc *sc, struct taskq *taskq, struct task *task)
8525 refcnt_rele(&sc->task_refs);
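The iwm_add_task()/iwm_del_task() fragments above show the reference-counted deferred-work pattern: a task is only queued while the device is not shutting down, and each queued task holds a reference so iwm_stop() can wait for outstanding work to drain. A standalone sketch of the idea, with hypothetical names (the real driver uses the kernel refcnt(9) and task(9) APIs):

	/* Illustrative sketch only: models the refcount-guarded task
	 * scheduling seen in iwm_add_task()/iwm_del_task(). */
	#include <stdio.h>

	struct fake_softc {
		int	task_refs;	/* stands in for refcnt(9) */
		int	shutdown;	/* stands in for IWM_FLAG_SHUTDOWN */
		int	task_pending;	/* stands in for task_add()/task_del() */
	};

	static void
	fake_add_task(struct fake_softc *sc)
	{
		if (sc->shutdown)
			return;			/* disallow new work during stop */
		sc->task_refs++;		/* reference held by the queued task */
		sc->task_pending = 1;		/* task_add() */
	}

	static void
	fake_del_task(struct fake_softc *sc)
	{
		if (!sc->task_pending)
			return;			/* nothing was queued */
		sc->task_pending = 0;		/* task_del() */
		sc->task_refs--;		/* drop the task's reference */
	}

	int
	main(void)
	{
		struct fake_softc sc = { .task_refs = 1 };	/* initial ref */

		fake_add_task(&sc);
		printf("refs after add: %d\n", sc.task_refs);	/* 2 */
		fake_del_task(&sc);
		printf("refs after del: %d\n", sc.task_refs);	/* 1 */
		return 0;
	}
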
8529 iwm_scan(struct iwm_softc *sc)
8531 struct ieee80211com *ic = &sc->sc_ic;
8535 if (sc->sc_flags & IWM_FLAG_BGSCAN) {
8536 err = iwm_scan_abort(sc);
8539 DEVNAME(sc));
8544 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8545 err = iwm_umac_scan(sc, 0);
8547 err = iwm_lmac_scan(sc, 0);
8549 printf("%s: could not initiate scan\n", DEVNAME(sc));
8560 sc->sc_flags |= IWM_FLAG_SCANNING;
8565 if ((sc->sc_flags & IWM_FLAG_BGSCAN) == 0) {
8570 iwm_led_blink_start(sc);
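iwm_scan() here, and iwm_bgscan() and iwm_scan_abort() further down, all branch on the IWM_UCODE_TLV_CAPA_UMAC_SCAN capability bit to choose between the UMAC and legacy LMAC scan commands. A minimal sketch of that dispatch, with stand-in names for the calls listed above:

	/* Illustrative sketch: capability-based choice between two scan
	 * paths, mirroring isset(sc->sc_enabled_capa,
	 * IWM_UCODE_TLV_CAPA_UMAC_SCAN) in the fragments above. */
	#include <stdio.h>

	#define CAPA_UMAC_SCAN	(1u << 0)

	static int fake_umac_scan(int bg) { printf("umac scan, bg=%d\n", bg); return 0; }
	static int fake_lmac_scan(int bg) { printf("lmac scan, bg=%d\n", bg); return 0; }

	static int
	fake_start_scan(unsigned capa, int bgscan)
	{
		/* Newer firmware exposes the UMAC scan API; otherwise fall back. */
		if (capa & CAPA_UMAC_SCAN)
			return fake_umac_scan(bgscan);
		return fake_lmac_scan(bgscan);
	}

	int
	main(void)
	{
		fake_start_scan(CAPA_UMAC_SCAN, 0);	/* foreground scan via UMAC */
		fake_start_scan(0, 1);			/* background scan via LMAC */
		return 0;
	}
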
8579 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
8582 if (sc->sc_flags & IWM_FLAG_SCANNING)
8585 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8586 err = iwm_umac_scan(sc, 1);
8588 err = iwm_lmac_scan(sc, 1);
8590 printf("%s: could not initiate scan\n", DEVNAME(sc));
8594 sc->sc_flags |= IWM_FLAG_BGSCAN;
8602 struct iwm_softc *sc = ic->ic_softc;
8604 free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8605 sc->bgscan_unref_arg = arg;
8606 sc->bgscan_unref_arg_size = arg_size;
8607 iwm_add_task(sc, systq, &sc->bgscan_done_task);
8613 struct iwm_softc *sc = arg;
8614 struct ieee80211com *ic = &sc->sc_ic;
8619 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) ||
8629 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
8632 err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
8635 err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
8648 err = iwm_flush_sta(sc, in);
8656 ni->ni_unref_arg = sc->bgscan_unref_arg;
8657 ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
8658 sc->bgscan_unref_arg = NULL;
8659 sc->bgscan_unref_arg_size = 0;
8663 free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
8664 sc->bgscan_unref_arg = NULL;
8665 sc->bgscan_unref_arg_size = 0;
8666 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
8667 task_add(systq, &sc->init_task);
8669 refcnt_rele_wake(&sc->task_refs);
8674 iwm_umac_scan_abort(struct iwm_softc *sc)
8678 return iwm_send_cmd_pdu(sc,
8684 iwm_lmac_scan_abort(struct iwm_softc *sc)
8691 err = iwm_send_cmd_status(sc, &cmd, &status);
8710 iwm_scan_abort(struct iwm_softc *sc)
8714 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
8715 err = iwm_umac_scan_abort(sc);
8717 err = iwm_lmac_scan_abort(sc);
8720 sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
8725 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
8733 if (isset(sc->sc_enabled_capa,
8737 err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8742 "(error %d)\n", DEVNAME(sc), err);
8746 err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8751 "(error %d)\n", DEVNAME(sc), err);
8756 err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
8761 DEVNAME(sc), err);
8772 iwm_auth(struct iwm_softc *sc)
8774 struct ieee80211com *ic = &sc->sc_ic;
8777 int generation = sc->sc_generation, err;
8782 err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8788 err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8794 in->in_phyctxt = &sc->sc_phyctxt[0];
8798 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
8801 DEVNAME(sc), err);
8804 sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;
8806 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
8809 DEVNAME(sc), err);
8812 sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;
8815 err = iwm_add_sta_cmd(sc, in, 0);
8818 DEVNAME(sc), err);
8821 sc->sc_flags |= IWM_FLAG_STA_ACTIVE;
8834 iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
8839 if (generation == sc->sc_generation) {
8840 iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8841 sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8844 if (generation == sc->sc_generation) {
8845 iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8846 sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8852 iwm_deauth(struct iwm_softc *sc)
8854 struct ieee80211com *ic = &sc->sc_ic;
8860 iwm_unprotect_session(sc, in);
8862 if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
8863 err = iwm_flush_sta(sc, in);
8866 err = iwm_rm_sta_cmd(sc, in);
8869 DEVNAME(sc), err);
8873 sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
8874 sc->sc_rx_ba_sessions = 0;
8875 sc->ba_rx.start_tidmask = 0;
8876 sc->ba_rx.stop_tidmask = 0;
8877 sc->tx_ba_queue_mask = 0;
8878 sc->ba_tx.start_tidmask = 0;
8879 sc->ba_tx.stop_tidmask = 0;
8882 if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
8883 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
8886 DEVNAME(sc), err);
8889 sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
8892 if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
8893 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
8896 DEVNAME(sc), err);
8899 sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
8903 err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8913 iwm_run(struct iwm_softc *sc)
8915 struct ieee80211com *ic = &sc->sc_ic;
8924 err = iwm_auth(sc);
8931 uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8932 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8937 printf("%s: failed to update PHY\n", DEVNAME(sc));
8941 uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
8954 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
8958 printf("%s: failed to update PHY\n", DEVNAME(sc));
8964 err = iwm_add_sta_cmd(sc, in, 1);
8967 DEVNAME(sc), err);
8972 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
8974 printf("%s: failed to update MAC\n", DEVNAME(sc));
8978 err = iwm_sf_config(sc, IWM_SF_FULL_ON);
8981 DEVNAME(sc), err);
8985 err = iwm_allow_mcast(sc);
8988 DEVNAME(sc), err);
8992 err = iwm_power_update_device(sc);
8995 DEVNAME(sc), err);
9004 err = iwm_enable_beacon_filter(sc, in);
9007 DEVNAME(sc));
9011 err = iwm_power_mac_update_mode(sc, in);
9014 DEVNAME(sc), err);
9018 if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9019 err = iwm_update_quotas(sc, in, 1);
9022 DEVNAME(sc), err);
9027 ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
9032 iwm_led_blink_start(sc);
9042 timeout_add_msec(&sc->sc_calib_to, 500);
9043 iwm_led_enable(sc);
9049 iwm_run_stop(struct iwm_softc *sc)
9051 struct ieee80211com *ic = &sc->sc_ic;
9067 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9068 struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
9071 err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
9074 iwm_clear_reorder_buffer(sc, rxba);
9075 if (sc->sc_rx_ba_sessions > 0)
9076 sc->sc_rx_ba_sessions--;
9080 if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
9082 err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
9085 err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
9093 iwm_led_blink_stop(sc);
9095 err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
9099 iwm_disable_beacon_filter(sc);
9101 if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
9102 err = iwm_update_quotas(sc, in, 0);
9105 DEVNAME(sc), err);
9111 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
9113 printf("%s: failed to update MAC\n", DEVNAME(sc));
9119 err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
9123 printf("%s: failed to update PHY\n", DEVNAME(sc));
9141 struct iwm_softc *sc = ic->ic_softc;
9157 return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9165 struct iwm_softc *sc = ic->ic_softc;
9174 if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9192 return iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC,
9200 struct iwm_softc *sc = ic->ic_softc;
9213 iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9220 struct iwm_softc *sc = ic->ic_softc;
9230 if ((sc->sc_flags & IWM_FLAG_STA_ACTIVE) == 0)
9233 if (!isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_TKIP_MIC_KEYS))
9246 iwm_send_cmd_pdu(sc, IWM_ADD_STA_KEY, IWM_CMD_ASYNC, sizeof(cmd), &cmd);
9252 struct iwm_softc *sc = arg;
9253 struct ieee80211com *ic = &sc->sc_ic;
9263 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
9276 timeout_add_msec(&sc->sc_calib_to, 500);
9284 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9306 tab |= iwm_valid_siso_ant_rate_mask(sc);
9355 tab |= iwm_valid_siso_ant_rate_mask(sc);
9369 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9437 tab |= iwm_valid_siso_ant_rate_mask(sc);
9451 tab |= iwm_valid_siso_ant_rate_mask(sc);
9461 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
9481 if (sc->sc_device_family == IWM_DEVICE_FAMILY_9000 &&
9482 (iwm_fw_valid_tx_ant(sc) & IWM_ANT_B))
9493 iwm_send_cmd(sc, &cmd);
9499 struct iwm_softc *sc = ifp->if_softc;
9500 struct ieee80211com *ic = &sc->sc_ic;
9509 sc->sc_fixed_ridx = iwm_ht_mcs2ridx[ic->ic_fixed_mcs];
9517 sc->sc_fixed_ridx = ridx;
9531 struct iwm_softc *sc = (struct iwm_softc *)psc;
9532 struct ieee80211com *ic = &sc->sc_ic;
9533 enum ieee80211_state nstate = sc->ns_nstate;
9535 int arg = sc->ns_arg;
9538 if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9540 refcnt_rele_wake(&sc->task_refs);
9547 if (sc->sc_flags & IWM_FLAG_SCANNING) {
9548 refcnt_rele_wake(&sc->task_refs);
9555 iwm_led_blink_stop(sc);
9561 err = iwm_run_stop(sc);
9568 err = iwm_deauth(sc);
9579 if (sc->sc_flags & IWM_FLAG_SHUTDOWN) {
9580 refcnt_rele_wake(&sc->task_refs);
9592 err = iwm_scan(sc);
9595 refcnt_rele_wake(&sc->task_refs);
9600 err = iwm_auth(sc);
9607 err = iwm_run(sc);
9612 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
9614 task_add(systq, &sc->init_task);
9616 sc->sc_newstate(ic, nstate, arg);
9618 refcnt_rele_wake(&sc->task_refs);
9626 struct iwm_softc *sc = ifp->if_softc;
9634 if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
9639 timeout_del(&sc->sc_calib_to);
9640 iwm_del_task(sc, systq, &sc->ba_task);
9641 iwm_del_task(sc, systq, &sc->mac_ctxt_task);
9642 iwm_del_task(sc, systq, &sc->phy_ctxt_task);
9643 iwm_del_task(sc, systq, &sc->bgscan_done_task);
9646 sc->ns_nstate = nstate;
9647 sc->ns_arg = arg;
9649 iwm_add_task(sc, sc->sc_nswq, &sc->newstate_task);
9655 iwm_endscan(struct iwm_softc *sc)
9657 struct ieee80211com *ic = &sc->sc_ic;
9659 if ((sc->sc_flags & (IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN)) == 0)
9662 sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
9723 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
9767 iwm_sf_config(struct iwm_softc *sc, int new_state)
9769 struct ieee80211com *ic = &sc->sc_ic;
9776 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
9783 iwm_fill_sf_command(sc, &sf_cmd, NULL);
9786 iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
9792 err = iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
9798 iwm_send_bt_init_conf(struct iwm_softc *sc)
9805 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
9810 iwm_send_soc_conf(struct iwm_softc *sc)
9824 if (!sc->sc_integrated) { /* VER_1 */
9828 if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
9829 flags |= (sc->sc_ltr_delay &
9831 scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
9834 scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
9839 cmd.latency = htole32(sc->sc_xtal_latency);
9842 err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
9844 printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
9849 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
9861 int resp_v3 = isset(sc->sc_enabled_capa,
9864 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000 &&
9865 !sc->sc_nvm.lar_enabled) {
9871 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
9872 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
9883 err = iwm_send_cmd(sc, &hcmd);
9923 iwm_free_resp(sc, &hcmd);
9928 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
9940 err = iwm_send_cmd_pdu(sc,
9945 DEVNAME(sc), err);
9951 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
9959 iwm_send_cmd(sc, &cmd);
9963 iwm_free_fw_paging(struct iwm_softc *sc)
9967 if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
9971 iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
9974 memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
9978 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
10008 DEVNAME(sc));
10009 iwm_free_fw_paging(sc);
10015 DEVNAME(sc), sec_idx));
10017 memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
10019 sc->fw_paging_db[0].fw_paging_size);
10022 DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
10032 for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
10033 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10035 sc->fw_paging_db[idx].fw_paging_size);
10038 DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
10040 offset += sc->fw_paging_db[idx].fw_paging_size;
10044 if (sc->num_of_pages_in_last_blk > 0) {
10045 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10047 IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
10050 DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
10057 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *image)
10062 if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
10065 bus_dmamap_sync(sc->sc_dmat,
10066 sc->fw_paging_db[0].fw_paging_block.map,
10069 for (i = 1; i < sc->num_of_paging_blk + 1; i++) {
10070 bus_dmamap_sync(sc->sc_dmat,
10071 sc->fw_paging_db[i].fw_paging_block.map,
10084 sc->num_of_paging_blk =
10087 sc->num_of_pages_in_last_blk =
10089 IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
10092 " holds 8 pages, last block holds %d pages\n", DEVNAME(sc),
10093 sc->num_of_paging_blk,
10094 sc->num_of_pages_in_last_blk));
10097 error = iwm_dma_contig_alloc(sc->sc_dmat,
10098 &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
10102 iwm_free_fw_paging(sc);
10106 sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
10109 DEVNAME(sc)));
10115 for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10118 error = iwm_dma_contig_alloc(sc->sc_dmat,
10119 &sc->fw_paging_db[blk_idx].fw_paging_block,
10123 iwm_free_fw_paging(sc);
10127 sc->fw_paging_db[blk_idx].fw_paging_size =
10132 DEVNAME(sc)));
10139 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10143 ret = iwm_alloc_fw_paging_mem(sc, fw);
10147 return iwm_fill_paging_mem(sc, fw);
10152 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fw)
10160 (sc->num_of_pages_in_last_blk <<
10163 .block_num = htole32(sc->num_of_paging_blk),
10167 for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
10169 sc->fw_paging_db[blk_idx].fw_paging_block.paddr >>
10172 bus_dmamap_sync(sc->sc_dmat,
10173 sc->fw_paging_db[blk_idx].fw_paging_block.map, 0,
10178 return iwm_send_cmd_pdu(sc, iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD,
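The firmware-paging fragments above (iwm_alloc_fw_paging_mem() through iwm_send_paging_cmd()) keep a separate CSS block at index 0 and split the paging image into 8-page groups, which is why the loops run to num_of_paging_blk + 1. A standalone sketch of that bookkeeping, assuming 4 KB pages and 8 pages per group as suggested by IWM_FW_PAGING_SIZE and IWM_NUM_OF_PAGE_PER_GROUP; the driver's exact rounding is not fully visible in these fragments, so treat the arithmetic as an approximation:

	/* Illustrative sketch only: derive the paging-block counts used by
	 * the allocation and fill loops above. */
	#include <stdio.h>

	#define FAKE_PAGE_SIZE		4096u	/* stands in for IWM_FW_PAGING_SIZE */
	#define FAKE_PAGES_PER_GROUP	8u	/* stands in for IWM_NUM_OF_PAGE_PER_GROUP */

	int
	main(void)
	{
		unsigned paging_mem_size = 24 * FAKE_PAGE_SIZE;	/* example image size */
		unsigned npages = (paging_mem_size + FAKE_PAGE_SIZE - 1) / FAKE_PAGE_SIZE;
		/* Pages are split into groups of 8; the last group may be partial.
		 * The CSS block (index 0) is allocated separately. */
		unsigned nblocks = (npages + FAKE_PAGES_PER_GROUP - 1) / FAKE_PAGES_PER_GROUP;
		unsigned last = npages - FAKE_PAGES_PER_GROUP * (nblocks - 1);

		printf("pages=%u blocks=%u pages-in-last-block=%u\n",
		    npages, nblocks, last);
		return 0;
	}
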
10184 iwm_init_hw(struct iwm_softc *sc)
10186 struct ieee80211com *ic = &sc->sc_ic;
10189 err = iwm_run_init_mvm_ucode(sc, 0);
10194 iwm_stop_device(sc);
10195 err = iwm_start_hw(sc);
10197 printf("%s: could not initialize hardware\n", DEVNAME(sc));
10203 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
10205 printf("%s: could not load firmware\n", DEVNAME(sc));
10210 if (!iwm_nic_lock(sc)) {
10215 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
10218 DEVNAME(sc), err);
10222 err = iwm_send_phy_db_data(sc);
10225 DEVNAME(sc), err);
10229 err = iwm_send_phy_cfg_cmd(sc);
10232 DEVNAME(sc), err);
10236 err = iwm_send_bt_init_conf(sc);
10239 DEVNAME(sc), err);
10243 if (isset(sc->sc_enabled_capa,
10245 err = iwm_send_soc_conf(sc);
10250 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
10251 err = iwm_send_dqa_cmd(sc);
10257 err = iwm_add_aux_sta(sc);
10260 DEVNAME(sc), err);
10270 sc->sc_phyctxt[i].id = i;
10271 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
10272 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
10277 DEVNAME(sc), i, err);
10283 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
10284 iwm_tt_tx_backoff(sc, 0);
10287 err = iwm_config_ltr(sc);
10290 DEVNAME(sc), err);
10293 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
10294 err = iwm_send_temp_report_ths_cmd(sc);
10299 err = iwm_power_update_device(sc);
10302 DEVNAME(sc), err);
10306 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
10307 err = iwm_send_update_mcc_cmd(sc, "ZZ");
10310 DEVNAME(sc), err);
10315 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
10316 err = iwm_config_umac_scan(sc);
10319 DEVNAME(sc), err);
10325 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
10329 err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
10333 "(error %d)\n", DEVNAME(sc), err);
10338 if (isset(sc->sc_enabled_capa,
10343 err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
10347 "(error %d)\n", DEVNAME(sc), ac, err);
10353 err = iwm_disable_beacon_filter(sc);
10356 DEVNAME(sc), err);
10361 iwm_nic_unlock(sc);
10368 iwm_allow_mcast(struct iwm_softc *sc)
10370 struct ieee80211com *ic = &sc->sc_ic;
10386 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
10395 struct iwm_softc *sc = ifp->if_softc;
10396 struct ieee80211com *ic = &sc->sc_ic;
10399 rw_assert_wrlock(&sc->ioctl_rwl);
10401 generation = ++sc->sc_generation;
10403 err = iwm_preinit(sc);
10407 err = iwm_start_hw(sc);
10409 printf("%s: could not initialize hardware\n", DEVNAME(sc));
10413 err = iwm_init_hw(sc);
10415 if (generation == sc->sc_generation)
10416 iwm_stop_device(sc);
10420 if (sc->sc_nvm.sku_cap_11n_enable)
10421 iwm_setup_ht_rates(sc);
10422 if (sc->sc_nvm.sku_cap_11ac_enable)
10423 iwm_setup_vht_rates(sc);
10425 KASSERT(sc->task_refs.r_refs == 0);
10426 refcnt_init(&sc->task_refs);
10445 if (generation != sc->sc_generation)
10459 struct iwm_softc *sc = ifp->if_softc;
10460 struct ieee80211com *ic = &sc->sc_ic;
10471 if (sc->qfullmsk != 0) {
10477 if (sc->sc_flags & IWM_FLAG_TXFLUSH)
10513 if (iwm_tx(sc, m, ni, ac) != 0) {
10529 struct iwm_softc *sc = ifp->if_softc;
10530 struct ieee80211com *ic = &sc->sc_ic;
10534 rw_assert_wrlock(&sc->ioctl_rwl);
10536 sc->sc_flags |= IWM_FLAG_SHUTDOWN; /* Disallow new tasks. */
10539 task_del(systq, &sc->init_task);
10540 iwm_del_task(sc, sc->sc_nswq, &sc->newstate_task);
10541 iwm_del_task(sc, systq, &sc->ba_task);
10542 iwm_del_task(sc, systq, &sc->mac_ctxt_task);
10543 iwm_del_task(sc, systq, &sc->phy_ctxt_task);
10544 iwm_del_task(sc, systq, &sc->bgscan_done_task);
10545 KASSERT(sc->task_refs.r_refs >= 1);
10546 refcnt_finalize(&sc->task_refs, "iwmstop");
10548 iwm_stop_device(sc);
10550 free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
10551 sc->bgscan_unref_arg = NULL;
10552 sc->bgscan_unref_arg_size = 0;
10556 sc->sc_generation++;
10557 for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
10558 free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
10559 sc->sc_cmd_resp_pkt[i] = NULL;
10560 sc->sc_cmd_resp_len[i] = 0;
10570 sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
10571 sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
10572 sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
10573 sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
10574 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
10575 sc->sc_flags &= ~IWM_FLAG_HW_ERR;
10576 sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
10577 sc->sc_flags &= ~IWM_FLAG_TXFLUSH;
10579 sc->sc_rx_ba_sessions = 0;
10580 sc->ba_rx.start_tidmask = 0;
10581 sc->ba_rx.stop_tidmask = 0;
10582 sc->tx_ba_queue_mask = 0;
10583 sc->ba_tx.start_tidmask = 0;
10584 sc->ba_tx.stop_tidmask = 0;
10586 sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
10587 sc->ns_nstate = IEEE80211_S_INIT;
10589 timeout_del(&sc->sc_calib_to); /* XXX refcount? */
10590 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
10591 struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
10592 iwm_clear_reorder_buffer(sc, rxba);
10594 iwm_led_blink_stop(sc);
10595 memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
10604 struct iwm_softc *sc = ifp->if_softc;
10614 for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
10615 if (sc->sc_tx_timer[i] > 0) {
10616 if (--sc->sc_tx_timer[i] == 0) {
10617 printf("%s: device timeout\n", DEVNAME(sc));
10619 iwm_nic_error(sc);
10620 iwm_dump_driver_status(sc);
10622 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
10623 task_add(systq, &sc->init_task);
10637 struct iwm_softc *sc = ifp->if_softc;
10638 int s, err = 0, generation = sc->sc_generation;
10644 err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
10645 if (err == 0 && generation != sc->sc_generation) {
10646 rw_exit(&sc->ioctl_rwl);
10661 sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
10684 rw_exit(&sc->ioctl_rwl);
10772 iwm_nic_umac_error(struct iwm_softc *sc)
10777 base = sc->sc_uc.uc_umac_error_event_table;
10781 DEVNAME(sc), base);
10785 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10786 printf("%s: reading errlog failed\n", DEVNAME(sc));
10791 printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
10792 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10793 sc->sc_flags, table.valid);
10796 printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
10798 printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
10799 printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
10800 printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
10801 printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
10802 printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
10803 printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
10804 printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
10805 printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
10806 printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
10807 printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
10809 printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
10811 printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
10812 printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
10865 iwm_nic_error(struct iwm_softc *sc)
10870 printf("%s: dumping device error log\n", DEVNAME(sc));
10871 base = sc->sc_uc.uc_error_event_table;
10874 DEVNAME(sc), base);
10878 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
10879 printf("%s: reading errlog failed\n", DEVNAME(sc));
10884 printf("%s: errlog not found, skipping\n", DEVNAME(sc));
10889 printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
10890 printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
10891 sc->sc_flags, table.valid);
10894 printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
10896 printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
10898 printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
10900 printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
10901 printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
10902 printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
10903 printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
10904 printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
10905 printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
10906 printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
10907 printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
10908 printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
10909 printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
10910 printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
10911 printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
10913 printf("%s: %08X | uCode version major\n", DEVNAME(sc),
10915 printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
10917 printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
10918 printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
10919 printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
10920 printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
10921 printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
10922 printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
10923 printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
10924 printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
10925 printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
10926 printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
10927 printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
10928 printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
10929 printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
10930 printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
10931 printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
10932 printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
10933 printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
10935 if (sc->sc_uc.uc_umac_error_event_table)
10936 iwm_nic_umac_error(sc);
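iwm_read_mem() transfers device memory in 32-bit dwords, which is why both error-log dumps above pass sizeof(table)/sizeof(uint32_t) as the length argument. A standalone sketch of that convention (no hardware access, stand-in names):

	/* Illustrative sketch: read a fixed-layout error table as a count of
	 * 32-bit words, as the error-log dumps above do. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct fake_error_table {
		uint32_t valid, error_id, blink2, ilink1, ilink2;
	};

	static void
	fake_read_mem(const uint32_t *dev_mem, void *buf, int dwords)
	{
		memcpy(buf, dev_mem, (size_t)dwords * sizeof(uint32_t));
	}

	int
	main(void)
	{
		uint32_t dev_mem[8] = { 1, 0x42, 0xa, 0xb, 0xc };
		struct fake_error_table t;

		fake_read_mem(dev_mem, &t, sizeof(t) / sizeof(uint32_t));
		printf("valid=%u error_id=0x%x\n", t.valid, t.error_id);
		return 0;
	}
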
10940 iwm_dump_driver_status(struct iwm_softc *sc)
10946 struct iwm_tx_ring *ring = &sc->txq[i];
10951 printf(" rx ring: cur=%d\n", sc->rxq.cur);
10953 ieee80211_state_name[sc->sc_ic.ic_state]);
10958 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
10965 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
10970 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % count);
10986 iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data, struct mbuf_list *ml)
10988 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
10995 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
11015 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur)) {
11024 iwm_rx_rx_phy_cmd(sc, pkt, data);
11038 if (sc->sc_mqrx_supported)
11039 iwm_rx_mpdu_mq(sc, m0, pkt->data,
11042 iwm_rx_mpdu(sc, m0, pkt->data,
11059 if (sc->sc_mqrx_supported)
11060 iwm_rx_mpdu_mq(sc, m, pkt->data,
11063 iwm_rx_mpdu(sc, m, pkt->data,
11070 iwm_rx_tx_cmd(sc, pkt, data);
11074 iwm_rx_compressed_ba(sc, pkt);
11078 iwm_rx_bmiss(sc, pkt, data);
11091 sc->sc_uc.uc_error_event_table
11093 sc->sc_uc.uc_log_event_table
11095 sc->sched_base = le32toh(resp1->scd_base_ptr);
11097 sc->sc_uc.uc_ok = 1;
11099 sc->sc_uc.uc_ok = 0;
11104 sc->sc_uc.uc_error_event_table
11106 sc->sc_uc.uc_log_event_table
11108 sc->sched_base = le32toh(resp2->scd_base_ptr);
11109 sc->sc_uc.uc_umac_error_event_table
11112 sc->sc_uc.uc_ok = 1;
11114 sc->sc_uc.uc_ok = 0;
11119 sc->sc_uc.uc_error_event_table
11121 sc->sc_uc.uc_log_event_table
11123 sc->sched_base = le32toh(resp3->scd_base_ptr);
11124 sc->sc_uc.uc_umac_error_event_table
11127 sc->sc_uc.uc_ok = 1;
11129 sc->sc_uc.uc_ok = 0;
11132 sc->sc_uc.uc_intr = 1;
11133 wakeup(&sc->sc_uc);
11140 iwm_phy_db_set_section(sc, phy_db_notif);
11141 sc->sc_init_complete |= IWM_CALIB_COMPLETE;
11142 wakeup(&sc->sc_init_complete);
11149 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
11150 sc->sc_noise = iwm_get_noise(&stats->rx.general);
11157 iwm_mcc_update(sc, notif);
11174 DEVNAME(sc), le16toh(notif->temperature));
11175 sc->sc_flags |= IWM_FLAG_HW_ERR;
11176 task_add(systq, &sc->init_task);
11210 if (sc->sc_cmd_resp_pkt[idx] == NULL)
11213 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
11221 pkt_len > sc->sc_cmd_resp_len[idx]) {
11222 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
11223 sc->sc_cmd_resp_len[idx]);
11224 sc->sc_cmd_resp_pkt[idx] = NULL;
11228 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
11230 memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
11239 sc->sc_init_complete |= IWM_INIT_COMPLETE;
11240 wakeup(&sc->sc_init_complete);
11252 iwm_endscan(sc);
11259 iwm_endscan(sc);
11266 iwm_endscan(sc);
11274 DEVNAME(sc), le32toh(resp->error_type),
11284 if (sc->sc_time_event_uid != le32toh(notif->unique_id))
11288 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
11323 DEVNAME(sc), code, pkt->len_n_flags,
11336 iwm_cmd_done(sc, qid, idx, code);
11347 iwm_notif_intr(struct iwm_softc *sc)
11354 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
11355 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
11357 if (sc->sc_mqrx_supported) {
11365 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
11367 while (sc->rxq.cur != hw) {
11368 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
11369 iwm_rx_pkt(sc, data, &ml);
11370 ADVANCE_RXQ(sc);
11372 if_input(&sc->sc_ic.ic_if, &ml);
11379 IWM_WRITE(sc, wreg, hw & ~7);
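The iwm_notif_intr() fragments above implement a classic ring catch-up: read the hardware's closed_rb_num index, process packets until the software cursor reaches it, then write the index back rounded down to a multiple of 8. A standalone sketch of that loop, with stand-in names:

	/* Illustrative sketch of the ring catch-up loop in iwm_notif_intr():
	 * consume entries until the software cursor reaches the hardware
	 * index, then report progress aligned down to 8 ("hw & ~7" above). */
	#include <stdio.h>

	#define RING_COUNT 512u

	static void
	fake_consume_ring(unsigned *cur, unsigned hw)
	{
		while (*cur != hw) {
			/* process descriptor *cur here ... */
			*cur = (*cur + 1) % RING_COUNT;		/* ADVANCE_RXQ() */
		}
		printf("write back hw index: %u\n", hw & ~7u);
	}

	int
	main(void)
	{
		unsigned cur = 500, hw = 13;	/* wrap-around case */
		fake_consume_ring(&cur, hw);
		printf("cur=%u\n", cur);	/* 13 */
		return 0;
	}
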
11385 struct iwm_softc *sc = arg;
11386 struct ieee80211com *ic = &sc->sc_ic;
11392 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
11394 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
11395 uint32_t *ict = sc->ict_dma.vaddr;
11398 tmp = htole32(ict[sc->ict_cur]);
11408 ict[sc->ict_cur] = 0;
11409 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
11410 tmp = htole32(ict[sc->ict_cur]);
11427 r1 = IWM_READ(sc, IWM_CSR_INT);
11428 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
11436 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
11443 iwm_check_rfkill(sc);
11444 task_add(systq, &sc->init_task);
11451 iwm_nic_error(sc);
11452 iwm_dump_driver_status(sc);
11454 printf("%s: fatal firmware error\n", DEVNAME(sc));
11455 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11456 task_add(systq, &sc->init_task);
11464 printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11465 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11466 sc->sc_flags |= IWM_FLAG_HW_ERR;
11467 task_add(systq, &sc->init_task);
11475 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
11478 sc->sc_fw_chunk_done = 1;
11479 wakeup(&sc->sc_fw);
11486 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
11490 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
11494 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
11504 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
11507 iwm_notif_intr(sc);
11513 iwm_restore_interrupts(sc);
11521 struct iwm_softc *sc = arg;
11522 struct ieee80211com *ic = &sc->sc_ic;
11527 inta_fh = IWM_READ(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD);
11528 inta_hw = IWM_READ(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD);
11529 IWM_WRITE(sc, IWM_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
11530 IWM_WRITE(sc, IWM_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
11531 inta_fh &= sc->sc_fh_mask;
11532 inta_hw &= sc->sc_hw_mask;
11536 iwm_notif_intr(sc);
11541 sc->sc_fw_chunk_done = 1;
11542 wakeup(&sc->sc_fw);
11549 iwm_nic_error(sc);
11550 iwm_dump_driver_status(sc);
11552 printf("%s: fatal firmware error\n", DEVNAME(sc));
11553 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0)
11554 task_add(systq, &sc->init_task);
11559 iwm_check_rfkill(sc);
11560 task_add(systq, &sc->init_task);
11564 printf("%s: hardware error, stopping device\n", DEVNAME(sc));
11565 if ((sc->sc_flags & IWM_FLAG_SHUTDOWN) == 0) {
11566 sc->sc_flags |= IWM_FLAG_HW_ERR;
11567 task_add(systq, &sc->init_task);
11580 IWM_WRITE(sc, IWM_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
11649 iwm_preinit(struct iwm_softc *sc)
11651 struct ieee80211com *ic = &sc->sc_ic;
11655 err = iwm_prepare_card_hw(sc);
11657 printf("%s: could not initialize hardware\n", DEVNAME(sc));
11661 if (sc->attached) {
11664 IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
11670 err = iwm_start_hw(sc);
11672 printf("%s: could not initialize hardware\n", DEVNAME(sc));
11676 err = iwm_run_init_mvm_ucode(sc, 1);
11677 iwm_stop_device(sc);
11682 sc->attached = 1;
11684 DEVNAME(sc), sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
11685 sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
11687 if (sc->sc_nvm.sku_cap_11n_enable)
11688 iwm_setup_ht_rates(sc);
11691 if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
11705 DEVNAME(sc), err);
11716 struct iwm_softc *sc = (void *)self;
11720 iwm_preinit(sc);
11733 struct iwm_softc *sc = device_get_softc(dev);
11735 struct iwm_softc *sc = (void *)self;
11740 struct ieee80211com *ic = &sc->sc_ic;
11747 sc->sc_dev = dev;
11748 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
11749 bus_dma_tag_create(sc->sc_dmat, 1, 0,
11752 &sc->sc_dmat);
11753 pci_enable_busmaster(sc->sc_dev);
11757 sc->sc_pct = pa->pa_pc;
11758 sc->sc_pcitag = pa->pa_tag;
11759 sc->sc_dmat = pa->pa_dmat;
11762 rw_init(&sc->ioctl_rwl, "iwmioctl");
11764 err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11765 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11768 DEVNAME(sc));
11776 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11777 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11781 &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11783 printf("%s: can't map mem space\n", DEVNAME(sc));
11788 sc->sc_msix = 1;
11795 printf("%s: can't map interrupt\n", DEVNAME(sc));
11799 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11803 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11807 intrstr = pci_intr_string(sc->sc_pct, ih);
11808 if (sc->sc_msix)
11809 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11810 iwm_intr_msix, sc, DEVNAME(sc));
11812 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11813 iwm_intr, sc, DEVNAME(sc));
11815 if (sc->sc_ih == NULL) {
11817 printf("%s: can't establish interrupt", DEVNAME(sc));
11825 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
11833 sc->sc_fwname = "iwm-3160-17";
11834 sc->host_interrupt_operation_mode = 1;
11835 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11836 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11837 sc->sc_nvm_max_section_size = 16384;
11838 sc->nvm_type = IWM_NVM;
11842 sc->sc_fwname = "iwm-7265D-29";
11843 sc->host_interrupt_operation_mode = 0;
11844 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11845 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11846 sc->sc_nvm_max_section_size = 16384;
11847 sc->nvm_type = IWM_NVM;
11850 sc->sc_fwname = "iwm-3168-29";
11851 sc->host_interrupt_operation_mode = 0;
11852 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11853 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11854 sc->sc_nvm_max_section_size = 16384;
11855 sc->nvm_type = IWM_NVM_SDP;
11859 sc->sc_fwname = "iwm-7260-17";
11860 sc->host_interrupt_operation_mode = 1;
11861 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11862 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11863 sc->sc_nvm_max_section_size = 16384;
11864 sc->nvm_type = IWM_NVM;
11868 sc->sc_fwname = "iwm-7265-17";
11869 sc->host_interrupt_operation_mode = 0;
11870 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
11871 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
11872 sc->sc_nvm_max_section_size = 16384;
11873 sc->nvm_type = IWM_NVM;
11877 sc->sc_fwname = "iwm-8000C-36";
11878 sc->host_interrupt_operation_mode = 0;
11879 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11880 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11881 sc->sc_nvm_max_section_size = 32768;
11882 sc->nvm_type = IWM_NVM_EXT;
11885 sc->sc_fwname = "iwm-8265-36";
11886 sc->host_interrupt_operation_mode = 0;
11887 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
11888 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11889 sc->sc_nvm_max_section_size = 32768;
11890 sc->nvm_type = IWM_NVM_EXT;
11893 sc->sc_fwname = "iwm-9260-46";
11894 sc->host_interrupt_operation_mode = 0;
11895 sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11896 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11897 sc->sc_nvm_max_section_size = 32768;
11898 sc->sc_mqrx_supported = 1;
11903 sc->sc_fwname = "iwm-9000-46";
11904 sc->host_interrupt_operation_mode = 0;
11905 sc->sc_device_family = IWM_DEVICE_FAMILY_9000;
11906 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
11907 sc->sc_nvm_max_section_size = 32768;
11908 sc->sc_mqrx_supported = 1;
11909 sc->sc_integrated = 1;
11911 sc->sc_xtal_latency = 670;
11912 sc->sc_extra_phy_config = IWM_FW_PHY_CFG_SHARED_CLK;
11914 sc->sc_xtal_latency = 650;
11917 printf("%s: unknown adapter type\n", DEVNAME(sc));
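The attach fragments above fill in per-device parameters (firmware image name, device family, DMA segment size, NVM type, MQ RX support) in a large switch on the PCI product. The same information can be expressed as a lookup table; the sketch below uses placeholder product IDs, and only the firmware names and family numbers are taken from the listing:

	/* Illustrative sketch: table-driven device configuration. Product IDs
	 * here are placeholders, not the driver's real PCI IDs. */
	#include <stddef.h>
	#include <stdio.h>

	struct fake_cfg {
		unsigned	product;
		const char	*fwname;
		int		family;		/* 7000 / 8000 / 9000 */
		int		mqrx_supported;
	};

	static const struct fake_cfg fake_cfgs[] = {
		{ 0x0001, "iwm-7260-17", 7000, 0 },
		{ 0x0002, "iwm-8265-36", 8000, 0 },
		{ 0x0003, "iwm-9260-46", 9000, 1 },
	};

	static const struct fake_cfg *
	fake_lookup(unsigned product)
	{
		size_t i;

		for (i = 0; i < sizeof(fake_cfgs) / sizeof(fake_cfgs[0]); i++)
			if (fake_cfgs[i].product == product)
				return &fake_cfgs[i];
		return NULL;	/* "unknown adapter type" */
	}

	int
	main(void)
	{
		const struct fake_cfg *cfg = fake_lookup(0x0003);

		if (cfg != NULL)
			printf("fw=%s family=%d mqrx=%d\n", cfg->fwname,
			    cfg->family, cfg->mqrx_supported);
		return 0;
	}
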
11930 if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
11933 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11934 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11936 if (iwm_prepare_card_hw(sc) != 0) {
11938 DEVNAME(sc));
11946 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
11950 err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
11955 printf("%s: Failed to wake up the nic\n", DEVNAME(sc));
11959 if (iwm_nic_lock(sc)) {
11960 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
11962 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
11963 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
11966 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
11968 iwm_nic_unlock(sc);
11970 printf("%s: Failed to lock the nic\n", DEVNAME(sc));
11979 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
11980 sc->sc_fwdmasegsz, 16);
11983 DEVNAME(sc));
11988 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
11990 printf("%s: could not allocate keep warm page\n", DEVNAME(sc));
11995 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11998 printf("%s: could not allocate ICT table\n", DEVNAME(sc));
12003 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
12004 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
12007 DEVNAME(sc));
12011 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
12012 err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
12015 DEVNAME(sc), txq_i);
12020 err = iwm_alloc_rx_ring(sc, &sc->rxq);
12022 printf("%s: could not allocate RX ring\n", DEVNAME(sc));
12026 sc->sc_nswq = taskq_create("iwmns", 1, IPL_NET, 0);
12027 if (sc->sc_nswq == NULL)
12031 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
12068 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
12069 sc->sc_phyctxt[i].id = i;
12070 sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
12071 sc->sc_phyctxt[i].vht_chan_width =
12075 sc->sc_amrr.amrr_min_success_threshold = 1;
12076 sc->sc_amrr.amrr_max_success_threshold = 15;
12083 ifp->if_softc = sc;
12088 memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
12095 iwm_radiotap_attach(sc);
12097 timeout_set(&sc->sc_calib_to, iwm_calib_timeout, sc);
12098 timeout_set(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
12099 for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
12100 struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
12102 rxba->sc = sc;
12110 task_set(&sc->init_task, iwm_init_task, sc);
12111 task_set(&sc->newstate_task, iwm_newstate_task, sc);
12112 task_set(&sc->ba_task, iwm_ba_task, sc);
12113 task_set(&sc->mac_ctxt_task, iwm_mac_ctxt_task, sc);
12114 task_set(&sc->phy_ctxt_task, iwm_phy_ctxt_task, sc);
12115 task_set(&sc->bgscan_done_task, iwm_bgscan_done_task, sc);
12124 sc->sc_newstate = ic->ic_newstate;
12136 iwm_preinit(sc);
12148 iwm_free_tx_ring(sc, &sc->txq[txq_i]);
12149 iwm_free_rx_ring(sc, &sc->rxq);
12150 iwm_dma_contig_free(&sc->sched_dma);
12151 fail3: if (sc->ict_dma.vaddr != NULL)
12152 iwm_dma_contig_free(&sc->ict_dma);
12154 fail2: iwm_dma_contig_free(&sc->kw_dma);
12155 fail1: iwm_dma_contig_free(&sc->fw_dma);
12165 iwm_radiotap_attach(struct iwm_softc *sc)
12167 bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
12170 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
12171 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
12172 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
12174 sc->sc_txtap_len = sizeof sc->sc_txtapu;
12175 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
12176 sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
12183 struct iwm_softc *sc = arg1;
12184 struct ifnet *ifp = &sc->sc_ic.ic_if;
12186 int generation = sc->sc_generation;
12187 int fatal = (sc->sc_flags & (IWM_FLAG_HW_ERR | IWM_FLAG_RFKILL));
12189 rw_enter_write(&sc->ioctl_rwl);
12190 if (generation != sc->sc_generation) {
12191 rw_exit(&sc->ioctl_rwl);
12199 sc->sc_flags &= ~IWM_FLAG_HW_ERR;
12204 rw_exit(&sc->ioctl_rwl);
12209 iwm_resume(struct iwm_softc *sc)
12217 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
12218 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
12220 if (!sc->sc_msix) {
12222 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
12226 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
12230 iwm_disable_interrupts(sc);
12234 iwm_wakeup(struct iwm_softc *sc)
12236 struct ieee80211com *ic = &sc->sc_ic;
12237 struct ifnet *ifp = &sc->sc_ic.ic_if;
12240 err = iwm_start_hw(sc);
12244 err = iwm_init_hw(sc);
12248 refcnt_init(&sc->task_refs);
12287 struct iwm_softc *sc = (struct iwm_softc *)self;
12288 struct ifnet *ifp = &sc->sc_ic.ic_if;
12294 rw_enter_write(&sc->ioctl_rwl);
12296 rw_exit(&sc->ioctl_rwl);
12300 iwm_resume(sc);
12304 err = iwm_wakeup(sc);
12307 DEVNAME(sc));