Lines matching refs:trans in drivers/net/wireless/intel/iwlwifi/pcie/rx.c. Each entry is the file's own line number followed by the matching source line; gaps are lines that do not reference trans.

144 int iwl_pcie_rx_stop(struct iwl_trans *trans)
146 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
148 iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
149 return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
151 } else if (trans->trans_cfg->mq_rx_supported) {
152 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
153 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
156 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
157 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
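The iwl_pcie_rx_stop() matches above show the driver's recurring three-way dispatch: AX210-and-later devices get their RX DMA config through UMAC periphery registers, multi-queue (mq_rx_supported) devices through the RFH periphery, and older devices through direct FH channel registers; each branch then polls the matching status register until DMA is idle. A minimal compilable model of that dispatch order (all type and enum names here are stand-ins, not the driver's):

#include <stdio.h>

/* Stand-ins for trans->trans_cfg; only the dispatch order mirrors
 * iwl_pcie_rx_stop(), the names are invented. */
enum family { FAMILY_LEGACY, FAMILY_MQ, FAMILY_AX210 };

struct cfg { enum family device_family; int mq_rx_supported; };

static const char *rx_stop_path(const struct cfg *c)
{
	if (c->device_family >= FAMILY_AX210)
		return "UMAC prph: RFH_RXF_DMA_CFG_GEN3, poll RFH_GEN_STATUS_GEN3";
	if (c->mq_rx_supported)
		return "prph: RFH_RXF_DMA_CFG, poll RFH_GEN_STATUS";
	return "direct: FH_MEM_RCSR_CHNL0_CONFIG_REG, poll FH_MEM_RSSR_RX_STATUS_REG";
}

int main(void)
{
	struct cfg ax210 = { FAMILY_AX210, 1 };
	struct cfg mq = { FAMILY_MQ, 1 };
	struct cfg legacy = { FAMILY_LEGACY, 0 };

	printf("%s\n%s\n%s\n", rx_stop_path(&ax210),
	       rx_stop_path(&mq), rx_stop_path(&legacy));
	return 0;
}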
166 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
178 if (!trans->trans_cfg->base_params->shadow_reg_enable &&
179 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
180 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
183 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
185 iwl_set_bit(trans, CSR_GP_CNTRL,
193 if (!trans->trans_cfg->mq_rx_supported)
194 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
195 else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
196 iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
199 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
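iwl_pcie_rxq_inc_wr_ptr() guards its doorbell write: when shadow registers are disabled and STATUS_TPOWER_PMI is set, it reads CSR_UCODE_DRV_GP1 and, if the device is asleep, requests a wakeup via CSR_GP_CNTRL instead of writing a pointer the hardware would miss; otherwise it picks the write-pointer register by generation (legacy FH_RSCSR_CHNL0_WPTR, BZ-and-later HBUS_TARG_WRPTR, other multi-queue parts RFH_Q_FRBDCB_WIDX_TRG). A hedged userspace model of the wake-before-write guard, with invented state in place of the CSR reads:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in device state; the real check reads CSR_UCODE_DRV_GP1. */
struct dev_state { bool shadow_reg_enable; bool pmi_set; bool asleep; };

/* Returns true if the write-pointer update was actually issued. */
static bool inc_wr_ptr(struct dev_state *d, unsigned wptr)
{
	if (!d->shadow_reg_enable && d->pmi_set && d->asleep) {
		/* request wakeup and retry later; a write now would be lost */
		printf("wakeup requested, WPTR=%u deferred\n", wptr);
		return false;
	}
	printf("WPTR <- %u\n", wptr);	/* FH_RSCSR_CHNL0_WPTR or RFH equivalent */
	return true;
}

int main(void)
{
	struct dev_state d = { false, true, true };

	if (!inc_wr_ptr(&d, 8)) {	/* deferred: device was asleep */
		d.asleep = false;	/* wakeup completed */
		inc_wr_ptr(&d, 8);
	}
	return 0;
}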
203 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
205 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
208 for (i = 0; i < trans->num_rx_queues; i++) {
214 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
220 static void iwl_pcie_restock_bd(struct iwl_trans *trans,
224 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
237 IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
244 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
247 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
258 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
271 iwl_pcie_restock_bd(trans, rxq, rxb);
283 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
291 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
304 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
332 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
349 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
351 if (trans->trans_cfg->mq_rx_supported)
352 iwl_pcie_rxmq_restock(trans, rxq);
354 iwl_pcie_rxsq_restock(trans, rxq);
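iwl_pcie_rxq_restock() itself is a thin dispatcher between the multi-queue and single-queue restock paths; both refill the free-buffer ring and finish with iwl_pcie_rxq_inc_wr_ptr(). A self-contained model of ring restocking follows; the detail that the published pointer (write_actual) is rounded down to a multiple of 8 is an assumption about how the driver batches doorbells, so treat it as illustrative:

#include <stdio.h>

#define RING_SIZE 256u	/* hypothetical free-RBD ring size (power of two) */

struct rxq { unsigned write; unsigned write_actual; };

/* Publish the write pointer only in batches of 8 (assumed rounding),
 * so the doorbell register is touched far less often than the ring. */
static void restock(struct rxq *q, unsigned nbufs)
{
	q->write = (q->write + nbufs) & (RING_SIZE - 1);
	q->write_actual = q->write & ~7u;	/* round_down(write, 8) */
	printf("write=%u, doorbell=%u\n", q->write, q->write_actual);
}

int main(void)
{
	struct rxq q = { 0, 0 };

	restock(&q, 5);		/* write=5, doorbell stays at 0 */
	restock(&q, 5);		/* write=10, doorbell jumps to 8 */
	return 0;
}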
361 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
364 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
394 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
401 IWL_CRIT(trans,
429 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
432 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
446 page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
467 dma_map_page(trans->dev, page, rxb->offset,
470 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
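iwl_pcie_rxq_alloc_rbs() maps every freshly allocated page for device writes and must back out if the IOMMU/DMA layer rejects the mapping. The dma_map_page()/dma_mapping_error() idiom it leans on, as a short module-context sketch (map_rx_page() is a hypothetical helper; the two DMA calls are the stock kernel API):

#include <linux/dma-mapping.h>

/* Map one RX page so the NIC can DMA into it. On failure the caller
 * still owns the page and must return it to its pool or free it. */
static int map_rx_page(struct device *dev, struct page *page,
		       size_t offset, size_t len, dma_addr_t *addr)
{
	*addr = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;	/* *addr is not a usable bus address */
	return 0;
}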
488 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
490 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
499 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
513 static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
515 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
520 IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
553 page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
560 rxb->page_dma = dma_map_page(trans->dev, page,
564 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
581 IWL_DEBUG_TPT(trans,
602 IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
613 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
616 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
655 iwl_pcie_rx_allocator(trans_pcie->trans);
658 static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
660 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
663 return trans->trans_cfg->mq_rx_supported ?
667 static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
669 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
672 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
678 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
681 int free_size = iwl_pcie_free_bd_size(trans);
684 dma_free_coherent(trans->dev,
694 dma_free_coherent(trans->dev,
695 iwl_pcie_used_bd_size(trans) *
702 static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
704 bool use_rx_td = (trans->trans_cfg->device_family >=
713 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
716 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
717 size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
718 struct device *dev = trans->dev;
723 if (trans->trans_cfg->mq_rx_supported)
724 rxq->queue_size = trans->cfg->num_rbds;
728 free_size = iwl_pcie_free_bd_size(trans);
739 if (trans->trans_cfg->mq_rx_supported) {
741 iwl_pcie_used_bd_size(trans) *
756 for (i = 0; i < trans->num_rx_queues; i++) {
759 iwl_pcie_free_rxq_dma(trans, rxq);
765 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
767 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
768 size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
775 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
797 dma_alloc_coherent(trans->dev,
798 rb_stts_size * trans->num_rx_queues,
806 for (i = 0; i < trans->num_rx_queues; i++) {
810 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
818 dma_free_coherent(trans->dev,
819 rb_stts_size * trans->num_rx_queues,
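iwl_pcie_rx_alloc() carves a single coherent buffer holding the receive-buffer status (rb_stts) blocks for all RX queues, and its error path frees it with the same size expression; dma_alloc_coherent() and dma_free_coherent() must always be paired with identical sizes and handles. A reduced sketch of that pairing (the struct is a stand-in for the driver's status-block bookkeeping):

#include <linux/dma-mapping.h>

struct rb_status_area {
	void *base;
	dma_addr_t dma;
	size_t size;
};

/* One coherent allocation shared by every queue, sized per-queue * N. */
static int alloc_status_area(struct device *dev, struct rb_status_area *a,
			     size_t per_queue, int num_queues)
{
	a->size = per_queue * num_queues;
	a->base = dma_alloc_coherent(dev, a->size, &a->dma, GFP_KERNEL);
	return a->base ? 0 : -ENOMEM;
}

static void free_status_area(struct device *dev, struct rb_status_area *a)
{
	/* size and handles must match the allocation exactly */
	dma_free_coherent(dev, a->size, a->base, a->dma);
}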
835 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
837 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
856 if (!iwl_trans_grab_nic_access(trans))
860 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
862 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
863 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
864 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
867 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
870 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
874 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
885 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
893 iwl_trans_release_nic_access(trans);
896 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
899 if (trans->cfg->host_interrupt_operation_mode)
900 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
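Both hw-init paths bracket their register programming with iwl_trans_grab_nic_access() and iwl_trans_release_nic_access(): the grab wakes the MAC and holds it awake so the burst of iwl_write32()/iwl_write_prph_no_grab() calls is safe, and a failed grab aborts the whole block. The bracket idiom, modeled stand-alone with stub functions:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for iwl_trans_grab_nic_access()/release: grab wakes the
 * MAC and holds it awake for a burst of register writes. */
static bool grab_nic_access(void) { return true; }
static void release_nic_access(void) { }

static void rx_hw_init_model(void)
{
	if (!grab_nic_access())
		return;		/* device unreachable: skip all programming */

	/* ... the iwl_write32() burst from the listing goes here ... */
	printf("programming RX registers\n");

	release_nic_access();
}

int main(void) { rx_hw_init_model(); return 0; }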
903 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
905 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
927 if (!iwl_trans_grab_nic_access(trans))
931 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
933 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
935 for (i = 0; i < trans->num_rx_queues; i++) {
937 iwl_write_prph64_no_grab(trans,
941 iwl_write_prph64_no_grab(trans,
945 iwl_write_prph64_no_grab(trans,
949 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
950 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
951 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
963 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
974 iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
979 trans->trans_cfg->integrated ?
983 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
985 iwl_trans_release_nic_access(trans);
988 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1001 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
1007 struct iwl_trans *trans;
1011 trans = trans_pcie->trans;
1013 ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1015 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
1020 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1021 _iwl_enable_interrupts(trans);
1034 struct iwl_trans *trans;
1038 trans = trans_pcie->trans;
1040 ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1041 IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
1053 iwl_pcie_clear_irq(trans, irq_line);
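The two poll functions above feed iwl_pcie_rx_handle() with the NAPI budget, and only once the queue is drained do they re-enable interrupts (or, on the MSI-X path, clear the per-queue line). The standard NAPI contract they implement, sketched with hypothetical helpers around the real napi_complete_done() API:

#include <linux/netdevice.h>

/* Hypothetical stand-ins for the driver's RX worker and IRQ re-enable. */
static int handle_rx(struct napi_struct *napi, int budget) { return 0; }
static void reenable_rx_interrupts(struct napi_struct *napi) { }

static int rx_poll_sketch(struct napi_struct *napi, int budget)
{
	int done = handle_rx(napi, budget);

	/* Only a drained queue (done < budget) may complete NAPI and
	 * re-enable interrupts; napi_complete_done() returns false if
	 * polling must continue instead. */
	if (done < budget && napi_complete_done(napi, done))
		reenable_rx_interrupts(napi);

	return done;
}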
1062 void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
1064 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1070 for (i = 0; i < trans->num_rx_queues; i++) {
1078 static int _iwl_pcie_rx_init(struct iwl_trans *trans)
1080 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1086 err = iwl_pcie_rx_alloc(trans);
1102 iwl_pcie_free_rbs_pool(trans);
1107 for (i = 0; i < trans->num_rx_queues; i++) {
1120 (trans->trans_cfg->device_family >=
1142 queue_size = trans->trans_cfg->mq_rx_supported ?
1144 allocator_pool_size = trans->num_rx_queues *
1160 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1165 int iwl_pcie_rx_init(struct iwl_trans *trans)
1167 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1168 int ret = _iwl_pcie_rx_init(trans);
1173 if (trans->trans_cfg->mq_rx_supported)
1174 iwl_pcie_rx_mq_hw_init(trans);
1176 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1178 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1181 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1187 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1190 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1196 return _iwl_pcie_rx_init(trans);
1199 void iwl_pcie_rx_free(struct iwl_trans *trans)
1201 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1202 size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
1211 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1217 iwl_pcie_free_rbs_pool(trans);
1220 dma_free_coherent(trans->dev,
1221 rb_stts_size * trans->num_rx_queues,
1228 for (i = 0; i < trans->num_rx_queues; i++) {
1231 iwl_pcie_free_rxq_dma(trans, rxq);
1260 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1264 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1292 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1298 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1299 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1307 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1324 IWL_DEBUG_RX(trans,
1337 IWL_DEBUG_RX(trans,
1340 iwl_get_cmd_string(trans,
1354 maybe_trace_iwlwifi_dev_rx(trans, pkt, len);
1376 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1379 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1400 iwl_pcie_hcmd_complete(trans, &rxcb);
1402 IWL_WARN(trans, "Claim null rxb?\n");
1406 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1421 dma_map_page(trans->dev, rxb->page, rxb->offset,
1424 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1432 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1438 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1441 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1445 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1452 if (!trans->trans_cfg->mq_rx_supported) {
1458 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
1463 } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1481 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1489 iwl_force_nmi(trans);
1496 static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
1498 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1512 r = iwl_get_closed_rb_stts(trans, rxq);
1520 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1535 IWL_DEBUG_TPT(trans,
1540 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1542 rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1563 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1576 iwl_pcie_rx_allocator_get(trans, rxq);
1586 IWL_DEBUG_TPT(trans,
1594 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1595 iwl_pcie_rxq_restock(trans, rxq);
1618 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1620 iwl_pcie_rxq_restock(trans, rxq);
1641 struct iwl_trans *trans = trans_pcie->trans;
1644 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1646 if (WARN_ON(entry->entry >= trans->num_rx_queues))
1651 IWL_ERR(trans,
1658 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1659 IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
1663 iwl_pcie_clear_irq(trans, entry->entry);
1666 lock_map_release(&trans->sync_cmd_lockdep_map);
1674 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1679 if (trans->cfg->internal_wimax_coex &&
1680 !trans->cfg->apmg_not_supported &&
1681 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1683 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1685 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1686 iwl_op_mode_wimax_active(trans->op_mode);
1687 wake_up(&trans->wait_command_queue);
1691 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1692 if (!trans->txqs.txq[i])
1694 del_timer(&trans->txqs.txq[i]->stuck_timer);
1699 iwl_trans_fw_error(trans, false);
1701 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1702 wake_up(&trans->wait_command_queue);
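iwl_pcie_irq_handle_error() unwinds in a fixed order: a WiMAX-coexistence special case first, then every TX queue's stuck-timer is stopped, the firmware error is reported via iwl_trans_fw_error(), and finally STATUS_SYNC_HCMD_ACTIVE is cleared and wait_command_queue is woken so a synchronous host command cannot sleep forever on a dead device. The wake-the-waiter tail of that sequence, as a module-context sketch with stand-in state:

#include <linux/wait.h>
#include <linux/bitops.h>

#define STATUS_SYNC_HCMD_ACTIVE 0	/* stand-in bit number */

struct trans_state {
	unsigned long status;
	wait_queue_head_t wait_command_queue;
};

/* After a fatal firmware error, release any synchronous-command waiter;
 * the waiter wakes, re-checks the bit, and sees the command failed. */
static void fail_pending_hcmd(struct trans_state *t)
{
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &t->status);
	wake_up(&t->wait_command_queue);
}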
1705 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1709 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1711 trace_iwlwifi_dev_irq(trans->dev);
1714 inta = iwl_read32(trans, CSR_INT);
1733 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1735 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1740 trace_iwlwifi_dev_irq(trans->dev);
1746 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1756 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1763 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
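iwl_pcie_int_cause_ict() reads interrupt causes out of the ICT table, a DMA-coherent array the hardware appends cause words to. The driver drains consecutive non-zero entries starting at ict_index and ORs them into a single value; the model below captures that drain loop (entry width, zeroing behavior, and table size here are simplified assumptions, and the real code also traces each read):

#include <stdio.h>

#define ICT_COUNT 8u	/* toy size; the real table holds ICT_SIZE/4 words */

static unsigned ict_tbl[ICT_COUNT] = { 0x2, 0x10, 0 };	/* device-written */
static unsigned ict_index;

static unsigned drain_ict(void)
{
	unsigned val = 0;

	/* consume consecutive non-zero cause words, zeroing as we go */
	while (ict_tbl[ict_index]) {
		val |= ict_tbl[ict_index];
		ict_tbl[ict_index] = 0;
		ict_index = (ict_index + 1) & (ICT_COUNT - 1);
	}
	return val;
}

int main(void)
{
	printf("causes: 0x%x\n", drain_ict());	/* prints 0x12 */
	return 0;
}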
1785 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
1787 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1792 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1793 hw_rfkill = iwl_is_rfkill_set(trans);
1795 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1796 set_bit(STATUS_RFKILL_HW, &trans->status);
1801 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1803 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1809 iwl_trans_pcie_rf_kill(trans, report, from_irq);
1814 &trans->status))
1815 IWL_DEBUG_RF_KILL(trans,
1817 wake_up(&trans->wait_command_queue);
1819 clear_bit(STATUS_RFKILL_HW, &trans->status);
1821 clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
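iwl_pcie_handle_rfkill_irq() is edge-triggered in software: it samples the previous op-mode rfkill state, folds in the fresh hardware state from iwl_is_rfkill_set(), and only calls iwl_trans_pcie_rf_kill() when the reported state actually changed. A self-contained model of that edge detection (the debugfs rfkill override and the command-queue wakeup are omitted):

#include <stdbool.h>
#include <stdio.h>

static bool opmode_rfkill;	/* stand-in for STATUS_RFKILL_OPMODE */

static void handle_rfkill(bool hw_rfkill)
{
	bool prev = opmode_rfkill;

	opmode_rfkill = hw_rfkill;	/* HW kill drives the op-mode state */
	if (prev != opmode_rfkill)	/* report edges only, not levels */
		printf("RF_KILL toggled to %s\n",
		       opmode_rfkill ? "disable radio" : "enable radio");
}

int main(void)
{
	handle_rfkill(true);	/* reports: disable radio */
	handle_rfkill(true);	/* no change, no report */
	handle_rfkill(false);	/* reports: enable radio */
	return 0;
}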
1827 struct iwl_trans *trans = dev_id;
1828 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1834 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1842 inta = iwl_pcie_int_cause_ict(trans);
1844 inta = iwl_pcie_int_cause_non_ict(trans);
1847 IWL_DEBUG_ISR(trans,
1850 iwl_read32(trans, CSR_INT_MASK),
1851 iwl_read32(trans, CSR_FH_INT_STATUS));
1853 IWL_DEBUG_ISR(trans,
1866 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1871 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1872 _iwl_enable_interrupts(trans);
1874 lock_map_release(&trans->sync_cmd_lockdep_map);
1883 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1899 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1902 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1903 inta, iwl_read32(trans, CSR_INT_MASK));
1909 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1912 iwl_disable_interrupts(trans);
1915 iwl_pcie_irq_handle_error(trans);
1924 IWL_DEBUG_ISR(trans,
1931 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1933 if (trans->trans_cfg->gen2) {
1938 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1949 iwl_pcie_handle_rfkill_irq(trans, true);
1955 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1962 IWL_ERR(trans, "Microcode SW error detected. "
1965 iwl_pcie_irq_handle_error(trans);
1971 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1972 iwl_pcie_rxq_check_wrptr(trans);
1973 iwl_pcie_txq_check_wrptrs(trans);
1985 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1988 iwl_write32(trans, CSR_FH_INT_STATUS,
1993 iwl_write32(trans,
2008 iwl_write8(trans, CSR_INT_PERIODIC_REG,
2019 iwl_write8(trans, CSR_INT_PERIODIC_REG,
2034 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
2035 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2049 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
2054 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
2061 if (test_bit(STATUS_INT_ENABLED, &trans->status))
2062 _iwl_enable_interrupts(trans);
2065 iwl_enable_fw_load_int(trans);
2068 iwl_enable_rfkill_int(trans);
2071 iwl_enable_fw_load_int_ctx_info(trans);
2076 lock_map_release(&trans->sync_cmd_lockdep_map);
2087 void iwl_pcie_free_ict(struct iwl_trans *trans)
2089 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2092 dma_free_coherent(trans->dev, ICT_SIZE,
2105 int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2107 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2110 dma_alloc_coherent(trans->dev, ICT_SIZE,
2117 iwl_pcie_free_ict(trans);
2127 void iwl_pcie_reset_ict(struct iwl_trans *trans)
2129 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2136 _iwl_disable_interrupts(trans);
2146 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
2148 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2151 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2152 _iwl_enable_interrupts(trans);
2157 void iwl_pcie_disable_ict(struct iwl_trans *trans)
2159 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2168 struct iwl_trans *trans = data;
2170 if (!trans)
2178 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2192 struct iwl_trans *trans = trans_pcie->trans;
2205 lock_map_acquire(&trans->sync_cmd_lockdep_map);
2208 inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2209 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2213 iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
2214 iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2217 trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2220 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2221 lock_map_release(&trans->sync_cmd_lockdep_map);
2226 IWL_DEBUG_ISR(trans,
2229 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2231 IWL_DEBUG_ISR(trans,
2261 IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
2270 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2286 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2292 IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
2299 IWL_ERR(trans,
2311 iwl_pcie_irq_handle_error(trans);
2317 IWL_DEBUG_ISR(trans,
2320 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2322 IWL_DEBUG_ISR(trans,
2331 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2333 if (trans->trans_cfg->gen2) {
2335 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2349 IWL_DEBUG_ISR(trans,
2356 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2357 iwl_pcie_rxq_check_wrptr(trans);
2358 iwl_pcie_txq_check_wrptrs(trans);
2366 IWL_ERR(trans, "Microcode CT kill error detected.\n");
2372 iwl_pcie_handle_rfkill_irq(trans, true);
2375 IWL_ERR(trans,
2379 trans->dbg.hw_error = true;
2380 iwl_pcie_irq_handle_error(trans);
2384 IWL_DEBUG_ISR(trans, "Reset flow completed\n");
2390 iwl_pcie_clear_irq(trans, entry->entry);
2392 lock_map_release(&trans->sync_cmd_lockdep_map);
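The MSI-X handler ends where it began: causes were read from CSR_MSIX_FH_INT_CAUSES_AD and CSR_MSIX_HW_INT_CAUSES_AD under irq_lock, the handled FH bits were immediately written back to acknowledge them, and the final iwl_pcie_clear_irq() releases the vector. A stand-alone model of such write-back-to-clear cause registers (the register behavior is inferred from the read-then-write-back sequence in the listing):

#include <stdio.h>

/* Stand-in write-1-to-clear cause register. */
static unsigned fh_causes = 0x5;	/* bits 0 and 2 pending */

static unsigned read_and_ack(unsigned mask)
{
	unsigned pending = fh_causes & mask;

	fh_causes &= ~pending;	/* writing the bits back clears them */
	return pending;
}

int main(void)
{
	unsigned handled = read_and_ack(0x1);

	printf("handled 0x%x, still pending 0x%x\n", handled, fh_causes);
	return 0;
}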