Lines Matching refs:trans_pcie (a sketch of the recurring access pattern follows the listing)

205 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
209 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
247 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
269 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
364 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
365 unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
366 unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
370 if (trans_pcie->rx_page_order > 0)
373 if (trans_pcie->alloc_page) {
374 spin_lock_bh(&trans_pcie->alloc_page_lock);
376 if (trans_pcie->alloc_page) {
377 *offset = trans_pcie->alloc_page_used;
378 page = trans_pcie->alloc_page;
379 trans_pcie->alloc_page_used += rbsize;
380 if (trans_pcie->alloc_page_used >= allocsize)
381 trans_pcie->alloc_page = NULL;
384 spin_unlock_bh(&trans_pcie->alloc_page_lock);
387 spin_unlock_bh(&trans_pcie->alloc_page_lock);
391 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
395 trans_pcie->rx_page_order);
407 spin_lock_bh(&trans_pcie->alloc_page_lock);
408 if (!trans_pcie->alloc_page) {
410 trans_pcie->alloc_page = page;
411 trans_pcie->alloc_page_used = rbsize;
413 spin_unlock_bh(&trans_pcie->alloc_page_lock);
432 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
454 __free_pages(page, trans_pcie->rx_page_order);
468 trans_pcie->rx_buf_bytes,
475 __free_pages(page, trans_pcie->rx_page_order);
490 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
493 if (!trans_pcie->rx_pool)
496 for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
497 if (!trans_pcie->rx_pool[i].page)
499 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
500 trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
501 __free_pages(trans_pcie->rx_pool[i].page,
502 trans_pcie->rx_page_order);
503 trans_pcie->rx_pool[i].page = NULL;
515 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
516 struct iwl_rb_allocator *rba = &trans_pcie->rba;
562 trans_pcie->rx_buf_bytes,
566 __free_pages(page, trans_pcie->rx_page_order);
616 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
617 struct iwl_rb_allocator *rba = &trans_pcie->rba;
652 struct iwl_trans_pcie *trans_pcie =
655 iwl_pcie_rx_allocator(trans_pcie->trans);
716 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
749 rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
751 trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
757 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
767 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
769 struct iwl_rb_allocator *rba = &trans_pcie->rba;
772 if (WARN_ON(trans_pcie->rxq))
775 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
777 trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
778 sizeof(trans_pcie->rx_pool[0]),
780 trans_pcie->global_table =
781 kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
782 sizeof(trans_pcie->global_table[0]),
784 if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
785 !trans_pcie->global_table) {
796 trans_pcie->base_rb_stts =
799 &trans_pcie->base_rb_stts_dma,
801 if (!trans_pcie->base_rb_stts) {
807 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
817 if (trans_pcie->base_rb_stts) {
820 trans_pcie->base_rb_stts,
821 trans_pcie->base_rb_stts_dma);
822 trans_pcie->base_rb_stts = NULL;
823 trans_pcie->base_rb_stts_dma = 0;
825 kfree(trans_pcie->rx_pool);
826 trans_pcie->rx_pool = NULL;
827 kfree(trans_pcie->global_table);
828 trans_pcie->global_table = NULL;
829 kfree(trans_pcie->rxq);
830 trans_pcie->rxq = NULL;
837 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
841 switch (trans_pcie->rx_buf_size) {
905 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
909 switch (trans_pcie->rx_buf_size) {
939 trans_pcie->rxq[i].bd_dma);
943 trans_pcie->rxq[i].used_bd_dma);
947 trans_pcie->rxq[i].rb_stts_dma);
1011 struct iwl_trans_pcie *trans_pcie;
1015 trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
1016 trans = trans_pcie->trans;
1024 spin_lock(&trans_pcie->irq_lock);
1027 spin_unlock(&trans_pcie->irq_lock);
1038 struct iwl_trans_pcie *trans_pcie;
1042 trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
1043 trans = trans_pcie->trans;
1053 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
1057 spin_lock(&trans_pcie->irq_lock);
1059 spin_unlock(&trans_pcie->irq_lock);
1069 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1072 if (unlikely(!trans_pcie->rxq))
1076 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1085 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1087 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1090 if (!trans_pcie->rxq) {
1095 def_rxq = trans_pcie->rxq;
1113 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1136 if (trans_pcie->msix_enabled)
1139 netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
1148 trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
1154 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1160 trans_pcie->global_table[i] = rxb;
1172 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1181 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1183 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1185 spin_lock_bh(&trans_pcie->rxq->lock);
1186 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1187 spin_unlock_bh(&trans_pcie->rxq->lock);
1206 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1208 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1215 if (!trans_pcie->rxq) {
1224 if (trans_pcie->base_rb_stts) {
1227 trans_pcie->base_rb_stts,
1228 trans_pcie->base_rb_stts_dma);
1229 trans_pcie->base_rb_stts = NULL;
1230 trans_pcie->base_rb_stts_dma = 0;
1234 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1243 kfree(trans_pcie->rx_pool);
1244 kfree(trans_pcie->global_table);
1245 kfree(trans_pcie->rxq);
1247 if (trans_pcie->alloc_page)
1248 __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1269 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1270 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1303 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1306 int max_len = trans_pcie->rx_buf_bytes;
1320 ._rx_page_order = trans_pcie->rx_page_order,
1371 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1372 if (trans_pcie->no_reclaim_cmds[i] ==
1417 __free_pages(rxb->page, trans_pcie->rx_page_order);
1427 trans_pcie->rx_buf_bytes,
1435 __free_pages(rxb->page, trans_pcie->rx_page_order);
1450 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1479 if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1482 rxb = trans_pcie->global_table[vid - 1];
1503 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1508 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1511 rxq = &trans_pcie->rxq[queue];
1528 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1532 atomic_read(&trans_pcie->rba.req_pending) *
1645 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1646 struct iwl_trans *trans = trans_pcie->trans;
1654 if (!trans_pcie->rxq) {
1662 rxq = &trans_pcie->rxq[entry->entry];
1740 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1750 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1751 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1762 trans_pcie->ict_index, read);
1763 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1764 trans_pcie->ict_index =
1765 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1767 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1768 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1792 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1793 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1796 mutex_lock(&trans_pcie->mutex);
1803 if (trans_pcie->opmode_down)
1815 mutex_unlock(&trans_pcie->mutex);
1825 if (trans_pcie->opmode_down)
1833 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1834 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1841 spin_lock_bh(&trans_pcie->irq_lock);
1846 if (likely(trans_pcie->use_ict))
1854 inta, trans_pcie->inta_mask,
1857 if (inta & (~trans_pcie->inta_mask))
1860 inta & (~trans_pcie->inta_mask));
1863 inta &= trans_pcie->inta_mask;
1878 spin_unlock_bh(&trans_pcie->irq_lock);
1889 spin_unlock_bh(&trans_pcie->irq_lock);
1904 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1910 spin_unlock_bh(&trans_pcie->irq_lock);
1943 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2030 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2032 __napi_schedule(&trans_pcie->rxq[0].napi);
2044 trans_pcie->ucode_write_complete = true;
2045 wake_up(&trans_pcie->ucode_write_waitq);
2047 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2048 trans_pcie->imr_status = IMR_D2S_COMPLETED;
2049 wake_up(&trans_pcie->ucode_write_waitq);
2058 if (inta & ~(trans_pcie->inta_mask)) {
2060 inta & ~trans_pcie->inta_mask);
2064 spin_lock_bh(&trans_pcie->irq_lock);
2077 spin_unlock_bh(&trans_pcie->irq_lock);
2094 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2096 if (trans_pcie->ict_tbl) {
2098 trans_pcie->ict_tbl,
2099 trans_pcie->ict_tbl_dma);
2100 trans_pcie->ict_tbl = NULL;
2101 trans_pcie->ict_tbl_dma = 0;
2112 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2114 trans_pcie->ict_tbl =
2116 &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2117 if (!trans_pcie->ict_tbl)
2121 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2134 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2137 if (!trans_pcie->ict_tbl)
2140 spin_lock_bh(&trans_pcie->irq_lock);
2143 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2145 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2154 trans_pcie->use_ict = true;
2155 trans_pcie->ict_index = 0;
2156 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2158 spin_unlock_bh(&trans_pcie->irq_lock);
2164 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2166 spin_lock_bh(&trans_pcie->irq_lock);
2167 trans_pcie->use_ict = false;
2168 spin_unlock_bh(&trans_pcie->irq_lock);
2196 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2197 struct iwl_trans *trans = trans_pcie->trans;
2198 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2204 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
2207 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
2212 spin_lock_bh(&trans_pcie->irq_lock);
2220 spin_unlock_bh(&trans_pcie->irq_lock);
2233 entry->entry, inta_fh, trans_pcie->fh_mask,
2235 if (inta_fh & ~trans_pcie->fh_mask)
2238 inta_fh & ~trans_pcie->fh_mask);
2241 inta_fh &= trans_pcie->fh_mask;
2243 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2246 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2248 __napi_schedule(&trans_pcie->rxq[0].napi);
2253 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2256 if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
2258 __napi_schedule(&trans_pcie->rxq[1].napi);
2265 trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2270 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2271 trans_pcie->imr_status = IMR_D2S_COMPLETED;
2272 wake_up(&trans_pcie->ucode_write_waitq);
2281 trans_pcie->ucode_write_complete = true;
2282 wake_up(&trans_pcie->ucode_write_waitq);
2285 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2286 trans_pcie->imr_status = IMR_D2S_COMPLETED;
2287 wake_up(&trans_pcie->ucode_write_waitq);
2309 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2310 trans_pcie->imr_status = IMR_D2S_ERROR;
2311 wake_up(&trans_pcie->imr_waitq);
2312 } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
2313 trans_pcie->fw_reset_state = FW_RESET_ERROR;
2314 wake_up(&trans_pcie->fw_reset_waitq);
2324 entry->entry, inta_hw, trans_pcie->hw_mask,
2326 if (inta_hw & ~trans_pcie->hw_mask)
2329 inta_hw & ~trans_pcie->hw_mask);
2332 inta_hw &= trans_pcie->hw_mask;
2340 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2349 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
2351 le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2357 trans_pcie->sx_complete = true;
2358 wake_up(&trans_pcie->sx_waitq);
2390 trans_pcie->fw_reset_state = FW_RESET_OK;
2391 wake_up(&trans_pcie->fw_reset_waitq);
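
Nearly every function in the listing above opens the same way: it recovers the PCIe-specific private state from the generic transport handle with IWL_TRANS_GET_PCIE_TRANS(trans) (lines 205, 247, 364, ...), and several keep a back-pointer from the private struct to the transport (lines 655, 1016, 1646). Below is a minimal, self-contained sketch of that pattern, plus the page carve-out visible on lines 364-413. The struct layout, the lower-case helper, and iwl_pcie_take_rb_chunk() are illustrative stand-ins, not the driver's real definitions; only the member names (rx_page_order, alloc_page, alloc_page_used, ...) are taken from the listing, and locking is reduced to comments so the sketch compiles as plain C99.

#include <stddef.h>

/* Generic transport handle; bus-specific state is allocated past the end
 * of the struct, which is what lets the retrieval helper be a simple cast. */
struct iwl_trans {
	void *dev;               /* generic fields, trimmed for the sketch */
	char trans_specific[];   /* PCIe (or other bus) private area */
};

/* Illustrative subset of struct iwl_trans_pcie; member names come from the
 * listing above, the real struct has many more fields. */
struct iwl_trans_pcie {
	struct iwl_trans *trans;        /* back-pointer, cf. lines 655, 1016 */
	unsigned int rx_page_order;     /* RX pages are 2^order pages big */
	unsigned int alloc_page_used;   /* bytes already handed out */
	void *alloc_page;               /* partially-used high-order page */
	/* in the driver a spinlock, alloc_page_lock, guards the two fields
	 * above; cf. lines 374-387 and 407-413 */
};

/* Lower-case stand-in for IWL_TRANS_GET_PCIE_TRANS(). */
static inline struct iwl_trans_pcie *
iwl_trans_get_pcie_trans(struct iwl_trans *trans)
{
	return (struct iwl_trans_pcie *)(void *)trans->trans_specific;
}

/* Mirrors the carve-out on lines 373-387: hand out rbsize-byte chunks of a
 * cached high-order page until allocsize bytes are used, then drop the
 * cached page so the next caller allocates a fresh one. */
static void *iwl_pcie_take_rb_chunk(struct iwl_trans *trans,
				    unsigned int rbsize,
				    unsigned int allocsize,
				    unsigned int *offset)
{
	struct iwl_trans_pcie *trans_pcie = iwl_trans_get_pcie_trans(trans);
	void *page = NULL;

	/* driver: spin_lock_bh(&trans_pcie->alloc_page_lock); */
	if (trans_pcie->alloc_page) {
		*offset = trans_pcie->alloc_page_used;
		page = trans_pcie->alloc_page;
		trans_pcie->alloc_page_used += rbsize;
		if (trans_pcie->alloc_page_used >= allocsize)
			trans_pcie->alloc_page = NULL;
	}
	/* driver: spin_unlock_bh(&trans_pcie->alloc_page_lock); */
	return page;
}

The carve-out exists because the receive-buffer size can be smaller than the allocation granularity: with rx_page_order > 0 (line 370) one allocation covers several receive buffers, so the remainder is cached in alloc_page and consumed chunk by chunk under alloc_page_lock rather than being freed.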