Lines Matching refs:txq_id

76 int txq_id = txq->id;
87 txq_id != trans->txqs.cmd.q_id &&
98 txq_id, reg);
110 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
113 txq->write_ptr | (txq_id << 8));
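The hits at lines 76-113 are the write-pointer update path. The doorbell value packs the ring index and the queue id into one word, which is why txq_id shows up shifted left by 8. A minimal sketch of that final step, assuming the usual iwl_write32()/HBUS_TARG_WRPTR doorbell (the wakeup/guard checks at lines 87-98 are omitted here):

    /* ring the doorbell: ring index in bits 0..7, txq_id in bits 8..15 */
    IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
    iwl_write32(trans, HBUS_TARG_WRPTR,
                txq->write_ptr | (txq_id << 8));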
187 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
189 struct iwl_txq *txq = trans->txqs.txq[txq_id];
199 txq_id, txq->read_ptr);
201 if (txq_id != trans->txqs.cmd.q_id) {
213 txq_id == trans->txqs.cmd.q_id)
237 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
239 struct iwl_txq *txq = trans->txqs.txq[txq_id];
246 iwl_pcie_txq_unmap(trans, txq_id);
249 if (txq_id == trans->txqs.cmd.q_id)
339 int txq_id;
348 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
349 txq_id++) {
350 struct iwl_txq *txq = trans->txqs.txq[txq_id];
353 FH_MEM_CBBC_QUEUE(trans, txq_id),
357 FH_MEM_CBBC_QUEUE(trans, txq_id),
359 iwl_pcie_txq_unmap(trans, txq_id);
412 int txq_id;
434 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
435 txq_id++)
436 iwl_pcie_txq_unmap(trans, txq_id);
448 int txq_id;
455 for (txq_id = 0;
456 txq_id < trans->trans_cfg->base_params->num_of_queues;
457 txq_id++) {
458 iwl_pcie_txq_free(trans, txq_id);
459 trans->txqs.txq[txq_id] = NULL;
478 int txq_id, slots_num;
518 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
519 txq_id++) {
520 bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
528 trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
529 ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
532 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
535 trans->txqs.txq[txq_id]->id = txq_id;
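Lines 478-535 are the per-queue allocation loop. Reassembled from the fragments above, it walks every hardware queue, points the txq array entry at the preallocated memory, allocates the ring, and records the id back into the queue. A sketch under the assumption that slots_num is simply sized differently for the command queue (the real sizing and error unwinding are more involved):

    for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
         txq_id++) {
            bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

            /* command queue uses a smaller ring than data queues */
            slots_num = cmd_queue ? IWL_CMD_QUEUE_SIZE : IWL_DEFAULT_QUEUE_SIZE;

            trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
            ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
                                cmd_queue);
            if (ret) {
                    IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
                    goto error;
            }
            trans->txqs.txq[txq_id]->id = txq_id;
    }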
550 int txq_id, slots_num;
572 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
573 txq_id++) {
574 bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
582 ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
585 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
595 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
596 trans->txqs.txq[txq_id]->dma_addr >> 8);
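Lines 550-596 are the matching init loop: each queue is initialized and then its TFD ring's DMA base address, shifted right by 8 (the register takes the address in 256-byte units, hence the >> 8), is programmed into the per-queue circular-buffer base register. A condensed sketch, with the slots_num selection left out and the error path abbreviated:

    for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
         txq_id++) {
            bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

            ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
                               cmd_queue);
            if (ret) {
                    IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
                    goto error;
            }

            /* point the FH at this queue's TFD circular buffer */
            iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
                               trans->txqs.txq[txq_id]->dma_addr >> 8);
    }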
651 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
653 struct iwl_txq *txq = trans->txqs.txq[txq_id];
664 WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
666 __func__, txq_id, idx,
690 u16 txq_id)
700 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
704 if (txq_id & 0x1)
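iwl_pcie_txq_set_ratid_map() (lines 690-704) records which RA/TID a queue serves in the scheduler's translation table. Each 32-bit word of that table covers two queues, so the low bit of txq_id selects which 16-bit half to overwrite in a read-modify-write. A sketch of that step; the scd_q2ratid packing is omitted and the iwl_trans_read_mem32()/iwl_trans_write_mem32() helpers are my assumption for the accessors:

    tbl_dw_addr = trans_pcie->scd_base_addr +
                  SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
    tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

    if (txq_id & 0x1)          /* odd queue id: high half of the word */
            tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
    else                       /* even queue id: low half */
            tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

    iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);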
718 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
723 struct iwl_txq *txq = trans->txqs.txq[txq_id];
727 if (test_and_set_bit(txq_id, trans->txqs.queue_used))
728 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
736 if (txq_id == trans->txqs.cmd.q_id &&
741 iwl_scd_txq_set_inactive(trans, txq_id);
744 if (txq_id != trans->txqs.cmd.q_id)
745 iwl_scd_txq_set_chain(trans, txq_id);
751 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
754 iwl_scd_txq_enable_agg(trans, txq_id);
762 iwl_scd_txq_disable_agg(trans, txq_id);
790 (ssn & 0xff) | (txq_id << 8));
795 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
799 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
802 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
807 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
814 if (txq_id == trans->txqs.cmd.q_id &&
816 iwl_scd_enable_set_active(trans, BIT(txq_id));
820 txq_id, fifo, ssn & 0xff);
824 txq_id, ssn & 0xff);
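iwl_trans_pcie_txq_enable() (lines 718-824) marks the queue as used, optionally configures the scheduler (chaining, aggregation, the RA/TID map above), and then synchronizes the pointers to the caller's ssn. The pointer hand-off around lines 790-795 looks roughly like this; HBUS_TARG_WRPTR as the doorbell register is an assumption on my part, the rest follows the fragments:

    /* start HW and SW at the same index so nothing stale is sent */
    txq->read_ptr = (ssn & 0xff);
    txq->write_ptr = (ssn & 0xff);
    iwl_write_direct32(trans, HBUS_TARG_WRPTR,
                       (ssn & 0xff) | (txq_id << 8));
    iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);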
830 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
833 struct iwl_txq *txq = trans->txqs.txq[txq_id];
838 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
843 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
846 trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
847 trans->txqs.txq[txq_id]->frozen = false;
855 if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
857 "queue %d not used", txq_id);
862 iwl_scd_txq_set_inactive(trans, txq_id);
868 iwl_pcie_txq_unmap(trans, txq_id);
869 trans->txqs.txq[txq_id]->ampdu = false;
871 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
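The disable path (lines 838-871) is the mirror image: clear the queue_used bit (warning if it was not set), tell the scheduler the queue is inactive when asked to, then drop any queued frames and the aggregation state. A condensed sketch, with the frozen-state bookkeeping at lines 846-847 elided and the WARN condition simplified; configure_scd is assumed to be the caller's flag:

    if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
            WARN_ONCE(1, "queue %d not used", txq_id);
            return;
    }

    if (configure_scd)
            iwl_scd_txq_set_inactive(trans, txq_id);

    iwl_pcie_txq_unmap(trans, txq_id);
    trans->txqs.txq[txq_id]->ampdu = false;

    IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);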
1193 int txq_id = SEQ_TO_QUEUE(sequence);
1204 if (WARN(txq_id != trans->txqs.cmd.q_id,
1206 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
1238 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
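In the command-completion path (lines 1193-1238), txq_id is not looked up anywhere: it is decoded from the sequence field the firmware echoes back, sanity-checked against the command queue, and only then is the slot reclaimed. The macros below mirror the driver's SEQ_TO_QUEUE()/SEQ_TO_INDEX() style; the exact masks are quoted from memory and should be treated as an assumption:

    /* sequence layout (assumed): bits 8..12 = queue, bits 0..7 = index */
    #define SEQ_TO_QUEUE(s)  (((s) >> 8) & 0x1f)
    #define SEQ_TO_INDEX(s)  ((s) & 0xff)

    int txq_id = SEQ_TO_QUEUE(sequence);
    int index = SEQ_TO_INDEX(sequence);

    if (WARN(txq_id != trans->txqs.cmd.q_id,
             "wrong command queue %d (should be %d)\n",
             txq_id, trans->txqs.cmd.q_id))
            return;

    /* response handling elided */
    iwl_pcie_cmdq_reclaim(trans, txq_id, index);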
1454 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
1470 txq = trans->txqs.txq[txq_id];
1472 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
1473 "TX on unused queue %d\n", txq_id))
1517 txq_id, wifi_seq, txq->write_ptr);
1524 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
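On the transmit side (lines 1454-1524) the same packing runs in the other direction: after the frame is checked against an actually-enabled queue, the device command header's sequence field is stamped with the queue id and the current write index, which is exactly what the completion path above decodes. A sketch of the line-1524 expression completed with the index half; the INDEX_TO_SEQ() name is assumed to follow the same convention:

    /* route the eventual reply back to this queue/slot */
    dev_cmd->hdr.sequence =
            cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                              INDEX_TO_SEQ(txq->write_ptr)));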