Lines matching defs:ab — references to struct ath11k_base *ab in ath11k's dp_tx.c (drivers/net/wireless/ath/ath11k); the leading number on each line is the source line number.

19 struct ath11k_base *ab = arvif->ar->ab;
21 if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
85 struct ath11k_base *ab = ar->ab;
86 struct ath11k_dp *dp = &ab->dp;
101 if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
110 ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
115 ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
116 ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
128 if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
129 !ab->hw_params.tcl_ring_retry) {
130 atomic_inc(&ab->soc_stats.tx_err.misc_fail);
195 if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
207 atomic_inc(&ab->soc_stats.tx_err.misc_fail);
211 ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
212 if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
213 atomic_inc(&ab->soc_stats.tx_err.misc_fail);
214 ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
225 tcl_ring = &ab->hal.srng_list[hal_ring_id];
229 ath11k_hal_srng_access_begin(ab, tcl_ring);
231 hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
236 ath11k_hal_srng_access_end(ab, tcl_ring);
237 ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
246 if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
247 ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
255 ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
258 ath11k_hal_srng_access_end(ab, tcl_ring);
260 ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
264 ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
272 dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
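
Lines 19-272 above are from ath11k_dp_tx(), the TCL enqueue path: a TX ring is chosen through the hw_params ring selector, the skb is DMA-mapped, a TCL descriptor is reserved between ath11k_hal_srng_access_begin() and ath11k_hal_srng_access_end(), and the DMA mapping is unwound when no descriptor is free. Below is a minimal user-space sketch of that reserve-or-roll-back pattern; the ring and its helpers are hypothetical stand-ins, not ath11k APIs.

    #include <stdio.h>
    #include <stdlib.h>

    #define RING_DEPTH 4    /* hypothetical; real TCL rings are much deeper */

    struct ring {
        void *slots[RING_DEPTH];
        int head;     /* producer index */
        int count;    /* occupied slots */
    };

    /* Reserve the next source-ring entry; NULL when the ring is full,
     * mirroring the ath11k_hal_srng_src_get_next_entry() == NULL case. */
    static void **ring_get_next_entry(struct ring *r)
    {
        if (r->count == RING_DEPTH)
            return NULL;
        void **slot = &r->slots[r->head];
        r->head = (r->head + 1) % RING_DEPTH;
        r->count++;
        return slot;
    }

    int main(void)
    {
        struct ring tcl = { { NULL }, 0, 0 };

        for (int i = 0; i < 6; i++) {
            void *buf = malloc(64);    /* stands in for dma_map_single() */
            void **desc = ring_get_next_entry(&tcl);
            if (!desc) {
                /* Ring full: undo the mapping, as dp_tx does with
                 * dma_unmap_single() before bumping desc_na and
                 * possibly retrying another ring. */
                free(buf);
                printf("pkt %d: ring full, mapping rolled back\n", i);
                continue;
            }
            *desc = buf;    /* descriptor setup */
            printf("pkt %d: enqueued\n", i);
        }

        for (int i = 0; i < tcl.count; i++)
            free(tcl.slots[i]);
        return 0;
    }

With RING_DEPTH at 4, the last two packets hit the full-ring path; in the driver that is where soc_stats.tx_err.desc_na[ring_id] is incremented and, when hw_params.tcl_ring_retry allows it, another ring is tried (lines 128-130 and 246-247 above).
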
286 static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
299 ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
306 dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
309 ar = ab->pdevs[mac_id].ar;
315 ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
331 ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
344 dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
365 spin_lock_bh(&ab->base_lock);
366 peer = ath11k_peer_find_by_id(ab, ts->peer_id);
368 ath11k_dbg(ab, ATH11K_DBG_DATA,
371 spin_unlock_bh(&ab->base_lock);
375 spin_unlock_bh(&ab->base_lock);
385 ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
412 ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
417 ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
425 ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
449 struct ath11k_base *ab = ar->ab;
461 spin_lock_bh(&ab->base_lock);
462 peer = ath11k_peer_find_by_id(ab, ts->peer_id);
464 ath11k_dbg(ab, ATH11K_DBG_DP_TX,
502 ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
513 ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
523 ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
543 spin_unlock_bh(&ab->base_lock);
552 struct ath11k_base *ab = ar->ab;
566 dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
568 if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
597 ab->hw_params.single_pdev_only) {
620 spin_lock_bh(&ab->base_lock);
621 peer = ath11k_peer_find_by_id(ab, ts->peer_id);
623 ath11k_dbg(ab, ATH11K_DBG_DATA,
626 spin_unlock_bh(&ab->base_lock);
642 spin_unlock_bh(&ab->base_lock);
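
The three status paths above (lines 365-375, 461-543, and 620-642) share one locking pattern: take ab->base_lock with spin_lock_bh(), look the peer up via ath11k_peer_find_by_id() using ts->peer_id, and drop the lock early when the peer is already gone. A minimal sketch of that guarded lookup, modeled with a pthread mutex; the peer table and helpers are hypothetical.

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical model of the peer table that ab->base_lock protects. */
    static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
    static int peers[8];    /* nonzero = peer_id currently exists */

    /* Caller must hold base_lock, like ath11k_peer_find_by_id(). */
    static int peer_find_by_id(int peer_id)
    {
        return peer_id >= 0 && peer_id < 8 && peers[peer_id];
    }

    static void tx_status_for_peer(int peer_id)
    {
        pthread_mutex_lock(&base_lock);    /* spin_lock_bh(&ab->base_lock) */
        if (!peer_find_by_id(peer_id)) {
            /* Peer already deleted: drop the lock and skip the
             * per-station update, as all three paths above do. */
            pthread_mutex_unlock(&base_lock);
            printf("no peer %d, dropping tx status\n", peer_id);
            return;
        }
        printf("updating stats for peer %d\n", peer_id);
        pthread_mutex_unlock(&base_lock);
    }

    int main(void)
    {
        peers[3] = 1;
        tx_status_for_peer(3);    /* found: stats updated under the lock */
        tx_status_for_peer(5);    /* gone: early unlock-and-return path */
        return 0;
    }
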
647 static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
678 void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
681 struct ath11k_dp *dp = &ab->dp;
683 struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
693 ath11k_hal_srng_access_begin(ab, status_ring);
697 (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
704 if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
708 ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
711 ath11k_hal_srng_access_end(ab, status_ring);
722 ath11k_dp_tx_status_parse(ab, tx_status, &ts);
730 ath11k_dp_tx_process_htt_tx_complete(ab,
740 ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
748 ar = ab->pdevs[mac_id].ar;
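
Lines 678-748 are ath11k_dp_tx_completion_handler(): completed descriptors are drained from the status ring while ring access is held, the warning at lines 704-708 fires when the local status FIFO fills, and each status is parsed and its msdu completed only after ath11k_hal_srng_access_end(). A toy sketch of that drain-then-process split; sizes and ids are made up.

    #include <stdio.h>

    #define FIFO_DEPTH 8    /* stands in for the driver's tx status FIFO size */

    int main(void)
    {
        int ring[] = { 11, 12, 13 };    /* completed msdu_ids on the status ring */
        int fifo[FIFO_DEPTH];
        int n = 0;

        /* Phase 1: between access_begin() and access_end(), only copy
         * descriptors out; warn when the local FIFO fills, as the
         * handler does when status_fifo is full. */
        for (unsigned int i = 0; i < sizeof(ring) / sizeof(ring[0]); i++) {
            if (n == FIFO_DEPTH) {
                fprintf(stderr, "status_fifo full, leaving descs on ring\n");
                break;
            }
            fifo[n++] = ring[i];
        }

        /* Phase 2: ring access has ended; parse each status and
         * complete the msdu without holding the ring. */
        for (int i = 0; i < n; i++)
            printf("complete msdu_id %d\n", fifo[i]);
        return 0;
    }
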
757 int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
763 struct ath11k_dp *dp = &ab->dp;
768 if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
771 cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
772 cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
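
Lines 757-772 are from ath11k_dp_tx_send_reo_cmd(): once ATH11K_FLAG_CRASH_FLUSH is set, REO commands are refused outright; otherwise the command is written to the REO command ring and identified afterwards by the returned cmd_num. A sketch of that gate-then-tag shape; the helper names are hypothetical, and the -ESHUTDOWN error value is an assumption (the excerpt does not show what the early return yields).

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool crash_flush;      /* models ATH11K_FLAG_CRASH_FLUSH */
    static int next_cmd_num = 1;

    /* Stands in for ath11k_hal_reo_cmd_send(): returns the command number
     * used later to match the completion against the pending command. */
    static int reo_cmd_send(const char *label)
    {
        printf("queued REO cmd '%s' as #%d\n", label, next_cmd_num);
        return next_cmd_num++;
    }

    static int send_reo_cmd(const char *label)
    {
        if (crash_flush)
            return -ESHUTDOWN;    /* assumed error value; see lead-in */
        return reo_cmd_send(label);
    }

    int main(void)
    {
        printf("ret=%d\n", send_reo_cmd("flush"));
        crash_flush = true;       /* firmware crashed: commands now refused */
        printf("ret=%d\n", send_reo_cmd("flush"));
        return 0;
    }
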
806 ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
822 if (!ab->hw_params.rx_mac_buf_ring) {
862 ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
868 int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
872 struct hal_srng *srng = &ab->hal.srng_list[ring_id];
882 skb = ath11k_htc_alloc_skb(ab, len);
887 ath11k_hal_srng_get_params(ab, srng, &params);
889 hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
890 tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
892 ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
919 ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
967 ath11k_dbg(ab, ATH11K_DBG_DP_TX,
972 ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
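
Lines 868-972 (ath11k_dp_tx_htt_srng_setup()) follow the recurring HTT host-to-target shape: allocate an HTC skb, read the ring's HAL parameters plus head/tail pointer addresses, map the HAL ring id to an HTT ring id and type, fill the message, and send it on the DP endpoint. A flattened sketch of the fill-then-send step; this struct layout and the msg_type value are invented for illustration, not the real HTT wire format.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical, flattened view of an HTT H2T ring-setup message. */
    struct htt_srng_setup_msg {
        uint8_t  msg_type;
        uint8_t  ring_id;        /* HTT ring id derived from the HAL id */
        uint8_t  ring_type;
        uint16_t entry_size;
        uint32_t ring_base_lo;   /* DMA address of the ring, split in two */
        uint32_t ring_base_hi;
    };

    /* Stands in for ath11k_htc_send(&ab->htc, ab->dp.eid, skb). */
    static int htc_send(const void *msg, size_t len)
    {
        (void)msg;
        printf("sending %zu-byte HTT message\n", len);
        return 0;
    }

    int main(void)
    {
        struct htt_srng_setup_msg msg;
        uint64_t ring_base = 0x1000beef000ULL;    /* made-up DMA address */

        memset(&msg, 0, sizeof(msg));
        msg.msg_type = 0x0b;    /* invented H2T message type */
        msg.ring_id = 2;
        msg.ring_type = 1;
        msg.entry_size = 32;
        msg.ring_base_lo = (uint32_t)ring_base;
        msg.ring_base_hi = (uint32_t)(ring_base >> 32);

        return htc_send(&msg, sizeof(msg));
    }

The same alloc-fill-send pattern repeats in the version request, ppdu stats request, RX filter setup, and full-monitor setup lines below.
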
986 int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
988 struct ath11k_dp *dp = &ab->dp;
996 skb = ath11k_htc_alloc_skb(ab, len);
1005 ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
1014 ath11k_warn(ab, "htt target version request timed out\n");
1019 ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
1029 struct ath11k_base *ab = ar->ab;
1030 struct ath11k_dp *dp = &ab->dp;
1038 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
1039 skb = ath11k_htc_alloc_skb(ab, len);
1052 ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
1062 int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
1068 struct hal_srng *srng = &ab->hal.srng_list[ring_id];
1076 skb = ath11k_htc_alloc_skb(ab, len);
1081 ath11k_hal_srng_get_params(ab, srng, &params);
1083 ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
1117 ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
1134 struct ath11k_base *ab = ar->ab;
1135 struct ath11k_dp *dp = &ab->dp;
1142 skb = ath11k_htc_alloc_skb(ab, len);
1152 if (ab->hw_params.single_pdev_only)
1167 ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
1169 ath11k_warn(ab, "failed to send htt type stats request: %d",
1181 struct ath11k_base *ab = ar->ab;
1185 if (ab->hw_params.full_monitor_mode) {
1186 ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
1189 ath11k_err(ab, "failed to setup full monitor %d\n", ret);
1214 if (ab->hw_params.rxdma1_enable) {
1215 ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
1221 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
1223 ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
1234 for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
1246 ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
1253 if (!ar->ab->hw_params.rxdma1_enable)
1254 mod_timer(&ar->ab->mon_reap_timer, jiffies +
1260 int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
1267 skb = ath11k_htc_alloc_skb(ab, len);
1287 ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
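
Lines 1181-1287 wire up monitor mode: with hw_params.full_monitor_mode an extra full-monitor HTT setup is sent first, RX filters are then programmed once per rxdma ring (num_rxmda_per_pdev), and on hardware without rxdma1 the mon_reap_timer is armed instead. A small sketch of the per-ring loop with early exit on the first failure; all names are stand-ins.

    #include <stdio.h>

    #define NUM_RXDMA_PER_PDEV 2    /* models ab->hw_params.num_rxmda_per_pdev */

    /* Stands in for ath11k_dp_tx_htt_rx_filter_setup(); fails on ring 1
     * purely to exercise the early-exit path. */
    static int rx_filter_setup(int ring)
    {
        return ring == 1 ? -1 : 0;
    }

    int main(void)
    {
        /* Program every rxdma ring of the pdev, bailing out on the
         * first failure the way the monitor-mode setup loops do. */
        for (int i = 0; i < NUM_RXDMA_PER_PDEV; i++) {
            int ret = rx_filter_setup(i);
            if (ret) {
                fprintf(stderr,
                        "failed to setup filter for ring %d: %d\n",
                        i, ret);
                return ret;
            }
        }
        printf("monitor rings configured\n");
        return 0;
    }
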