Lines matching refs:ar (references to the per-radio struct ath12k *ar context in the ath12k MAC code)
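The references below all revolve around the per-radio context pointer (struct ath12k *ar): taking ar->conf_mutex, logging through ath12k_warn(ar->ab, ...), and looking the radio up from a hw, vif or channel handle. One recurring lookup is matching a channel's center frequency against each radio's [freq_low, freq_high] range (the for_each_ar() loop at source lines 634-637 below). What follows is a minimal userspace sketch of that range check only; struct radio and find_radio_for_freq() are illustrative names, not driver API, and the real code walks the radios of an ath12k_hw with for_each_ar().

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the per-radio context (struct ath12k),
 * keeping only the fields the frequency lookup needs. */
struct radio {
	const char *name;
	unsigned int freq_low;   /* MHz, inclusive */
	unsigned int freq_high;  /* MHz, inclusive */
};

/* Return the radio whose supported range contains center_freq, or NULL
 * if none matches -- the same check as source lines 635-636 below. */
static struct radio *find_radio_for_freq(struct radio *radios, size_t n,
					 unsigned int center_freq)
{
	for (size_t i = 0; i < n; i++) {
		if (center_freq >= radios[i].freq_low &&
		    center_freq <= radios[i].freq_high)
			return &radios[i];
	}
	return NULL;
}

int main(void)
{
	struct radio radios[] = {
		{ "2 GHz pdev", 2412, 2484 },
		{ "5 GHz pdev", 5180, 5885 },
	};
	struct radio *r = find_radio_for_freq(radios, 2, 5500);

	printf("%s\n", r ? r->name : "no match");
	return 0;
}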

245 static int ath12k_start_vdev_delay(struct ath12k *ar,
247 static void ath12k_mac_stop(struct ath12k *ar);
248 static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif);
249 static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif);
538 arvif->ar == arvif_iter->ar)
542 struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
548 arvif_iter.ar = ar;
551 ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
556 ath12k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
572 if (pdev && pdev->ar &&
573 (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
574 arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
590 if (pdev && pdev->ar) {
591 if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
592 return pdev->ar;
606 return pdev ? pdev->ar : NULL;
616 return (pdev->ar ? pdev->ar : NULL);
626 struct ath12k *ar;
629 ar = ah->radio;
632 return ar;
634 for_each_ar(ah, ar, i) {
635 if (channel->center_freq >= ar->freq_low &&
636 channel->center_freq <= ar->freq_high)
637 return ar;
658 * ar directly.
664 return arvif->ar;
669 static void ath12k_pdev_caps_update(struct ath12k *ar)
671 struct ath12k_base *ab = ar->ab;
673 ar->max_tx_power = ab->target_caps.hw_max_tx_power;
677 * we can set ar->min_tx_power to 0 currently until
680 ar->min_tx_power = 0;
682 ar->txpower_limit_2g = ar->max_tx_power;
683 ar->txpower_limit_5g = ar->max_tx_power;
684 ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
687 static int ath12k_mac_txpower_recalc(struct ath12k *ar)
689 struct ath12k_pdev *pdev = ar->pdev;
694 lockdep_assert_held(&ar->conf_mutex);
696 list_for_each_entry(arvif, &ar->arvifs, list) {
710 txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
711 ar->max_tx_power) * 2;
713 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
717 ar->txpower_limit_2g != txpower) {
719 ret = ath12k_wmi_pdev_set_param(ar, param,
720 txpower, ar->pdev->pdev_id);
723 ar->txpower_limit_2g = txpower;
727 ar->txpower_limit_5g != txpower) {
729 ret = ath12k_wmi_pdev_set_param(ar, param,
730 txpower, ar->pdev->pdev_id);
733 ar->txpower_limit_5g = txpower;
739 ath12k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
746 struct ath12k *ar = arvif->ar;
750 lockdep_assert_held(&ar->conf_mutex);
771 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
774 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
777 ath12k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
785 struct ath12k *ar = arvif->ar;
789 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
791 ar->pdev->pdev_id);
793 ath12k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
799 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
802 ath12k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
808 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
811 ath12k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
817 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
820 ath12k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
828 void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
831 struct ath12k_base *ab = ar->ab;
833 lockdep_assert_held(&ar->conf_mutex);
837 ath12k_dp_rx_peer_tid_cleanup(ar, peer);
843 ar->num_peers = 0;
844 ar->num_stations = 0;
847 static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
849 lockdep_assert_held(&ar->conf_mutex);
851 if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
854 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev setup timeout %d\n",
857 if (!wait_for_completion_timeout(&ar->vdev_setup_done,
861 return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
864 static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
868 ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
870 ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
875 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
880 static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
887 lockdep_assert_held(&ar->conf_mutex);
902 arg.pref_tx_streams = ar->num_tx_chains;
903 arg.pref_rx_streams = ar->num_rx_chains;
908 reinit_completion(&ar->vdev_setup_done);
909 reinit_completion(&ar->vdev_delete_done);
911 ret = ath12k_wmi_vdev_start(ar, &arg, false);
913 ath12k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
918 ret = ath12k_mac_vdev_setup_sync(ar);
920 ath12k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
925 ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
927 ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
932 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
937 ret = ath12k_wmi_vdev_stop(ar, vdev_id);
939 ath12k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
944 static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar)
948 lockdep_assert_held(&ar->conf_mutex);
950 reinit_completion(&ar->vdev_setup_done);
952 ret = ath12k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
954 ath12k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
955 ar->monitor_vdev_id, ret);
957 ret = ath12k_mac_vdev_setup_sync(ar);
959 ath12k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
960 ar->monitor_vdev_id, ret);
962 ret = ath12k_wmi_vdev_down(ar, ar->monitor_vdev_id);
964 ath12k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
965 ar->monitor_vdev_id, ret);
967 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i stopped\n",
968 ar->monitor_vdev_id);
972 static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
974 struct ath12k_pdev *pdev = ar->pdev;
980 lockdep_assert_held(&ar->conf_mutex);
982 if (ar->monitor_vdev_created)
985 if (ar->ab->free_vdev_map == 0) {
986 ath12k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
990 bit = __ffs64(ar->ab->free_vdev_map);
992 ar->monitor_vdev_id = bit;
994 arg.if_id = ar->monitor_vdev_id;
1001 arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
1002 arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
1006 arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
1007 arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
1010 ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg);
1012 ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
1013 ar->monitor_vdev_id, ret);
1014 ar->monitor_vdev_id = -1;
1018 nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
1019 ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
1022 ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
1023 ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
1027 ret = ath12k_mac_txpower_recalc(ar);
1031 ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
1032 ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1033 ar->num_created_vdevs++;
1034 ar->monitor_vdev_created = true;
1035 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n",
1036 ar->monitor_vdev_id);
1041 static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
1046 lockdep_assert_held(&ar->conf_mutex);
1048 if (!ar->monitor_vdev_created)
1051 reinit_completion(&ar->vdev_delete_done);
1053 ret = ath12k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1055 ath12k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
1056 ar->monitor_vdev_id, ret);
1060 time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
1063 ath12k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
1065 ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1066 ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
1067 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d deleted\n",
1068 ar->monitor_vdev_id);
1069 ar->num_created_vdevs--;
1070 ar->monitor_vdev_id = -1;
1071 ar->monitor_vdev_created = false;
1087 static int ath12k_mac_monitor_start(struct ath12k *ar)
1092 lockdep_assert_held(&ar->conf_mutex);
1094 if (ar->monitor_started)
1097 ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
1103 ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
1105 ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
1106 ath12k_mac_monitor_vdev_delete(ar);
1110 ar->monitor_started = true;
1111 ar->num_started_vdevs++;
1112 ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
1113 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);
1118 static int ath12k_mac_monitor_stop(struct ath12k *ar)
1122 lockdep_assert_held(&ar->conf_mutex);
1124 if (!ar->monitor_started)
1127 ret = ath12k_mac_monitor_vdev_stop(ar);
1129 ath12k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
1133 ar->monitor_started = false;
1134 ar->num_started_vdevs--;
1135 ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, true);
1136 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
1142 struct ath12k *ar = arvif->ar;
1145 lockdep_assert_held(&ar->conf_mutex);
1147 reinit_completion(&ar->vdev_setup_done);
1149 ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
1151 ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
1156 ret = ath12k_mac_vdev_setup_sync(ar);
1158 ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
1163 WARN_ON(ar->num_started_vdevs == 0);
1165 ar->num_started_vdevs--;
1166 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
1169 if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
1170 clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
1171 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
1180 static int ath12k_mac_config(struct ath12k *ar, u32 changed)
1182 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
1186 mutex_lock(&ar->conf_mutex);
1189 ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
1190 if (ar->monitor_conf_enabled) {
1191 if (ar->monitor_vdev_created)
1193 ret = ath12k_mac_monitor_vdev_create(ar);
1196 ret = ath12k_mac_monitor_start(ar);
1200 if (!ar->monitor_vdev_created)
1202 ret = ath12k_mac_monitor_stop(ar);
1205 ath12k_mac_monitor_vdev_delete(ar);
1210 mutex_unlock(&ar->conf_mutex);
1214 ath12k_mac_monitor_vdev_delete(ar);
1215 mutex_unlock(&ar->conf_mutex);
1222 struct ath12k *ar;
1225 ar = ath12k_ah_to_ar(ah, 0);
1227 ret = ath12k_mac_config(ar, changed);
1229 ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n",
1230 ar->pdev_idx, ret);
1238 struct ath12k *ar = arvif->ar;
1249 ath12k_warn(ar->ab, "no P2P ie found in beacon\n");
1253 ret = ath12k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1255 ath12k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
1294 struct ath12k *ar = arvif->ar;
1295 struct ath12k_base *ab = ar->ab;
1296 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
1347 ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
1361 struct ath12k *ar = arvif->ar;
1364 lockdep_assert_held(&arvif->ar->conf_mutex);
1367 ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
1369 ath12k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
1379 ath12k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
1388 ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1391 ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
1398 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1417 void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb)
1419 ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
1430 struct ath12k *ar = arvif->ar;
1431 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
1450 void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id)
1452 ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
1470 static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
1476 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
1479 lockdep_assert_held(&ar->conf_mutex);
1496 static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
1505 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
1509 lockdep_assert_held(&ar->conf_mutex);
1539 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1545 ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1558 static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
1568 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
1574 lockdep_assert_held(&ar->conf_mutex);
1621 static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
1635 lockdep_assert_held(&ar->conf_mutex);
1716 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
1782 static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
1871 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
1877 static void ath12k_peer_assoc_h_he(struct ath12k *ar,
2058 static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
2090 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
2094 static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
2103 lockdep_assert_held(&ar->conf_mutex);
2107 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2130 ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
2136 ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
2143 ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
2149 ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
2156 ath12k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
2167 static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
2195 static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
2221 static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
2238 ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n",
2256 static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
2311 phymode = ath12k_mac_get_phymode_eht(ar, sta);
2313 phymode = ath12k_mac_get_phymode_he(ar, sta);
2316 phymode = ath12k_mac_get_phymode_vht(ar, sta);
2331 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
2401 static void ath12k_peer_assoc_h_eht(struct ath12k *ar,
2484 static void ath12k_peer_assoc_prepare(struct ath12k *ar,
2490 lockdep_assert_held(&ar->conf_mutex);
2494 reinit_completion(&ar->peer_assoc_done);
2497 ath12k_peer_assoc_h_basic(ar, vif, sta, arg);
2498 ath12k_peer_assoc_h_crypto(ar, vif, sta, arg);
2499 ath12k_peer_assoc_h_rates(ar, vif, sta, arg);
2500 ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
2501 ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
2502 ath12k_peer_assoc_h_he(ar, vif, sta, arg);
2503 ath12k_peer_assoc_h_eht(ar, vif, sta, arg);
2504 ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
2505 ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
2511 static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
2526 return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
2531 static void ath12k_bss_assoc(struct ath12k *ar,
2542 lockdep_assert_held(&ar->conf_mutex);
2544 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2551 ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
2557 ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
2561 ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
2563 ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
2568 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
2569 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
2574 ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
2577 ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
2587 ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2589 ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
2596 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
2600 spin_lock_bh(&ar->ab->base_lock);
2602 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
2606 spin_unlock_bh(&ar->ab->base_lock);
2610 ret = ath12k_wmi_set_peer_param(ar, arvif->bssid,
2615 ath12k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
2618 ret = ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
2621 ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
2625 static void ath12k_bss_disassoc(struct ath12k *ar,
2630 lockdep_assert_held(&ar->conf_mutex);
2632 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2635 ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
2637 ath12k_warn(ar->ab, "failed to down vdev %i: %d\n",
2670 static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
2675 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
2683 lockdep_assert_held(&ar->conf_mutex);
2691 ath12k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
2696 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
2699 ath12k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
2702 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
2705 ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
2711 struct ath12k *ar = arvif->ar;
2712 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
2723 ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
2732 ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
2735 return ath12k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
2739 ath12k_warn(ar->ab,
2749 ret = ath12k_wmi_fils_discovery(ar, arvif->vdev_id, interval,
2757 struct ath12k *ar = arvif->ar;
2759 struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
2766 lockdep_assert_held(&ar->conf_mutex);
2782 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
2785 ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
2793 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
2796 ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
2798 ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
2802 static void ath12k_mac_bss_info_changed(struct ath12k *ar,
2821 lockdep_assert_held(&ar->conf_mutex);
2827 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
2831 ath12k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
2834 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
2842 ret = ath12k_wmi_pdev_set_param(ar, param_id,
2843 param_value, ar->pdev->pdev_id);
2845 ath12k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
2848 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
2854 ath12k_warn(ar->ab, "failed to update bcn template: %d\n",
2862 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
2867 ath12k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
2870 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
2892 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
2896 ath12k_warn(ar->ab,
2902 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
2904 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
2909 ath12k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n",
2921 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
2924 ath12k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
2927 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
2930 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
2944 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
2947 ath12k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
2950 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
2964 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
2967 ath12k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
2970 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
2977 ath12k_bss_assoc(ar, arvif, info);
2979 ath12k_bss_disassoc(ar, arvif);
2983 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev_id %i txpower %d\n",
2987 ath12k_mac_txpower_recalc(ar);
3000 if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
3013 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
3018 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
3021 ath12k_warn(ar->ab,
3026 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
3029 ath12k_warn(ar->ab,
3036 ath12k_recalculate_mgmt_rate(ar, vif, &def);
3040 ath12k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
3042 ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
3046 ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
3051 ret = ath12k_wmi_obss_color_cfg_cmd(ar,
3057 ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
3060 ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar,
3064 ath12k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
3066 ret = ath12k_wmi_obss_color_cfg_cmd(ar,
3072 ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
3080 ar->ab->hw_params->supports_sta_ps) {
3105 struct ath12k *ar;
3109 ar = ath12k_get_ar_by_vif(hw, vif);
3115 if (!ar) {
3123 mutex_lock(&ar->conf_mutex);
3125 ath12k_mac_bss_info_changed(ar, arvif, info, changed);
3127 mutex_unlock(&ar->conf_mutex);
3137 struct ath12k *ar;
3157 for_each_ar(ah, ar, i) {
3159 if (ar->mac.sbands[band].channels)
3160 return ar;
3166 void __ath12k_mac_scan_finish(struct ath12k *ar)
3168 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
3170 lockdep_assert_held(&ar->data_lock);
3172 switch (ar->scan.state) {
3177 if (ar->scan.is_roc && ar->scan.roc_notify)
3181 if (!ar->scan.is_roc) {
3183 .aborted = ((ar->scan.state ==
3185 (ar->scan.state ==
3192 ar->scan.state = ATH12K_SCAN_IDLE;
3193 ar->scan_channel = NULL;
3194 ar->scan.roc_freq = 0;
3195 cancel_delayed_work(&ar->scan.timeout);
3196 complete(&ar->scan.completed);
3201 void ath12k_mac_scan_finish(struct ath12k *ar)
3203 spin_lock_bh(&ar->data_lock);
3204 __ath12k_mac_scan_finish(ar);
3205 spin_unlock_bh(&ar->data_lock);
3208 static int ath12k_scan_stop(struct ath12k *ar)
3216 lockdep_assert_held(&ar->conf_mutex);
3219 arg.pdev_id = ar->pdev->pdev_id;
3221 ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
3223 ath12k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
3227 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
3229 ath12k_warn(ar->ab,
3244 spin_lock_bh(&ar->data_lock);
3245 if (ar->scan.state != ATH12K_SCAN_IDLE)
3246 __ath12k_mac_scan_finish(ar);
3247 spin_unlock_bh(&ar->data_lock);
3252 static void ath12k_scan_abort(struct ath12k *ar)
3256 lockdep_assert_held(&ar->conf_mutex);
3258 spin_lock_bh(&ar->data_lock);
3260 switch (ar->scan.state) {
3268 ath12k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
3269 ar->scan.state);
3272 ar->scan.state = ATH12K_SCAN_ABORTING;
3273 spin_unlock_bh(&ar->data_lock);
3275 ret = ath12k_scan_stop(ar);
3277 ath12k_warn(ar->ab, "failed to abort scan: %d\n", ret);
3279 spin_lock_bh(&ar->data_lock);
3283 spin_unlock_bh(&ar->data_lock);
3288 struct ath12k *ar = container_of(work, struct ath12k,
3291 mutex_lock(&ar->conf_mutex);
3292 ath12k_scan_abort(ar);
3293 mutex_unlock(&ar->conf_mutex);
3296 static int ath12k_start_scan(struct ath12k *ar,
3301 lockdep_assert_held(&ar->conf_mutex);
3303 ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
3307 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
3309 ret = ath12k_scan_stop(ar);
3311 ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
3320 spin_lock_bh(&ar->data_lock);
3321 if (ar->scan.state == ATH12K_SCAN_IDLE) {
3322 spin_unlock_bh(&ar->data_lock);
3325 spin_unlock_bh(&ar->data_lock);
3335 struct ath12k *ar, *prev_ar;
3345 ar = ath12k_ah_to_ar(ah, 0);
3352 ar = ath12k_mac_select_scan_device(hw, vif, hw_req);
3353 if (!ar)
3356 /* If the vif is already assigned to a specific vdev of an ar,
3360 * different ar, delete that vdev and create a new one. We don't
3362 * delete-create vdev's for the same ar, in case the request is
3366 if (WARN_ON(!arvif->ar))
3369 if (ar != arvif->ar && arvif->is_started)
3372 if (ar != arvif->ar) {
3373 /* backup the previously used ar ptr, since the vdev delete
3374 * would assign the arvif->ar to NULL after the call
3376 prev_ar = arvif->ar;
3388 mutex_lock(&ar->conf_mutex);
3389 ret = ath12k_mac_vdev_create(ar, vif);
3390 mutex_unlock(&ar->conf_mutex);
3392 ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret);
3397 mutex_lock(&ar->conf_mutex);
3399 spin_lock_bh(&ar->data_lock);
3400 switch (ar->scan.state) {
3402 reinit_completion(&ar->scan.started);
3403 reinit_completion(&ar->scan.completed);
3404 ar->scan.state = ATH12K_SCAN_STARTING;
3405 ar->scan.is_roc = false;
3406 ar->scan.vdev_id = arvif->vdev_id;
3415 spin_unlock_bh(&ar->data_lock);
3420 ath12k_wmi_start_scan_init(ar, &arg);
3455 ret = ath12k_start_scan(ar, &arg);
3457 ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
3458 spin_lock_bh(&ar->data_lock);
3459 ar->scan.state = ATH12K_SCAN_IDLE;
3460 spin_unlock_bh(&ar->data_lock);
3464 ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
3474 mutex_unlock(&ar->conf_mutex);
3483 struct ath12k *ar;
3488 ar = arvif->ar;
3490 mutex_lock(&ar->conf_mutex);
3491 ath12k_scan_abort(ar);
3492 mutex_unlock(&ar->conf_mutex);
3494 cancel_delayed_work_sync(&ar->scan.timeout);
3503 struct ath12k *ar = arvif->ar;
3513 lockdep_assert_held(&arvif->ar->conf_mutex);
3515 reinit_completion(&ar->install_key_done);
3517 if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
3547 ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
3551 if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
3556 ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg);
3561 if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
3567 return ar->install_key_status ? -EINVAL : 0;
3573 struct ath12k *ar = arvif->ar;
3574 struct ath12k_base *ab = ar->ab;
3581 lockdep_assert_held(&ar->conf_mutex);
3612 static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
3616 struct ath12k_base *ab = ar->ab;
3624 lockdep_assert_held(&ar->conf_mutex);
3729 struct ath12k *ar;
3742 ar = ath12k_get_ar_by_vif(hw, vif);
3743 if (!ar) {
3744 /* ar is expected to be valid when sta ptr is available */
3759 mutex_lock(&ar->conf_mutex);
3760 ret = ath12k_mac_set_key(ar, cmd, vif, sta, key);
3761 mutex_unlock(&ar->conf_mutex);
3766 ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar,
3785 struct ath12k *ar = arvif->ar;
3790 lockdep_assert_held(&ar->conf_mutex);
3802 ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
3807 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
3813 ret = ath12k_wmi_set_peer_param(ar, sta->addr,
3818 ath12k_warn(ar->ab,
3825 static int ath12k_station_assoc(struct ath12k *ar,
3838 lockdep_assert_held(&ar->conf_mutex);
3846 ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
3848 ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
3850 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
3855 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
3856 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
3861 num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
3881 ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
3884 ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
3897 ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta);
3899 ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
3908 static int ath12k_station_disassoc(struct ath12k *ar,
3915 lockdep_assert_held(&ar->conf_mutex);
3926 ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
3935 struct ath12k *ar;
3952 ar = arvif->ar;
3961 spin_lock_bh(&ar->data_lock);
3971 spin_unlock_bh(&ar->data_lock);
3973 mutex_lock(&ar->conf_mutex);
3980 ath12k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
3989 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n",
3991 err = ath12k_wmi_set_peer_param(ar, sta->addr,
3995 ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
3999 err = ath12k_wmi_set_peer_param(ar, sta->addr,
4003 ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
4010 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
4012 err = ath12k_wmi_set_peer_param(ar, sta->addr,
4016 ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n",
4020 err = ath12k_wmi_set_peer_param(ar, sta->addr,
4024 ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
4030 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
4033 err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
4036 ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
4041 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
4044 err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
4047 ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
4053 num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
4075 ath12k_peer_assoc_prepare(ar, arvif->vif, sta,
4078 err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
4080 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
4083 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
4084 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
4089 mutex_unlock(&ar->conf_mutex);
4095 struct ath12k *ar = arvif->ar;
4097 lockdep_assert_held(&ar->conf_mutex);
4102 if (ar->num_stations >= ar->max_num_stations)
4105 ar->num_stations++;
4113 struct ath12k *ar = arvif->ar;
4115 lockdep_assert_held(&ar->conf_mutex);
4120 ar->num_stations--;
4123 static int ath12k_mac_station_add(struct ath12k *ar,
4127 struct ath12k_base *ab = ar->ab;
4133 lockdep_assert_held(&ar->conf_mutex);
4138 ar->max_num_stations);
4152 ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
4163 ret = ath12k_wmi_set_peer_param(ar, sta->addr,
4173 ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
4183 ret = ath12k_start_vdev_delay(ar, arvif);
4193 ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
4200 static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
4222 ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
4237 struct ath12k *ar;
4248 ar = ath12k_get_ar_by_vif(hw, vif);
4249 if (!ar) {
4254 mutex_lock(&ar->conf_mutex);
4262 ret = ath12k_mac_station_add(ar, vif, sta);
4264 ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
4269 ath12k_bss_disassoc(ar, arvif);
4272 ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
4275 ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
4277 ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
4279 ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
4282 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
4286 spin_lock_bh(&ar->ab->base_lock);
4287 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
4289 ath12k_warn(ar->ab, "Found peer entry %pM on vdev %i after it was supposedly removed\n",
4294 ar->num_peers--;
4296 spin_unlock_bh(&ar->ab->base_lock);
4305 ret = ath12k_station_assoc(ar, vif, sta, false);
4307 ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
4310 spin_lock_bh(&ar->data_lock);
4312 arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
4315 spin_unlock_bh(&ar->data_lock);
4318 spin_lock_bh(&ar->ab->base_lock);
4320 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
4324 spin_unlock_bh(&ar->ab->base_lock);
4327 ret = ath12k_wmi_set_peer_param(ar, sta->addr,
4332 ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
4337 spin_lock_bh(&ar->ab->base_lock);
4339 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
4343 spin_unlock_bh(&ar->ab->base_lock);
4349 ret = ath12k_station_disassoc(ar, vif, sta);
4351 ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
4355 mutex_unlock(&ar->conf_mutex);
4365 struct ath12k *ar;
4381 ar = ath12k_ah_to_ar(ah, 0);
4383 mutex_lock(&ar->conf_mutex);
4385 ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
4388 ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
4394 mutex_unlock(&ar->conf_mutex);
4403 struct ath12k *ar;
4409 ar = ath12k_get_ar_by_vif(hw, vif);
4410 if (!ar) {
4415 spin_lock_bh(&ar->ab->base_lock);
4417 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
4419 spin_unlock_bh(&ar->ab->base_lock);
4420 ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
4425 spin_unlock_bh(&ar->ab->base_lock);
4427 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
4432 spin_lock_bh(&ar->data_lock);
4435 bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
4458 ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
4469 spin_unlock_bh(&ar->data_lock);
4477 struct ath12k *ar = arvif->ar;
4508 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
4512 ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret);
4521 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
4525 ath12k_warn(ar->ab, "could not set rx wake param %d\n", ret);
4536 struct ath12k *ar = arvif->ar;
4537 struct ath12k_base *ab = ar->ab;
4540 lockdep_assert_held(&ar->conf_mutex);
4567 ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id,
4571 ar->pdev_idx, ret);
4578 ar->pdev_idx, ret);
4589 struct ath12k *ar;
4594 ar = ath12k_get_ar_by_vif(hw, vif);
4595 if (!ar) {
4606 mutex_lock(&ar->conf_mutex);
4608 mutex_unlock(&ar->conf_mutex);
4614 ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
4618 u32 ar_vht_cap = ar->pdev->cap.vht_cap;
4669 for (i = 0; i < ar->num_rx_chains; i++) {
4682 struct ath12k *ar = arvif->ar;
4685 u32 vht_cap = ar->pdev->cap.vht_cap;
4698 if (sound_dim > (ar->num_tx_chains - 1))
4699 sound_dim = ar->num_tx_chains - 1;
4722 return ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
4726 static void ath12k_set_vht_txbf_cap(struct ath12k *ar, u32 *vht_cap)
4734 if (ar->num_tx_chains < 2) {
4756 if (sound_dim > (ar->num_tx_chains - 1))
4757 sound_dim = ar->num_tx_chains - 1;
4769 ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask,
4777 vht_cap.cap = ar->pdev->cap.vht_cap;
4779 ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);
4789 if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
4794 if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
4809 static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
4818 rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
4819 rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
4822 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4826 band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
4831 (ar->ab->hw_params->single_pdev_only ||
4832 !ar->supports_6ghz)) {
4833 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4837 band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
4839 band->vht_cap = ath12k_create_vht_cap(ar, rate_cap_tx_chainmask,
4844 static int ath12k_check_chain_mask(struct ath12k *ar, u32 ant, bool is_tx_ant)
5142 static void ath12k_mac_copy_eht_cap(struct ath12k *ar,
5152 if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)))
5195 static int ath12k_mac_copy_sband_iftype_data(struct ath12k *ar,
5218 ath12k_mac_copy_he_cap(band_cap, i, ar->num_tx_chains, he_cap);
5223 ath12k_mac_copy_eht_cap(ar, band_cap, &he_cap->he_cap_elem, i,
5231 static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
5240 count = ath12k_mac_copy_sband_iftype_data(ar, cap,
5241 ar->mac.iftype[band],
5243 sband = &ar->mac.sbands[band];
5244 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band],
5250 count = ath12k_mac_copy_sband_iftype_data(ar, cap,
5251 ar->mac.iftype[band],
5253 sband = &ar->mac.sbands[band];
5254 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band],
5259 ar->supports_6ghz) {
5261 count = ath12k_mac_copy_sband_iftype_data(ar, cap,
5262 ar->mac.iftype[band],
5264 sband = &ar->mac.sbands[band];
5265 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band],
5270 static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
5274 lockdep_assert_held(&ar->conf_mutex);
5276 if (ath12k_check_chain_mask(ar, tx_ant, true))
5279 if (ath12k_check_chain_mask(ar, rx_ant, false))
5286 tx_ant = min_t(u32, tx_ant, ar->pdev->cap.tx_chain_mask);
5287 rx_ant = min_t(u32, rx_ant, ar->pdev->cap.rx_chain_mask);
5289 ar->cfg_tx_chainmask = tx_ant;
5290 ar->cfg_rx_chainmask = rx_ant;
5292 if (ar->state != ATH12K_STATE_ON &&
5293 ar->state != ATH12K_STATE_RESTARTED)
5296 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
5297 tx_ant, ar->pdev->pdev_id);
5299 ath12k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
5304 ar->num_tx_chains = hweight32(tx_ant);
5306 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
5307 rx_ant, ar->pdev->pdev_id);
5309 ath12k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
5314 ar->num_rx_chains = hweight32(rx_ant);
5317 ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
5318 ath12k_mac_setup_sband_iftype_data(ar, &ar->pdev->cap);
5323 static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
5327 ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
5329 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
5335 wake_up(&ar->txmgmt_empty_waitq);
5342 struct ath12k *ar = ctx;
5343 struct ath12k_base *ab = ar->ab;
5345 spin_lock_bh(&ar->txmgmt_idr_lock);
5346 idr_remove(&ar->txmgmt_idr, buf_id);
5347 spin_unlock_bh(&ar->txmgmt_idr_lock);
5354 ath12k_mgmt_over_wmi_tx_drop(ar, skb);
5364 struct ath12k *ar = skb_cb->ar;
5365 struct ath12k_base *ab = ar->ab;
5368 spin_lock_bh(&ar->txmgmt_idr_lock);
5369 idr_remove(&ar->txmgmt_idr, buf_id);
5370 spin_unlock_bh(&ar->txmgmt_idr_lock);
5378 static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
5381 struct ath12k_base *ab = ar->ab;
5388 ATH12K_SKB_CB(skb)->ar = ar;
5389 spin_lock_bh(&ar->txmgmt_idr_lock);
5390 buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
5392 spin_unlock_bh(&ar->txmgmt_idr_lock);
5415 ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
5417 ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
5427 spin_lock_bh(&ar->txmgmt_idr_lock);
5428 idr_remove(&ar->txmgmt_idr, buf_id);
5429 spin_unlock_bh(&ar->txmgmt_idr_lock);
5434 static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
5438 while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
5439 ath12k_mgmt_over_wmi_tx_drop(ar, skb);
5444 struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
5450 while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
5453 ath12k_warn(ar->ab, "no vif found for mgmt frame\n");
5454 ath12k_mgmt_over_wmi_tx_drop(ar, skb);
5460 if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
5461 ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
5463 ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
5465 ath12k_mgmt_over_wmi_tx_drop(ar, skb);
5468 ath12k_warn(ar->ab,
5472 ath12k_mgmt_over_wmi_tx_drop(ar, skb);
5477 static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
5480 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
5482 if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
5491 atomic_read(&ar->num_pending_mgmt_tx) > ATH12K_PRB_RSP_DROP_THRESHOLD) {
5492 ath12k_warn(ar->ab,
5498 ath12k_warn(ar->ab, "mgmt tx queue is full\n");
5503 atomic_inc(&ar->num_pending_mgmt_tx);
5504 ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
5509 static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
5519 spin_lock_bh(&ar->data_lock);
5527 spin_unlock_bh(&ar->data_lock);
5538 struct ath12k *ar = arvif->ar;
5558 ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
5560 ath12k_warn(ar->ab, "failed to queue management frame %d\n",
5569 ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
5571 ret = ath12k_dp_tx(ar, arvif, skb);
5573 ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
5578 void ath12k_mac_drain_tx(struct ath12k *ar)
5583 cancel_work_sync(&ar->wmi_mgmt_tx_work);
5584 ath12k_mgmt_over_wmi_tx_purge(ar);
5587 static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
5615 static int ath12k_mac_start(struct ath12k *ar)
5617 struct ath12k_base *ab = ar->ab;
5618 struct ath12k_pdev *pdev = ar->pdev;
5621 mutex_lock(&ar->conf_mutex);
5623 switch (ar->state) {
5625 ar->state = ATH12K_STATE_ON;
5628 ar->state = ATH12K_STATE_RESTARTED;
5639 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
5647 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
5654 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
5662 ret = ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
5669 ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
5676 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
5684 __ath12k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
5688 ath12k_reg_update_chan_list(ar);
5690 ar->num_started_vdevs = 0;
5691 ar->num_created_vdevs = 0;
5692 ar->num_peers = 0;
5693 ar->allocated_vdev_map = 0;
5698 ret = ath12k_mac_config_mon_status_default(ar, true);
5710 ath12k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
5714 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
5722 mutex_unlock(&ar->conf_mutex);
5724 rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
5725 &ab->pdevs[ar->pdev_idx]);
5729 ar->state = ATH12K_STATE_OFF;
5730 mutex_unlock(&ar->conf_mutex);
5737 struct ath12k *ar;
5740 for_each_ar(ah, ar, i)
5741 ath12k_mac_drain_tx(ar);
5747 struct ath12k *ar;
5752 for_each_ar(ah, ar, i) {
5753 ret = ath12k_mac_start(ar);
5755 ath12k_err(ar->ab, "fail to start mac operations in pdev idx %d ret %d\n",
5756 ar->pdev_idx, ret);
5764 ar = ath12k_ah_to_ar(ah, i - 1);
5765 ath12k_mac_stop(ar);
5770 int ath12k_mac_rfkill_config(struct ath12k *ar)
5772 struct ath12k_base *ab = ar->ab;
5791 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
5792 param, ar->pdev->pdev_id);
5803 int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
5813 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac %d rfkill enable %d",
5814 ar->pdev_idx, param);
5816 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE,
5817 param, ar->pdev->pdev_id);
5819 ath12k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n",
5827 static void ath12k_mac_stop(struct ath12k *ar)
5832 mutex_lock(&ar->conf_mutex);
5833 ret = ath12k_mac_config_mon_status_default(ar, false);
5835 ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
5838 clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
5839 ar->state = ATH12K_STATE_OFF;
5840 mutex_unlock(&ar->conf_mutex);
5842 cancel_delayed_work_sync(&ar->scan.timeout);
5843 cancel_work_sync(&ar->regd_update_work);
5844 cancel_work_sync(&ar->ab->rfkill_work);
5846 spin_lock_bh(&ar->data_lock);
5847 list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
5851 spin_unlock_bh(&ar->data_lock);
5853 rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
5857 atomic_set(&ar->num_pending_mgmt_tx, 0);
5863 struct ath12k *ar;
5868 for_each_ar(ah, ar, i)
5869 ath12k_mac_stop(ar);
5875 struct ath12k_base *ab = arvif->ar->ab;
5898 struct ath12k *ar = arvif->ar;
5899 struct ath12k_pdev *pdev = ar->pdev;
5907 arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
5908 arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
5911 arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
5912 arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
5915 ar->supports_6ghz) {
5916 arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
5917 arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
5957 static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
5961 struct ath12k_base *ab = ar->ab;
5965 param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
5966 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
5978 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
5991 struct ath12k *ar = arvif->ar;
5992 struct ath12k_base *ab = ar->ab;
6009 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
6025 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
6042 static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif)
6044 struct ath12k_hw *ah = ar->ah;
6045 struct ath12k_base *ab = ar->ab;
6055 lockdep_assert_held(&ar->conf_mutex);
6057 arvif->ar = ar;
6083 ar->monitor_vdev_id = vdev_id;
6094 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n",
6104 ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
6111 ar->num_created_vdevs++;
6115 ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
6118 spin_lock_bh(&ar->data_lock);
6119 list_add(&arvif->list, &ar->arvifs);
6120 spin_unlock_bh(&ar->data_lock);
6124 nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
6125 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
6129 arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
6138 ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
6147 ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
6155 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6158 ath12k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
6165 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6168 ath12k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
6175 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6178 ath12k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
6183 ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
6185 ath12k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
6195 ret = ath12k_mac_txpower_recalc(ar);
6201 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
6204 ath12k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
6208 ath12k_dp_vdev_tx_attach(ar, arvif);
6210 if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
6211 ath12k_mac_monitor_vdev_create(ar);
6213 arvif->ar = ar;
6218 reinit_completion(&ar->peer_delete_done);
6220 ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr,
6223 ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
6228 ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
6233 ar->num_peers--;
6237 ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
6238 ar->num_created_vdevs--;
6240 arvif->ar = NULL;
6241 ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
6244 spin_lock_bh(&ar->data_lock);
6246 spin_unlock_bh(&ar->data_lock);
6249 arvif->ar = NULL;
6253 static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ieee80211_vif *vif)
6257 struct ath12k_base *ab = ar->ab;
6261 lockdep_assert_held(&ar->conf_mutex);
6276 ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
6281 ret = ath12k_mac_set_key(ar, cache->key_conf.cmd, vif, NULL,
6296 struct ath12k *ar, *prev_ar;
6301 ar = ah->radio;
6303 ar = ath12k_get_ar_by_ctx(hw, ctx);
6307 if (!ar)
6310 if (arvif->ar) {
6313 arvif->ar = NULL;
6318 return arvif->ar;
6324 if (ar != arvif->ar) {
6328 /* backup the previously used ar ptr since arvif->ar would
6331 prev_ar = arvif->ar;
6342 ab = ar->ab;
6344 mutex_lock(&ar->conf_mutex);
6350 ar->num_peers > (ar->max_num_peers - 1)) {
6355 if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
6361 ret = ath12k_mac_vdev_create(ar, vif);
6372 ath12k_mac_vif_cache_flush(ar, vif);
6374 mutex_unlock(&ar->conf_mutex);
6375 return arvif->ar;
6441 static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif)
6444 struct ath12k_base *ab = ar->ab;
6448 lockdep_assert_held(&ar->conf_mutex);
6449 reinit_completion(&ar->vdev_delete_done);
6451 ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
6458 time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
6466 ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
6467 ar->num_created_vdevs--;
6470 ar->monitor_vdev_id = -1;
6471 ar->monitor_vdev_created = false;
6472 } else if (ar->monitor_vdev_created && !ar->monitor_started) {
6473 ret = ath12k_mac_monitor_vdev_delete(ar);
6480 spin_lock_bh(&ar->data_lock);
6482 spin_unlock_bh(&ar->data_lock);
6484 ath12k_peer_cleanup(ar, arvif->vdev_id);
6487 idr_for_each(&ar->txmgmt_idr,
6494 ath12k_mac_txpower_recalc(ar);
6495 clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
6499 arvif->ar = NULL;
6509 struct ath12k *ar;
6520 ar = arvif->ar;
6521 ab = ar->ab;
6525 mutex_lock(&ar->conf_mutex);
6531 ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
6537 ath12k_mac_vdev_delete(ar, vif);
6539 mutex_unlock(&ar->conf_mutex);
6552 static void ath12k_mac_configure_filter(struct ath12k *ar,
6558 lockdep_assert_held(&ar->conf_mutex);
6560 ar->filter_flags = total_flags;
6563 reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
6565 ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
6568 set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
6570 clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
6572 ath12k_warn(ar->ab,
6576 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
6587 struct ath12k *ar;
6589 ar = ath12k_ah_to_ar(ah, 0);
6591 mutex_lock(&ar->conf_mutex);
6594 ath12k_mac_configure_filter(ar, *total_flags);
6596 mutex_unlock(&ar->conf_mutex);
6603 struct ath12k *ar;
6606 for_each_ar(ah, ar, i) {
6607 mutex_lock(&ar->conf_mutex);
6608 antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask);
6609 antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask);
6610 mutex_unlock(&ar->conf_mutex);
6622 struct ath12k *ar;
6626 for_each_ar(ah, ar, i) {
6627 mutex_lock(&ar->conf_mutex);
6628 ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
6629 mutex_unlock(&ar->conf_mutex);
6640 struct ath12k *ar = arvif->ar;
6643 lockdep_assert_held(&ar->conf_mutex);
6647 ret = ath12k_dp_rx_ampdu_start(ar, params);
6650 ret = ath12k_dp_rx_ampdu_stop(ar, params);
6672 struct ath12k *ar;
6676 ar = ath12k_get_ar_by_vif(hw, vif);
6677 if (!ar)
6680 ar = ath12k_ah_to_ar(ah, 0);
6682 mutex_lock(&ar->conf_mutex);
6684 mutex_unlock(&ar->conf_mutex);
6687 ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
6688 ar->pdev_idx, params->action, ret);
6696 struct ath12k *ar;
6699 ar = ath12k_get_ar_by_ctx(hw, ctx);
6700 if (!ar)
6703 ab = ar->ab;
6709 mutex_lock(&ar->conf_mutex);
6711 spin_lock_bh(&ar->data_lock);
6715 ar->rx_channel = ctx->def.chan;
6716 spin_unlock_bh(&ar->data_lock);
6718 mutex_unlock(&ar->conf_mutex);
6726 struct ath12k *ar;
6729 ar = ath12k_get_ar_by_ctx(hw, ctx);
6730 if (!ar)
6733 ab = ar->ab;
6739 mutex_lock(&ar->conf_mutex);
6741 spin_lock_bh(&ar->data_lock);
6745 ar->rx_channel = NULL;
6746 spin_unlock_bh(&ar->data_lock);
6748 mutex_unlock(&ar->conf_mutex);
6752 ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
6759 int n = ar->mac.sbands[band].n_iftype_data;
6766 data = ar->mac.iftype[band];
6806 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
6819 struct ath12k *ar = arvif->ar;
6820 struct ath12k_base *ab = ar->ab;
6826 lockdep_assert_held(&ar->conf_mutex);
6828 reinit_completion(&ar->vdev_setup_done);
6840 arg.mode = ath12k_mac_check_down_grade_phy_mode(ar, arg.mode,
6848 arg.pref_tx_streams = ar->num_tx_chains;
6849 arg.pref_rx_streams = ar->num_rx_chains;
6869 arg.regdomain = ar->ab->dfs_region;
6874 ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
6876 ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
6890 ret = ath12k_wmi_vdev_start(ar, &arg, restart);
6892 ath12k_warn(ar->ab, "failed to %s WMI vdev %i\n",
6897 ret = ath12k_mac_vdev_setup_sync(ar);
6904 ar->num_started_vdevs++;
6918 set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
6949 struct ath12k *ar;
6959 if (arvif->ar != arg->ar)
6976 if (arvif->ar != arg->ar)
7017 static int ath12k_mac_update_peer_puncturing_width(struct ath12k *ar,
7032 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
7036 ret = ath12k_wmi_set_peer_param(ar, arvif->bssid,
7044 ath12k_mac_update_vif_chan(struct ath12k *ar,
7048 struct ath12k_base *ab = ar->ab;
7054 lockdep_assert_held(&ar->conf_mutex);
7076 ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
7088 /* TODO: Update ar->rx_channel */
7128 ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7136 ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif,
7139 ath12k_warn(ar->ab,
7148 if (!monitor_vif && ar->monitor_vdev_created) {
7149 if (!ath12k_mac_monitor_stop(ar))
7150 ath12k_mac_monitor_start(ar);
7155 ath12k_mac_update_active_vif_chan(struct ath12k *ar,
7158 struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx, .ar = ar };
7159 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
7161 lockdep_assert_held(&ar->conf_mutex);
7179 ath12k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7188 struct ath12k *ar;
7191 ar = ath12k_get_ar_by_ctx(hw, ctx);
7192 if (!ar)
7195 ab = ar->ab;
7197 mutex_lock(&ar->conf_mutex);
7212 ath12k_mac_update_active_vif_chan(ar, ctx);
7217 mutex_unlock(&ar->conf_mutex);
7220 static int ath12k_start_vdev_delay(struct ath12k *ar,
7223 struct ath12k_base *ab = ar->ab;
7239 ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id);
7258 struct ath12k *ar;
7265 * create now since we have a channel ctx now to assign to a specific ar/fw
7267 ar = ath12k_mac_assign_vif_to_vdev(hw, vif, ctx);
7268 if (!ar) {
7273 ab = ar->ab;
7275 mutex_lock(&ar->conf_mutex);
7303 param.peer_addr = ar->mac_addr;
7305 ret = ath12k_peer_create(ar, arvif, NULL, &param);
7314 ret = ath12k_mac_monitor_start(ar);
7329 if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
7330 ath12k_mac_monitor_start(ar);
7337 mutex_unlock(&ar->conf_mutex);
7348 struct ath12k *ar;
7353 /* The vif is expected to be attached to an ar's VDEV.
7358 * that moves the vif to a new ar
7363 ar = arvif->ar;
7364 ab = ar->ab;
7366 mutex_lock(&ar->conf_mutex);
7376 ath12k_peer_find_by_addr(ab, ar->mac_addr))
7377 ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
7380 ret = ath12k_mac_monitor_stop(ar);
7382 mutex_unlock(&ar->conf_mutex);
7390 ath12k_bss_disassoc(ar, arvif);
7400 ath12k_wmi_vdev_down(ar, arvif->vdev_id);
7403 ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
7404 ath12k_mac_monitor_stop(ar);
7406 mutex_unlock(&ar->conf_mutex);
7415 struct ath12k *ar;
7417 ar = ath12k_get_ar_by_ctx(hw, vifs->old_ctx);
7418 if (!ar)
7421 mutex_lock(&ar->conf_mutex);
7424 if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx)) {
7425 mutex_unlock(&ar->conf_mutex);
7429 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
7432 ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
7434 mutex_unlock(&ar->conf_mutex);
7440 ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
7445 mutex_lock(&ar->conf_mutex);
7446 list_for_each_entry(arvif, &ar->arvifs, list) {
7447 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
7450 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
7453 ath12k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
7458 mutex_unlock(&ar->conf_mutex);
7468 struct ath12k *ar;
7476 for_each_ar(ah, ar, i) {
7477 ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
7479 ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d",
7480 ar->pdev->pdev_id);
7503 static int ath12k_mac_flush(struct ath12k *ar)
7508 time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
7509 (atomic_read(&ar->dp.num_tx_pending) == 0),
7512 ath12k_warn(ar->ab,
7514 atomic_read(&ar->dp.num_tx_pending));
7518 time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
7519 (atomic_read(&ar->num_pending_mgmt_tx) == 0),
7522 ath12k_warn(ar->ab,
7524 atomic_read(&ar->num_pending_mgmt_tx));
7531 int ath12k_mac_wait_tx_complete(struct ath12k *ar)
7533 ath12k_mac_drain_tx(ar);
7534 return ath12k_mac_flush(ar);
7541 struct ath12k *ar;
7549 for_each_ar(ah, ar, i)
7550 ath12k_mac_flush(ar);
7554 ar = ath12k_get_ar_by_vif(hw, vif);
7556 if (!ar)
7559 ath12k_mac_flush(ar);
7563 ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
7577 ath12k_mac_has_single_legacy_rate(struct ath12k *ar,
7585 if (ath12k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
7588 if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
7595 ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar,
7600 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
7645 ath12k_mac_get_single_legacy_rate(struct ath12k *ar,
7680 struct ath12k *ar = arvif->ar;
7684 lockdep_assert_held(&ar->conf_mutex);
7686 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
7690 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
7693 ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
7699 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
7702 ath12k_warn(ar->ab, "failed to set nss param %d: %d\n",
7708 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
7711 ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n",
7717 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
7720 ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
7729 ath12k_mac_vht_mcs_range_present(struct ath12k *ar,
7758 struct ath12k *ar = arvif->ar;
7763 spin_lock_bh(&ar->data_lock);
7765 spin_unlock_bh(&ar->data_lock);
7767 ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk);
7775 struct ath12k *ar = arvif->ar;
7781 ret = ath12k_wmi_set_peer_param(ar, sta->addr,
7786 ath12k_warn(ar->ab,
7798 struct ath12k *ar = arvif->ar;
7816 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
7832 if (ath12k_mac_has_single_legacy_rate(ar, band, mask)) {
7833 ret = ath12k_mac_get_single_legacy_rate(ar, band, mask, &rate,
7836 ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
7843 } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
7849 nss = min_t(u32, ar->num_tx_chains,
7873 num_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
7876 if (!ath12k_mac_vht_mcs_range_present(ar, band, mask) &&
7881 ath12k_warn(ar->ab,
7891 mutex_lock(&ar->conf_mutex);
7898 mutex_unlock(&ar->conf_mutex);
7901 mutex_lock(&ar->conf_mutex);
7905 ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
7909 mutex_unlock(&ar->conf_mutex);
7920 struct ath12k *ar;
7928 ar = ath12k_ah_to_ar(ah, 0);
7929 ab = ar->ab;
7931 mutex_lock(&ar->conf_mutex);
7933 if (ar->state == ATH12K_STATE_RESTARTED) {
7934 ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
7935 ar->pdev->pdev_id);
7936 ar->state = ATH12K_STATE_ON;
7955 list_for_each_entry(arvif, &ar->arvifs, list) {
7976 mutex_unlock(&ar->conf_mutex);
7980 ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
7986 lockdep_assert_held(&ar->conf_mutex);
7988 if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
7989 ar->rx_channel != channel)
7992 if (ar->scan.state != ATH12K_SCAN_IDLE) {
7993 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
7998 reinit_completion(&ar->bss_survey_done);
8000 ret = ath12k_wmi_pdev_bss_chan_info_request(ar, type);
8002 ath12k_warn(ar->ab, "failed to send pdev bss chan info request\n");
8006 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
8008 ath12k_warn(ar->ab, "bss channel survey timed out\n");
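Lines 7980-8008 show the handshake used to refresh the BSS channel survey: re-arm the completion, fire the WMI pdev request, then sleep on the completion with a 3*HZ timeout and warn if the firmware never responds. A minimal fragment of that pattern (the guards on service capability, rx_channel and scan state at 7988-7993 are elided here):

/* Illustrative fragment of the request-and-wait pattern; not verbatim source. */
reinit_completion(&ar->bss_survey_done);

ret = ath12k_wmi_pdev_bss_chan_info_request(ar, type);
if (ret) {
	ath12k_warn(ar->ab, "failed to send pdev bss chan info request\n");
	return;
}

ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
if (ret == 0)
	ath12k_warn(ar->ab, "bss channel survey timed out\n");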
8014 struct ath12k *ar;
8033 ar = ath12k_mac_get_ar_by_chan(hw, &sband->channels[idx]);
8034 if (!ar) {
8042 ar_survey = &ar->survey[idx];
8044 mutex_lock(&ar->conf_mutex);
8046 ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
8048 spin_lock_bh(&ar->data_lock);
8050 spin_unlock_bh(&ar->data_lock);
8054 if (ar->rx_channel == survey->channel)
8057 mutex_unlock(&ar->conf_mutex);
8099 struct ath12k *ar;
8101 ar = ath12k_ah_to_ar(ah, 0);
8103 mutex_lock(&ar->conf_mutex);
8105 spin_lock_bh(&ar->data_lock);
8106 ar->scan.roc_notify = false;
8107 spin_unlock_bh(&ar->data_lock);
8109 ath12k_scan_abort(ar);
8111 mutex_unlock(&ar->conf_mutex);
8113 cancel_delayed_work_sync(&ar->scan.timeout);
8127 struct ath12k *ar;
8131 ar = ath12k_ah_to_ar(ah, 0);
8133 mutex_lock(&ar->conf_mutex);
8134 spin_lock_bh(&ar->data_lock);
8136 switch (ar->scan.state) {
8138 reinit_completion(&ar->scan.started);
8139 reinit_completion(&ar->scan.completed);
8140 reinit_completion(&ar->scan.on_channel);
8141 ar->scan.state = ATH12K_SCAN_STARTING;
8142 ar->scan.is_roc = true;
8143 ar->scan.vdev_id = arvif->vdev_id;
8144 ar->scan.roc_freq = chan->center_freq;
8145 ar->scan.roc_notify = true;
8155 spin_unlock_bh(&ar->data_lock);
8163 ath12k_wmi_start_scan_init(ar, &arg);
8181 ret = ath12k_start_scan(ar, &arg);
8183 ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
8185 spin_lock_bh(&ar->data_lock);
8186 ar->scan.state = ATH12K_SCAN_IDLE;
8187 spin_unlock_bh(&ar->data_lock);
8191 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
8193 ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n");
8194 ret = ath12k_scan_stop(ar);
8196 ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
8201 ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
8209 mutex_unlock(&ar->conf_mutex);
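Lines 8127-8209 outline remain-on-channel start: under data_lock the scan state machine moves from IDLE to STARTING and the roc_* fields are primed, a scan is launched over WMI, and the caller waits up to 3*HZ for the on-channel completion before arming the scan timeout work. A condensed, illustrative sketch; the scan-argument population, the non-IDLE state handling and the exact delayed-work timeout are omitted or assumed:

/* Illustrative sketch only; not verbatim source. */
spin_lock_bh(&ar->data_lock);
if (ar->scan.state == ATH12K_SCAN_IDLE) {
	reinit_completion(&ar->scan.started);
	reinit_completion(&ar->scan.completed);
	reinit_completion(&ar->scan.on_channel);
	ar->scan.state = ATH12K_SCAN_STARTING;
	ar->scan.is_roc = true;
	ar->scan.vdev_id = arvif->vdev_id;
	ar->scan.roc_freq = chan->center_freq;
	ar->scan.roc_notify = true;
}
spin_unlock_bh(&ar->data_lock);

ath12k_wmi_start_scan_init(ar, &arg);
/* ... fill the remain-on-channel specifics into arg ... */

ret = ath12k_start_scan(ar, &arg);
if (ret) {
	ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
	spin_lock_bh(&ar->data_lock);
	ar->scan.state = ATH12K_SCAN_IDLE;
	spin_unlock_bh(&ar->data_lock);
	goto exit;		/* exit label drops conf_mutex (assumed) */
}

ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
if (ret == 0) {
	ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n");
	ret = ath12k_scan_stop(ar);
	if (ret)
		ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
	goto exit;
}

ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
			     msecs_to_jiffies(duration));	/* timeout value assumed */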
8252 static void ath12k_mac_update_ch_list(struct ath12k *ar,
8267 ar->freq_low = freq_low;
8268 ar->freq_high = freq_high;
8271 static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
8273 struct ath12k_pdev *pdev = ar->pdev;
8282 ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band);
8287 static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
8293 struct ath12k_hw *ah = ar->ah;
8302 reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
8311 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
8319 if (ar->ab->hw_params->single_pdev_only) {
8320 phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
8321 reg_cap = &ar->ab->hal_reg_cap[phy_id];
8323 ath12k_mac_update_ch_list(ar, band,
8333 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8337 ar->supports_6ghz = true;
8338 band = &ar->mac.sbands[NL80211_BAND_6GHZ];
8345 ath12k_mac_update_ch_list(ar, band,
8356 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8357 kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
8361 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
8369 if (ar->ab->hw_params->single_pdev_only) {
8370 phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
8371 reg_cap = &ar->ab->hal_reg_cap[phy_id];
8374 ath12k_mac_update_ch_list(ar, band,
8385 struct ath12k *ar;
8389 for_each_ar(ah, ar, i)
8390 interface_modes &= ar->ab->hw_params->interface_modes;
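Lines 8385-8390 compute the interface modes advertised for the hw as the bitwise intersection of what every underlying radio supports, so a mode is exposed only when all radios behind the ath12k_hw can handle it. A tiny illustrative sketch (the u16/U16_MAX initialisation is an assumption):

/* Illustrative sketch: intersect supported interface modes across radios. */
u16 interface_modes = U16_MAX;	/* start from "all modes" (assumed init) */
struct ath12k *ar;
int i;

for_each_ar(ah, ar, i)
	interface_modes &= ar->ab->hw_params->interface_modes;
/* a mode survives only if every radio in this ath12k_hw supports it */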
8398 struct ath12k *ar;
8404 for_each_ar(ah, ar, i) {
8405 interface_modes = ar->ab->hw_params->interface_modes;
8526 static void ath12k_mac_cleanup_unregister(struct ath12k *ar)
8528 idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
8529 idr_destroy(&ar->txmgmt_idr);
8531 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8532 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8533 kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
8540 struct ath12k *ar;
8543 for_each_ar(ah, ar, i)
8544 cancel_work_sync(&ar->regd_update_work);
8548 for_each_ar(ah, ar, i)
8549 ath12k_mac_cleanup_unregister(ar);
8557 static int ath12k_mac_setup_register(struct ath12k *ar,
8561 struct ath12k_pdev_cap *cap = &ar->pdev->cap;
8564 init_waitqueue_head(&ar->txmgmt_empty_waitq);
8565 idr_init(&ar->txmgmt_idr);
8566 spin_lock_init(&ar->txmgmt_idr_lock);
8568 ath12k_pdev_caps_update(ar);
8570 ret = ath12k_mac_setup_channels_rates(ar,
8576 ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap);
8577 ath12k_mac_setup_sband_iftype_data(ar, cap);
8579 ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab);
8580 ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab);
8589 struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
8590 struct ath12k_base *ab = ar->ab;
8611 for_each_ar(ah, ar, i) {
8614 pdev = ar->pdev;
8615 if (ar->ab->pdevs_macaddr_valid) {
8616 ether_addr_copy(ar->mac_addr, pdev->mac_addr);
8618 ether_addr_copy(ar->mac_addr, ar->ab->mac_addr);
8619 ar->mac_addr[4] += ar->pdev_idx;
8622 ret = ath12k_mac_setup_register(ar, &ht_cap_info, hw->wiphy->bands);
8627 wiphy->max_ap_assoc_sta += ar->max_num_stations;
8638 if (ar->supports_6ghz)
8641 if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
8644 if (!ar->ab->hw_params->supports_monitor)
8648 mac_addr = ar->mac_addr;
8773 for_each_ar(ah, ar, i) {
8775 ret = ath12k_regd_update(ar, true);
8777 ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
8782 ath12k_debugfs_register(ar);
8798 ar = ath12k_ah_to_ar(ah, j);
8799 ath12k_mac_cleanup_unregister(ar);
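Lines 8611-8619 above show how each radio is assigned its MAC address at registration time: per-pdev addresses from the device are used when valid, otherwise the base address is copied and byte 4 is offset by the pdev index so each radio still ends up unique. A hedged, made-up example of that derivation:

/* Illustrative example only; the addresses are invented. The "offset byte 4
 * by pdev_idx" rule is the part taken from the lines above.
 */
u8 base[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
u8 mac[ETH_ALEN];

ether_addr_copy(mac, base);
mac[4] += 2;		/* pdev_idx == 2  ->  00:03:7f:12:36:56 */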
8807 static void ath12k_mac_setup(struct ath12k *ar)
8809 struct ath12k_base *ab = ar->ab;
8810 struct ath12k_pdev *pdev = ar->pdev;
8811 u8 pdev_idx = ar->pdev_idx;
8813 ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx);
8815 ar->wmi = &ab->wmi_ab.wmi[pdev_idx];
8821 ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
8822 ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
8823 ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
8824 ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
8826 spin_lock_init(&ar->data_lock);
8827 INIT_LIST_HEAD(&ar->arvifs);
8828 INIT_LIST_HEAD(&ar->ppdu_stats_info);
8829 mutex_init(&ar->conf_mutex);
8830 init_completion(&ar->vdev_setup_done);
8831 init_completion(&ar->vdev_delete_done);
8832 init_completion(&ar->peer_assoc_done);
8833 init_completion(&ar->peer_delete_done);
8834 init_completion(&ar->install_key_done);
8835 init_completion(&ar->bss_survey_done);
8836 init_completion(&ar->scan.started);
8837 init_completion(&ar->scan.completed);
8838 init_completion(&ar->scan.on_channel);
8840 INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
8841 INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
8843 INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
8844 skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
8845 clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
8907 struct ath12k *ar;
8927 ar = ath12k_ah_to_ar(ah, i);
8928 ar->ah = ah;
8929 ar->ab = ab;
8930 ar->hw_link_id = i;
8931 ar->pdev = pdev;
8932 ar->pdev_idx = pdev_idx;
8933 pdev->ar = ar;
8935 ath12k_mac_setup(ar);
8948 if (!pdev->ar)
8951 pdev->ar = NULL;