Lines matching refs:dev: every reference to the dev pointer in the mt76x02 MAC code of the Linux mt76 driver (file line number, then the matched source line).

11 void mt76x02_mac_reset_counters(struct mt76x02_dev *dev)
15 mt76_rr(dev, MT_RX_STAT_0);
16 mt76_rr(dev, MT_RX_STAT_1);
17 mt76_rr(dev, MT_RX_STAT_2);
18 mt76_rr(dev, MT_TX_STA_0);
19 mt76_rr(dev, MT_TX_STA_1);
20 mt76_rr(dev, MT_TX_STA_2);
23 mt76_rr(dev, MT_TX_AGG_CNT(i));
26 mt76_rr(dev, MT_TX_STAT_FIFO);
28 memset(dev->mphy.aggr_stats, 0, sizeof(dev->mphy.aggr_stats));
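
The block above (mt76x02_mac_reset_counters) reads the RX/TX statistics registers and throws the values away: these hardware counters clear on read, so a dummy mt76_rr() is how they get zeroed, while the software aggregation stats are cleared with memset. A minimal user-space sketch of the clear-on-read idiom, with the register file mocked as an array (mock_rr and the register layout are illustrative assumptions, not the real MMIO map):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[8] = { 5, 7, 9 };   /* mocked clear-on-read counters */

/* mocked mt76_rr(): returns the counter and clears it, like the hw does */
static uint32_t mock_rr(unsigned int idx)
{
	uint32_t val = regs[idx];

	regs[idx] = 0;
	return val;
}

static void reset_counters(void)
{
	unsigned int i;

	/* discard the values; the read itself resets the hardware counter */
	for (i = 0; i < 3; i++)
		(void)mock_rr(i);
}

int main(void)
{
	reset_counters();
	printf("counter 0 after reset: %u\n", mock_rr(0));   /* prints 0 */
	return 0;
}
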
58 int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
69 val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
72 mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
74 mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
81 void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
90 iv = mt76_rr(dev, MT_WCID_IV(idx));
91 eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
106 int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
118 mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
119 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
123 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
143 mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
148 void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
157 mt76_wr(dev, MT_WCID_ATTR(idx), attr);
165 mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
169 void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
171 u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
176 mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
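
mt76x02_mac_wcid_set_drop() updates a single bit of the per-station drop bitmap with a branch-free read-modify-write: (val & ~bit) | (bit * drop) first clears the bit, then ORs it back in only when drop is true, since multiplying a one-bit mask by a 0/1 bool yields either the mask or zero. A self-contained illustration of the idiom (set_bit_to is a made-up name; the real code first selects the MT_WCID_DROP word by station index):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* set or clear bit 'idx' in 'word' without branching, as in wcid_set_drop */
static uint32_t set_bit_to(uint32_t word, unsigned int idx, bool on)
{
	uint32_t bit = 1u << idx;

	return (word & ~bit) | (bit * on);   /* bit * true == bit, bit * false == 0 */
}

int main(void)
{
	uint32_t map = 0;

	map = set_bit_to(map, 3, true);
	map = set_bit_to(map, 3, false);
	printf("%#x\n", map);                /* 0 again */
	return 0;
}
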
180 mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
204 int band = dev->mphy.chandef.chan->band;
207 r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
228 void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
231 s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
236 rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
244 void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
247 mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
249 mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
252 bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
257 stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
258 stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
273 trace_mac_txstat_fetch(dev, stat);
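
mt76x02_mac_load_tx_status() samples MT_TX_STAT_FIFO_EXT before MT_TX_STAT_FIFO. A plausible reading of that order, consistent with how these status FIFOs usually behave: the read of the base FIFO register is what pops the entry, so the extension word must be fetched while the entry is still at the head. A mocked sketch of the two-word pop (the VALID bit position and the pop-on-read model are assumptions for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mocked two-word status FIFO: reading the main word pops the entry */
static uint32_t fifo_main[4] = { 0x80000001u };
static uint32_t fifo_ext[4]  = { 0x00001234u };
static unsigned int head, count = 1;

static uint32_t read_ext(void)
{
	return count ? fifo_ext[head] : 0;
}

static uint32_t read_main(void)
{
	if (!count)
		return 0;
	count--;
	return fifo_main[head++];
}

static bool load_tx_status(uint32_t *m, uint32_t *e)
{
	*e = read_ext();   /* extension word first: the next read discards it */
	*m = read_main();  /* this read advances the hardware FIFO */
	return *m & 0x80000000u;   /* illustrative VALID flag in the main word */
}

int main(void)
{
	uint32_t m, e;

	while (load_tx_status(&m, &e))
		printf("status %#x ext %#x\n", m, e);
	return 0;
}
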
335 void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
348 u8 ccmp_pn[8], nstreams = dev->mphy.chainmask & 0xf;
388 rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
389 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
393 txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
397 if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
399 else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
403 if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
476 mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
497 dev->mphy.chandef.chan->band);
503 dev->mphy.chandef.chan->band);
507 dev->mphy.chandef.chan->band);
540 void mt76x02_send_tx_status(struct mt76x02_dev *dev,
555 struct mt76_dev *mdev = &dev->mt76;
568 wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
610 mt76x02_mac_fill_tx_status(dev, msta, status.info,
618 mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
635 spin_lock_bh(&dev->mt76.rx_lock);
636 ieee80211_tx_status_ext(mt76_hw(dev), &status);
637 spin_unlock_bh(&dev->mt76.rx_lock);
643 duration = ieee80211_calc_tx_airtime(mt76_hw(dev), &info, len);
645 spin_lock_bh(&dev->mt76.cc_lock);
646 dev->tx_airtime += duration;
647 spin_unlock_bh(&dev->mt76.cc_lock);
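
The status path above accumulates per-frame airtime into dev->tx_airtime under mt76.cc_lock; mt76x02_mac_cc_stats() later drains the accumulator into the survey state under the same lock, so the counter is only ever read-and-reset atomically with respect to the writers. A user-space sketch of that accumulate/drain split, with a pthread mutex standing in for the kernel spinlock (report_tx and drain_tx_airtime are invented names):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t cc_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t tx_airtime;           /* microseconds since last drain */

static void report_tx(uint32_t duration_us)
{
	pthread_mutex_lock(&cc_lock);
	tx_airtime += duration_us;    /* mt76x02_send_tx_status() side */
	pthread_mutex_unlock(&cc_lock);
}

static uint32_t drain_tx_airtime(void)
{
	uint32_t val;

	pthread_mutex_lock(&cc_lock);
	val = tx_airtime;             /* mt76x02_mac_cc_stats() side */
	tx_airtime = 0;
	pthread_mutex_unlock(&cc_lock);
	return val;
}

int main(void)
{
	report_tx(420);
	report_tx(180);
	printf("cc_tx += %u us\n", drain_tx_airtime());
	return 0;
}
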
657 mt76x02_mac_process_rate(struct mt76x02_dev *dev,
692 u8 n_rxstream = dev->mphy.chainmask & 0xf;
729 void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr)
734 ether_addr_copy(dev->mphy.macaddr, addr);
736 if (!is_valid_ether_addr(dev->mphy.macaddr)) {
737 eth_random_addr(dev->mphy.macaddr);
738 dev_info(dev->mt76.dev,
740 dev->mphy.macaddr);
743 mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mphy.macaddr));
744 mt76_wr(dev, MT_MAC_ADDR_DW1,
745 get_unaligned_le16(dev->mphy.macaddr + 4) |
748 mt76_wr(dev, MT_MAC_BSSID_DW0,
749 get_unaligned_le32(dev->mphy.macaddr));
750 mt76_wr(dev, MT_MAC_BSSID_DW1,
751 get_unaligned_le16(dev->mphy.macaddr + 4) |
755 mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, 7);
758 mt76x02_mac_set_bssid(dev, i, null_addr);
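
mt76x02_mac_setaddr() falls back to eth_random_addr() when the EEPROM address is invalid, then programs the 6-byte MAC as two little-endian register words: the first four bytes as a le32 into MT_MAC_ADDR_DW0 and the last two as a le16 into the low half of MT_MAC_ADDR_DW1 (the driver also ORs extra mask bits into DW1, omitted here). A stand-alone sketch of the byte split (get_le32/get_le16 re-implement the kernel's get_unaligned_le* helpers for illustration):

#include <stdint.h>
#include <stdio.h>

/* split a 6-byte MAC into the two little-endian register words,
 * the way MT_MAC_ADDR_DW0/DW1 are programmed */
static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint32_t get_le16(const uint8_t *p)
{
	return p[0] | p[1] << 8;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0c, 0x43, 0x26, 0x60, 0x00 };

	printf("DW0 = %#010x\n", get_le32(mac));      /* bytes 0..3 */
	printf("DW1 = %#010x\n", get_le16(mac + 4));  /* bytes 4..5 in the low half */
	return 0;
}
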
763 mt76x02_mac_get_rssi(struct mt76x02_dev *dev, s8 rssi, int chain)
765 struct mt76x02_rx_freq_cal *cal = &dev->cal.rx;
773 int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
785 int pad_len = 0, nstreams = dev->mphy.chainmask & 0xf;
791 if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
805 sta = mt76x02_rx_get_sta(&dev->mt76, wcid);
840 status->ampdu_ref = dev->ampdu_ref;
848 if (!++dev->ampdu_ref)
849 dev->ampdu_ref++;
860 signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0);
864 status->chain_signal[1] = mt76x02_mac_get_rssi(dev,
868 status->freq = dev->mphy.chandef.chan->center_freq;
869 status->band = dev->mphy.chandef.chan->band;
875 return mt76x02_mac_process_rate(dev, status, rate);
878 void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
884 if (!test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
887 trace_mac_txstat_poll(dev);
889 while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
890 if (!spin_trylock(&dev->txstatus_fifo_lock))
893 ret = mt76x02_mac_load_tx_status(dev, &stat);
894 spin_unlock(&dev->txstatus_fifo_lock);
900 mt76x02_send_tx_status(dev, &stat, &update);
904 kfifo_put(&dev->txstatus_fifo, stat);
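
mt76x02_mac_poll_tx_status() has two drain modes: called from IRQ context it only buffers status words into dev->txstatus_fifo, stopping when the kfifo is full, while outside IRQ context it reports each status inline via mt76x02_send_tx_status(). A mocked user-space sketch of that split, with a trivial ring buffer standing in for the kfifo and the trylock/tracing details dropped:

#include <stdbool.h>
#include <stdio.h>

#define FIFO_SZ 4

static int fifo[FIFO_SZ];
static unsigned int head, tail;
static int hw_pending = 6;            /* fake hardware status entries */

static bool fifo_full(void)
{
	return tail - head == FIFO_SZ;
}

static bool hw_load(int *s)           /* stands in for load_tx_status() */
{
	if (hw_pending <= 0)
		return false;
	*s = --hw_pending;
	return true;
}

static void poll_tx_status(bool irq)
{
	int stat;

	/* from IRQ context only buffer; inline reporting happens later */
	while (!irq || !fifo_full()) {
		if (!hw_load(&stat))
			break;
		if (!irq) {
			printf("report status %d inline\n", stat);
			continue;
		}
		fifo[tail++ % FIFO_SZ] = stat;   /* deferred path */
	}
}

int main(void)
{
	poll_tx_status(true);    /* buffers up to FIFO_SZ entries */
	poll_tx_status(false);   /* drains the rest inline */
	printf("%u entries buffered for the bottom half\n", tail - head);
	return 0;
}
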
910 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
919 mt76x02_mac_poll_tx_status(dev, false);
929 void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val)
937 mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
939 mt76_rmw(dev, MT_CCK_PROT_CFG,
941 mt76_rmw(dev, MT_OFDM_PROT_CFG,
945 void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot,
956 prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4);
963 vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4);
967 rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH);
1022 mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
1025 mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]);
1030 struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev, mt76);
1034 state->cc_busy += mt76_rr(dev, MT_CH_BUSY);
1036 spin_lock_bh(&dev->mt76.cc_lock);
1037 state->cc_tx += dev->tx_airtime;
1038 dev->tx_airtime = 0;
1039 spin_unlock_bh(&dev->mt76.cc_lock);
1043 static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
1045 if (dev->mt76.beacon_mask) {
1046 if (mt76_rr(dev, MT_TX_STA_0) & MT_TX_STA_0_BEACONS) {
1047 dev->beacon_hang_check = 0;
1051 if (dev->beacon_hang_check < 10)
1055 u32 val = mt76_rr(dev, 0x10f4);
1060 dev_err(dev->mt76.dev, "MAC error detected\n");
1062 mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
1063 if (!mt76x02_wait_for_txrx_idle(&dev->mt76)) {
1064 dev_err(dev->mt76.dev, "MAC stop failed\n");
1068 dev->beacon_hang_check = 0;
1069 mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
1073 mt76_wr(dev, MT_MAC_SYS_CTRL,
1078 mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
1083 mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
1084 mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
1086 data = mt76_rr(dev, MT_TX_PIN_CFG);
1091 mt76_wr(dev, MT_TX_PIN_CFG, data);
1093 mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
1094 mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN);
1096 mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_TXANT);
1097 mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT);
1099 dev->ed_tx_blocked = !enable;
1102 void mt76x02_edcca_init(struct mt76x02_dev *dev)
1104 dev->ed_trigger = 0;
1105 dev->ed_silent = 0;
1107 if (dev->ed_monitor) {
1108 struct ieee80211_channel *chan = dev->mphy.chandef.chan;
1111 mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
1112 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
1113 mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0),
1115 mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN);
1117 mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN);
1118 mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
1119 if (is_mt76x2(dev)) {
1120 mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
1121 mt76_set(dev, MT_TXOP_HLDR_ET,
1124 mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464);
1125 mt76_clear(dev, MT_TXOP_HLDR_ET,
1129 mt76x02_edcca_tx_enable(dev, true);
1130 dev->ed_monitor_learning = true;
1133 mt76_rr(dev, MT_ED_CCA_TIMER);
1134 dev->ed_time = ktime_get_boottime();
1144 static void mt76x02_edcca_check(struct mt76x02_dev *dev)
1150 val = mt76_rr(dev, MT_ED_CCA_TIMER);
1152 active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
1153 dev->ed_time = cur_time;
1159 dev->ed_trigger++;
1160 dev->ed_silent = 0;
1162 dev->ed_silent++;
1163 dev->ed_trigger = 0;
1166 if (dev->cal.agc_lowest_gain &&
1167 dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
1168 dev->ed_trigger > MT_EDCCA_LEARN_TH) {
1169 dev->ed_monitor_learning = false;
1170 dev->ed_trigger_timeout = jiffies + 20 * HZ;
1171 } else if (!dev->ed_monitor_learning &&
1172 time_is_after_jiffies(dev->ed_trigger_timeout)) {
1173 dev->ed_monitor_learning = true;
1174 mt76x02_edcca_tx_enable(dev, true);
1177 if (dev->ed_monitor_learning)
1180 if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
1181 mt76x02_edcca_tx_enable(dev, false);
1182 else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
1183 mt76x02_edcca_tx_enable(dev, true);
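
mt76x02_edcca_check() implements hysteresis with two mutually resetting counters: each busy sample bumps ed_trigger and zeroes ed_silent, each quiet sample does the opposite, and TX is blocked or re-enabled only after more than MT_EDCCA_BLOCK_TH consecutive samples agree. A compact model of that state machine (BLOCK_TH is an illustrative constant; the learning/timeout logic above is omitted):

#include <stdbool.h>
#include <stdio.h>

#define BLOCK_TH 16   /* illustrative stand-in for MT_EDCCA_BLOCK_TH */

static int trigger, silent;
static bool tx_blocked;

/* consecutive-sample hysteresis: one counter resets the other, and TX
 * flips only after more than BLOCK_TH samples in a row agree */
static void edcca_check(bool busy)
{
	if (busy) {
		trigger++;
		silent = 0;
	} else {
		silent++;
		trigger = 0;
	}

	if (trigger > BLOCK_TH && !tx_blocked)
		tx_blocked = true;
	else if (silent > BLOCK_TH && tx_blocked)
		tx_blocked = false;
}

int main(void)
{
	int i;

	for (i = 0; i < 20; i++)
		edcca_check(true);
	printf("blocked: %d\n", tx_blocked);   /* 1 after >16 busy samples */
	return 0;
}
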
1188 struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
1192 mutex_lock(&dev->mt76.mutex);
1194 mt76_update_survey(&dev->mphy);
1196 u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
1198 dev->mphy.aggr_stats[idx++] += val & 0xffff;
1199 dev->mphy.aggr_stats[idx++] += val >> 16;
1202 mt76x02_check_mac_err(dev);
1204 if (dev->ed_monitor)
1205 mt76x02_edcca_check(dev);
1207 mutex_unlock(&dev->mt76.mutex);
1209 mt76_tx_status_check(&dev->mt76, false);
1211 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
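
In the mac_work handler above, each MT_TX_AGG_CNT register read packs two 16-bit A-MPDU counters into one 32-bit word, which the loop splits into consecutive aggr_stats buckets (low half first, high half second). A minimal demonstration of the unpacking:

#include <stdint.h>
#include <stdio.h>

/* each TX_AGG_CNT register packs two 16-bit aggregation counters */
static void accumulate(uint32_t reg_val, uint32_t *lo_bucket, uint32_t *hi_bucket)
{
	*lo_bucket += reg_val & 0xffff;   /* low half: first bucket */
	*hi_bucket += reg_val >> 16;      /* high half: second bucket */
}

int main(void)
{
	uint32_t buckets[2] = { 0, 0 };

	accumulate(0x00030002, &buckets[0], &buckets[1]);
	printf("bucket0 %u bucket1 %u\n", buckets[0], buckets[1]);   /* 2 and 3 */
	return 0;
}
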
1215 void mt76x02_mac_cc_reset(struct mt76x02_dev *dev)
1217 dev->mphy.survey_time = ktime_get_boottime();
1219 mt76_wr(dev, MT_CH_TIME_CFG,
1229 mt76_rr(dev, MT_CH_BUSY);
1230 mt76_rr(dev, MT_CH_IDLE);
1234 void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
1237 mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
1238 mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,