1/****************************************************************************** 2 * 3 * GPL LICENSE SUMMARY 4 * 5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of version 2 of the GNU General Public License as 9 * published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope that it will be useful, but 12 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 19 * USA 20 * 21 * The full GNU General Public License is included in this distribution 22 * in the file called LICENSE.GPL. 23 * 24 * Contact Information: 25 * Intel Linux Wireless <ilw@linux.intel.com> 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 27 * 28 *****************************************************************************/ 29 30#include <linux/kernel.h> 31#include <linux/module.h> 32#include <linux/init.h> 33#include <linux/sched.h> 34 35#include "iwl-dev.h" 36#include "iwl-core.h" 37#include "iwl-sta.h" 38#include "iwl-io.h" 39#include "iwl-helpers.h" 40#include "iwl-agn-hw.h" 41#include "iwl-agn.h" 42 43/* 44 * mac80211 queues, ACs, hardware queues, FIFOs. 45 * 46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues 47 * 48 * Mac80211 uses the following numbers, which we get as from it 49 * by way of skb_get_queue_mapping(skb): 50 * 51 * VO 0 52 * VI 1 53 * BE 2 54 * BK 3 55 * 56 * 57 * Regular (not A-MPDU) frames are put into hardware queues corresponding 58 * to the FIFOs, see comments in iwl-prph.h. 
Aggregated frames get their 59 * own queue per aggregation session (RA/TID combination), such queues are 60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In 61 * order to map frames to the right queue, we also need an AC->hw queue 62 * mapping. This is implemented here. 63 * 64 * Due to the way hw queues are set up (by the hw specific modules like 65 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity 66 * mapping. 67 */ 68 69static const u8 tid_to_ac[] = { 70 /* this matches the mac80211 numbers */ 71 2, 3, 3, 2, 1, 1, 0, 0 72}; 73 74static const u8 ac_to_fifo[] = { 75 IWL_TX_FIFO_VO, 76 IWL_TX_FIFO_VI, 77 IWL_TX_FIFO_BE, 78 IWL_TX_FIFO_BK, 79}; 80 81static inline int get_fifo_from_ac(u8 ac) 82{ 83 return ac_to_fifo[ac]; 84} 85 86static inline int get_ac_from_tid(u16 tid) 87{ 88 if (likely(tid < ARRAY_SIZE(tid_to_ac))) 89 return tid_to_ac[tid]; 90 91 /* no support for TIDs 8-15 yet */ 92 return -EINVAL; 93} 94 95static inline int get_fifo_from_tid(u16 tid) 96{ 97 if (likely(tid < ARRAY_SIZE(tid_to_ac))) 98 return get_fifo_from_ac(tid_to_ac[tid]); 99 100 /* no support for TIDs 8-15 yet */ 101 return -EINVAL; 102} 103 104/** 105 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 106 */ 107void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv, 108 struct iwl_tx_queue *txq, 109 u16 byte_cnt) 110{ 111 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; 112 int write_ptr = txq->q.write_ptr; 113 int txq_id = txq->q.id; 114 u8 sec_ctl = 0; 115 u8 sta_id = 0; 116 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 117 __le16 bc_ent; 118 119 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); 120 121 if (txq_id != IWL_CMD_QUEUE_NUM) { 122 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; 123 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; 124 125 switch (sec_ctl & TX_CMD_SEC_MSK) { 126 case TX_CMD_SEC_CCM: 127 len += CCMP_MIC_LEN; 128 break; 129 case TX_CMD_SEC_TKIP: 130 
len += TKIP_ICV_LEN; 131 break; 132 case TX_CMD_SEC_WEP: 133 len += WEP_IV_LEN + WEP_ICV_LEN; 134 break; 135 } 136 } 137 138 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12)); 139 140 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; 141 142 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) 143 scd_bc_tbl[txq_id]. 144 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 145} 146 147void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, 148 struct iwl_tx_queue *txq) 149{ 150 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; 151 int txq_id = txq->q.id; 152 int read_ptr = txq->q.read_ptr; 153 u8 sta_id = 0; 154 __le16 bc_ent; 155 156 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 157 158 if (txq_id != IWL_CMD_QUEUE_NUM) 159 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; 160 161 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 162 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 163 164 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 165 scd_bc_tbl[txq_id]. 166 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 167} 168 169static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, 170 u16 txq_id) 171{ 172 u32 tbl_dw_addr; 173 u32 tbl_dw; 174 u16 scd_q2ratid; 175 176 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; 177 178 tbl_dw_addr = priv->scd_base_addr + 179 IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); 180 181 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); 182 183 if (txq_id & 0x1) 184 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 185 else 186 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 187 188 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); 189 190 return 0; 191} 192 193static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) 194{ 195 /* Simply stop the queue, but don't change any configuration; 196 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ 197 iwl_write_prph(priv, 198 IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id), 199 (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)| 200 (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 201} 202 203void iwlagn_set_wr_ptrs(struct iwl_priv *priv, 204 int txq_id, u32 index) 205{ 206 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 207 (index & 0xff) | (txq_id << 8)); 208 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index); 209} 210 211void iwlagn_tx_queue_set_status(struct iwl_priv *priv, 212 struct iwl_tx_queue *txq, 213 int tx_fifo_id, int scd_retry) 214{ 215 int txq_id = txq->q.id; 216 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; 217 218 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id), 219 (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) | 220 (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) | 221 (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) | 222 IWLAGN_SCD_QUEUE_STTS_REG_MSK); 223 224 txq->sched_retry = scd_retry; 225 226 IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n", 227 active ? "Activate" : "Deactivate", 228 scd_retry ? 
"BA" : "AC/CMD", txq_id, tx_fifo_id); 229} 230 231int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, 232 int tx_fifo, int sta_id, int tid, u16 ssn_idx) 233{ 234 unsigned long flags; 235 u16 ra_tid; 236 int ret; 237 238 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || 239 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 240 <= txq_id)) { 241 IWL_WARN(priv, 242 "queue number out of range: %d, must be %d to %d\n", 243 txq_id, IWLAGN_FIRST_AMPDU_QUEUE, 244 IWLAGN_FIRST_AMPDU_QUEUE + 245 priv->cfg->num_of_ampdu_queues - 1); 246 return -EINVAL; 247 } 248 249 ra_tid = BUILD_RAxTID(sta_id, tid); 250 251 /* Modify device's station table to Tx this TID */ 252 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); 253 if (ret) 254 return ret; 255 256 spin_lock_irqsave(&priv->lock, flags); 257 258 /* Stop this Tx queue before configuring it */ 259 iwlagn_tx_queue_stop_scheduler(priv, txq_id); 260 261 /* Map receiver-address / traffic-ID to this queue */ 262 iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id); 263 264 /* Set this queue as a chain-building queue */ 265 iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id)); 266 267 /* enable aggregations for the queue */ 268 iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id)); 269 270 /* Place first TFD at index corresponding to start sequence number. 
271 * Assumes that ssn_idx is valid (!= 0xFFF) */ 272 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 273 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 274 iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); 275 276 /* Set up Tx window size and frame limit for this queue */ 277 iwl_write_targ_mem(priv, priv->scd_base_addr + 278 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + 279 sizeof(u32), 280 ((SCD_WIN_SIZE << 281 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 282 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 283 ((SCD_FRAME_LIMIT << 284 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 285 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 286 287 iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id)); 288 289 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 290 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); 291 292 spin_unlock_irqrestore(&priv->lock, flags); 293 294 return 0; 295} 296 297int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, 298 u16 ssn_idx, u8 tx_fifo) 299{ 300 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || 301 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 302 <= txq_id)) { 303 IWL_ERR(priv, 304 "queue number out of range: %d, must be %d to %d\n", 305 txq_id, IWLAGN_FIRST_AMPDU_QUEUE, 306 IWLAGN_FIRST_AMPDU_QUEUE + 307 priv->cfg->num_of_ampdu_queues - 1); 308 return -EINVAL; 309 } 310 311 iwlagn_tx_queue_stop_scheduler(priv, txq_id); 312 313 iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id)); 314 315 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 316 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 317 /* supposes that ssn_idx is valid (!= 0xFFF) */ 318 iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx); 319 320 iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id)); 321 iwl_txq_ctx_deactivate(priv, txq_id); 322 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); 323 324 return 0; 325} 326 327/* 328 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask 329 * 
must be called under priv->lock and mac access 330 */ 331void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask) 332{ 333 iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask); 334} 335 336static inline int get_queue_from_ac(u16 ac) 337{ 338 return ac; 339} 340 341/* 342 * handle build REPLY_TX command notification. 343 */ 344static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, 345 struct iwl_tx_cmd *tx_cmd, 346 struct ieee80211_tx_info *info, 347 struct ieee80211_hdr *hdr, 348 u8 std_id) 349{ 350 __le16 fc = hdr->frame_control; 351 __le32 tx_flags = tx_cmd->tx_flags; 352 353 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 354 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 355 tx_flags |= TX_CMD_FLG_ACK_MSK; 356 if (ieee80211_is_mgmt(fc)) 357 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 358 if (ieee80211_is_probe_resp(fc) && 359 !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) 360 tx_flags |= TX_CMD_FLG_TSF_MSK; 361 } else { 362 tx_flags &= (~TX_CMD_FLG_ACK_MSK); 363 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 364 } 365 366 if (ieee80211_is_back_req(fc)) 367 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; 368 369 370 tx_cmd->sta_id = std_id; 371 if (ieee80211_has_morefrags(fc)) 372 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 373 374 if (ieee80211_is_data_qos(fc)) { 375 u8 *qc = ieee80211_get_qos_ctl(hdr); 376 tx_cmd->tid_tspec = qc[0] & 0xf; 377 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 378 } else { 379 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 380 } 381 382 priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); 383 384 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 385 if (ieee80211_is_mgmt(fc)) { 386 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 387 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); 388 else 389 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); 390 } else { 391 tx_cmd->timeout.pm_frame_timeout = 0; 392 } 393 394 tx_cmd->driver_txop = 0; 395 tx_cmd->tx_flags = tx_flags; 396 tx_cmd->next_frame_len = 0; 397} 398 399#define 
RTS_DFAULT_RETRY_LIMIT 60 400 401static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, 402 struct iwl_tx_cmd *tx_cmd, 403 struct ieee80211_tx_info *info, 404 __le16 fc) 405{ 406 u32 rate_flags; 407 int rate_idx; 408 u8 rts_retry_limit; 409 u8 data_retry_limit; 410 u8 rate_plcp; 411 412 /* Set retry limit on DATA packets and Probe Responses*/ 413 if (ieee80211_is_probe_resp(fc)) 414 data_retry_limit = 3; 415 else 416 data_retry_limit = IWLAGN_DEFAULT_TX_RETRY; 417 tx_cmd->data_retry_limit = data_retry_limit; 418 419 /* Set retry limit on RTS packets */ 420 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT; 421 if (data_retry_limit < rts_retry_limit) 422 rts_retry_limit = data_retry_limit; 423 tx_cmd->rts_retry_limit = rts_retry_limit; 424 425 /* DATA packets will use the uCode station table for rate/antenna 426 * selection */ 427 if (ieee80211_is_data(fc)) { 428 tx_cmd->initial_rate_index = 0; 429 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; 430 return; 431 } 432 433 /** 434 * If the current TX rate stored in mac80211 has the MCS bit set, it's 435 * not really a TX rate. Thus, we use the lowest supported rate for 436 * this band. Also use the lowest supported rate if the stored rate 437 * index is invalid. 
438 */ 439 rate_idx = info->control.rates[0].idx; 440 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || 441 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) 442 rate_idx = rate_lowest_index(&priv->bands[info->band], 443 info->control.sta); 444 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 445 if (info->band == IEEE80211_BAND_5GHZ) 446 rate_idx += IWL_FIRST_OFDM_RATE; 447 /* Get PLCP rate for tx_cmd->rate_n_flags */ 448 rate_plcp = iwl_rates[rate_idx].plcp; 449 /* Zero out flags for this packet */ 450 rate_flags = 0; 451 452 /* Set CCK flag as needed */ 453 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) 454 rate_flags |= RATE_MCS_CCK_MSK; 455 456 /* Set up antennas */ 457 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 458 priv->hw_params.valid_tx_ant); 459 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 460 461 /* Set the rate in the TX cmd */ 462 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags); 463} 464 465static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv, 466 struct ieee80211_tx_info *info, 467 struct iwl_tx_cmd *tx_cmd, 468 struct sk_buff *skb_frag, 469 int sta_id) 470{ 471 struct ieee80211_key_conf *keyconf = info->control.hw_key; 472 473 switch (keyconf->alg) { 474 case ALG_CCMP: 475 tx_cmd->sec_ctl = TX_CMD_SEC_CCM; 476 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); 477 if (info->flags & IEEE80211_TX_CTL_AMPDU) 478 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; 479 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); 480 break; 481 482 case ALG_TKIP: 483 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; 484 ieee80211_get_tkip_key(keyconf, skb_frag, 485 IEEE80211_TKIP_P2_KEY, tx_cmd->key); 486 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); 487 break; 488 489 case ALG_WEP: 490 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | 491 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); 492 493 if (keyconf->keylen == WEP_KEY_LEN_128) 494 tx_cmd->sec_ctl |= 
TX_CMD_SEC_KEY128; 495 496 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); 497 498 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " 499 "with key %d\n", keyconf->keyidx); 500 break; 501 502 default: 503 IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg); 504 break; 505 } 506} 507 508/* 509 * start REPLY_TX command process 510 */ 511int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) 512{ 513 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 514 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 515 struct ieee80211_sta *sta = info->control.sta; 516 struct iwl_station_priv *sta_priv = NULL; 517 struct iwl_tx_queue *txq; 518 struct iwl_queue *q; 519 struct iwl_device_cmd *out_cmd; 520 struct iwl_cmd_meta *out_meta; 521 struct iwl_tx_cmd *tx_cmd; 522 int swq_id, txq_id; 523 dma_addr_t phys_addr; 524 dma_addr_t txcmd_phys; 525 dma_addr_t scratch_phys; 526 u16 len, len_org, firstlen, secondlen; 527 u16 seq_number = 0; 528 __le16 fc; 529 u8 hdr_len; 530 u8 sta_id; 531 u8 wait_write_ptr = 0; 532 u8 tid = 0; 533 u8 *qc = NULL; 534 unsigned long flags; 535 536 spin_lock_irqsave(&priv->lock, flags); 537 if (iwl_is_rfkill(priv)) { 538 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); 539 goto drop_unlock; 540 } 541 542 fc = hdr->frame_control; 543 544#ifdef CONFIG_IWLWIFI_DEBUG 545 if (ieee80211_is_auth(fc)) 546 IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); 547 else if (ieee80211_is_assoc_req(fc)) 548 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); 549 else if (ieee80211_is_reassoc_req(fc)) 550 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); 551#endif 552 553 hdr_len = ieee80211_hdrlen(fc); 554 555 /* Find index into station table for destination station */ 556 sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta); 557 if (sta_id == IWL_INVALID_STATION) { 558 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 559 hdr->addr1); 560 goto drop_unlock; 561 } 562 563 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); 564 565 if (sta) 
566 sta_priv = (void *)sta->drv_priv; 567 568 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id && 569 sta_priv->asleep) { 570 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)); 571 /* 572 * This sends an asynchronous command to the device, 573 * but we can rely on it being processed before the 574 * next frame is processed -- and the next frame to 575 * this station is the one that will consume this 576 * counter. 577 * For now set the counter to just 1 since we do not 578 * support uAPSD yet. 579 */ 580 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); 581 } 582 583 txq_id = get_queue_from_ac(skb_get_queue_mapping(skb)); 584 585 /* irqs already disabled/saved above when locking priv->lock */ 586 spin_lock(&priv->sta_lock); 587 588 if (ieee80211_is_data_qos(fc)) { 589 qc = ieee80211_get_qos_ctl(hdr); 590 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 591 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { 592 spin_unlock(&priv->sta_lock); 593 goto drop_unlock; 594 } 595 seq_number = priv->stations[sta_id].tid[tid].seq_number; 596 seq_number &= IEEE80211_SCTL_SEQ; 597 hdr->seq_ctrl = hdr->seq_ctrl & 598 cpu_to_le16(IEEE80211_SCTL_FRAG); 599 hdr->seq_ctrl |= cpu_to_le16(seq_number); 600 seq_number += 0x10; 601 /* aggregation is on for this <sta,tid> */ 602 if (info->flags & IEEE80211_TX_CTL_AMPDU && 603 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { 604 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 605 } 606 } 607 608 txq = &priv->txq[txq_id]; 609 swq_id = txq->swq_id; 610 q = &txq->q; 611 612 if (unlikely(iwl_queue_space(q) < q->high_mark)) { 613 spin_unlock(&priv->sta_lock); 614 goto drop_unlock; 615 } 616 617 if (ieee80211_is_data_qos(fc)) { 618 priv->stations[sta_id].tid[tid].tfds_in_queue++; 619 if (!ieee80211_has_morefrags(fc)) 620 priv->stations[sta_id].tid[tid].seq_number = seq_number; 621 } 622 623 spin_unlock(&priv->sta_lock); 624 625 /* Set up driver data for this TFD */ 626 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 
627 txq->txb[q->write_ptr].skb = skb; 628 629 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 630 out_cmd = txq->cmd[q->write_ptr]; 631 out_meta = &txq->meta[q->write_ptr]; 632 tx_cmd = &out_cmd->cmd.tx; 633 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 634 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); 635 636 /* 637 * Set up the Tx-command (not MAC!) header. 638 * Store the chosen Tx queue and TFD index within the sequence field; 639 * after Tx, uCode's Tx response will return this value so driver can 640 * locate the frame within the tx queue and do post-tx processing. 641 */ 642 out_cmd->hdr.cmd = REPLY_TX; 643 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 644 INDEX_TO_SEQ(q->write_ptr))); 645 646 /* Copy MAC header from skb into command buffer */ 647 memcpy(tx_cmd->hdr, hdr, hdr_len); 648 649 650 /* Total # bytes to be transmitted */ 651 len = (u16)skb->len; 652 tx_cmd->len = cpu_to_le16(len); 653 654 if (info->control.hw_key) 655 iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); 656 657 /* TODO need this for burst mode later on */ 658 iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); 659 iwl_dbg_log_tx_data_frame(priv, len, hdr); 660 661 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); 662 663 iwl_update_stats(priv, true, fc, len); 664 /* 665 * Use the first empty entry in this queue's command buffer array 666 * to contain the Tx command and MAC header concatenated together 667 * (payload data will be in another buffer). 668 * Size of this varies, due to varying MAC header length. 669 * If end is not dword aligned, we'll have 2 extra bytes at the end 670 * of the MAC header (device reads on dword boundaries). 671 * We'll tell device about this padding later. 
672 */ 673 len = sizeof(struct iwl_tx_cmd) + 674 sizeof(struct iwl_cmd_header) + hdr_len; 675 676 len_org = len; 677 firstlen = len = (len + 3) & ~3; 678 679 if (len_org != len) 680 len_org = 1; 681 else 682 len_org = 0; 683 684 /* Tell NIC about any 2-byte padding after MAC header */ 685 if (len_org) 686 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 687 688 /* Physical address of this Tx command's header (not MAC header!), 689 * within command buffer array. */ 690 txcmd_phys = pci_map_single(priv->pci_dev, 691 &out_cmd->hdr, len, 692 PCI_DMA_BIDIRECTIONAL); 693 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 694 dma_unmap_len_set(out_meta, len, len); 695 /* Add buffer containing Tx command and MAC(!) header to TFD's 696 * first entry */ 697 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 698 txcmd_phys, len, 1, 0); 699 700 if (!ieee80211_has_morefrags(hdr->frame_control)) { 701 txq->need_update = 1; 702 } else { 703 wait_write_ptr = 1; 704 txq->need_update = 0; 705 } 706 707 /* Set up TFD's 2nd entry to point directly to remainder of skb, 708 * if any (802.11 null frames have no payload). 
*/ 709 secondlen = len = skb->len - hdr_len; 710 if (len) { 711 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 712 len, PCI_DMA_TODEVICE); 713 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 714 phys_addr, len, 715 0, 0); 716 } 717 718 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + 719 offsetof(struct iwl_tx_cmd, scratch); 720 721 len = sizeof(struct iwl_tx_cmd) + 722 sizeof(struct iwl_cmd_header) + hdr_len; 723 /* take back ownership of DMA buffer to enable update */ 724 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, 725 len, PCI_DMA_BIDIRECTIONAL); 726 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 727 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 728 729 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", 730 le16_to_cpu(out_cmd->hdr.sequence)); 731 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); 732 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); 733 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); 734 735 /* Set up entry for this TFD in Tx byte-count array */ 736 if (info->flags & IEEE80211_TX_CTL_AMPDU) 737 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 738 le16_to_cpu(tx_cmd->len)); 739 740 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, 741 len, PCI_DMA_BIDIRECTIONAL); 742 743 trace_iwlwifi_dev_tx(priv, 744 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], 745 sizeof(struct iwl_tfd), 746 &out_cmd->hdr, firstlen, 747 skb->data + hdr_len, secondlen); 748 749 /* Tell device the write index *just past* this latest filled TFD */ 750 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 751 iwl_txq_update_write_ptr(priv, txq); 752 spin_unlock_irqrestore(&priv->lock, flags); 753 754 /* 755 * At this point the frame is "transmitted" successfully 756 * and we will get a TX status notification eventually, 757 * regardless of the value of ret. "ret" only indicates 758 * whether or not we should update the write pointer. 
759 */ 760 761 /* avoid atomic ops if it isn't an associated client */ 762 if (sta_priv && sta_priv->client) 763 atomic_inc(&sta_priv->pending_frames); 764 765 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { 766 if (wait_write_ptr) { 767 spin_lock_irqsave(&priv->lock, flags); 768 txq->need_update = 1; 769 iwl_txq_update_write_ptr(priv, txq); 770 spin_unlock_irqrestore(&priv->lock, flags); 771 } else { 772 iwl_stop_queue(priv, txq->swq_id); 773 } 774 } 775 776 return 0; 777 778drop_unlock: 779 spin_unlock_irqrestore(&priv->lock, flags); 780 return -1; 781} 782 783static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv, 784 struct iwl_dma_ptr *ptr, size_t size) 785{ 786 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, 787 GFP_KERNEL); 788 if (!ptr->addr) 789 return -ENOMEM; 790 ptr->size = size; 791 return 0; 792} 793 794static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv, 795 struct iwl_dma_ptr *ptr) 796{ 797 if (unlikely(!ptr->addr)) 798 return; 799 800 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); 801 memset(ptr, 0, sizeof(*ptr)); 802} 803 804/** 805 * iwlagn_hw_txq_ctx_free - Free TXQ Context 806 * 807 * Destroy all TX DMA queues and structures 808 */ 809void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv) 810{ 811 int txq_id; 812 813 /* Tx queues */ 814 if (priv->txq) { 815 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 816 if (txq_id == IWL_CMD_QUEUE_NUM) 817 iwl_cmd_queue_free(priv); 818 else 819 iwl_tx_queue_free(priv, txq_id); 820 } 821 iwlagn_free_dma_ptr(priv, &priv->kw); 822 823 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); 824 825 /* free tx queue structure */ 826 iwl_free_txq_mem(priv); 827} 828 829/** 830 * iwlagn_txq_ctx_alloc - allocate TX queue context 831 * Allocate all Tx DMA structures and initialize them 832 * 833 * @param priv 834 * @return error code 835 */ 836int iwlagn_txq_ctx_alloc(struct iwl_priv *priv) 837{ 838 int ret; 839 int txq_id, 
slots_num; 840 unsigned long flags; 841 842 /* Free all tx/cmd queues and keep-warm buffer */ 843 iwlagn_hw_txq_ctx_free(priv); 844 845 ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls, 846 priv->hw_params.scd_bc_tbls_size); 847 if (ret) { 848 IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); 849 goto error_bc_tbls; 850 } 851 /* Alloc keep-warm buffer */ 852 ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); 853 if (ret) { 854 IWL_ERR(priv, "Keep Warm allocation failed\n"); 855 goto error_kw; 856 } 857 858 /* allocate tx queue structure */ 859 ret = iwl_alloc_txq_mem(priv); 860 if (ret) 861 goto error; 862 863 spin_lock_irqsave(&priv->lock, flags); 864 865 /* Turn off all Tx DMA fifos */ 866 priv->cfg->ops->lib->txq_set_sched(priv, 0); 867 868 /* Tell NIC where to find the "keep warm" buffer */ 869 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 870 871 spin_unlock_irqrestore(&priv->lock, flags); 872 873 /* Alloc and init all Tx queues, including the command queue (#4) */ 874 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 875 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 
876 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 877 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 878 txq_id); 879 if (ret) { 880 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); 881 goto error; 882 } 883 } 884 885 return ret; 886 887 error: 888 iwlagn_hw_txq_ctx_free(priv); 889 iwlagn_free_dma_ptr(priv, &priv->kw); 890 error_kw: 891 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); 892 error_bc_tbls: 893 return ret; 894} 895 896void iwlagn_txq_ctx_reset(struct iwl_priv *priv) 897{ 898 int txq_id, slots_num; 899 unsigned long flags; 900 901 spin_lock_irqsave(&priv->lock, flags); 902 903 /* Turn off all Tx DMA fifos */ 904 priv->cfg->ops->lib->txq_set_sched(priv, 0); 905 906 /* Tell NIC where to find the "keep warm" buffer */ 907 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 908 909 spin_unlock_irqrestore(&priv->lock, flags); 910 911 /* Alloc and init all Tx queues, including the command queue (#4) */ 912 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 913 slots_num = txq_id == IWL_CMD_QUEUE_NUM ? 
914 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 915 iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id); 916 } 917} 918 919/** 920 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels 921 */ 922void iwlagn_txq_ctx_stop(struct iwl_priv *priv) 923{ 924 int ch; 925 unsigned long flags; 926 927 /* Turn off all Tx DMA fifos */ 928 spin_lock_irqsave(&priv->lock, flags); 929 930 priv->cfg->ops->lib->txq_set_sched(priv, 0); 931 932 /* Stop each Tx DMA channel, and wait for it to be idle */ 933 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { 934 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 935 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, 936 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 937 1000)) 938 IWL_ERR(priv, "Failing on timeout while stopping" 939 " DMA channel %d [0x%08x]", ch, 940 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); 941 } 942 spin_unlock_irqrestore(&priv->lock, flags); 943} 944 945/* 946 * Find first available (lowest unused) Tx Queue, mark it "active". 947 * Called only when finding queue for aggregation. 
948 * Should never return anything < 7, because they should already 949 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) 950 */ 951static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv) 952{ 953 int txq_id; 954 955 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 956 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) 957 return txq_id; 958 return -1; 959} 960 961int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, 962 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 963{ 964 int sta_id; 965 int tx_fifo; 966 int txq_id; 967 int ret; 968 unsigned long flags; 969 struct iwl_tid_data *tid_data; 970 971 tx_fifo = get_fifo_from_tid(tid); 972 if (unlikely(tx_fifo < 0)) 973 return tx_fifo; 974 975 IWL_WARN(priv, "%s on ra = %pM tid = %d\n", 976 __func__, sta->addr, tid); 977 978 sta_id = iwl_sta_id(sta); 979 if (sta_id == IWL_INVALID_STATION) { 980 IWL_ERR(priv, "Start AGG on invalid station\n"); 981 return -ENXIO; 982 } 983 if (unlikely(tid >= MAX_TID_COUNT)) 984 return -EINVAL; 985 986 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 987 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); 988 return -ENXIO; 989 } 990 991 txq_id = iwlagn_txq_ctx_activate_free(priv); 992 if (txq_id == -1) { 993 IWL_ERR(priv, "No free aggregation queue available\n"); 994 return -ENXIO; 995 } 996 997 spin_lock_irqsave(&priv->sta_lock, flags); 998 tid_data = &priv->stations[sta_id].tid[tid]; 999 *ssn = SEQ_TO_SN(tid_data->seq_number); 1000 tid_data->agg.txq_id = txq_id; 1001 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id); 1002 spin_unlock_irqrestore(&priv->sta_lock, flags); 1003 1004 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, 1005 sta_id, tid, *ssn); 1006 if (ret) 1007 return ret; 1008 1009 spin_lock_irqsave(&priv->sta_lock, flags); 1010 tid_data = &priv->stations[sta_id].tid[tid]; 1011 if (tid_data->tfds_in_queue == 0) { 1012 IWL_DEBUG_HT(priv, "HW 
queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		/* Defer the ADDBA callback until the HW queue has drained;
		 * iwlagn_txq_check_empty() completes the flow. */
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}

/**
 * iwlagn_tx_agg_stop - tear down a Tx aggregation session for sta/tid
 *
 * If the aggregation HW queue still holds frames, the teardown is
 * deferred (state set to IWL_EMPTYING_HW_QUEUE_DELBA) and completed
 * later from iwlagn_txq_check_empty() once the queue drains.
 *
 * Returns 0 on success, -EINVAL for TIDs 8-15 (no AC mapping yet),
 * -ENXIO if the station is unknown.
 */
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = get_fifo_from_tid(tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	/* Session may be torn down before the ADDBA handshake finished;
	 * in that case just cancel and notify mac80211. */
	if (priv->stations[sta_id].tid[tid].agg.state ==
				IWL_EMPTYING_HW_QUEUE_ADDBA) {
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs: switch from sta_lock to priv->lock
	 * while keeping interrupts disabled under the original 'flags' */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 *  mac80211 to clean up it own data.
	 */
	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
					     tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}

/**
 * iwlagn_txq_check_empty - continue a deferred ADDBA/DELBA flow
 *
 * Called with priv->sta_lock held (enforced by lockdep below).  If an
 * aggregation session on this sta/tid was waiting for its HW queue to
 * drain, complete the pending setup (ADDBA) or teardown (DELBA) and
 * fire the matching irqsafe BA callback into mac80211.
 */
int iwlagn_txq_check_empty(struct iwl_priv *priv,
			   int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id  == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = get_fifo_from_tid(tid);
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	}

	return 0;
}

/*
 * iwlagn_tx_status - hand a completed frame's status back to mac80211
 *
 * Also decrements the per-station pending-frames counter and, for
 * client stations, unblocks the station once it reaches zero
 * (power-save buffering support).
 */
static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();

	ieee80211_tx_status_irqsafe(priv->hw, skb);
}

/**
 * iwlagn_tx_queue_reclaim - reclaim TFDs up to (but excluding) @index
 *
 * Advances the queue read pointer to @index, returning each reclaimed
 * frame's status to mac80211 and freeing its TFD.
 *
 * Returns the number of reclaimed QoS-data frames (callers use this to
 * update the per-TID tfds_in_queue count), or 0 if @index is out of
 * range / unused.
 */
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		iwlagn_tx_status(priv, tx_info->skb);

		/* only QoS data frames count against tfds_in_queue */
		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}

/**
 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go
through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap, sent_bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits.
	 * A negative shift means the sequence numbers wrapped; re-align by
	 * adding the 256-entry sequence space. */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* after shifting, fewer than agg->frame_count bits would remain:
	 * the firmware-reported window cannot cover all attempted frames */
	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
				   ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
				   agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	/* Report the whole A-MPDU through the first frame's tx_info;
	 * mac80211 derives per-AMPDU statistics from ampdu_ack_len/len. */
	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

/**
 * iwlagn_hwrate_to_tx_control - translate ucode rate_n_flags to mac80211
 *
 * Decodes the uCode rate/flags word into the tx_info control rate
 * (MCS, greenfield, 40 MHz, duplicate, short-GI flags, antenna mask
 * and rate index) for mac80211 / rate scaling.
 */
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				 struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	/* sanity-check the device-supplied queue number before indexing */
	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	/* stale BA, e.g. after the agg session was already torn down */
	if (unlikely(agg->txq_id != scd_flow)) {
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		/* wake the sw queue only if there is room again and we are
		 * not in the middle of tearing the session down */
		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}