1/* 2 * Copyright (c) 2008-2009 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17#include "ath9k.h" 18#include "ar9003_mac.h" 19 20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 21 22static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 23{ 24 return sc->ps_enabled && 25 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); 26} 27 28static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 29 struct ieee80211_hdr *hdr) 30{ 31 struct ieee80211_hw *hw = sc->pri_wiphy->hw; 32 int i; 33 34 spin_lock_bh(&sc->wiphy_lock); 35 for (i = 0; i < sc->num_sec_wiphy; i++) { 36 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 37 if (aphy == NULL) 38 continue; 39 if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr) 40 == 0) { 41 hw = aphy->hw; 42 break; 43 } 44 } 45 spin_unlock_bh(&sc->wiphy_lock); 46 return hw; 47} 48 49/* 50 * Setup and link descriptors. 51 * 52 * 11N: we can no longer afford to self link the last descriptor. 53 * MAC acknowledges BA status as long as it copies frames to host 54 * buffer (or rx fifo). This can incorrectly acknowledge packets 55 * to a sender if last desc is self-linked. 56 */ 57static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 58{ 59 struct ath_hw *ah = sc->sc_ah; 60 struct ath_common *common = ath9k_hw_common(ah); 61 struct ath_desc *ds; 62 struct sk_buff *skb; 63 64 ATH_RXBUF_RESET(bf); 65 66 ds = bf->bf_desc; 67 ds->ds_link = 0; /* link to null */ 68 ds->ds_data = bf->bf_buf_addr; 69 70 /* virtual addr of the beginning of the buffer. */ 71 skb = bf->bf_mpdu; 72 BUG_ON(skb == NULL); 73 ds->ds_vdata = skb->data; 74 75 /* 76 * setup rx descriptors. The rx_bufsize here tells the hardware 77 * how much data it can DMA to us and that we are prepared 78 * to process 79 */ 80 ath9k_hw_setuprxdesc(ah, ds, 81 common->rx_bufsize, 82 0); 83 84 if (sc->rx.rxlink == NULL) 85 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 86 else 87 *sc->rx.rxlink = bf->bf_daddr; 88 89 sc->rx.rxlink = &ds->ds_link; 90 ath9k_hw_rxena(ah); 91} 92 93static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) 94{ 95 ath9k_hw_setantenna(sc->sc_ah, antenna); 96 sc->rx.defant = antenna; 97 sc->rx.rxotherant = 0; 98} 99 100static void ath_opmode_init(struct ath_softc *sc) 101{ 102 struct ath_hw *ah = sc->sc_ah; 103 struct ath_common *common = ath9k_hw_common(ah); 104 105 u32 rfilt, mfilt[2]; 106 107 /* configure rx filter */ 108 rfilt = ath_calcrxfilter(sc); 109 ath9k_hw_setrxfilter(ah, rfilt); 110 111 /* configure bssid mask */ 112 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 113 ath_hw_setbssidmask(common); 114 115 /* configure operational mode */ 116 ath9k_hw_setopmode(ah); 117 118 /* calculate and install multicast filter */ 119 mfilt[0] = mfilt[1] = ~0; 120 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 121} 122 123static bool ath_rx_edma_buf_link(struct ath_softc *sc, 124 enum ath9k_rx_qtype qtype) 125{ 126 struct ath_hw *ah = sc->sc_ah; 127 struct ath_rx_edma *rx_edma; 128 struct sk_buff *skb; 129 struct ath_buf *bf; 130 131 rx_edma = &sc->rx.rx_edma[qtype]; 132 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize) 133 return false; 134 135 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 136 list_del_init(&bf->list); 137 138 skb = bf->bf_mpdu; 139 140 ATH_RXBUF_RESET(bf); 141 memset(skb->data, 0, ah->caps.rx_status_len); 142 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 143 ah->caps.rx_status_len, DMA_TO_DEVICE); 144 145 SKB_CB_ATHBUF(skb) = bf; 146 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype); 147 skb_queue_tail(&rx_edma->rx_fifo, skb); 148 149 return true; 150} 151 152static void ath_rx_addbuffer_edma(struct ath_softc *sc, 153 enum ath9k_rx_qtype qtype, int size) 154{ 155 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 156 u32 nbuf = 0; 157 158 if (list_empty(&sc->rx.rxbuf)) { 159 ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n"); 160 return; 161 } 162 163 while (!list_empty(&sc->rx.rxbuf)) { 164 nbuf++; 165 166 if (!ath_rx_edma_buf_link(sc, qtype)) 167 break; 168 169 if (nbuf >= size) 170 break; 171 } 172} 173 174static void ath_rx_remove_buffer(struct ath_softc *sc, 175 enum ath9k_rx_qtype qtype) 176{ 177 struct ath_buf *bf; 178 struct ath_rx_edma *rx_edma; 179 struct sk_buff *skb; 180 181 rx_edma = &sc->rx.rx_edma[qtype]; 182 183 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) { 184 bf = SKB_CB_ATHBUF(skb); 185 BUG_ON(!bf); 186 list_add_tail(&bf->list, &sc->rx.rxbuf); 187 } 188} 189 190static void ath_rx_edma_cleanup(struct ath_softc *sc) 191{ 192 struct ath_buf *bf; 193 194 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 195 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 196 197 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 198 if (bf->bf_mpdu) 199 dev_kfree_skb_any(bf->bf_mpdu); 200 } 201 202 INIT_LIST_HEAD(&sc->rx.rxbuf); 203 204 kfree(sc->rx.rx_bufptr); 205 sc->rx.rx_bufptr = NULL; 206} 207 208static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size) 209{ 210 skb_queue_head_init(&rx_edma->rx_fifo); 211 skb_queue_head_init(&rx_edma->rx_buffers); 212 rx_edma->rx_fifo_hwsize = size; 213} 214 215static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) 216{ 217 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 218 struct ath_hw *ah = sc->sc_ah; 219 struct sk_buff *skb; 220 struct ath_buf *bf; 221 int error = 0, i; 222 u32 size; 223 224 225 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN + 226 ah->caps.rx_status_len, 227 min(common->cachelsz, (u16)64)); 228 229 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - 230 ah->caps.rx_status_len); 231 232 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP], 233 ah->caps.rx_lp_qdepth); 234 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP], 235 ah->caps.rx_hp_qdepth); 236 237 size = sizeof(struct ath_buf) * nbufs; 238 bf = kzalloc(size, GFP_KERNEL); 239 if (!bf) 240 return -ENOMEM; 241 242 INIT_LIST_HEAD(&sc->rx.rxbuf); 243 sc->rx.rx_bufptr = bf; 244 245 for (i = 0; i < nbufs; i++, bf++) { 246 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); 247 if (!skb) { 248 error = -ENOMEM; 249 goto rx_init_fail; 250 } 251 252 memset(skb->data, 0, common->rx_bufsize); 253 bf->bf_mpdu = skb; 254 255 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 256 common->rx_bufsize, 257 DMA_BIDIRECTIONAL); 258 if (unlikely(dma_mapping_error(sc->dev, 259 bf->bf_buf_addr))) { 260 dev_kfree_skb_any(skb); 261 bf->bf_mpdu = NULL; 262 ath_print(common, ATH_DBG_FATAL, 263 "dma_mapping_error() on RX init\n"); 264 error = -ENOMEM; 265 goto rx_init_fail; 266 } 267 268 list_add_tail(&bf->list, &sc->rx.rxbuf); 269 } 270 271 return 0; 272 273rx_init_fail: 274 ath_rx_edma_cleanup(sc); 275 return error; 276} 277 278static void ath_edma_start_recv(struct ath_softc *sc) 279{ 280 spin_lock_bh(&sc->rx.rxbuflock); 281 282 ath9k_hw_rxena(sc->sc_ah); 283 284 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP, 285 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize); 286 287 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP, 288 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize); 289 290 ath_opmode_init(sc); 291 292 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 293 294 spin_unlock_bh(&sc->rx.rxbuflock); 295} 296 297static void ath_edma_stop_recv(struct ath_softc *sc) 298{ 299 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); 300 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); 301} 302 303int ath_rx_init(struct ath_softc *sc, int nbufs) 304{ 305 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 306 struct sk_buff *skb; 307 struct ath_buf *bf; 308 int error = 0; 309 310 spin_lock_init(&sc->sc_pcu_lock); 311 sc->sc_flags &= ~SC_OP_RXFLUSH; 312 spin_lock_init(&sc->rx.rxbuflock); 313 314 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 315 return ath_rx_edma_init(sc, nbufs); 316 } else { 317 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 318 min(common->cachelsz, (u16)64)); 319 320 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 321 common->cachelsz, common->rx_bufsize); 322 323 /* Initialize rx descriptors */ 324 325 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 326 "rx", nbufs, 1, 0); 327 if (error != 0) { 328 ath_print(common, ATH_DBG_FATAL, 329 "failed to allocate rx descriptors: %d\n", 330 error); 331 goto err; 332 } 333 334 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 335 skb = ath_rxbuf_alloc(common, common->rx_bufsize, 336 GFP_KERNEL); 337 if (skb == NULL) { 338 error = -ENOMEM; 339 goto err; 340 } 341 342 bf->bf_mpdu = skb; 343 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 344 common->rx_bufsize, 345 DMA_FROM_DEVICE); 346 if (unlikely(dma_mapping_error(sc->dev, 347 bf->bf_buf_addr))) { 348 dev_kfree_skb_any(skb); 349 bf->bf_mpdu = NULL; 350 ath_print(common, ATH_DBG_FATAL, 351 "dma_mapping_error() on RX init\n"); 352 error = -ENOMEM; 353 goto err; 354 } 355 bf->bf_dmacontext = bf->bf_buf_addr; 356 } 357 sc->rx.rxlink = NULL; 358 } 359 360err: 361 if (error) 362 ath_rx_cleanup(sc); 363 364 return error; 365} 366 367void ath_rx_cleanup(struct ath_softc *sc) 368{ 369 struct ath_hw *ah = sc->sc_ah; 370 struct ath_common *common = ath9k_hw_common(ah); 371 struct sk_buff *skb; 372 struct ath_buf *bf; 373 374 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 375 ath_rx_edma_cleanup(sc); 376 return; 377 } else { 378 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 379 skb = bf->bf_mpdu; 380 if (skb) { 381 dma_unmap_single(sc->dev, bf->bf_buf_addr, 382 common->rx_bufsize, 383 DMA_FROM_DEVICE); 384 dev_kfree_skb(skb); 385 } 386 } 387 388 if (sc->rx.rxdma.dd_desc_len != 0) 389 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); 390 } 391} 392 393/* 394 * Calculate the receive filter according to the 395 * operating mode and state: 396 * 397 * o always accept unicast, broadcast, and multicast traffic 398 * o maintain current state of phy error reception (the hal 399 * may enable phy error frames for noise immunity work) 400 * o probe request frames are accepted only when operating in 401 * hostap, adhoc, or monitor modes 402 * o enable promiscuous mode according to the interface state 403 * o accept beacons: 404 * - when operating in adhoc mode so the 802.11 layer creates 405 * node table entries for peers, 406 * - when operating in station mode for collecting rssi data when 407 * the station is otherwise quiet, or 408 * - when operating as a repeater so we see repeater-sta beacons 409 * - when scanning 410 */ 411 412u32 ath_calcrxfilter(struct ath_softc *sc) 413{ 414#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) 415 416 u32 rfilt; 417 418 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) 419 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 420 | ATH9K_RX_FILTER_MCAST; 421 422 /* If not a STA, enable processing of Probe Requests */ 423 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) 424 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 425 426 /* 427 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station 428 * mode interface or when in monitor mode. AP mode does not need this 429 * since it receives all in-BSS frames anyway. 430 */ 431 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && 432 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || 433 (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) 434 rfilt |= ATH9K_RX_FILTER_PROM; 435 436 if (sc->rx.rxfilter & FIF_CONTROL) 437 rfilt |= ATH9K_RX_FILTER_CONTROL; 438 439 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 440 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) 441 rfilt |= ATH9K_RX_FILTER_MYBEACON; 442 else 443 rfilt |= ATH9K_RX_FILTER_BEACON; 444 445 if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) || 446 AR_SREV_9285_10_OR_LATER(sc->sc_ah)) && 447 (sc->sc_ah->opmode == NL80211_IFTYPE_AP) && 448 (sc->rx.rxfilter & FIF_PSPOLL)) 449 rfilt |= ATH9K_RX_FILTER_PSPOLL; 450 451 if (conf_is_ht(&sc->hw->conf)) 452 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 453 454 if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) { 455 /* TODO: only needed if more than one BSSID is in use in 456 * station/adhoc mode */ 457 /* The following may also be needed for other older chips */ 458 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 459 rfilt |= ATH9K_RX_FILTER_PROM; 460 rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; 461 } 462 463 return rfilt; 464 465#undef RX_FILTER_PRESERVE 466} 467 468int ath_startrecv(struct ath_softc *sc) 469{ 470 struct ath_hw *ah = sc->sc_ah; 471 struct ath_buf *bf, *tbf; 472 473 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 474 ath_edma_start_recv(sc); 475 return 0; 476 } 477 478 spin_lock_bh(&sc->rx.rxbuflock); 479 if (list_empty(&sc->rx.rxbuf)) 480 goto start_recv; 481 482 sc->rx.rxlink = NULL; 483 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { 484 ath_rx_buf_link(sc, bf); 485 } 486 487 /* We could have deleted elements so the list may be empty now */ 488 if (list_empty(&sc->rx.rxbuf)) 489 goto start_recv; 490 491 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 492 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 493 ath9k_hw_rxena(ah); 494 495start_recv: 496 ath_opmode_init(sc); 497 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); 498 499 spin_unlock_bh(&sc->rx.rxbuflock); 500 501 return 0; 502} 503 504bool ath_stoprecv(struct ath_softc *sc) 505{ 506 struct ath_hw *ah = sc->sc_ah; 507 bool stopped; 508 509 spin_lock_bh(&sc->rx.rxbuflock); 510 ath9k_hw_abortpcurecv(ah); 511 ath9k_hw_setrxfilter(ah, 0); 512 stopped = ath9k_hw_stopdmarecv(ah); 513 514 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 515 ath_edma_stop_recv(sc); 516 else 517 sc->rx.rxlink = NULL; 518 spin_unlock_bh(&sc->rx.rxbuflock); 519 520 return stopped; 521} 522 523void ath_flushrecv(struct ath_softc *sc) 524{ 525 sc->sc_flags |= SC_OP_RXFLUSH; 526 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 527 ath_rx_tasklet(sc, 1, true); 528 ath_rx_tasklet(sc, 1, false); 529 sc->sc_flags &= ~SC_OP_RXFLUSH; 530} 531 532static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 533{ 534 /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ 535 struct ieee80211_mgmt *mgmt; 536 u8 *pos, *end, id, elen; 537 struct ieee80211_tim_ie *tim; 538 539 mgmt = (struct ieee80211_mgmt *)skb->data; 540 pos = mgmt->u.beacon.variable; 541 end = skb->data + skb->len; 542 543 while (pos + 2 < end) { 544 id = *pos++; 545 elen = *pos++; 546 if (pos + elen > end) 547 break; 548 549 if (id == WLAN_EID_TIM) { 550 if (elen < sizeof(*tim)) 551 break; 552 tim = (struct ieee80211_tim_ie *) pos; 553 if (tim->dtim_count != 0) 554 break; 555 return tim->bitmap_ctrl & 0x01; 556 } 557 558 pos += elen; 559 } 560 561 return false; 562} 563 564static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 565{ 566 struct ieee80211_mgmt *mgmt; 567 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 568 569 if (skb->len < 24 + 8 + 2 + 2) 570 return; 571 572 mgmt = (struct ieee80211_mgmt *)skb->data; 573 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) 574 return; /* not from our current AP */ 575 576 sc->ps_flags &= ~PS_WAIT_FOR_BEACON; 577 578 if (sc->ps_flags & PS_BEACON_SYNC) { 579 sc->ps_flags &= ~PS_BEACON_SYNC; 580 ath_print(common, ATH_DBG_PS, 581 "Reconfigure Beacon timers based on " 582 "timestamp from the AP\n"); 583 ath_beacon_config(sc, NULL); 584 } 585 586 if (ath_beacon_dtim_pending_cab(skb)) { 587 /* 588 * Remain awake waiting for buffered broadcast/multicast 589 * frames. If the last broadcast/multicast frame is not 590 * received properly, the next beacon frame will work as 591 * a backup trigger for returning into NETWORK SLEEP state, 592 * so we are waiting for it as well. 593 */ 594 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating " 595 "buffered broadcast/multicast frame(s)\n"); 596 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; 597 return; 598 } 599 600 if (sc->ps_flags & PS_WAIT_FOR_CAB) { 601 /* 602 * This can happen if a broadcast frame is dropped or the AP 603 * fails to send a frame indicating that all CAB frames have 604 * been delivered. 605 */ 606 sc->ps_flags &= ~PS_WAIT_FOR_CAB; 607 ath_print(common, ATH_DBG_PS, 608 "PS wait for CAB frames timed out\n"); 609 } 610} 611 612static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) 613{ 614 struct ieee80211_hdr *hdr; 615 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 616 617 hdr = (struct ieee80211_hdr *)skb->data; 618 619 /* Process Beacon and CAB receive in PS state */ 620 if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 621 && ieee80211_is_beacon(hdr->frame_control)) 622 ath_rx_ps_beacon(sc, skb); 623 else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 624 (ieee80211_is_data(hdr->frame_control) || 625 ieee80211_is_action(hdr->frame_control)) && 626 is_multicast_ether_addr(hdr->addr1) && 627 !ieee80211_has_moredata(hdr->frame_control)) { 628 /* 629 * No more broadcast/multicast frames to be received at this 630 * point. 631 */ 632 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); 633 ath_print(common, ATH_DBG_PS, 634 "All PS CAB frames received, back to sleep\n"); 635 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 636 !is_multicast_ether_addr(hdr->addr1) && 637 !ieee80211_has_morefrags(hdr->frame_control)) { 638 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; 639 ath_print(common, ATH_DBG_PS, 640 "Going back to sleep after having received " 641 "PS-Poll data (0x%lx)\n", 642 sc->ps_flags & (PS_WAIT_FOR_BEACON | 643 PS_WAIT_FOR_CAB | 644 PS_WAIT_FOR_PSPOLL_DATA | 645 PS_WAIT_FOR_TX_ACK)); 646 } 647} 648 649static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw, 650 struct ath_softc *sc, struct sk_buff *skb, 651 struct ieee80211_rx_status *rxs) 652{ 653 struct ieee80211_hdr *hdr; 654 655 hdr = (struct ieee80211_hdr *)skb->data; 656 657 /* Send the frame to mac80211 */ 658 if (is_multicast_ether_addr(hdr->addr1)) { 659 int i; 660 /* 661 * Deliver broadcast/multicast frames to all suitable 662 * virtual wiphys. 663 */ 664 /* TODO: filter based on channel configuration */ 665 for (i = 0; i < sc->num_sec_wiphy; i++) { 666 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 667 struct sk_buff *nskb; 668 if (aphy == NULL) 669 continue; 670 nskb = skb_copy(skb, GFP_ATOMIC); 671 if (!nskb) 672 continue; 673 ieee80211_rx(aphy->hw, nskb); 674 } 675 ieee80211_rx(sc->hw, skb); 676 } else 677 /* Deliver unicast frames based on receiver address */ 678 ieee80211_rx(hw, skb); 679} 680 681static bool ath_edma_get_buffers(struct ath_softc *sc, 682 enum ath9k_rx_qtype qtype) 683{ 684 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 685 struct ath_hw *ah = sc->sc_ah; 686 struct ath_common *common = ath9k_hw_common(ah); 687 struct sk_buff *skb; 688 struct ath_buf *bf; 689 int ret; 690 691 skb = skb_peek(&rx_edma->rx_fifo); 692 if (!skb) 693 return false; 694 695 bf = SKB_CB_ATHBUF(skb); 696 BUG_ON(!bf); 697 698 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 699 common->rx_bufsize, DMA_FROM_DEVICE); 700 701 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data); 702 if (ret == -EINPROGRESS) { 703 /*let device gain the buffer again*/ 704 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 705 common->rx_bufsize, DMA_FROM_DEVICE); 706 return false; 707 } 708 709 __skb_unlink(skb, &rx_edma->rx_fifo); 710 if (ret == -EINVAL) { 711 /* corrupt descriptor, skip this one and the following one */ 712 list_add_tail(&bf->list, &sc->rx.rxbuf); 713 ath_rx_edma_buf_link(sc, qtype); 714 skb = skb_peek(&rx_edma->rx_fifo); 715 if (!skb) 716 return true; 717 718 bf = SKB_CB_ATHBUF(skb); 719 BUG_ON(!bf); 720 721 __skb_unlink(skb, &rx_edma->rx_fifo); 722 list_add_tail(&bf->list, &sc->rx.rxbuf); 723 ath_rx_edma_buf_link(sc, qtype); 724 return true; 725 } 726 skb_queue_tail(&rx_edma->rx_buffers, skb); 727 728 return true; 729} 730 731static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc, 732 struct ath_rx_status *rs, 733 enum ath9k_rx_qtype qtype) 734{ 735 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 736 struct sk_buff *skb; 737 struct ath_buf *bf; 738 739 while (ath_edma_get_buffers(sc, qtype)); 740 skb = __skb_dequeue(&rx_edma->rx_buffers); 741 if (!skb) 742 return NULL; 743 744 bf = SKB_CB_ATHBUF(skb); 745 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data); 746 return bf; 747} 748 749static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, 750 struct ath_rx_status *rs) 751{ 752 struct ath_hw *ah = sc->sc_ah; 753 struct ath_common *common = ath9k_hw_common(ah); 754 struct ath_desc *ds; 755 struct ath_buf *bf; 756 int ret; 757 758 if (list_empty(&sc->rx.rxbuf)) { 759 sc->rx.rxlink = NULL; 760 return NULL; 761 } 762 763 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 764 ds = bf->bf_desc; 765 766 /* 767 * Must provide the virtual address of the current 768 * descriptor, the physical address, and the virtual 769 * address of the next descriptor in the h/w chain. 770 * This allows the HAL to look ahead to see if the 771 * hardware is done with a descriptor by checking the 772 * done bit in the following descriptor and the address 773 * of the current descriptor the DMA engine is working 774 * on. All this is necessary because of our use of 775 * a self-linked list to avoid rx overruns. 776 */ 777 ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0); 778 if (ret == -EINPROGRESS) { 779 struct ath_rx_status trs; 780 struct ath_buf *tbf; 781 struct ath_desc *tds; 782 783 memset(&trs, 0, sizeof(trs)); 784 if (list_is_last(&bf->list, &sc->rx.rxbuf)) { 785 sc->rx.rxlink = NULL; 786 return NULL; 787 } 788 789 tbf = list_entry(bf->list.next, struct ath_buf, list); 790 791 /* 792 * On some hardware the descriptor status words could 793 * get corrupted, including the done bit. Because of 794 * this, check if the next descriptor's done bit is 795 * set or not. 796 * 797 * If the next descriptor's done bit is set, the current 798 * descriptor has been corrupted. Force s/w to discard 799 * this descriptor and continue... 800 */ 801 802 tds = tbf->bf_desc; 803 ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0); 804 if (ret == -EINPROGRESS) 805 return NULL; 806 } 807 808 if (!bf->bf_mpdu) 809 return bf; 810 811 /* 812 * Synchronize the DMA transfer with CPU before 813 * 1. accessing the frame 814 * 2. requeueing the same buffer to h/w 815 */ 816 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 817 common->rx_bufsize, 818 DMA_FROM_DEVICE); 819 820 return bf; 821} 822 823/* Assumes you've already done the endian to CPU conversion */ 824static bool ath9k_rx_accept(struct ath_common *common, 825 struct ieee80211_hdr *hdr, 826 struct ieee80211_rx_status *rxs, 827 struct ath_rx_status *rx_stats, 828 bool *decrypt_error) 829{ 830 struct ath_hw *ah = common->ah; 831 __le16 fc; 832 u8 rx_status_len = ah->caps.rx_status_len; 833 834 fc = hdr->frame_control; 835 836 if (!rx_stats->rs_datalen) 837 return false; 838 /* 839 * rs_status follows rs_datalen so if rs_datalen is too large 840 * we can take a hint that hardware corrupted it, so ignore 841 * those frames. 842 */ 843 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) 844 return false; 845 846 /* 847 * rs_more indicates chained descriptors which can be used 848 * to link buffers together for a sort of scatter-gather 849 * operation. 850 * reject the frame, we don't support scatter-gather yet and 851 * the frame is probably corrupt anyway 852 */ 853 if (rx_stats->rs_more) 854 return false; 855 856 /* 857 * The rx_stats->rs_status will not be set until the end of the 858 * chained descriptors so it can be ignored if rs_more is set. The 859 * rs_more will be false at the last element of the chained 860 * descriptors. 861 */ 862 if (rx_stats->rs_status != 0) { 863 if (rx_stats->rs_status & ATH9K_RXERR_CRC) 864 rxs->flag |= RX_FLAG_FAILED_FCS_CRC; 865 if (rx_stats->rs_status & ATH9K_RXERR_PHY) 866 return false; 867 868 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { 869 *decrypt_error = true; 870 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { 871 /* 872 * The MIC error bit is only valid if the frame 873 * is not a control frame or fragment, and it was 874 * decrypted using a valid TKIP key. 875 */ 876 if (!ieee80211_is_ctl(fc) && 877 !ieee80211_has_morefrags(fc) && 878 !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 879 test_bit(rx_stats->rs_keyix, common->tkip_keymap)) 880 rxs->flag |= RX_FLAG_MMIC_ERROR; 881 else 882 rx_stats->rs_status &= ~ATH9K_RXERR_MIC; 883 } 884 /* 885 * Reject error frames with the exception of 886 * decryption and MIC failures. For monitor mode, 887 * we also ignore the CRC error. 888 */ 889 if (ah->opmode == NL80211_IFTYPE_MONITOR) { 890 if (rx_stats->rs_status & 891 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 892 ATH9K_RXERR_CRC)) 893 return false; 894 } else { 895 if (rx_stats->rs_status & 896 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { 897 return false; 898 } 899 } 900 } 901 return true; 902} 903 904static int ath9k_process_rate(struct ath_common *common, 905 struct ieee80211_hw *hw, 906 struct ath_rx_status *rx_stats, 907 struct ieee80211_rx_status *rxs) 908{ 909 struct ieee80211_supported_band *sband; 910 enum ieee80211_band band; 911 unsigned int i = 0; 912 913 band = hw->conf.channel->band; 914 sband = hw->wiphy->bands[band]; 915 916 if (rx_stats->rs_rate & 0x80) { 917 /* HT rate */ 918 rxs->flag |= RX_FLAG_HT; 919 if (rx_stats->rs_flags & ATH9K_RX_2040) 920 rxs->flag |= RX_FLAG_40MHZ; 921 if (rx_stats->rs_flags & ATH9K_RX_GI) 922 rxs->flag |= RX_FLAG_SHORT_GI; 923 rxs->rate_idx = rx_stats->rs_rate & 0x7f; 924 return 0; 925 } 926 927 for (i = 0; i < sband->n_bitrates; i++) { 928 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { 929 rxs->rate_idx = i; 930 return 0; 931 } 932 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { 933 rxs->flag |= RX_FLAG_SHORTPRE; 934 rxs->rate_idx = i; 935 return 0; 936 } 937 } 938 939 /* 940 * No valid hardware bitrate found -- we should not get here 941 * because hardware has already validated this frame as OK. 942 */ 943 ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected " 944 "0x%02x using 1 Mbit\n", rx_stats->rs_rate); 945 946 return -EINVAL; 947} 948 949static void ath9k_process_rssi(struct ath_common *common, 950 struct ieee80211_hw *hw, 951 struct ieee80211_hdr *hdr, 952 struct ath_rx_status *rx_stats) 953{ 954 struct ath_hw *ah = common->ah; 955 struct ieee80211_sta *sta; 956 struct ath_node *an; 957 int last_rssi = ATH_RSSI_DUMMY_MARKER; 958 __le16 fc; 959 960 fc = hdr->frame_control; 961 962 rcu_read_lock(); 963 sta = ieee80211_find_sta_by_hw(hw, hdr->addr2); 964 if (sta) { 965 an = (struct ath_node *) sta->drv_priv; 966 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && 967 !rx_stats->rs_moreaggr) 968 ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi); 969 last_rssi = an->last_rssi; 970 } 971 rcu_read_unlock(); 972 973 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 974 rx_stats->rs_rssi = ATH_EP_RND(last_rssi, 975 ATH_RSSI_EP_MULTIPLIER); 976 if (rx_stats->rs_rssi < 0) 977 rx_stats->rs_rssi = 0; 978 979 /* Update Beacon RSSI, this is used by ANI. */ 980 if (ieee80211_is_beacon(fc)) 981 ah->stats.avgbrssi = rx_stats->rs_rssi; 982} 983 984/* 985 * For Decrypt or Demic errors, we only mark packet status here and always push 986 * up the frame up to let mac80211 handle the actual error case, be it no 987 * decryption key or real decryption error. This let us keep statistics there. 988 */ 989static int ath9k_rx_skb_preprocess(struct ath_common *common, 990 struct ieee80211_hw *hw, 991 struct ieee80211_hdr *hdr, 992 struct ath_rx_status *rx_stats, 993 struct ieee80211_rx_status *rx_status, 994 bool *decrypt_error) 995{ 996 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 997 998 /* 999 * everything but the rate is checked here, the rate check is done 1000 * separately to avoid doing two lookups for a rate for each frame. 1001 */ 1002 if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 1003 return -EINVAL; 1004 1005 ath9k_process_rssi(common, hw, hdr, rx_stats); 1006 1007 if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 1008 return -EINVAL; 1009 1010 rx_status->band = hw->conf.channel->band; 1011 rx_status->freq = hw->conf.channel->center_freq; 1012 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; 1013 rx_status->antenna = rx_stats->rs_antenna; 1014 rx_status->flag |= RX_FLAG_TSFT; 1015 1016 return 0; 1017} 1018 1019static void ath9k_rx_skb_postprocess(struct ath_common *common, 1020 struct sk_buff *skb, 1021 struct ath_rx_status *rx_stats, 1022 struct ieee80211_rx_status *rxs, 1023 bool decrypt_error) 1024{ 1025 struct ath_hw *ah = common->ah; 1026 struct ieee80211_hdr *hdr; 1027 int hdrlen, padpos, padsize; 1028 u8 keyix; 1029 __le16 fc; 1030 bool is_mc; 1031 1032 /* see if any padding is done by the hw and remove it */ 1033 hdr = (struct ieee80211_hdr *) skb->data; 1034 is_mc = !!is_multicast_ether_addr(hdr->addr1); 1035 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1036 fc = hdr->frame_control; 1037 padpos = ath9k_cmn_padpos(hdr->frame_control); 1038 1039 /* The MAC header is padded to have 32-bit boundary if the 1040 * packet payload is non-zero. The general calculation for 1041 * padsize would take into account odd header lengths: 1042 * padsize = (4 - padpos % 4) % 4; However, since only 1043 * even-length headers are used, padding can only be 0 or 2 1044 * bytes and we can optimize this a bit. In addition, we must 1045 * not try to remove padding from short control frames that do 1046 * not have payload. */ 1047 padsize = padpos & 3; 1048 if (padsize && skb->len>=padpos+padsize+FCS_LEN) { 1049 memmove(skb->data + padsize, skb->data, padpos); 1050 skb_pull(skb, padsize); 1051 } 1052 1053 keyix = rx_stats->rs_keyix; 1054 1055 if ((is_mc || !(keyix == ATH9K_RXKEYIX_INVALID)) && !decrypt_error && 1056 ieee80211_has_protected(fc)) { 1057 rxs->flag |= RX_FLAG_DECRYPTED; 1058 } else if (ieee80211_has_protected(fc) 1059 && !decrypt_error && skb->len >= hdrlen + 4) { 1060 keyix = skb->data[hdrlen + 3] >> 6; 1061 1062 if (test_bit(keyix, common->keymap)) 1063 rxs->flag |= RX_FLAG_DECRYPTED; 1064 } 1065 if (ah->sw_mgmt_crypto && 1066 (rxs->flag & RX_FLAG_DECRYPTED) && 1067 ieee80211_is_mgmt(fc)) 1068 /* Use software decrypt for management frames. */ 1069 rxs->flag &= ~RX_FLAG_DECRYPTED; 1070} 1071 1072int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1073{ 1074 struct ath_buf *bf; 1075 struct sk_buff *skb = NULL, *requeue_skb; 1076 struct ieee80211_rx_status *rxs; 1077 struct ath_hw *ah = sc->sc_ah; 1078 struct ath_common *common = ath9k_hw_common(ah); 1079 /* 1080 * The hw can techncically differ from common->hw when using ath9k 1081 * virtual wiphy so to account for that we iterate over the active 1082 * wiphys and find the appropriate wiphy and therefore hw. 1083 */ 1084 struct ieee80211_hw *hw = NULL; 1085 struct ieee80211_hdr *hdr; 1086 int retval; 1087 bool decrypt_error = false; 1088 struct ath_rx_status rs; 1089 enum ath9k_rx_qtype qtype; 1090 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1091 int dma_type; 1092 u8 rx_status_len = ah->caps.rx_status_len; 1093 u64 tsf = 0; 1094 u32 tsf_lower = 0; 1095 unsigned long flags; 1096 1097 if (edma) 1098 dma_type = DMA_BIDIRECTIONAL; 1099 else 1100 dma_type = DMA_FROM_DEVICE; 1101 1102 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1103 spin_lock_bh(&sc->rx.rxbuflock); 1104 1105 tsf = ath9k_hw_gettsf64(ah); 1106 tsf_lower = tsf & 0xffffffff; 1107 1108 do { 1109 /* If handling rx interrupt and flush is in progress => exit */ 1110 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 1111 break; 1112 1113 memset(&rs, 0, sizeof(rs)); 1114 if (edma) 1115 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); 1116 else 1117 bf = ath_get_next_rx_buf(sc, &rs); 1118 1119 if (!bf) 1120 break; 1121 1122 skb = bf->bf_mpdu; 1123 if (!skb) 1124 continue; 1125 1126 hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len); 1127 rxs = IEEE80211_SKB_RXCB(skb); 1128 1129 hw = ath_get_virt_hw(sc, hdr); 1130 1131 ath_debug_stat_rx(sc, &rs); 1132 1133 /* 1134 * If we're asked to flush receive queue, directly 1135 * chain it back at the queue without processing it. 1136 */ 1137 if (flush) 1138 goto requeue; 1139 1140 retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, 1141 rxs, &decrypt_error); 1142 if (retval) 1143 goto requeue; 1144 1145 rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; 1146 if (rs.rs_tstamp > tsf_lower && 1147 unlikely(rs.rs_tstamp - tsf_lower > 0x10000000)) 1148 rxs->mactime -= 0x100000000ULL; 1149 1150 if (rs.rs_tstamp < tsf_lower && 1151 unlikely(tsf_lower - rs.rs_tstamp > 0x10000000)) 1152 rxs->mactime += 0x100000000ULL; 1153 1154 /* Ensure we always have an skb to requeue once we are done 1155 * processing the current buffer's skb */ 1156 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1157 1158 /* If there is no memory we ignore the current RX'd frame, 1159 * tell hardware it can give us a new frame using the old 1160 * skb and put it at the tail of the sc->rx.rxbuf list for 1161 * processing. */ 1162 if (!requeue_skb) 1163 goto requeue; 1164 1165 /* Unmap the frame */ 1166 dma_unmap_single(sc->dev, bf->bf_buf_addr, 1167 common->rx_bufsize, 1168 dma_type); 1169 1170 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len); 1171 if (ah->caps.rx_status_len) 1172 skb_pull(skb, ah->caps.rx_status_len); 1173 1174 ath9k_rx_skb_postprocess(common, skb, &rs, 1175 rxs, decrypt_error); 1176 1177 /* We will now give hardware our shiny new allocated skb */ 1178 bf->bf_mpdu = requeue_skb; 1179 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, 1180 common->rx_bufsize, 1181 dma_type); 1182 if (unlikely(dma_mapping_error(sc->dev, 1183 bf->bf_buf_addr))) { 1184 dev_kfree_skb_any(requeue_skb); 1185 bf->bf_mpdu = NULL; 1186 ath_print(common, ATH_DBG_FATAL, 1187 "dma_mapping_error() on RX\n"); 1188 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 1189 break; 1190 } 1191 bf->bf_dmacontext = bf->bf_buf_addr; 1192 1193 /* 1194 * change the default rx antenna if rx diversity chooses the 1195 * other antenna 3 times in a row. 1196 */ 1197 if (sc->rx.defant != rs.rs_antenna) { 1198 if (++sc->rx.rxotherant >= 3) 1199 ath_setdefantenna(sc, rs.rs_antenna); 1200 } else { 1201 sc->rx.rxotherant = 0; 1202 } 1203 1204 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1205 if (unlikely(ath9k_check_auto_sleep(sc) || 1206 (sc->ps_flags & (PS_WAIT_FOR_BEACON | 1207 PS_WAIT_FOR_CAB | 1208 PS_WAIT_FOR_PSPOLL_DATA)))) 1209 ath_rx_ps(sc, skb); 1210 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1211 1212 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 1213 1214requeue: 1215 if (edma) { 1216 list_add_tail(&bf->list, &sc->rx.rxbuf); 1217 ath_rx_edma_buf_link(sc, qtype); 1218 } else { 1219 list_move_tail(&bf->list, &sc->rx.rxbuf); 1220 ath_rx_buf_link(sc, bf); 1221 } 1222 } while (1); 1223 1224 spin_unlock_bh(&sc->rx.rxbuflock); 1225 1226 return 0; 1227} 1228