/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/*
 * dma.c - DMA and interrupt masking functions
 *
 * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
 * Also we setup interrupt mask register (IMR) and read the various interrupt
 * status registers (ISR).
 *
 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
 * number that resulted in the interrupt.
33 */ 34 35#include "ath5k.h" 36#include "reg.h" 37#include "debug.h" 38#include "base.h" 39 40/*********\ 41* Receive * 42\*********/ 43 44/** 45 * ath5k_hw_start_rx_dma - Start DMA receive 46 * 47 * @ah: The &struct ath5k_hw 48 */ 49void ath5k_hw_start_rx_dma(struct ath5k_hw *ah) 50{ 51 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR); 52 ath5k_hw_reg_read(ah, AR5K_CR); 53} 54 55/** 56 * ath5k_hw_stop_rx_dma - Stop DMA receive 57 * 58 * @ah: The &struct ath5k_hw 59 */ 60int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) 61{ 62 unsigned int i; 63 64 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR); 65 66 /* 67 * It may take some time to disable the DMA receive unit 68 */ 69 for (i = 1000; i > 0 && 70 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0; 71 i--) 72 udelay(10); 73 74 return i ? 0 : -EBUSY; 75} 76 77/** 78 * ath5k_hw_get_rxdp - Get RX Descriptor's address 79 * 80 * @ah: The &struct ath5k_hw 81 */ 82u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah) 83{ 84 return ath5k_hw_reg_read(ah, AR5K_RXDP); 85} 86 87void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) 88{ 89 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP); 90} 91 92 93/**********\ 94* Transmit * 95\**********/ 96 97/** 98 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue 99 * 100 * @ah: The &struct ath5k_hw 101 * @queue: The hw queue number 102 * 103 * Start DMA transmit for a specific queue and since 5210 doesn't have 104 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one 105 * queue for normal data and one queue for beacons). For queue setup 106 * on newer chips check out qcu.c. Returns -EINVAL if queue number is out 107 * of range or if queue is already disabled. 108 * 109 * NOTE: Must be called after setting up tx control descriptor for that 110 * queue (see below). 
 */
int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	if (ah->ah_version == AR5K_AR5210) {
		/* Read-modify-write the global control register */
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set the queue by type on 5210.
		 * NOTE(review): "TXEn & ~TXDn" folds to plain TXEn when the
		 * enable/disable bits are distinct — kept as written.
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			/* Beacon timer value valid + beacon DMA enable */
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			/* As above, plus queue-1 frame valid for CAB */
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
				AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}
		/* Start queue (read back to flush the write) */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}

/**
 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain queue so we don't
 * have any pending frames. Returns -EBUSY if we still have pending frames,
 * -EINVAL if queue number is out of range.
169 * 170 */ 171int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) 172{ 173 unsigned int i = 40; 174 u32 tx_queue, pending; 175 176 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 177 178 /* Return if queue is declared inactive */ 179 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) 180 return -EIO; 181 182 if (ah->ah_version == AR5K_AR5210) { 183 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); 184 185 /* 186 * Set by queue type 187 */ 188 switch (ah->ah_txq[queue].tqi_type) { 189 case AR5K_TX_QUEUE_DATA: 190 tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0; 191 break; 192 case AR5K_TX_QUEUE_BEACON: 193 case AR5K_TX_QUEUE_CAB: 194 tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1; 195 ath5k_hw_reg_write(ah, 0, AR5K_BSR); 196 break; 197 default: 198 return -EINVAL; 199 } 200 201 /* Stop queue */ 202 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); 203 ath5k_hw_reg_read(ah, AR5K_CR); 204 } else { 205 /* 206 * Schedule TX disable and wait until queue is empty 207 */ 208 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue); 209 210 /*Check for pending frames*/ 211 do { 212 pending = ath5k_hw_reg_read(ah, 213 AR5K_QUEUE_STATUS(queue)) & 214 AR5K_QCU_STS_FRMPENDCNT; 215 udelay(100); 216 } while (--i && pending); 217 218 /* For 2413+ order PCU to drop packets using 219 * QUIET mechanism */ 220 if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) && 221 pending){ 222 /* Set periodicity and duration */ 223 ath5k_hw_reg_write(ah, 224 AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)| 225 AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR), 226 AR5K_QUIET_CTL2); 227 228 /* Enable quiet period for current TSF */ 229 ath5k_hw_reg_write(ah, 230 AR5K_QUIET_CTL1_QT_EN | 231 AR5K_REG_SM(ath5k_hw_reg_read(ah, 232 AR5K_TSF_L32_5211) >> 10, 233 AR5K_QUIET_CTL1_NEXT_QT_TSF), 234 AR5K_QUIET_CTL1); 235 236 /* Force channel idle high */ 237 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211, 238 AR5K_DIAG_SW_CHANEL_IDLE_HIGH); 239 240 /* Wait a while and disable mechanism */ 241 udelay(200); 242 
AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1, 243 AR5K_QUIET_CTL1_QT_EN); 244 245 /* Re-check for pending frames */ 246 i = 40; 247 do { 248 pending = ath5k_hw_reg_read(ah, 249 AR5K_QUEUE_STATUS(queue)) & 250 AR5K_QCU_STS_FRMPENDCNT; 251 udelay(100); 252 } while (--i && pending); 253 254 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211, 255 AR5K_DIAG_SW_CHANEL_IDLE_HIGH); 256 } 257 258 /* Clear register */ 259 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD); 260 if (pending) 261 return -EBUSY; 262 } 263 264 /* TODO: Check for success on 5210 else return error */ 265 return 0; 266} 267 268u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) 269{ 270 u16 tx_reg; 271 272 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 273 274 /* 275 * Get the transmit queue descriptor pointer from the selected queue 276 */ 277 /*5210 doesn't have QCU*/ 278 if (ah->ah_version == AR5K_AR5210) { 279 switch (ah->ah_txq[queue].tqi_type) { 280 case AR5K_TX_QUEUE_DATA: 281 tx_reg = AR5K_NOQCU_TXDP0; 282 break; 283 case AR5K_TX_QUEUE_BEACON: 284 case AR5K_TX_QUEUE_CAB: 285 tx_reg = AR5K_NOQCU_TXDP1; 286 break; 287 default: 288 return 0xffffffff; 289 } 290 } else { 291 tx_reg = AR5K_QUEUE_TXDP(queue); 292 } 293 294 return ath5k_hw_reg_read(ah, tx_reg); 295} 296 297/** 298 * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue 299 * 300 * @ah: The &struct ath5k_hw 301 * @queue: The hw queue number 302 * 303 * Set TX descriptor's address for a specific queue. For 5210 we ignore 304 * the queue number and we use tx queue type since we only have 2 queues 305 * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue. 306 * For newer chips with QCU/DCU we just set the corresponding TXDP register. 307 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still 308 * active. 
 */
int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
	u16 tx_reg;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Set the transmit queue descriptor pointer register by type
	 * on 5210 (no QCU, so only two TXDP registers exist)
	 */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return -EINVAL;
		}
	} else {
		/*
		 * Set the transmit queue descriptor pointer for
		 * the selected queue on QCU for 5211+
		 * (this won't work if the queue is still active)
		 */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			return -EIO;

		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	/* Set descriptor pointer */
	ath5k_hw_reg_write(ah, phys_addr, tx_reg);

	return 0;
}

/**
 * ath5k_hw_update_tx_triglevel - Update TX FIFO trigger level
 *
 * @ah: The &struct ath5k_hw
 * @increase: true to raise the trigger level, false to lower it by one
 *
 * Read the current trigger level from TXCFG, decrement it (failing with
 * -EIO if it would drop below AR5K_TUNE_MIN_TX_FIFO_THRES) or move it
 * halfway toward AR5K_TUNE_MAX_TX_FIFO_THRES, then write it back.
 * Interrupts are masked for the duration and the previous mask is
 * restored before returning. Returns 0 on success.
 */
int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
	u32 trigger_level, imr;
	int ret = -EIO;

	/*
	 * Disable interrupts by setting the mask
	 * (keep the old mask so it can be restored below)
	 */
	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
			AR5K_TXCFG_TXFULL);

	if (!increase) {
		/* Refuse to go below the tuned minimum */
		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
			goto done;
	} else
		trigger_level +=
			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

	/*
	 * Update trigger level on success
	 * (5210 has a dedicated register; 5211+ use a TXCFG field)
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
	else
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
				AR5K_TXCFG_TXFULL, trigger_level);

	ret = 0;

done:
	/*
	 * Restore interrupt mask
	 */
	ath5k_hw_set_imr(ah, imr);

	return ret;
}

/*******************\
* Interrupt masking *
\*******************/

/**
 * ath5k_hw_is_intr_pending -
Check if we have pending interrupts
 *
 * @ah: The &struct ath5k_hw
 *
 * Check if we have pending interrupts to process. Returns 1 if we
 * have pending interrupts and 0 if we haven't.
 */
bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}

/**
 * ath5k_hw_get_isr - Get interrupt status
 *
 * @ah: The @struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 * interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the reason
 * for the interrupt by reading Primary Interrupt Status Register. Returns an
 * abstract interrupt status mask which is mostly ISR with some uncommon bits
 * being mapped on some standard non hw-specific positions
 * (check out &ath5k_int). Returns -ENODEV if the card reads back all-ones
 * (AR5K_INT_NOCARD, i.e. the hardware is gone), 0 otherwise.
 *
 * NOTE: We use read-and-clear register, so after this function is called ISR
 * is zeroed.
 */
int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data;

	/*
	 * Read interrupt status from the Interrupt Status register
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		data = ath5k_hw_reg_read(ah, AR5K_ISR);
		/* All-ones means the card is no longer responding */
		if (unlikely(data == AR5K_INT_NOCARD)) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	} else {
		/*
		 * Read interrupt status from Interrupt
		 * Status Register shadow copy (Read And Clear)
		 *
		 * Note: PISR/SISR Not available on 5210
		 */
		data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
		if (unlikely(data == AR5K_INT_NOCARD)) {
			*interrupt_mask = data;
			return -ENODEV;
		}
	}

	/*
	 * Get abstract interrupt mask (driver-compatible):
	 * keep only the common bits the driver has enabled
	 */
	*interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;

	if (ah->ah_version != AR5K_AR5210) {
		/* Secondary status (also read-and-clear) */
		u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);

		/*HIU = Host Interface Unit (PCI etc)*/
		if (unlikely(data & (AR5K_ISR_HIUERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*Beacon Not Ready*/
		if (unlikely(data & (AR5K_ISR_BNR)))
			*interrupt_mask |= AR5K_INT_BNR;

		/* Bus errors (master cycle abort / parity) are fatal too */
		if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
					AR5K_SISR2_DPERR |
					AR5K_SISR2_MCABT)))
			*interrupt_mask |= AR5K_INT_FATAL;

		if (data & AR5K_ISR_TIM)
			*interrupt_mask |= AR5K_INT_TIM;

		/* BCNMISC multiplexes several beacon-related causes;
		 * demux them from SISR2 */
		if (data & AR5K_ISR_BCNMISC) {
			if (sisr2 & AR5K_SISR2_TIM)
				*interrupt_mask |= AR5K_INT_TIM;
			if (sisr2 & AR5K_SISR2_DTIM)
				*interrupt_mask |= AR5K_INT_DTIM;
			if (sisr2 & AR5K_SISR2_DTIM_SYNC)
				*interrupt_mask |= AR5K_INT_DTIM_SYNC;
			if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
				*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
			if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
				*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
		}

		if (data & AR5K_ISR_RXDOPPLER)
			*interrupt_mask |= AR5K_INT_RX_DOPPLER;

		/* Per-queue events: record which queues fired in
		 * ah_txq_isr (accumulated, cleared by the consumer) */
		if (data & AR5K_ISR_QCBRORN) {
			*interrupt_mask |= AR5K_INT_QCBRORN;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
					AR5K_SISR3_QCBRORN);
		}
		if (data & AR5K_ISR_QCBRURN) {
			*interrupt_mask |= AR5K_INT_QCBRURN;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
					AR5K_SISR3_QCBRURN);
		}
		if (data & AR5K_ISR_QTRIG) {
			*interrupt_mask |= AR5K_INT_QTRIG;
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
					AR5K_SISR4_QTRIG);
		}

		if (data & AR5K_ISR_TXOK)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
					AR5K_SISR0_QCU_TXOK);

		if (data & AR5K_ISR_TXDESC)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
					AR5K_SISR0_QCU_TXDESC);

		if (data & AR5K_ISR_TXERR)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
					AR5K_SISR1_QCU_TXERR);

		if (data & AR5K_ISR_TXEOL)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
					AR5K_SISR1_QCU_TXEOL);

		if (data & AR5K_ISR_TXURN)
			ah->ah_txq_isr |= AR5K_REG_MS(
					ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
					AR5K_SISR2_QCU_TXURN);
	} else {
		/* 5210: all bus-error conditions live in the primary ISR */
		if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
				| AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

	}

	/*
	 * In case we didn't handle anything,
	 * print the register value.
	 */
	if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
		ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);

	return 0;
}

/**
 * ath5k_hw_set_imr - Set interrupt mask
 *
 * @ah: The &struct ath5k_hw
 * @new_mask: The new interrupt mask to be set
 *
 * Set the interrupt mask in hw to save interrupts. We do that by mapping
 * ath5k_int bits to hw-specific bits to remove abstraction and writing
 * Interrupt Mask Register. Returns the previous interrupt mask.
 */
enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
	enum ath5k_int old_mask, int_mask;

	old_mask = ah->ah_imr;

	/*
	 * Disable card interrupts to prevent any race conditions
	 * (they will be re-enabled afterwards if AR5K_INT GLOBAL
	 * is set again on the new mask).
	 */
	if (old_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	/*
	 * Add additional, chipset-dependent interrupt mask flags
	 * and write them to the IMR (interrupt mask register).
	 */
	int_mask = new_mask & AR5K_INT_COMMON;

	if (ah->ah_version != AR5K_AR5210) {
		/* Preserve per queue TXURN interrupt mask */
		u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
				& AR5K_SIMR2_QCU_TXURN;

		if (new_mask & AR5K_INT_FATAL) {
			int_mask |= AR5K_IMR_HIUERR;
			simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
				| AR5K_SIMR2_DPERR);
		}

		/*Beacon Not Ready*/
		/* NOTE(review): sets the abstract AR5K_INT_BNR flag rather
		 * than an AR5K_IMR_* hw bit — confirm the bit position
		 * matches the hardware IMR layout */
		if (new_mask & AR5K_INT_BNR)
			int_mask |= AR5K_INT_BNR;

		if (new_mask & AR5K_INT_TIM)
			int_mask |= AR5K_IMR_TIM;

		/* TIM is also mirrored into the secondary mask */
		if (new_mask & AR5K_INT_TIM)
			simr2 |= AR5K_SISR2_TIM;
		if (new_mask & AR5K_INT_DTIM)
			simr2 |= AR5K_SISR2_DTIM;
		if (new_mask & AR5K_INT_DTIM_SYNC)
			simr2 |= AR5K_SISR2_DTIM_SYNC;
		if (new_mask & AR5K_INT_BCN_TIMEOUT)
			simr2 |= AR5K_SISR2_BCN_TIMEOUT;
		if (new_mask & AR5K_INT_CAB_TIMEOUT)
			simr2 |= AR5K_SISR2_CAB_TIMEOUT;

		if (new_mask & AR5K_INT_RX_DOPPLER)
			int_mask |= AR5K_IMR_RXDOPPLER;

		/* Note: Per queue interrupt masks
		 * are set via reset_tx_queue (qcu.c) */
		ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
		ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

	} else {
		/* 5210: everything goes into the single IMR */
		if (new_mask & AR5K_INT_FATAL)
			int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
				| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

		ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
	}

	/* If RXNOFRM interrupt is masked disable it
	 * by setting AR5K_RXNOFRM to zero */
	if (!(new_mask & AR5K_INT_RXNOFRM))
		ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

	/* Store new interrupt mask */
	ah->ah_imr = new_mask;

	/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
	if (new_mask & AR5K_INT_GLOBAL) {
		ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
		ath5k_hw_reg_read(ah, AR5K_IER);
	}

	return old_mask;
}