/* ar9300_interrupts.c — FreeBSD revision 250007 */
1/*
2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
9 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
10 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
11 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
12 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
13 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
14 * PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "opt_ah.h"
18
19#ifdef AH_SUPPORT_AR9300
20
21#include "ah.h"
22#include "ah_internal.h"
23
24#include "ar9300/ar9300.h"
25#include "ar9300/ar9300reg.h"
26#include "ar9300/ar9300phy.h"
27
28/*
29 * Checks to see if an interrupt is pending on our NIC
30 *
31 * Returns: TRUE    if an interrupt is pending
32 *          FALSE   if not
33 */
34HAL_BOOL
35ar9300_is_interrupt_pending(struct ath_hal *ah)
36{
37    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
38    u_int32_t host_isr;
39
40    /*
41     * Some platforms trigger our ISR before applying power to
42     * the card, so make sure.
43     */
44    host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
45    if ((host_isr & AR_INTR_ASYNC_USED) && (host_isr != AR_INTR_SPURIOUS)) {
46        return AH_TRUE;
47    }
48
49    host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE));
50    if (AR_SREV_POSEIDON(ah)) {
51        sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
52    }
53    else if (AR_SREV_WASP(ah)) {
54        sync_en_def = AR9340_INTR_SYNC_DEFAULT;
55    }
56
57    if ((host_isr & (sync_en_def | AR_INTR_SYNC_MASK_GPIO)) &&
58        (host_isr != AR_INTR_SPURIOUS)) {
59        return AH_TRUE;
60    }
61
62    return AH_FALSE;
63}
64
65/*
66 * Reads the Interrupt Status Register value from the NIC, thus deasserting
67 * the interrupt line, and returns both the masked and unmasked mapped ISR
68 * values.  The value returned is mapped to abstract the hw-specific bit
69 * locations in the Interrupt Status Register.
70 *
71 * Returns: A hardware-abstracted bitmap of all non-masked-out
72 *          interrupts pending, as well as an unmasked value
73 */
/*
 * The MAP_ISR_S2_* values are shift counts used to translate AR_ISR_S2
 * status bits into their HAL_INT_* bit positions.  Whether the shift is
 * applied with ">>" or "<<" below depends on which side of the hardware
 * bit the corresponding HAL bit sits.
 */
#define MAP_ISR_S2_HAL_CST          6 /* Carrier sense timeout */
#define MAP_ISR_S2_HAL_GTT          6 /* Global transmit timeout */
#define MAP_ISR_S2_HAL_TIM          3 /* TIM */
#define MAP_ISR_S2_HAL_CABEND       0 /* CABEND */
#define MAP_ISR_S2_HAL_DTIMSYNC     7 /* DTIMSYNC */
#define MAP_ISR_S2_HAL_DTIM         7 /* DTIM */
#define MAP_ISR_S2_HAL_TSFOOR       4 /* Rx TSF out of range */
#define MAP_ISR_S2_HAL_BBPANIC      6 /* Panic watchdog IRQ from BB */
/*
 * Read and acknowledge all pending interrupt causes, translating them
 * into a hardware-independent HAL_INT bitmap returned through *masked.
 *
 * Parameters:
 *   masked - out: HAL_INT bitmap of interrupts found pending
 *   type   - interrupt delivery type; HAL_INT_MSI takes the per-vector
 *            fast path at the top of the function
 *   msi    - MSI vector number (only meaningful when type == HAL_INT_MSI)
 *   nortc  - if true, skip the RTC-domain AR_ISR read and just clear
 *            the async causes
 *
 * Returns AH_TRUE if anything was pending, AH_FALSE otherwise.
 */
HAL_BOOL
ar9300_get_pending_interrupts(
    struct ath_hal *ah,
    HAL_INT *masked,
    HAL_INT_TYPE type,
    u_int8_t msi,
    HAL_BOOL nortc)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    HAL_BOOL  ret_val = AH_TRUE;
    u_int32_t isr = 0;
    u_int32_t mask2 = 0;
    u_int32_t sync_cause = 0;
    u_int32_t async_cause;
    u_int32_t msi_pend_addr_mask = 0;
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

    *masked = 0;

    if (!nortc) {
        /*
         * Dedicated MSI vectors map 1:1 to a single cause: ack the
         * corresponding AR_ISR bits and return without the full scan.
         */
        if (HAL_INT_MSI == type) {
            if (msi == HAL_MSIVEC_RXHP) {
                OS_REG_WRITE(ah, AR_ISR, AR_ISR_HP_RXOK);
                *masked = HAL_INT_RXHP;
                goto end;
            } else if (msi == HAL_MSIVEC_RXLP) {
                OS_REG_WRITE(ah, AR_ISR,
                    (AR_ISR_LP_RXOK | AR_ISR_RXMINTR | AR_ISR_RXINTM));
                *masked = HAL_INT_RXLP;
                goto end;
            } else if (msi == HAL_MSIVEC_TX) {
                OS_REG_WRITE(ah, AR_ISR, AR_ISR_TXOK);
                *masked = HAL_INT_TX;
                goto end;
            } else if (msi == HAL_MSIVEC_MISC) {
                /*
                 * For the misc MSI event fall through and determine the cause.
                 */
            }
        }
    }

    /* Make sure mac interrupt is pending in async interrupt cause register */
    async_cause = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
    if (async_cause & AR_INTR_ASYNC_USED) {
        /*
         * RTC may not be on since it runs on a slow 32khz clock
         * so check its status to be sure
         */
        if (!nortc &&
            (OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
             AR_RTC_STATUS_ON)
        {
            isr = OS_REG_READ(ah, AR_ISR);
        }
    }

    /* Chip-specific set of synchronous causes we care about. */
    if (AR_SREV_POSEIDON(ah)) {
        sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
    }
    else if (AR_SREV_WASP(ah)) {
        sync_en_def = AR9340_INTR_SYNC_DEFAULT;
    }

    sync_cause =
        OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE)) &
        (sync_en_def | AR_INTR_SYNC_MASK_GPIO);

    /* Nothing pending anywhere: report false. */
    if (!isr && !sync_cause && !async_cause) {
        ret_val = AH_FALSE;
        goto end;
    }

    if (isr) {
        if (isr & AR_ISR_BCNMISC) {
            u_int32_t isr2;
            isr2 = OS_REG_READ(ah, AR_ISR_S2);

            /* Translate ISR bits to HAL values */
            mask2 |= ((isr2 & AR_ISR_S2_TIM) >> MAP_ISR_S2_HAL_TIM);
            mask2 |= ((isr2 & AR_ISR_S2_DTIM) >> MAP_ISR_S2_HAL_DTIM);
            mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >> MAP_ISR_S2_HAL_DTIMSYNC);
            mask2 |= ((isr2 & AR_ISR_S2_CABEND) >> MAP_ISR_S2_HAL_CABEND);
            mask2 |= ((isr2 & AR_ISR_S2_GTT) << MAP_ISR_S2_HAL_GTT);
            mask2 |= ((isr2 & AR_ISR_S2_CST) << MAP_ISR_S2_HAL_CST);
            mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >> MAP_ISR_S2_HAL_TSFOOR);
            mask2 |= ((isr2 & AR_ISR_S2_BBPANIC) >> MAP_ISR_S2_HAL_BBPANIC);

            if (!p_cap->hal_isr_rac_support) {
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to ISR_S2.
                 * This avoids a race condition where a new BCNMISC interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                OS_REG_WRITE(ah, AR_ISR_S2, isr2);
                isr &= ~AR_ISR_BCNMISC;
            }
        }

        /* Use AR_ISR_RAC only if chip supports it.
         * See EV61133 (missing interrupts due to ISR_RAC)
         */
        if (p_cap->hal_isr_rac_support) {
            isr = OS_REG_READ(ah, AR_ISR_RAC);
        }
        /*
         * All-ones reads back from a device that has gone away
         * (presumably ejected or powered off) -- report nothing.
         */
        if (isr == 0xffffffff) {
            *masked = 0;
            ret_val = AH_FALSE;
            goto end;
        }

        *masked = isr & HAL_INT_COMMON;

        /*
         * When interrupt mitigation is switched on, we fake a normal RX or TX
         * interrupt when we received a mitigated interrupt. This way, the upper
         * layer do not need to know about feature.
         */
        if (ahp->ah_intr_mitigation_rx) {
            /* Only Rx interrupt mitigation. No Tx intr. mitigation. */
            if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) {
                *masked |= HAL_INT_RXLP;
            }
        }
        if (ahp->ah_intr_mitigation_tx) {
            if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM)) {
                *masked |= HAL_INT_TX;
            }
        }

        /* Map low/high-priority Rx and Tx causes to their HAL bits. */
        if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR)) {
            *masked |= HAL_INT_RXLP;
        }
        if (isr & AR_ISR_HP_RXOK) {
            *masked |= HAL_INT_RXHP;
        }
        if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
            *masked |= HAL_INT_TX;

            if (!p_cap->hal_isr_rac_support) {
                u_int32_t s0, s1;
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to
                 * ISR_S0/S1.
                 * This avoids a race condition where a new interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                s0 = OS_REG_READ(ah, AR_ISR_S0);
                OS_REG_WRITE(ah, AR_ISR_S0, s0);
                s1 = OS_REG_READ(ah, AR_ISR_S1);
                OS_REG_WRITE(ah, AR_ISR_S1, s1);

                isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL);
            }
        }

        /*
         * Do not treat receive overflows as fatal for owl.
         */
        if (isr & AR_ISR_RXORN) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: receive FIFO overrun interrupt\n", __func__);
#endif
        }

#if 0
        /* XXX Verify if this is fixed for Osprey */
        if (!p_cap->hal_auto_sleep_support) {
            u_int32_t isr5 = OS_REG_READ(ah, AR_ISR_S5_S);
            if (isr5 & AR_ISR_S5_TIM_TIMER) {
                *masked |= HAL_INT_TIM_TIMER;
            }
        }
#endif
        if (isr & AR_ISR_GENTMR) {
            u_int32_t s5;

            if (p_cap->hal_isr_rac_support) {
                /* Use secondary shadow registers if using ISR_RAC */
                s5 = OS_REG_READ(ah, AR_ISR_S5_S);
            } else {
                s5 = OS_REG_READ(ah, AR_ISR_S5);
            }
            /*
             * NOTE(review): this inner test duplicates the enclosing
             * condition and is therefore always true here.
             */
            if (isr & AR_ISR_GENTMR) {

                HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                    "%s: GENTIMER, ISR_RAC=0x%x ISR_S2_S=0x%x\n", __func__,
                    isr, s5);
                ahp->ah_intr_gen_timer_trigger =
                    MS(s5, AR_ISR_S5_GENTIMER_TRIG);
                ahp->ah_intr_gen_timer_thresh =
                    MS(s5, AR_ISR_S5_GENTIMER_THRESH);
                if (ahp->ah_intr_gen_timer_trigger) {
                    *masked |= HAL_INT_GENTIMER;
                }
            }
            if (!p_cap->hal_isr_rac_support) {
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to ISR_S5.
                 * This avoids a race condition where a new interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                OS_REG_WRITE(ah, AR_ISR_S5, s5);
                isr &= ~AR_ISR_GENTMR;
            }
        }

        *masked |= mask2;

        if (!p_cap->hal_isr_rac_support) {
            /*
             * EV61133 (missing interrupts due to ISR_RAC):
             * If not using ISR_RAC, clear the interrupts we've read by
             * writing back ones in these locations to the primary ISR
             * (except for interrupts that have a secondary isr register -
             * see above).
             */
            OS_REG_WRITE(ah, AR_ISR, isr);

            /* Flush prior write */
            (void) OS_REG_READ(ah, AR_ISR);
        }

#ifdef AH_SUPPORT_AR9300
        /* Baseband watchdog fired: hand off to the panic handler now. */
        if (*masked & HAL_INT_BBPANIC) {
            ar9300_handle_bb_panic(ah);
        }
#endif
    }

    if (async_cause) {
        if (nortc) {
            /* RTC unavailable: just acknowledge the async causes. */
            OS_REG_WRITE(ah,
                AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR), async_cause);
            /* Flush prior write */
            (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR));
        } else {
#ifdef ATH_GPIO_USE_ASYNC_CAUSE
            if (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) {
                ahp->ah_gpio_cause = (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) >>
                                     AR_INTR_ASYNC_ENABLE_GPIO_S;
                *masked |= HAL_INT_GPIO;
            }
#endif
        }

#if ATH_SUPPORT_MCI
        /* MCI (BT coex) interrupt: capture raw and rx-message state. */
        if ((async_cause & AR_INTR_ASYNC_CAUSE_MCI) &&
            p_cap->hal_mci_support)
        {
            u_int32_t int_raw, int_rx_msg;

            int_rx_msg = OS_REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
            int_raw = OS_REG_READ(ah, AR_MCI_INTERRUPT_RAW);

            /* 0xdeadbeef reads indicate the MCI block is unreadable. */
            if ((int_raw == 0xdeadbeef) || (int_rx_msg == 0xdeadbeef))
            {
                HALDEBUG(ah, HAL_DEBUG_BT_COEX,
                    "(MCI) Get 0xdeadbeef during MCI int processing"
                    "new int_raw=0x%08x, new rx_msg_raw=0x%08x, "
                    "int_raw=0x%08x, rx_msg_raw=0x%08x\n",
                    int_raw, int_rx_msg, ahp->ah_mci_int_raw,
                    ahp->ah_mci_int_rx_msg);
            }
            else {
                /* Accumulate into, or initialize, the cached MCI state. */
                if (ahp->ah_mci_int_raw || ahp->ah_mci_int_rx_msg) {
                    ahp->ah_mci_int_rx_msg |= int_rx_msg;
                    ahp->ah_mci_int_raw |= int_raw;
                }
                else {
                    ahp->ah_mci_int_rx_msg = int_rx_msg;
                    ahp->ah_mci_int_raw = int_raw;
                }

                *masked |= HAL_INT_MCI;
                ahp->ah_mci_rx_status = OS_REG_READ(ah, AR_MCI_RX_STATUS);
                if (int_rx_msg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
                    ahp->ah_mci_cont_status =
                                    OS_REG_READ(ah, AR_MCI_CONT_STATUS);
                }
                /* Ack the MCI causes we just consumed. */
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
                    int_rx_msg);
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, int_raw);

                HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s:AR_INTR_SYNC_MCI\n", __func__);
            }
        }
#endif
    }

    if (sync_cause) {
        int host1_fatal, host1_perr, radm_cpl_timeout, local_timeout;

        /* Wasp (AR9340) uses different sync-cause bit positions. */
        host1_fatal = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_HOST1_FATAL : AR9300_INTR_SYNC_HOST1_FATAL;
        host1_perr = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_HOST1_PERR : AR9300_INTR_SYNC_HOST1_PERR;
        radm_cpl_timeout = AR_SREV_WASP(ah) ?
            0x0 : AR9300_INTR_SYNC_RADM_CPL_TIMEOUT;
        local_timeout = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_LOCAL_TIMEOUT : AR9300_INTR_SYNC_LOCAL_TIMEOUT;

        if (sync_cause & host1_fatal) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                "%s: received PCI FATAL interrupt\n", __func__);
#endif
           *masked |= HAL_INT_FATAL; /* Set FATAL INT flag here;*/
        }
        if (sync_cause & host1_perr) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                "%s: received PCI PERR interrupt\n", __func__);
#endif
        }

        if (sync_cause & radm_cpl_timeout) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
                __func__);

            /* Pulse the host-interface reset line, then flag fatal. */
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), AR_RC_HOSTIF);
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), 0);
            *masked |= HAL_INT_FATAL;
        }
        if (sync_cause & local_timeout) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
                __func__);
        }

#ifndef ATH_GPIO_USE_ASYNC_CAUSE
        if (sync_cause & AR_INTR_SYNC_MASK_GPIO) {
            ahp->ah_gpio_cause = (sync_cause & AR_INTR_SYNC_MASK_GPIO) >>
                                 AR_INTR_SYNC_ENABLE_GPIO_S;
            *masked |= HAL_INT_GPIO;
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_GPIO\n", __func__);
        }
#endif

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR), sync_cause);
        /* Flush prior write */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR));
    }

end:
    if (HAL_INT_MSI == type) {
        /*
         * WAR for Bug EV#75887
         * In normal case, SW read HOST_INTF_PCIE_MSI (0x40A4) and write
         * into ah_msi_reg.  Then use value of ah_msi_reg to set bit#25
         * when want to enable HW write the cfg_msi_pending.
         * Sometimes, driver get MSI interrupt before read 0x40a4 and
         * ah_msi_reg is initialization value (0x0).
         * We don't know why "MSI interrupt earlier than driver read" now...
         */
        if (!ahp->ah_msi_reg) {
            ahp->ah_msi_reg = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI));
        }
        if (AR_SREV_POSEIDON(ah)) {
            msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
        } else {
            msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
        }
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
            ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));

    }

    return ret_val;
}
465
466HAL_INT
467ar9300_get_interrupts(struct ath_hal *ah)
468{
469    return AH9300(ah)->ah_mask_reg;
470}
471
/*
 * Atomically enables NIC interrupts.  Interrupts are passed in
 * via the enumerated bitmask in ints.
 *
 * The sequence is: disable everything, reprogram the primary and
 * secondary interrupt mask registers from 'ints', then re-enable if
 * HAL_INT_GLOBAL is requested (subject to the IER reference count).
 * Returns the previous software interrupt mask.
 */
HAL_INT
ar9300_set_interrupts(struct ath_hal *ah, HAL_INT ints, HAL_BOOL nortc)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    u_int32_t omask = ahp->ah_mask_reg;   /* previous mask, returned below */
    u_int32_t mask, mask2, msi_mask = 0;
    u_int32_t msi_pend_addr_mask = 0;
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

    HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
        "%s: 0x%x => 0x%x\n", __func__, omask, ints);

    /*
     * If interrupts were globally enabled, disable every enable
     * register (with read-back flushes) before touching the masks.
     */
    if (omask & HAL_INT_GLOBAL) {
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: disable IER\n", __func__);

        if (AH_PRIVATE(ah)->ah_config.ath_hal_enable_msi) {
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE), 0);
            /* flush write to HW */
            (void)OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE));
        }

        if (!nortc) {
            OS_REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
            (void) OS_REG_READ(ah, AR_IER);   /* flush write to HW */
        }

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE), 0);
        /* flush write to HW */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE));
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), 0);
        /* flush write to HW */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE));
    }

    if (!nortc) {
        /* reference count for global IER */
        if (ints & HAL_INT_GLOBAL) {
#ifdef AH_DEBUG
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: Request HAL_INT_GLOBAL ENABLED\n", __func__);
            if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                    "%s: WARNING: ah_ier_ref_count is 0 "
                    "and attempting to enable IER\n",
                    __func__);
            }
#endif
            if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) > 0) {
                OS_ATOMIC_DEC(&ahp->ah_ier_ref_count);
            }
        } else {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: Request HAL_INT_GLOBAL DISABLED\n", __func__);
            OS_ATOMIC_INC(&ahp->ah_ier_ref_count);
        }
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
            "%s: ah_ier_ref_count = %d\n", __func__, ahp->ah_ier_ref_count);

        /*
         * Build the primary (mask), secondary (mask2) and MSI priority
         * (msi_mask) register values from the requested HAL_INT bits.
         */
        mask = ints & HAL_INT_COMMON;
        mask2 = 0;
        msi_mask = 0;

        if (ints & HAL_INT_TX) {
            /* With Tx mitigation on, the mitigated Tx bits replace TXOK. */
            if (ahp->ah_intr_mitigation_tx) {
                mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
            } else if (ahp->ah_tx_ok_interrupt_mask) {
                mask |= AR_IMR_TXOK;
            }
            msi_mask |= AR_INTR_PRIO_TX;
            if (ahp->ah_tx_err_interrupt_mask) {
                mask |= AR_IMR_TXERR;
            }
            if (ahp->ah_tx_eol_interrupt_mask) {
                mask |= AR_IMR_TXEOL;
            }
        }
        if (ints & HAL_INT_RX) {
            mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
            /* With Rx mitigation on, low-priority RXOK is replaced. */
            if (ahp->ah_intr_mitigation_rx) {
                mask &= ~(AR_IMR_RXOK_LP);
                mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
            } else {
                mask |= AR_IMR_RXOK_LP;
            }
            msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
            if (! p_cap->hal_auto_sleep_support) {
                mask |= AR_IMR_GENTMR;
            }
        }

        /* Beacon-related secondary interrupts all funnel through BCNMISC. */
        if (ints & (HAL_INT_BMISC)) {
            mask |= AR_IMR_BCNMISC;
            if (ints & HAL_INT_TIM) {
                mask2 |= AR_IMR_S2_TIM;
            }
            if (ints & HAL_INT_DTIM) {
                mask2 |= AR_IMR_S2_DTIM;
            }
            if (ints & HAL_INT_DTIMSYNC) {
                mask2 |= AR_IMR_S2_DTIMSYNC;
            }
            if (ints & HAL_INT_CABEND) {
                mask2 |= (AR_IMR_S2_CABEND);
            }
            if (ints & HAL_INT_TSFOOR) {
                mask2 |= AR_IMR_S2_TSFOOR;
            }
        }

        if (ints & (HAL_INT_GTT | HAL_INT_CST)) {
            mask |= AR_IMR_BCNMISC;
            if (ints & HAL_INT_GTT) {
                mask2 |= AR_IMR_S2_GTT;
            }
            if (ints & HAL_INT_CST) {
                mask2 |= AR_IMR_S2_CST;
            }
        }

        if (ints & HAL_INT_BBPANIC) {
            /* EV92527 - MAC secondary interrupt must enable AR_IMR_BCNMISC */
            mask |= AR_IMR_BCNMISC;
            mask2 |= AR_IMR_S2_BBPANIC;
        }

        if (ints & HAL_INT_GENTIMER) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: enabling gen timer\n", __func__);
            mask |= AR_IMR_GENTMR;
        }

        /* Write the new IMR and store off our SW copy. */
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: new IMR 0x%x\n", __func__, mask);
        OS_REG_WRITE(ah, AR_IMR, mask);
        /* Replace only the bits we manage in the S2 shadow copy. */
        ahp->ah_mask2Reg &= ~(AR_IMR_S2_TIM |
                        AR_IMR_S2_DTIM |
                        AR_IMR_S2_DTIMSYNC |
                        AR_IMR_S2_CABEND |
                        AR_IMR_S2_CABTO  |
                        AR_IMR_S2_TSFOOR |
                        AR_IMR_S2_GTT |
                        AR_IMR_S2_CST |
                        AR_IMR_S2_BBPANIC);
        ahp->ah_mask2Reg |= mask2;
        OS_REG_WRITE(ah, AR_IMR_S2, ahp->ah_mask2Reg );
        ahp->ah_mask_reg = ints;

        if (! p_cap->hal_auto_sleep_support) {
            if (ints & HAL_INT_TIM_TIMER) {
                OS_REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
            }
            else {
                OS_REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
            }
        }
    }

    /* Re-enable interrupts if they were enabled before. */
#if HAL_INTR_REFCOUNT_DISABLE
    if ((ints & HAL_INT_GLOBAL)) {
#else
    if ((ints & HAL_INT_GLOBAL) && (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0)) {
#endif
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: enable IER\n", __func__);

        if (!nortc) {
            OS_REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        }

        mask = AR_INTR_MAC_IRQ;
#ifdef ATH_GPIO_USE_ASYNC_CAUSE
        if (ints & HAL_INT_GPIO) {
            if (ahp->ah_gpio_mask) {
                mask |= SM(ahp->ah_gpio_mask, AR_INTR_ASYNC_MASK_GPIO);
            }
        }
#endif

#if ATH_SUPPORT_MCI
        if (ints & HAL_INT_MCI) {
            mask |= AR_INTR_ASYNC_MASK_MCI;
        }
#endif

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), mask);
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_MASK), mask);

        if (AH_PRIVATE(ah)->ah_config.ath_hal_enable_msi) {
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE),
                msi_mask);
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_MASK),
                msi_mask);
            if (AR_SREV_POSEIDON(ah)) {
                msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
            } else {
                msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
            }
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
                ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));
        }

        /*
         * debug - enable to see all synchronous interrupts status
         * Enable synchronous GPIO interrupts as well, since some async
         * GPIO interrupts don't wake the chip up.
         */
        mask = 0;
#ifndef ATH_GPIO_USE_ASYNC_CAUSE
        if (ints & HAL_INT_GPIO) {
            mask |= SM(ahp->ah_gpio_mask, AR_INTR_SYNC_MASK_GPIO);
        }
#endif
        /* Chip-specific set of synchronous causes to enable. */
        if (AR_SREV_POSEIDON(ah)) {
            sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
        }
        else if (AR_SREV_WASP(ah)) {
            sync_en_def = AR9340_INTR_SYNC_DEFAULT;
        }

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE),
            (sync_en_def | mask));
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_MASK),
            (sync_en_def | mask));

        HALDEBUG(ah,  HAL_DEBUG_INTERRUPT,
            "AR_IMR 0x%x IER 0x%x\n",
            OS_REG_READ(ah, AR_IMR), OS_REG_READ(ah, AR_IER));
    }

    return omask;
}
708
709void
710ar9300_set_intr_mitigation_timer(
711    struct ath_hal* ah,
712    HAL_INT_MITIGATION reg,
713    u_int32_t value)
714{
715#ifdef AR5416_INT_MITIGATION
716    switch (reg) {
717    case HAL_INT_THRESHOLD:
718        OS_REG_WRITE(ah, AR_MIRT, 0);
719        break;
720    case HAL_INT_RX_LASTPKT:
721        OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, value);
722        break;
723    case HAL_INT_RX_FIRSTPKT:
724        OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, value);
725        break;
726    case HAL_INT_TX_LASTPKT:
727        OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, value);
728        break;
729    case HAL_INT_TX_FIRSTPKT:
730        OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, value);
731        break;
732    default:
733        break;
734    }
735#endif
736}
737
738u_int32_t
739ar9300_get_intr_mitigation_timer(struct ath_hal* ah, HAL_INT_MITIGATION reg)
740{
741    u_int32_t val = 0;
742#ifdef AR5416_INT_MITIGATION
743    switch (reg) {
744    case HAL_INT_THRESHOLD:
745        val = OS_REG_READ(ah, AR_MIRT);
746        break;
747    case HAL_INT_RX_LASTPKT:
748        val = OS_REG_READ(ah, AR_RIMT) & 0xFFFF;
749        break;
750    case HAL_INT_RX_FIRSTPKT:
751        val = OS_REG_READ(ah, AR_RIMT) >> 16;
752        break;
753    case HAL_INT_TX_LASTPKT:
754        val = OS_REG_READ(ah, AR_TIMT) & 0xFFFF;
755        break;
756    case HAL_INT_TX_FIRSTPKT:
757        val = OS_REG_READ(ah, AR_TIMT) >> 16;
758        break;
759    default:
760        break;
761    }
762#endif
763    return val;
764}
765
766#endif /* AH_SUPPORT_AR9300 */
767
768