ar9300_interrupts.c revision 250008
/*
 * Copyright (c) 2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "opt_ah.h"

#include "ah.h"
#include "ah_internal.h"

#include "ar9300/ar9300.h"
#include "ar9300/ar9300reg.h"
#include "ar9300/ar9300phy.h"

/*
 * Checks to see if an interrupt is pending on our NIC
 *
 * Returns: TRUE    if an interrupt is pending
 *          FALSE   if not
 */
HAL_BOOL
ar9300_is_interrupt_pending(struct ath_hal *ah)
{
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    u_int32_t host_isr;

    /*
     * Some platforms trigger our ISR before applying power to
     * the card, so make sure.
     */
    host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
    if ((host_isr & AR_INTR_ASYNC_USED) && (host_isr != AR_INTR_SPURIOUS)) {
        return AH_TRUE;
    }

    host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE));
    if (AR_SREV_POSEIDON(ah)) {
        sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
    }
    else if (AR_SREV_WASP(ah)) {
        sync_en_def = AR9340_INTR_SYNC_DEFAULT;
    }

    if ((host_isr & (sync_en_def | AR_INTR_SYNC_MASK_GPIO)) &&
        (host_isr != AR_INTR_SPURIOUS)) {
        return AH_TRUE;
    }

    return AH_FALSE;
}

/*
 * Reads the Interrupt Status Register value from the NIC, thus deasserting
 * the interrupt line, and returns both the masked and unmasked mapped ISR
 * values.  The value returned is mapped to abstract the hw-specific bit
 * locations in the Interrupt Status Register.
 *
 * Returns: A hardware-abstracted bitmap of all non-masked-out
 *          interrupts pending, as well as an unmasked value
 */
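/*
 * Shift distances used below to relocate the AR_ISR_S2 status bits into
 * their corresponding HAL_INT_* bit positions.  Most sources are shifted
 * right; GTT and CST sit below their HAL counterparts and are shifted
 * left instead (see the mask2 translation in ar9300_get_pending_interrupts).
 */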
#define MAP_ISR_S2_HAL_CST          6 /* Carrier sense timeout */
#define MAP_ISR_S2_HAL_GTT          6 /* Global transmit timeout */
#define MAP_ISR_S2_HAL_TIM          3 /* TIM */
#define MAP_ISR_S2_HAL_CABEND       0 /* CABEND */
#define MAP_ISR_S2_HAL_DTIMSYNC     7 /* DTIMSYNC */
#define MAP_ISR_S2_HAL_DTIM         7 /* DTIM */
#define MAP_ISR_S2_HAL_TSFOOR       4 /* Rx TSF out of range */
#define MAP_ISR_S2_HAL_BBPANIC      6 /* Panic watchdog IRQ from BB */
HAL_BOOL
ar9300_get_pending_interrupts(
    struct ath_hal *ah,
    HAL_INT *masked,
    HAL_INT_TYPE type,
    u_int8_t msi,
    HAL_BOOL nortc)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    HAL_BOOL  ret_val = AH_TRUE;
    u_int32_t isr = 0;
    u_int32_t mask2 = 0;
    u_int32_t sync_cause = 0;
    u_int32_t async_cause;
    u_int32_t msi_pend_addr_mask = 0;
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

    *masked = 0;

    if (!nortc) {
        if (HAL_INT_MSI == type) {
            if (msi == HAL_MSIVEC_RXHP) {
                OS_REG_WRITE(ah, AR_ISR, AR_ISR_HP_RXOK);
                *masked = HAL_INT_RXHP;
                goto end;
            } else if (msi == HAL_MSIVEC_RXLP) {
                OS_REG_WRITE(ah, AR_ISR,
                    (AR_ISR_LP_RXOK | AR_ISR_RXMINTR | AR_ISR_RXINTM));
                *masked = HAL_INT_RXLP;
                goto end;
            } else if (msi == HAL_MSIVEC_TX) {
                OS_REG_WRITE(ah, AR_ISR, AR_ISR_TXOK);
                *masked = HAL_INT_TX;
                goto end;
            } else if (msi == HAL_MSIVEC_MISC) {
                /*
                 * For the misc MSI event fall through and determine the cause.
                 */
            }
        }
    }

    /* Make sure mac interrupt is pending in async interrupt cause register */
    async_cause = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
    if (async_cause & AR_INTR_ASYNC_USED) {
        /*
         * The RTC may not be on, since it runs on a slow 32 kHz clock,
         * so check its status to be sure.
         */
        if (!nortc &&
            (OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
             AR_RTC_STATUS_ON)
        {
            isr = OS_REG_READ(ah, AR_ISR);
        }
    }

    if (AR_SREV_POSEIDON(ah)) {
        sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
    }
    else if (AR_SREV_WASP(ah)) {
        sync_en_def = AR9340_INTR_SYNC_DEFAULT;
    }

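    /*
     * Only the sync interrupt sources we enable by default (plus GPIO)
     * are of interest here; everything else in AR_INTR_SYNC_CAUSE is
     * ignored.
     */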
    sync_cause =
        OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE)) &
        (sync_en_def | AR_INTR_SYNC_MASK_GPIO);

    if (!isr && !sync_cause && !async_cause) {
        ret_val = AH_FALSE;
        goto end;
    }

    HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
        "%s: isr=0x%x, sync_cause=0x%x, async_cause=0x%x\n",
        __func__,
        isr,
        sync_cause,
        async_cause);

    if (isr) {
        if (isr & AR_ISR_BCNMISC) {
            u_int32_t isr2;
            isr2 = OS_REG_READ(ah, AR_ISR_S2);

            /* Translate ISR bits to HAL values */
            mask2 |= ((isr2 & AR_ISR_S2_TIM) >> MAP_ISR_S2_HAL_TIM);
            mask2 |= ((isr2 & AR_ISR_S2_DTIM) >> MAP_ISR_S2_HAL_DTIM);
            mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >> MAP_ISR_S2_HAL_DTIMSYNC);
            mask2 |= ((isr2 & AR_ISR_S2_CABEND) >> MAP_ISR_S2_HAL_CABEND);
            mask2 |= ((isr2 & AR_ISR_S2_GTT) << MAP_ISR_S2_HAL_GTT);
            mask2 |= ((isr2 & AR_ISR_S2_CST) << MAP_ISR_S2_HAL_CST);
            mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >> MAP_ISR_S2_HAL_TSFOOR);
            mask2 |= ((isr2 & AR_ISR_S2_BBPANIC) >> MAP_ISR_S2_HAL_BBPANIC);

            if (!p_cap->halIsrRacSupport) {
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to ISR_S2.
                 * This avoids a race condition where a new BCNMISC interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                OS_REG_WRITE(ah, AR_ISR_S2, isr2);
                isr &= ~AR_ISR_BCNMISC;
            }
        }

        /* Use AR_ISR_RAC only if chip supports it.
         * See EV61133 (missing interrupts due to ISR_RAC)
         */
        if (p_cap->halIsrRacSupport) {
            isr = OS_REG_READ(ah, AR_ISR_RAC);
        }
        if (isr == 0xffffffff) {
            *masked = 0;
            ret_val = AH_FALSE;
            goto end;
        }

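        /* Copy over the status bits that map one-to-one onto HAL_INT_* values. */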
        *masked = isr & HAL_INT_COMMON;

        /*
         * When interrupt mitigation is switched on, we fake a normal RX or TX
         * interrupt when we receive a mitigated interrupt.  This way, the
         * upper layers do not need to know about the feature.
         */
        if (ahp->ah_intr_mitigation_rx) {
            /* Only Rx interrupt mitigation. No Tx intr. mitigation. */
            if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) {
                *masked |= HAL_INT_RXLP;
            }
        }
        if (ahp->ah_intr_mitigation_tx) {
            if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM)) {
                *masked |= HAL_INT_TX;
            }
        }

        if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR)) {
            *masked |= HAL_INT_RXLP;
        }
        if (isr & AR_ISR_HP_RXOK) {
            *masked |= HAL_INT_RXHP;
        }
        if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
            *masked |= HAL_INT_TX;

            if (!p_cap->halIsrRacSupport) {
                u_int32_t s0, s1;
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to
                 * ISR_S0/S1.
                 * This avoids a race condition where a new interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                s0 = OS_REG_READ(ah, AR_ISR_S0);
                OS_REG_WRITE(ah, AR_ISR_S0, s0);
                s1 = OS_REG_READ(ah, AR_ISR_S1);
                OS_REG_WRITE(ah, AR_ISR_S1, s1);

                isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL);
            }
        }

        /*
         * Do not treat receive overflows as fatal for owl.
         */
        if (isr & AR_ISR_RXORN) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: receive FIFO overrun interrupt\n", __func__);
#endif
        }

#if 0
        /* XXX Verify if this is fixed for Osprey */
        if (!p_cap->halAutoSleepSupport) {
            u_int32_t isr5 = OS_REG_READ(ah, AR_ISR_S5_S);
            if (isr5 & AR_ISR_S5_TIM_TIMER) {
                *masked |= HAL_INT_TIM_TIMER;
            }
        }
#endif
        if (isr & AR_ISR_GENTMR) {
            u_int32_t s5;

            if (p_cap->halIsrRacSupport) {
                /* Use secondary shadow registers if using ISR_RAC */
                s5 = OS_REG_READ(ah, AR_ISR_S5_S);
            } else {
                s5 = OS_REG_READ(ah, AR_ISR_S5);
            }
            if (isr & AR_ISR_GENTMR) {

                HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                    "%s: GENTIMER, ISR_RAC=0x%x ISR_S2_S=0x%x\n", __func__,
                    isr, s5);
                ahp->ah_intr_gen_timer_trigger =
                    MS(s5, AR_ISR_S5_GENTIMER_TRIG);
                ahp->ah_intr_gen_timer_thresh =
                    MS(s5, AR_ISR_S5_GENTIMER_THRESH);
                if (ahp->ah_intr_gen_timer_trigger) {
                    *masked |= HAL_INT_GENTIMER;
                }
            }
            if (!p_cap->halIsrRacSupport) {
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to ISR_S5.
                 * This avoids a race condition where a new interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                OS_REG_WRITE(ah, AR_ISR_S5, s5);
                isr &= ~AR_ISR_GENTMR;
            }
        }

        *masked |= mask2;

        if (!p_cap->halIsrRacSupport) {
            /*
             * EV61133 (missing interrupts due to ISR_RAC):
             * If not using ISR_RAC, clear the interrupts we've read by
             * writing back ones in these locations to the primary ISR
             * (except for interrupts that have a secondary isr register -
             * see above).
             */
            OS_REG_WRITE(ah, AR_ISR, isr);

            /* Flush prior write */
            (void) OS_REG_READ(ah, AR_ISR);
        }

#ifdef AH_SUPPORT_AR9300
        if (*masked & HAL_INT_BBPANIC) {
            ar9300_handle_bb_panic(ah);
        }
#endif
    }

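    /* Decode the asynchronous (host interface) causes: GPIO and MCI. */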
    if (async_cause) {
        if (nortc) {
            OS_REG_WRITE(ah,
                AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR), async_cause);
            /* Flush prior write */
            (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR));
        } else {
#ifdef ATH_GPIO_USE_ASYNC_CAUSE
            if (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) {
                ahp->ah_gpio_cause = (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) >>
                                     AR_INTR_ASYNC_ENABLE_GPIO_S;
                *masked |= HAL_INT_GPIO;
            }
#endif
        }

#if ATH_SUPPORT_MCI
        if ((async_cause & AR_INTR_ASYNC_CAUSE_MCI) &&
            p_cap->halMciSupport)
        {
            u_int32_t int_raw, int_rx_msg;

            int_rx_msg = OS_REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
            int_raw = OS_REG_READ(ah, AR_MCI_INTERRUPT_RAW);

            if ((int_raw == 0xdeadbeef) || (int_rx_msg == 0xdeadbeef))
            {
                HALDEBUG(ah, HAL_DEBUG_BT_COEX,
                    "(MCI) Got 0xdeadbeef during MCI int processing: "
                    "new int_raw=0x%08x, new rx_msg_raw=0x%08x, "
                    "int_raw=0x%08x, rx_msg_raw=0x%08x\n",
                    int_raw, int_rx_msg, ahp->ah_mci_int_raw,
                    ahp->ah_mci_int_rx_msg);
            }
            else {
                if (ahp->ah_mci_int_raw || ahp->ah_mci_int_rx_msg) {
                    ahp->ah_mci_int_rx_msg |= int_rx_msg;
                    ahp->ah_mci_int_raw |= int_raw;
                }
                else {
                    ahp->ah_mci_int_rx_msg = int_rx_msg;
                    ahp->ah_mci_int_raw = int_raw;
                }

                *masked |= HAL_INT_MCI;
                ahp->ah_mci_rx_status = OS_REG_READ(ah, AR_MCI_RX_STATUS);
                if (int_rx_msg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
                    ahp->ah_mci_cont_status =
                                    OS_REG_READ(ah, AR_MCI_CONT_STATUS);
                }
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
                    int_rx_msg);
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, int_raw);

                HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s:AR_INTR_SYNC_MCI\n", __func__);
            }
        }
#endif
    }

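    /*
     * Synchronous (host interface error) causes: PCI fatal/parity errors
     * and timeouts, plus optionally GPIO.  They are acknowledged in
     * AR_INTR_SYNC_CAUSE_CLR once processed.
     */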
    if (sync_cause) {
        int host1_fatal, host1_perr, radm_cpl_timeout, local_timeout;

        host1_fatal = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_HOST1_FATAL : AR9300_INTR_SYNC_HOST1_FATAL;
        host1_perr = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_HOST1_PERR : AR9300_INTR_SYNC_HOST1_PERR;
        radm_cpl_timeout = AR_SREV_WASP(ah) ?
            0x0 : AR9300_INTR_SYNC_RADM_CPL_TIMEOUT;
        local_timeout = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_LOCAL_TIMEOUT : AR9300_INTR_SYNC_LOCAL_TIMEOUT;

        if (sync_cause & host1_fatal) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                "%s: received PCI FATAL interrupt\n", __func__);
#endif
            *masked |= HAL_INT_FATAL; /* Set FATAL INT flag here;*/
        }
        if (sync_cause & host1_perr) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                "%s: received PCI PERR interrupt\n", __func__);
#endif
        }

        if (sync_cause & radm_cpl_timeout) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
                __func__);

            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), AR_RC_HOSTIF);
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), 0);
            *masked |= HAL_INT_FATAL;
        }
        if (sync_cause & local_timeout) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
                __func__);
        }

#ifndef ATH_GPIO_USE_ASYNC_CAUSE
        if (sync_cause & AR_INTR_SYNC_MASK_GPIO) {
            ahp->ah_gpio_cause = (sync_cause & AR_INTR_SYNC_MASK_GPIO) >>
                                 AR_INTR_SYNC_ENABLE_GPIO_S;
            *masked |= HAL_INT_GPIO;
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_GPIO\n", __func__);
        }
#endif

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR), sync_cause);
        /* Flush prior write */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR));
    }

end:
    if (HAL_INT_MSI == type) {
        /*
         * WAR for Bug EV#75887
         * Normally the driver reads HOST_INTF_PCIE_MSI (0x40A4) and caches
         * it in ah_msi_reg, then uses that value with bit 25 set when it
         * wants the HW to write cfg_msi_pending.
         * Sometimes an MSI interrupt arrives before 0x40a4 has been read,
         * so ah_msi_reg still holds its initial value (0x0).
         * It is not yet understood why the MSI interrupt can arrive before
         * the driver has read the register.
         */
        if (!ahp->ah_msi_reg) {
            ahp->ah_msi_reg = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI));
        }
        if (AR_SREV_POSEIDON(ah)) {
            msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
        } else {
            msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
        }
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
            ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));

    }

    return ret_val;
}

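/*
 * Return the last interrupt mask that was programmed via
 * ar9300_set_interrupts() (the software copy kept in ah_mask_reg).
 */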
HAL_INT
ar9300_get_interrupts(struct ath_hal *ah)
{
    return AH9300(ah)->ah_mask_reg;
}

/*
 * Atomically enables NIC interrupts.  Interrupts are passed in
 * via the enumerated bitmask in ints.
 */
HAL_INT
ar9300_set_interrupts(struct ath_hal *ah, HAL_INT ints, HAL_BOOL nortc)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    u_int32_t omask = ahp->ah_mask_reg;
    u_int32_t mask, mask2, msi_mask = 0;
    u_int32_t msi_pend_addr_mask = 0;
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

    HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
        "%s: 0x%x => 0x%x\n", __func__, omask, ints);

    if (omask & HAL_INT_GLOBAL) {
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: disable IER\n", __func__);

        if (ah->ah_config.ath_hal_enable_msi) {
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE), 0);
            /* flush write to HW */
            (void)OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE));
        }

        if (!nortc) {
            OS_REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
            (void) OS_REG_READ(ah, AR_IER);   /* flush write to HW */
        }

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE), 0);
        /* flush write to HW */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE));
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), 0);
        /* flush write to HW */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE));
    }

    if (!nortc) {
        /* reference count for global IER */
        if (ints & HAL_INT_GLOBAL) {
#ifdef AH_DEBUG
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: Request HAL_INT_GLOBAL ENABLED\n", __func__);
#if 0
            if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                    "%s: WARNING: ah_ier_ref_count is 0 "
                    "and attempting to enable IER\n",
                    __func__);
            }
#endif
#endif
#if 0
            if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) > 0) {
                OS_ATOMIC_DEC(&ahp->ah_ier_ref_count);
            }
#endif
        } else {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: Request HAL_INT_GLOBAL DISABLED\n", __func__);
            OS_ATOMIC_INC(&ahp->ah_ier_ref_count);
        }
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
            "%s: ah_ier_ref_count = %d\n", __func__, ahp->ah_ier_ref_count);

        mask = ints & HAL_INT_COMMON;
        mask2 = 0;
        msi_mask = 0;

        if (ints & HAL_INT_TX) {
            if (ahp->ah_intr_mitigation_tx) {
                mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
            } else if (ahp->ah_tx_ok_interrupt_mask) {
                mask |= AR_IMR_TXOK;
            }
            msi_mask |= AR_INTR_PRIO_TX;
            if (ahp->ah_tx_err_interrupt_mask) {
                mask |= AR_IMR_TXERR;
            }
            if (ahp->ah_tx_eol_interrupt_mask) {
                mask |= AR_IMR_TXEOL;
            }
        }
        if (ints & HAL_INT_RX) {
            mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
            if (ahp->ah_intr_mitigation_rx) {
                mask &= ~(AR_IMR_RXOK_LP);
                mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
            } else {
                mask |= AR_IMR_RXOK_LP;
            }
            msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
            if (! p_cap->halAutoSleepSupport) {
                mask |= AR_IMR_GENTMR;
            }
        }

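        /*
         * The beacon-related interrupts (TIM, DTIM, DTIMSYNC, CABEND,
         * TSFOOR) are secondary (IMR_S2) sources and must be gated by
         * AR_IMR_BCNMISC in the primary IMR.
         */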
        if (ints & (HAL_INT_BMISC)) {
            mask |= AR_IMR_BCNMISC;
            if (ints & HAL_INT_TIM) {
                mask2 |= AR_IMR_S2_TIM;
            }
            if (ints & HAL_INT_DTIM) {
                mask2 |= AR_IMR_S2_DTIM;
            }
            if (ints & HAL_INT_DTIMSYNC) {
                mask2 |= AR_IMR_S2_DTIMSYNC;
            }
            if (ints & HAL_INT_CABEND) {
                mask2 |= (AR_IMR_S2_CABEND);
            }
            if (ints & HAL_INT_TSFOOR) {
                mask2 |= AR_IMR_S2_TSFOOR;
            }
        }

        if (ints & (HAL_INT_GTT | HAL_INT_CST)) {
            mask |= AR_IMR_BCNMISC;
            if (ints & HAL_INT_GTT) {
                mask2 |= AR_IMR_S2_GTT;
            }
            if (ints & HAL_INT_CST) {
                mask2 |= AR_IMR_S2_CST;
            }
        }

        if (ints & HAL_INT_BBPANIC) {
            /* EV92527 - MAC secondary interrupt must enable AR_IMR_BCNMISC */
            mask |= AR_IMR_BCNMISC;
            mask2 |= AR_IMR_S2_BBPANIC;
        }

        if (ints & HAL_INT_GENTIMER) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: enabling gen timer\n", __func__);
            mask |= AR_IMR_GENTMR;
        }

        /* Write the new IMR and store off our SW copy. */
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: new IMR 0x%x\n", __func__, mask);
        OS_REG_WRITE(ah, AR_IMR, mask);
        ahp->ah_mask2Reg &= ~(AR_IMR_S2_TIM |
                        AR_IMR_S2_DTIM |
                        AR_IMR_S2_DTIMSYNC |
                        AR_IMR_S2_CABEND |
                        AR_IMR_S2_CABTO  |
                        AR_IMR_S2_TSFOOR |
                        AR_IMR_S2_GTT |
                        AR_IMR_S2_CST |
                        AR_IMR_S2_BBPANIC);
        ahp->ah_mask2Reg |= mask2;
        OS_REG_WRITE(ah, AR_IMR_S2, ahp->ah_mask2Reg );
        ahp->ah_mask_reg = ints;

        if (! p_cap->halAutoSleepSupport) {
            if (ints & HAL_INT_TIM_TIMER) {
                OS_REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
            }
            else {
                OS_REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
            }
        }
    }

    /* Re-enable interrupts if they were enabled before. */
#if HAL_INTR_REFCOUNT_DISABLE
    if ((ints & HAL_INT_GLOBAL)) {
#else
    if ((ints & HAL_INT_GLOBAL) && (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0)) {
#endif
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: enable IER\n", __func__);

        if (!nortc) {
            OS_REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        }

        mask = AR_INTR_MAC_IRQ;
#ifdef ATH_GPIO_USE_ASYNC_CAUSE
        if (ints & HAL_INT_GPIO) {
            if (ahp->ah_gpio_mask) {
                mask |= SM(ahp->ah_gpio_mask, AR_INTR_ASYNC_MASK_GPIO);
            }
        }
#endif

#if ATH_SUPPORT_MCI
        if (ints & HAL_INT_MCI) {
            mask |= AR_INTR_ASYNC_MASK_MCI;
        }
#endif

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), mask);
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_MASK), mask);

        if (ah->ah_config.ath_hal_enable_msi) {
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE),
                msi_mask);
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_MASK),
                msi_mask);
            if (AR_SREV_POSEIDON(ah)) {
                msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
            } else {
                msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
            }
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
                ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));
        }

        /*
         * Debug - enable to see all synchronous interrupt status.
         * Enable synchronous GPIO interrupts as well, since some async
         * GPIO interrupts don't wake the chip up.
         */
        mask = 0;
#ifndef ATH_GPIO_USE_ASYNC_CAUSE
        if (ints & HAL_INT_GPIO) {
            mask |= SM(ahp->ah_gpio_mask, AR_INTR_SYNC_MASK_GPIO);
        }
#endif
        if (AR_SREV_POSEIDON(ah)) {
            sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
        }
        else if (AR_SREV_WASP(ah)) {
            sync_en_def = AR9340_INTR_SYNC_DEFAULT;
        }

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE),
            (sync_en_def | mask));
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_MASK),
            (sync_en_def | mask));

        HALDEBUG(ah,  HAL_DEBUG_INTERRUPT,
            "AR_IMR 0x%x IER 0x%x\n",
            OS_REG_READ(ah, AR_IMR), OS_REG_READ(ah, AR_IER));
    }

    return omask;
}

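/*
 * Set one of the interrupt mitigation timers.  Note that the
 * HAL_INT_THRESHOLD case clears AR_MIRT rather than writing the supplied
 * value.  Only compiled in when AR5416_INT_MITIGATION is defined;
 * otherwise the call is a no-op.
 */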
void
ar9300_set_intr_mitigation_timer(
    struct ath_hal* ah,
    HAL_INT_MITIGATION reg,
    u_int32_t value)
{
#ifdef AR5416_INT_MITIGATION
    switch (reg) {
    case HAL_INT_THRESHOLD:
        OS_REG_WRITE(ah, AR_MIRT, 0);
        break;
    case HAL_INT_RX_LASTPKT:
        OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, value);
        break;
    case HAL_INT_RX_FIRSTPKT:
        OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, value);
        break;
    case HAL_INT_TX_LASTPKT:
        OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, value);
        break;
    case HAL_INT_TX_FIRSTPKT:
        OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, value);
        break;
    default:
        break;
    }
#endif
}

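/*
 * Read back an interrupt mitigation timer.  The RX (AR_RIMT) and TX
 * (AR_TIMT) registers pack the "last packet" timer in the low 16 bits and
 * the "first packet" timer in the high 16 bits.  Returns 0 when
 * AR5416_INT_MITIGATION is not compiled in.
 */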
u_int32_t
ar9300_get_intr_mitigation_timer(struct ath_hal* ah, HAL_INT_MITIGATION reg)
{
    u_int32_t val = 0;
#ifdef AR5416_INT_MITIGATION
    switch (reg) {
    case HAL_INT_THRESHOLD:
        val = OS_REG_READ(ah, AR_MIRT);
        break;
    case HAL_INT_RX_LASTPKT:
        val = OS_REG_READ(ah, AR_RIMT) & 0xFFFF;
        break;
    case HAL_INT_RX_FIRSTPKT:
        val = OS_REG_READ(ah, AR_RIMT) >> 16;
        break;
    case HAL_INT_TX_LASTPKT:
        val = OS_REG_READ(ah, AR_TIMT) & 0xFFFF;
        break;
    case HAL_INT_TX_FIRSTPKT:
        val = OS_REG_READ(ah, AR_TIMT) >> 16;
        break;
    default:
        break;
    }
#endif
    return val;
}