/*-
 * SPDX-License-Identifier: ISC
 *
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2002-2008 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD: releng/12.0/sys/dev/ath/ath_hal/ar5416/ar5416_xmit.c 326695 2017-12-08 15:57:29Z pfg $
 */
#include "opt_ah.h"

#include "ah.h"
#include "ah_desc.h"
#include "ah_internal.h"

#include "ar5416/ar5416.h"
#include "ar5416/ar5416reg.h"
#include "ar5416/ar5416phy.h"
#include "ar5416/ar5416desc.h"

/*
 * Stop transmit on the specified queue
 */
HAL_BOOL
ar5416StopTxDma(struct ath_hal *ah, u_int q)
{
#define	STOP_DMA_TIMEOUT	4000	/* us */
#define	STOP_DMA_ITER		100	/* us */
	u_int i;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
	for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
		if (ar5212NumTxPending(ah, q) == 0)
			break;
		OS_DELAY(STOP_DMA_ITER);
	}
#ifdef AH_DEBUG
	if (i == 0) {
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: queue %u DMA did not stop in %d msec\n",
		    __func__, q, STOP_DMA_TIMEOUT / 1000);
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
	}
#endif /* AH_DEBUG */

	/* ar5416 and up can kill packets at the PCU level */
	if (ar5212NumTxPending(ah, q)) {
		uint32_t j;

		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
		    "%s: Num of pending TX Frames %d on Q %d\n",
		    __func__, ar5212NumTxPending(ah, q), q);

		/* Kill last PCU Tx Frame */
		/* TODO - save off and restore current values of Q1/Q2? */
		for (j = 0; j < 2; j++) {
			uint32_t tsfLow = OS_REG_READ(ah, AR_TSF_L32);
			OS_REG_WRITE(ah, AR_QUIET2,
			    SM(10, AR_QUIET2_QUIET_DUR));
			OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			OS_REG_WRITE(ah, AR_NEXT_QUIET, tsfLow >> 10);
			OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);

			if ((OS_REG_READ(ah, AR_TSF_L32)>>10) == (tsfLow>>10))
				break;

			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: TSF moved while trying to set quiet time "
			    "TSF: 0x%08x\n", __func__, tsfLow);
			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
		}

		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);

		/* Allow the quiet mechanism to do its work */
		OS_DELAY(200);
		OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);

		/* Verify the transmit q is empty */
		for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
			if (ar5212NumTxPending(ah, q) == 0)
				break;
			OS_DELAY(STOP_DMA_ITER);
		}
		if (i == 0) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: Failed to stop Tx DMA in %d msec after killing"
			    " last frame\n", __func__, STOP_DMA_TIMEOUT / 1000);
		}
		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
	}

	OS_REG_WRITE(ah, AR_Q_TXD, 0);
	return (i != 0);
#undef STOP_DMA_ITER
#undef STOP_DMA_TIMEOUT
}
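
/*
 * Illustrative sketch (editor's example, not part of the HAL): a caller
 * that wants to re-use a queue first asks the HAL to stop DMA and checks
 * the result.  "example_drain_queue" and "qnum" are made up for the
 * example; real drivers normally reach this routine through the HAL
 * method table rather than calling it directly.
 */
#if 0
static void
example_drain_queue(struct ath_hal *ah, u_int qnum)
{
	if (! ar5416StopTxDma(ah, qnum)) {
		/* Neither the DMA stop nor the PCU quiet-time kill finished */
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: queue %u did not stop\n",
		    __func__, qnum);
		return;
	}
	/* Queue is idle; descriptors may now be reclaimed or reprogrammed */
}
#endif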

#define VALID_KEY_TYPES \
        ((1 << HAL_KEY_TYPE_CLEAR) | (1 << HAL_KEY_TYPE_WEP)|\
         (1 << HAL_KEY_TYPE_AES)   | (1 << HAL_KEY_TYPE_TKIP))
#define isValidKeyType(_t)      ((1 << (_t)) & VALID_KEY_TYPES)

#define set11nTries(_series, _index) \
        (SM((_series)[_index].Tries, AR_XmitDataTries##_index))

#define set11nRate(_series, _index) \
        (SM((_series)[_index].Rate, AR_XmitRate##_index))

#define set11nPktDurRTSCTS(_series, _index) \
        (SM((_series)[_index].PktDuration, AR_PacketDur##_index) |\
         ((_series)[_index].RateFlags & HAL_RATESERIES_RTS_CTS   ?\
         AR_RTSCTSQual##_index : 0))

#define set11nRateFlags(_series, _index) \
        ((_series)[_index].RateFlags & HAL_RATESERIES_2040 ? AR_2040_##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_HALFGI ? AR_GI##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_STBC ? AR_STBC##_index : 0) \
        |SM((_series)[_index].ChSel, AR_ChainSel##_index)

/*
 * Descriptor Access Functions
 */

#define VALID_PKT_TYPES \
        ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
         (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
         (1<<HAL_PKT_TYPE_BEACON)|(1<<HAL_PKT_TYPE_AMPDU))
#define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
#define VALID_TX_RATES \
        ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
         (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c)|(1<<0x01)|(1<<0x02)|(1<<0x03)|\
	 (1<<0x04)|(1<<0x05)|(1<<0x06)|(1<<0x07)|(1<<0x00))
/* NB: accept HT rates */
#define	isValidTxRate(_r)	((1<<((_r) & 0x7f)) & VALID_TX_RATES)

static inline int
ar5416RateToRateTable(struct ath_hal *ah, uint8_t rate, HAL_BOOL is_ht40)
{

	/*
	 * Handle the non-MCS rates
	 */
	switch (rate) {
	case /*   1 Mb */ 0x1b:
	case /*   1 MbS*/ 0x1b | 0x4:
		return (AH5416(ah)->ah_ratesArray[rate1l]);
	case /*   2 Mb */ 0x1a:
		return (AH5416(ah)->ah_ratesArray[rate2l]);
	case /*   2 MbS*/ 0x1a | 0x4:
		return (AH5416(ah)->ah_ratesArray[rate2s]);
	case /* 5.5 Mb */ 0x19:
		return (AH5416(ah)->ah_ratesArray[rate5_5l]);
	case /* 5.5 MbS*/ 0x19 | 0x4:
		return (AH5416(ah)->ah_ratesArray[rate5_5s]);
	case /*  11 Mb */ 0x18:
		return (AH5416(ah)->ah_ratesArray[rate11l]);
	case /*  11 MbS*/ 0x18 | 0x4:
		return (AH5416(ah)->ah_ratesArray[rate11s]);
	}

	/* OFDM rates */
	switch (rate) {
	case /*   6 Mb */ 0x0b:
		return (AH5416(ah)->ah_ratesArray[rate6mb]);
	case /*   9 Mb */ 0x0f:
		return (AH5416(ah)->ah_ratesArray[rate9mb]);
	case /*  12 Mb */ 0x0a:
		return (AH5416(ah)->ah_ratesArray[rate12mb]);
	case /*  18 Mb */ 0x0e:
		return (AH5416(ah)->ah_ratesArray[rate18mb]);
	case /*  24 Mb */ 0x09:
		return (AH5416(ah)->ah_ratesArray[rate24mb]);
	case /*  36 Mb */ 0x0d:
		return (AH5416(ah)->ah_ratesArray[rate36mb]);
	case /*  48 Mb */ 0x08:
		return (AH5416(ah)->ah_ratesArray[rate48mb]);
	case /*  54 Mb */ 0x0c:
		return (AH5416(ah)->ah_ratesArray[rate54mb]);
	}

	/*
	 * Handle HT20/HT40 - we only have to do MCS0-7;
	 * there are no stream differences.
	 */
	if ((rate & 0x80) && is_ht40) {
		return (AH5416(ah)->ah_ratesArray[rateHt40_0 + (rate & 0x7)]);
	} else if (rate & 0x80) {
		return (AH5416(ah)->ah_ratesArray[rateHt20_0 + (rate & 0x7)]);
	}

	/* XXX default (eg XR, bad bad person!) */
	return (AH5416(ah)->ah_ratesArray[rate6mb]);
}

/*
 * Return the TX power to be used for the given rate/chains/TX power.
 *
 * There are a bunch of tweaks to make to a given TX power based on
 * the current configuration, so...
 */
static uint16_t
ar5416GetTxRatePower(struct ath_hal *ah, uint8_t rate, uint8_t tx_chainmask,
    uint16_t txPower, HAL_BOOL is_ht40)
{
	int n_txpower, max_txpower;
	const int cck_ofdm_delta = 2;
#define	EEP_MINOR(_ah) \
	(AH_PRIVATE(_ah)->ah_eeversion & AR5416_EEP_VER_MINOR_MASK)
#define	IS_EEP_MINOR_V2(_ah)	(EEP_MINOR(_ah) >= AR5416_EEP_MINOR_VER_2)

	/* Take a copy; we may underflow and thus need to clamp things */
	n_txpower = txPower;

	/* HT40? Need to adjust the TX power by this */
	if (is_ht40)
		n_txpower += AH5416(ah)->ah_ht40PowerIncForPdadc;

	/*
	 * Merlin? Offset the target TX power offset - it defaults to
	 * starting at -5.0dBm, but that can change!
	 *
	 * Kiwi/Kite? Always -5.0dBm offset.
	 */
	if (AR_SREV_KIWI_10_OR_LATER(ah)) {
		n_txpower -= (AR5416_PWR_TABLE_OFFSET_DB * 2);
	} else if (AR_SREV_MERLIN_20_OR_LATER(ah)) {
		int8_t pwr_table_offset = 0;
		/* This is in dBm, convert to 1/2 dBm */
		(void) ath_hal_eepromGet(ah, AR_EEP_PWR_TABLE_OFFSET,
		    &pwr_table_offset);
		n_txpower -= (pwr_table_offset * 2);
	}

	/*
	 * If Open-loop TX power control is used, the CCK rates need
	 * to be offset by that.
	 *
	 * Rates: 2S, 2L, 1S, 1L, 5.5S, 5.5L
	 *
	 * XXX Odd, we don't have a PHY table entry for long preamble
	 * 1mbit CCK?
	 */
	if (AR_SREV_MERLIN_20_OR_LATER(ah) &&
	    ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)) {
		if (rate == 0x19 || rate == 0x1a || rate == 0x1b ||
		    rate == (0x19 | 0x04) || rate == (0x1a | 0x04) ||
		    rate == (0x1b | 0x04)) {
			n_txpower -= cck_ofdm_delta;
		}
	}

	/*
	 * We're now offset by the same amount that the static maximum
	 * PHY power tables are.  So, clamp the value based on that rate.
	 */
	max_txpower = ar5416RateToRateTable(ah, rate, is_ht40);
#if 0
	ath_hal_printf(ah, "%s: n_txpower = %d, max_txpower = %d, "
	    "rate = 0x%x , is_ht40 = %d\n",
	    __func__,
	    n_txpower,
	    max_txpower,
	    rate,
	    is_ht40);
#endif
	n_txpower = MIN(max_txpower, n_txpower);

	/*
	 * We don't have to offset the TX power for two or three
	 * chain operation here - it's done by the AR_PHY_POWER_TX_SUB
	 * register setting via the EEPROM.
	 *
	 * So for vendors that programmed the maximum target power assuming
	 * that 2/3 chains are always on, things will just plain work.
	 * (They won't reach that target power if only one chain is on, but
	 * that's a different problem.)
	 */

	/* Over/underflow? Adjust */
	if (n_txpower < 0)
		n_txpower = 0;
	else if (n_txpower > 63)
		n_txpower = 63;

	/*
	 * For some odd reason the AR9160 with txpower=0 results in a
	 * much higher (max?) TX power.  So, if it's a chipset before
	 * AR9220/AR9280, just clamp the minimum value at 1.
	 */
	if ((! AR_SREV_MERLIN_10_OR_LATER(ah)) && (n_txpower == 0))
		n_txpower = 1;

	return (n_txpower);
#undef	EEP_MINOR
#undef	IS_EEP_MINOR_V2
}
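
/*
 * Worked example of the adjustments above (editor's illustration; the
 * numbers are hypothetical, in half-dB steps): a Merlin part whose EEPROM
 * power table offset is -5 dBm, asked for txPower = 40 (20.0 dBm) on an
 * HT40 MCS rate with ah_ht40PowerIncForPdadc = 2, yields
 * n_txpower = 40 + 2 - (-5 * 2) = 52.  That value is then clamped to the
 * per-rate maximum from ah_ratesArray[] and finally to the 0..63 range
 * that the descriptor TX power fields can hold.
 */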

HAL_BOOL
ar5416SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	txPower = (txPower + AH5212(ah)->ah_txPowerIndexOffset);
	if (txPower > 63)
		txPower = 63;

	/*
	 * XXX For now, just assume that this isn't a HT40 frame.
	 * It'll get over-ridden by the multi-rate TX power setup.
	 */
	if (AH5212(ah)->ah_tpcEnabled) {
		txPower = ar5416GetTxRatePower(ah, txRate0,
		    ahp->ah_tx_chainmask,
		    txPower,
		    AH_FALSE);
	}

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrameType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
		     | (flags & HAL_TXDESC_HWTS ? AR_InsertTS : 0)
		     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEn : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	ads->ds_ctl4 = 0;
	ads->ds_ctl5 = 0;
	ads->ds_ctl6 = 0;
	ads->ds_ctl7 = SM(ahp->ah_tx_chainmask, AR_ChainSel0)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel1)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel2)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel3)
		     ;
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
		ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
			     ;
		ads->ds_ctl7 |= (rtsctsRate << AR_RTSCTSRate_S);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 *
	 * XXX TODO: when doing TPC, set the TX power up appropriately?
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}
	return AH_TRUE;
#undef RTSCTS
}

HAL_BOOL
ar5416SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int txRate1, u_int txTries1,
	u_int txRate2, u_int txTries2,
	u_int txRate3, u_int txTries3)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (txTries1) {
		HALASSERT(isValidTxRate(txRate1));
		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1);
		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
	}
	if (txTries2) {
		HALASSERT(isValidTxRate(txRate2));
		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2);
		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
	}
	if (txTries3) {
		HALASSERT(isValidTxRate(txRate3));
		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3);
		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
	}
	return AH_TRUE;
}
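
/*
 * Illustrative sketch (editor's example, not part of the HAL): programming
 * a four-rate fallback schedule on a descriptor.  The rate codes and try
 * counts are hypothetical; drivers normally go through the HAL method
 * table for this.
 */
#if 0
static void
example_multirate_setup(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen, u_int txPower, u_int keyIx)
{
	/* Series 0: 54 Mb/s OFDM (rate code 0x0c), 2 tries */
	ar5416SetupTxDesc(ah, ds, pktLen, 0, HAL_PKT_TYPE_NORMAL,
	    txPower, 0x0c, 2, keyIx, 0,
	    HAL_TXDESC_INTREQ | HAL_TXDESC_CLRDMASK, 0, 0, 0, 0, 0);
	/* Series 1..3: fall back to 24, 12 and 6 Mb/s, 2 tries each */
	ar5416SetupXTxDesc(ah, ds, 0x09, 2, 0x0a, 2, 0x0b, 2);
}
#endif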

/*
 * XXX TODO: Figure out if AR_InsertTS is required on all sub-frames
 * of a TX descriptor.
 */
HAL_BOOL
ar5416FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int descId,
	u_int qcuId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t segLen = segLenList[0];

	HALASSERT((segLen &~ AR_BufLen) == 0);

	ds->ds_data = bufAddrList[0];

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
		/* ctl6 - we only need encrtype; the rest are blank */
		ads->ds_ctl6 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
		/* ctl6 - we only need encrtype; the rest are blank */
		ads->ds_ctl6 = AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl6 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl6 = AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType;
#endif
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* XXX only on last descriptor? */
	OS_MEMZERO(ads->u.tx.status, sizeof(ads->u.tx.status));
	return AH_TRUE;
}
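
/*
 * Illustrative sketch (editor's example, not part of the HAL): attaching a
 * frame that the bus code split into two DMA segments, one descriptor per
 * segment.  The parameter names and layout are assumptions for the example;
 * ds0 is assumed to have had ar5416SetupTxDesc() applied already.
 */
#if 0
static void
example_two_segment_frame(struct ath_hal *ah, struct ath_desc *ds0,
	struct ath_desc *ds1, HAL_DMA_ADDR paddr0, uint32_t len0,
	HAL_DMA_ADDR paddr1, uint32_t len1)
{
	HAL_DMA_ADDR bufList[1];
	uint32_t lenList[1];

	bufList[0] = paddr0;
	lenList[0] = len0;
	ar5416FillTxDesc(ah, ds0, bufList, lenList, 0, 0,
	    AH_TRUE /* first */, AH_FALSE /* not last */, ds0);

	bufList[0] = paddr1;
	lenList[0] = len1;
	ar5416FillTxDesc(ah, ds1, bufList, lenList, 0, 0,
	    AH_FALSE /* not first */, AH_TRUE /* last */, ds0);
}
#endif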

/*
 * NB: cipher is no longer used, it's calculated.
 */
HAL_BOOL
ar5416ChainTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList,
	uint32_t *segLenList,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int keyIx,
	HAL_CIPHER cipher,
	uint8_t delims,
	HAL_BOOL firstSeg,
	HAL_BOOL lastSeg,
	HAL_BOOL lastAggr)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
	struct ath_hal_5416 *ahp = AH5416(ah);
	u_int segLen = segLenList[0];

	int isaggr = 0;
	uint32_t last_aggr = 0;

	(void) hdrLen;
	(void) ah;

	HALASSERT((segLen &~ AR_BufLen) == 0);
	ds->ds_data = bufAddrList[0];

	HALASSERT(isValidPktType(type));
	if (type == HAL_PKT_TYPE_AMPDU) {
		type = HAL_PKT_TYPE_NORMAL;
		isaggr = 1;
		if (lastAggr == AH_FALSE)
			last_aggr = AR_MoreAggr;
	}

	/*
	 * Since this function is called before any of the other
	 * descriptor setup functions (at least in this particular
	 * 802.11n aggregation implementation), always bzero() the
	 * descriptor. Previously this would be done for all but
	 * the first segment.
	 * XXX TODO: figure out why; perhaps I'm using this slightly
	 * XXX incorrectly.
	 */
	OS_MEMZERO(ds->ds_hw, AR5416_DESC_TX_CTL_SZ);

	/*
	 * Note: VEOL should only be for the last descriptor in the chain.
	 */
	ads->ds_ctl0 = (pktLen & AR_FrameLen);

	/*
	 * For aggregates:
	 * + IsAggr must be set for all descriptors of all subframes of
	 *   the aggregate
	 * + MoreAggr must be set for all descriptors of all subframes
	 *   of the aggregate EXCEPT the last subframe;
	 * + MoreAggr must be _CLEAR_ for all descriptors of the last
	 *   subframe of the aggregate.
	 */
	ads->ds_ctl1 = (type << AR_FrameType_S)
			| (isaggr ? (AR_IsAggr | last_aggr) : 0);

	ads->ds_ctl2 = 0;
	ads->ds_ctl3 = 0;
	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
	}

	ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	if (isaggr) {
		ads->ds_ctl6 |= SM(delims, AR_PadDelim);
	}

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {           /* !firstSeg && lastSeg */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 |= segLen;
	} else {                        /* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 |= segLen | AR_TxMore;
	}
	ds_txstatus[0] = ds_txstatus[1] = 0;
	ds_txstatus[9] &= ~AR_TxDone;

	return AH_TRUE;
}

HAL_BOOL
ar5416SetupFirstTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int aggrLen, u_int flags, u_int txPower,
	u_int txRate0, u_int txTries0, u_int antMode,
	u_int rtsctsRate, u_int rtsctsDuration)
{
#define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidTxRate(txRate0));
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	txPower = (txPower + ahp->ah_txPowerIndexOffset);
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 |= (txPower << AR_XmitPower_S)
		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
	ads->ds_ctl1 |= (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
	ads->ds_ctl2 |= SM(txTries0, AR_XmitDataTries0);
	ads->ds_ctl3 |= (txRate0 << AR_XmitRate0_S);
	ads->ds_ctl7 = SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel0)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel1)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel2)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel3);

	/* NB: no V1 WAR */
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	ads->ds_ctl6 &= ~(0xffff);
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);

	if (flags & RTSCTS) {
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}

	return AH_TRUE;
#undef RTSCTS
}

HAL_BOOL
ar5416SetupLastTxDesc(struct ath_hal *ah, struct ath_desc *ds,
		const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;

	/* hack to copy rate info to last desc for later processing */
#ifdef AH_NEED_DESC_SWAP
	ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
	ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
	ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
	ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif
	return AH_TRUE;
}
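
/*
 * Illustrative sketch (editor's example, not part of the HAL): the order of
 * calls used to build a two-subframe, single-segment-per-subframe A-MPDU.
 * Lengths, rate code, try count and the "desc[]" layout are hypothetical;
 * the real driver walks its own buffer lists and usually also applies a
 * full rate scenario (see ar5416Set11nRateScenario below).
 */
#if 0
static void
example_build_aggregate(struct ath_hal *ah, struct ath_desc *desc[2],
	HAL_DMA_ADDR paddr[2], uint32_t len[2], u_int keyIx, u_int txPower)
{
	/* Per-subframe: chain each descriptor, marking the last subframe */
	ar5416ChainTxDesc(ah, desc[0], &paddr[0], &len[0], len[0], 0,
	    HAL_PKT_TYPE_AMPDU, keyIx, HAL_CIPHER_AES_CCM, 0,
	    AH_TRUE, AH_TRUE, AH_FALSE /* more subframes follow */);
	ar5416ChainTxDesc(ah, desc[1], &paddr[1], &len[1], len[1], 0,
	    HAL_PKT_TYPE_AMPDU, keyIx, HAL_CIPHER_AES_CCM, 0,
	    AH_TRUE, AH_TRUE, AH_TRUE /* last subframe */);

	/* Then program rate/power/aggregate length on the first descriptor */
	ar5416SetupFirstTxDesc(ah, desc[0], len[0] + len[1],
	    HAL_TXDESC_INTREQ, txPower, 0x80 /* MCS0, hypothetical */,
	    4, 0, 0, 0);

	/* And clear MoreAggr/PadDelim on the last one */
	ar5416SetupLastTxDesc(ah, desc[1], desc[0]);
}
#endif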

#ifdef AH_NEED_DESC_SWAP
/* Swap transmit descriptor */
static __inline void
ar5416SwapTxDesc(struct ath_desc *ds)
{
	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif

/*
 * Processing of HW TX descriptor.
 */
HAL_STATUS
ar5416ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

#ifdef AH_NEED_DESC_SWAP
	if ((ds_txstatus[9] & __bswap32(AR_TxDone)) == 0)
		return HAL_EINPROGRESS;
	ar5416SwapTxDesc(ds);
#else
	if ((ds_txstatus[9] & AR_TxDone) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ds_txstatus[9], AR_SeqNum);
	ts->ts_tstamp = AR_SendTimestamp(ds_txstatus);
	ts->ts_tid = MS(ds_txstatus[9], AR_TxTid);

	ts->ts_status = 0;
	if (ds_txstatus[1] & AR_ExcessiveRetries)
		ts->ts_status |= HAL_TXERR_XRETRY;
	if (ds_txstatus[1] & AR_Filtered)
		ts->ts_status |= HAL_TXERR_FILT;
	if (ds_txstatus[1] & AR_FIFOUnderrun)
		ts->ts_status |= HAL_TXERR_FIFO;
	if (ds_txstatus[9] & AR_TxOpExceeded)
		ts->ts_status |= HAL_TXERR_XTXOP;
	if (ds_txstatus[1] & AR_TxTimerExpired)
		ts->ts_status |= HAL_TXERR_TIMER_EXPIRED;

	ts->ts_flags  = 0;
	if (ds_txstatus[0] & AR_TxBaStatus) {
		ts->ts_flags |= HAL_TX_BA;
		ts->ts_ba_low = AR_BaBitmapLow(ds_txstatus);
		ts->ts_ba_high = AR_BaBitmapHigh(ds_txstatus);
	}
	if (ds->ds_ctl1 & AR_IsAggr)
		ts->ts_flags |= HAL_TX_AGGR;
	if (ds_txstatus[1] & AR_DescCfgErr)
		ts->ts_flags |= HAL_TX_DESC_CFG_ERR;
	if (ds_txstatus[1] & AR_TxDataUnderrun)
		ts->ts_flags |= HAL_TX_DATA_UNDERRUN;
	if (ds_txstatus[1] & AR_TxDelimUnderrun)
		ts->ts_flags |= HAL_TX_DELIM_UNDERRUN;

	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi = MS(ds_txstatus[9], AR_FinalTxIdx);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ts->ts_rssi = MS(ds_txstatus[5], AR_TxRSSICombined);
	ts->ts_rssi_ctl[0] = MS(ds_txstatus[0], AR_TxRSSIAnt00);
	ts->ts_rssi_ctl[1] = MS(ds_txstatus[0], AR_TxRSSIAnt01);
	ts->ts_rssi_ctl[2] = MS(ds_txstatus[0], AR_TxRSSIAnt02);
	ts->ts_rssi_ext[0] = MS(ds_txstatus[5], AR_TxRSSIAnt10);
	ts->ts_rssi_ext[1] = MS(ds_txstatus[5], AR_TxRSSIAnt11);
	ts->ts_rssi_ext[2] = MS(ds_txstatus[5], AR_TxRSSIAnt12);
	ts->ts_evm0 = AR_TxEVM0(ds_txstatus);
	ts->ts_evm1 = AR_TxEVM1(ds_txstatus);
	ts->ts_evm2 = AR_TxEVM2(ds_txstatus);

	ts->ts_shortretry = MS(ds_txstatus[1], AR_RTSFailCnt);
	ts->ts_longretry = MS(ds_txstatus[1], AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
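	/*
	 * Worked example (editor's illustration): with 2/2/2/2 tries
	 * programmed in ds_ctl2 and a frame that finally went out on
	 * series 2 after one un-acked try there, the hardware reports
	 * ts_finaltsi = 2 and DataFailCnt = 1; the fall-through below
	 * adds the exhausted series 1 and series 0 tries, giving
	 * ts_longretry = 1 + 2 + 2 = 5.
	 */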
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}

	/*
	 * These fields are not used. Zero these to preserve compatibility
	 * with existing drivers.
	 */
	ts->ts_virtcol = MS(ads->ds_ctl1, AR_VirtRetryCnt);
	ts->ts_antenna = 0; /* We don't switch antennas on Owl */

	/* handle tx trigger level changes internally */
	if ((ts->ts_status & HAL_TXERR_FIFO) ||
	    (ts->ts_flags & (HAL_TX_DATA_UNDERRUN | HAL_TX_DELIM_UNDERRUN)))
		ar5212UpdateTxTrigLevel(ah, AH_TRUE);

	return HAL_OK;
}

HAL_BOOL
ar5416SetGlobalTxTimeout(struct ath_hal *ah, u_int tu)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	if (tu > 0xFFFF) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad global tx timeout %u\n",
		    __func__, tu);
		/* restore default handling */
		ahp->ah_globaltxtimeout = (u_int) -1;
		return AH_FALSE;
	}
	OS_REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
	ahp->ah_globaltxtimeout = tu;
	return AH_TRUE;
}

u_int
ar5416GetGlobalTxTimeout(struct ath_hal *ah)
{
	return MS(OS_REG_READ(ah, AR_GTXTO), AR_GTXTO_TIMEOUT_LIMIT);
}

#define	HT_RC_2_MCS(_rc)	((_rc) & 0x0f)
static const u_int8_t baDurationDelta[] = {
	24,	//  0: BPSK
	12,	//  1: QPSK 1/2
	12,	//  2: QPSK 3/4
	4,	//  3: 16-QAM 1/2
	4,	//  4: 16-QAM 3/4
	4,	//  5: 64-QAM 2/3
	4,	//  6: 64-QAM 3/4
	4,	//  7: 64-QAM 5/6
	24,	//  8: BPSK
	12,	//  9: QPSK 1/2
	12,	// 10: QPSK 3/4
	4,	// 11: 16-QAM 1/2
	4,	// 12: 16-QAM 3/4
	4,	// 13: 64-QAM 2/3
	4,	// 14: 64-QAM 3/4
	4,	// 15: 64-QAM 5/6
};

void
ar5416Set11nRateScenario(struct ath_hal *ah, struct ath_desc *ds,
	u_int durUpdateEn, u_int rtsctsRate,
	HAL_11N_RATE_SERIES series[], u_int nseries, u_int flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t ds_ctl0;

	HALASSERT(nseries == 4);
	(void)nseries;

	/*
	 * Only one of RTS and CTS enable must be set.
	 * If a frame has both set, just do RTS protection -
	 * that's enough to satisfy legacy protection.
	 */
	if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & HAL_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		     | set11nTries(series, 1)
		     | set11nTries(series, 2)
		     | set11nTries(series, 3)
		     | (durUpdateEn ? AR_DurUpdateEn : 0);

	ads->ds_ctl3 = set11nRate(series, 0)
		     | set11nRate(series, 1)
		     | set11nRate(series, 2)
		     | set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		     | set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		     | set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		     | set11nRateFlags(series, 1)
		     | set11nRateFlags(series, 2)
		     | set11nRateFlags(series, 3)
		     | SM(rtsctsRate, AR_RTSCTSRate);

	/*
	 * Doing per-packet TPC - update the TX power for the first
	 * field; program in the other series.
	 */
	if (AH5212(ah)->ah_tpcEnabled) {
		uint32_t ds_ctl0;
		uint16_t txPower;

		/* Modify the tx power field for rate 0 */
		txPower = ar5416GetTxRatePower(ah, series[0].Rate,
		    series[0].ChSel,
		    series[0].tx_power_cap,
		    !! (series[0].RateFlags & HAL_RATESERIES_2040));
		ds_ctl0 = ads->ds_ctl0 & ~AR_XmitPower;
		ds_ctl0 |= (txPower << AR_XmitPower_S);
		ads->ds_ctl0 = ds_ctl0;

		/*
		 * Override the whole descriptor field for each TX power.
		 *
		 * This will need changing if we ever support antenna control
		 * programming.
		 */
		txPower = ar5416GetTxRatePower(ah, series[1].Rate,
		    series[1].ChSel,
		    series[1].tx_power_cap,
		    !! (series[1].RateFlags & HAL_RATESERIES_2040));
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);

		txPower = ar5416GetTxRatePower(ah, series[2].Rate,
		    series[2].ChSel,
		    series[2].tx_power_cap,
		    !! (series[2].RateFlags & HAL_RATESERIES_2040));
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);

		txPower = ar5416GetTxRatePower(ah, series[3].Rate,
		    series[3].ChSel,
		    series[3].tx_power_cap,
		    !! (series[3].RateFlags & HAL_RATESERIES_2040));
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);
	}
}
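
/*
 * Illustrative sketch (editor's example, not part of the HAL): filling in a
 * HAL_11N_RATE_SERIES array for a frame that tries MCS 7 twice and then
 * falls back to MCS 0.  The durations, chain selection and the 24 Mb/s
 * RTS/CTS rate code (0x09) are hypothetical values for the example only.
 */
#if 0
static void
example_rate_scenario(struct ath_hal *ah, struct ath_desc *ds)
{
	HAL_11N_RATE_SERIES series[4] = { { 0 }, { 0 }, { 0 }, { 0 } };

	series[0].Rate = 0x87;		/* MCS 7 */
	series[0].Tries = 2;
	series[0].PktDuration = 100;	/* usec, hypothetical */
	series[0].ChSel = 0x7;
	series[0].RateFlags = HAL_RATESERIES_RTS_CTS;

	series[1].Rate = 0x80;		/* MCS 0 */
	series[1].Tries = 4;
	series[1].PktDuration = 1000;	/* usec, hypothetical */
	series[1].ChSel = 0x7;

	/* Series 2/3 left zeroed (no further fallback) */
	ar5416Set11nRateScenario(ah, ds, 0, 0x09, series, 4,
	    HAL_TXDESC_RTSENA);
}
#endif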

/*
 * Note: this should be called before calling ar5416Set11nBurstDuration()
 * (if it is indeed called) in order to ensure that the burst duration
 * is correctly updated with the BA delta workaround.
 */
void
ar5416Set11nAggrFirst(struct ath_hal *ah, struct ath_desc *ds, u_int aggrLen,
    u_int numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t flags;
	uint32_t burstDur;
	uint8_t rate;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ads->ds_ctl6 &= ~(AR_AggrLen | AR_PadDelim);
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);

	if (! AR_SREV_MERLIN_10_OR_LATER(ah)) {
		/*
		 * XXX It'd be nice if I were passed in the rate scenario
		 * at this point..
		 */
		rate = MS(ads->ds_ctl3, AR_XmitRate0);
		flags = ads->ds_ctl0 & (AR_CTSEnable | AR_RTSEnable);
		/*
		 * WAR - MAC assumes normal ACK time instead of
		 * block ACK while computing packet duration.
		 * Add this delta to the burst duration in the descriptor.
		 */
		if (flags && (ads->ds_ctl1 & AR_IsAggr)) {
			burstDur = baDurationDelta[HT_RC_2_MCS(rate)];
			ads->ds_ctl2 &= ~(AR_BurstDur);
			ads->ds_ctl2 |= SM(burstDur, AR_BurstDur);
		}
	}
}

void
ar5416Set11nAggrMiddle(struct ath_hal *ah, struct ath_desc *ds, u_int numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ads->ds_ctl6 &= ~AR_PadDelim;
	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 &= ~AR_AggrLen;

	/*
	 * Clear the TxDone status here, may need to change
	 * func name to reflect this
	 */
	ds_txstatus[9] &= ~AR_TxDone;
}

void
ar5416Set11nAggrLast(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}

void
ar5416Clr11nAggr(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
	ads->ds_ctl6 &= ~AR_PadDelim;
	ads->ds_ctl6 &= ~AR_AggrLen;
}

void
ar5416Set11nVirtualMoreFrag(struct ath_hal *ah, struct ath_desc *ds,
    u_int vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

/*
 * Program the burst duration, with the included BA delta if it's
 * applicable.
 */
void
ar5416Set11nBurstDuration(struct ath_hal *ah, struct ath_desc *ds,
                                                  u_int burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t burstDur = 0;
	uint8_t rate;

	if (! AR_SREV_MERLIN_10_OR_LATER(ah)) {
		/*
		 * XXX It'd be nice if I were passed in the rate scenario
		 * at this point..
		 */
		rate = MS(ads->ds_ctl3, AR_XmitRate0);
		/*
		 * WAR - MAC assumes normal ACK time instead of
		 * block ACK while computing packet duration.
		 * Add this delta to the burst duration in the descriptor.
		 */
		if (ads->ds_ctl1 & AR_IsAggr) {
			burstDur = baDurationDelta[HT_RC_2_MCS(rate)];
		}
	}

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDur + burstDuration, AR_BurstDur);
}
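
/*
 * Worked example (editor's illustration): on a pre-Merlin MAC, an
 * aggregate whose series 0 rate is MCS 0 (BPSK) picks up a
 * baDurationDelta of 24 usec, so a requested burstDuration of 1000
 * is programmed into AR_BurstDur as 1024.
 */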

/*
 * Retrieve the rate table from the given TX completion descriptor
 */
HAL_BOOL
ar5416GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
{
	const struct ar5416_desc *ads = AR5416DESC_CONST(ds0);

	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);

	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);

	return AH_TRUE;
}
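
/*
 * Illustrative sketch (editor's example): pulling the programmed rate
 * schedule back out of a completed descriptor, e.g. for rate control
 * accounting.
 */
#if 0
static void
example_completion_rates(struct ath_hal *ah, const struct ath_desc *ds0)
{
	int rates[4], tries[4];

	if (ar5416GetTxCompletionRates(ah, ds0, rates, tries)) {
		/* rates[0]/tries[0] describe series 0, and so on */
	}
}
#endif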

/*
 * TX queue management routines - AR5416 and later chipsets
 */

/*
 * Allocate and initialize a tx DCU/QCU combination.
 */
int
ar5416SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_PSPOLL:
		q = 1;				/* lowest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* third highest priority */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5416ResetTxQueue */
	return q;
}
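
/*
 * Illustrative sketch (editor's example, not part of the HAL): allocating a
 * data queue with default parameters and then applying them to hardware.
 * As noted above, the reset call (ar5416ResetTxQueue(), defined below) must
 * follow the setup call before the queue is used.
 */
#if 0
static int
example_alloc_data_queue(struct ath_hal *ah)
{
	int qnum;

	qnum = ar5416SetupTxQueue(ah, HAL_TX_QUEUE_DATA, AH_NULL);
	if (qnum == -1)
		return (-1);
	if (! ar5416ResetTxQueue(ah, qnum))
		return (-1);
	return (qnum);
}
#endif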

/*
 * Update the h/w interrupt registers to reflect a tx q's configuration.
 */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	OS_REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
	);
	OS_REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
	);
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}

/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
 * Assumes:
 *  phwChannel has been set to point to the current channel
 */
#define	TU_TO_USEC(_tu)		((_tu) << 10)
HAL_BOOL
ar5416ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, qmisc, dmisc;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IEEE80211_IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

	/* Enable exponential backoff window */
	dmisc |= AR_D_MISC_BKOFF_PERSISTENCE;

	/*
	 * The chip reset default is to use a DCU backoff threshold of 0x2.
	 * Restore this when programming the DCU MISC register.
	 */
	dmisc |= 0x2;

	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}

	if (qi->tqi_readyTime && (qi->tqi_type != HAL_TX_QUEUE_CAB)) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
			| AR_Q_RDYTIMECFG_ENA);
	}

	OS_REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled asap.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
			    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fillin type-dependent bits.  Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_BEACON_USE
		      |  AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      |  AR_D_MISC_BEACON_USE
		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
		/*
		 * No longer enable AR_Q_MISC_RDYTIME_EXP_POLICY.
		 * There is an issue with the CAB queue
		 * not properly refreshing the Tx descriptor if
		 * the TXE clear setting is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_CBR_INCR_DIS1
		      |  AR_Q_MISC_CBR_INCR_DIS0;
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: CAB: tqi_readyTime = %d\n",
		    __func__, qi->tqi_readyTime);
		if (qi->tqi_readyTime) {
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: using tqi_readyTime\n", __func__);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		} else {
			int value;
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
			/*
			 * XXX for now, hard-code a CAB interval of 50%
			 * XXX of the total beacon interval.
			 *
			 * XXX This keeps Merlin and later based MACs
			 * XXX quite a bit happier (stops stuck beacons,
			 * XXX which I gather is because of such a long
			 * XXX cabq time.)
			 */
			value = (ahp->ah_beaconInterval * 50 / 100)
				- ah->ah_config.ah_additional_swba_backoff
				- ah->ah_config.ah_sw_beacon_response_time
				+ ah->ah_config.ah_dma_beacon_response_time;
			/*
			 * XXX Ensure it isn't too low - nothing lower
			 * XXX than 10 TU
			 */
			if (value < 10)
				value = 10;
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: defaulting to rdytime = %d uS\n",
			    __func__, value);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		}
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	case HAL_TX_QUEUE_PSPOLL:
		qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		break;
	case HAL_TX_QUEUE_UAPSD:
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	default:			/* NB: silence compiler */
		break;
	}

	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
			     | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
#undef	TU_TO_USEC
