/*
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2002-2008 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD$
 */
#include "opt_ah.h"

#include "ah.h"
#include "ah_desc.h"
#include "ah_internal.h"

#include "ar5416/ar5416.h"
#include "ar5416/ar5416reg.h"
#include "ar5416/ar5416phy.h"
#include "ar5416/ar5416desc.h"
/*
 * Stop transmit on the specified queue
 */
HAL_BOOL
ar5416StopTxDma(struct ath_hal *ah, u_int q)
{
#define	STOP_DMA_TIMEOUT	4000	/* us */
#define	STOP_DMA_ITER		100	/* us */
	u_int i;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
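	/*
	 * Poll for the DMA engine to go idle: up to STOP_DMA_TIMEOUT us
	 * total, checked every STOP_DMA_ITER us (40 iterations with the
	 * values above).
	 */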
	for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
		if (ar5212NumTxPending(ah, q) == 0)
			break;
		OS_DELAY(STOP_DMA_ITER);
	}
#ifdef AH_DEBUG
	if (i == 0) {
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: queue %u DMA did not stop in %d msec\n",
		    __func__, q, STOP_DMA_TIMEOUT / 1000);
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
	}
#endif /* AH_DEBUG */

	/* ar5416 and up can kill packets at the PCU level */
	if (ar5212NumTxPending(ah, q)) {
		uint32_t j;

		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
		    "%s: Num of pending TX Frames %d on Q %d\n",
		    __func__, ar5212NumTxPending(ah, q), q);

		/* Kill last PCU Tx Frame */
		/* TODO - save off and restore current values of Q1/Q2? */
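		/*
		 * Force a quiet period that starts (almost) immediately:
		 * program the quiet duration/period and point AR_NEXT_QUIET
		 * at the current TSF (in TU), then enable the quiet timer.
		 * While quiet is in effect the PCU stops transmitting, which
		 * lets the pending frame(s) drain.  Retry once if the TSF
		 * crossed a TU boundary while the registers were written.
		 */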
		for (j = 0; j < 2; j++) {
			uint32_t tsfLow = OS_REG_READ(ah, AR_TSF_L32);
			OS_REG_WRITE(ah, AR_QUIET2,
			    SM(10, AR_QUIET2_QUIET_DUR));
			OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			OS_REG_WRITE(ah, AR_NEXT_QUIET, tsfLow >> 10);
			OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);

			if ((OS_REG_READ(ah, AR_TSF_L32)>>10) == (tsfLow>>10))
				break;

			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: TSF moved while trying to set quiet time "
			    "TSF: 0x%08x\n", __func__, tsfLow);
			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
		}

		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);

		/* Allow the quiet mechanism to do its work */
		OS_DELAY(200);
		OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);

		/* Verify the transmit q is empty */
		for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
			if (ar5212NumTxPending(ah, q) == 0)
				break;
			OS_DELAY(STOP_DMA_ITER);
		}
		if (i == 0) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: Failed to stop Tx DMA in %d msec after killing"
			    " last frame\n", __func__, STOP_DMA_TIMEOUT / 1000);
		}
		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
	}

	OS_REG_WRITE(ah, AR_Q_TXD, 0);
	return (i != 0);
#undef STOP_DMA_ITER
#undef STOP_DMA_TIMEOUT
}

#define VALID_KEY_TYPES \
        ((1 << HAL_KEY_TYPE_CLEAR) | (1 << HAL_KEY_TYPE_WEP)|\
         (1 << HAL_KEY_TYPE_AES)   | (1 << HAL_KEY_TYPE_TKIP))
#define isValidKeyType(_t)      ((1 << (_t)) & VALID_KEY_TYPES)

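/*
 * Helpers for building the per-series fields of an 11n rate scenario:
 * try counts, rate codes, packet duration / RTS-CTS qualifiers and
 * rate flags (20/40, half-GI, chain select) for series 0..3.  Used by
 * ar5416Set11nRateScenario() below.
 */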
#define set11nTries(_series, _index) \
        (SM((_series)[_index].Tries, AR_XmitDataTries##_index))

#define set11nRate(_series, _index) \
        (SM((_series)[_index].Rate, AR_XmitRate##_index))

#define set11nPktDurRTSCTS(_series, _index) \
        (SM((_series)[_index].PktDuration, AR_PacketDur##_index) |\
         ((_series)[_index].RateFlags & HAL_RATESERIES_RTS_CTS   ?\
         AR_RTSCTSQual##_index : 0))

#define set11nRateFlags(_series, _index) \
        ((_series)[_index].RateFlags & HAL_RATESERIES_2040 ? AR_2040_##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_HALFGI ? AR_GI##_index : 0) \
        |SM((_series)[_index].ChSel, AR_ChainSel##_index)

/*
 * Descriptor Access Functions
 */

#define VALID_PKT_TYPES \
        ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
         (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
         (1<<HAL_PKT_TYPE_BEACON)|(1<<HAL_PKT_TYPE_AMPDU))
#define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
#define VALID_TX_RATES \
        ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
         (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c)|(1<<0x01)|(1<<0x02)|(1<<0x03)|\
	 (1<<0x04)|(1<<0x05)|(1<<0x06)|(1<<0x07)|(1<<0x00))
/* NB: accept HT rates */
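/*
 * NB: HT (MCS) rate codes are flagged with the 0x80 bit; masking the
 *     rate with 0x7f below strips that flag so HT rates pass the
 *     legacy rate table check as well.
 */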
#define	isValidTxRate(_r)	((1<<((_r) & 0x7f)) & VALID_TX_RATES)

HAL_BOOL
ar5416SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	txPower = (txPower + AH5212(ah)->ah_txPowerIndexOffset);
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrameType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
		     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEn : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	ads->ds_ctl4 = 0;
	ads->ds_ctl5 = 0;
	ads->ds_ctl6 = 0;
	ads->ds_ctl7 = SM(ahp->ah_tx_chainmask, AR_ChainSel0)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel1)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel2)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel3)
		     ;
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
		ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
			     ;
		ads->ds_ctl7 |= (rtsctsRate << AR_RTSCTSRate_S);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}
	return AH_TRUE;
#undef RTSCTS
}

HAL_BOOL
ar5416SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int txRate1, u_int txTries1,
	u_int txRate2, u_int txTries2,
	u_int txRate3, u_int txTries3)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (txTries1) {
		HALASSERT(isValidTxRate(txRate1));
		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1);
		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
	}
	if (txTries2) {
		HALASSERT(isValidTxRate(txRate2));
		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2);
		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
	}
	if (txTries3) {
		HALASSERT(isValidTxRate(txRate3));
		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3);
		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
	}
	return AH_TRUE;
}

HAL_BOOL
ar5416FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	HALASSERT((segLen &~ AR_BufLen) == 0);

	if (firstSeg) {
		/*
		 * First descriptor; don't clobber the xmit control data
		 * set up by ar5416SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* XXX only on last descriptor? */
	OS_MEMZERO(ads->u.tx.status, sizeof(ads->u.tx.status));
	return AH_TRUE;
}

HAL_BOOL
ar5416ChainTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int keyIx,
	HAL_CIPHER cipher,
	uint8_t delims,
	u_int segLen,
	HAL_BOOL firstSeg,
	HAL_BOOL lastSeg)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
	struct ath_hal_5416 *ahp = AH5416(ah);

	int isaggr = 0;

	(void) hdrLen;
	(void) ah;

	HALASSERT((segLen &~ AR_BufLen) == 0);

	HALASSERT(isValidPktType(type));
	if (type == HAL_PKT_TYPE_AMPDU) {
		type = HAL_PKT_TYPE_NORMAL;
		isaggr = 1;
	}

	if (!firstSeg) {
		OS_MEMZERO(ds->ds_hw, AR5416_DESC_TX_CTL_SZ);
	}

	ads->ds_ctl0 = (pktLen & AR_FrameLen);
	ads->ds_ctl1 = (type << AR_FrameType_S)
			| (isaggr ? (AR_IsAggr | AR_MoreAggr) : 0);
	ads->ds_ctl2 = 0;
	ads->ds_ctl3 = 0;
	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
	}

	ads->ds_ctl6 = SM(ahp->ah_keytype[cipher], AR_EncrType);
	if (isaggr) {
		ads->ds_ctl6 |= SM(delims, AR_PadDelim);
	}

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {           /* !firstSeg && lastSeg */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 |= segLen;
	} else {                        /* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 |= segLen | AR_TxMore;
	}
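	/*
	 * Clear the first two status words and the TxDone bit so stale
	 * completion status from a previous use of this descriptor is
	 * not mistaken for a finished transmit.
	 */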
	ds_txstatus[0] = ds_txstatus[1] = 0;
	ds_txstatus[9] &= ~AR_TxDone;

	return AH_TRUE;
}

HAL_BOOL
ar5416SetupFirstTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int aggrLen, u_int flags, u_int txPower,
	u_int txRate0, u_int txTries0, u_int antMode,
	u_int rtsctsRate, u_int rtsctsDuration)
{
#define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidTxRate(txRate0));
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	txPower = (txPower + ahp->ah_txPowerIndexOffset);
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 |= (txPower << AR_XmitPower_S)
		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
	ads->ds_ctl1 |= (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
	ads->ds_ctl2 |= SM(txTries0, AR_XmitDataTries0);
	ads->ds_ctl3 |= (txRate0 << AR_XmitRate0_S);
	ads->ds_ctl7 = SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel0)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel1)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel2)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel3);

	/* NB: no V1 WAR */
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	ads->ds_ctl6 &= ~(0xffff);
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);

	if (flags & RTSCTS) {
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}

	return AH_TRUE;
#undef RTSCTS
}

HAL_BOOL
ar5416SetupLastTxDesc(struct ath_hal *ah, struct ath_desc *ds,
		const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;

	/* hack to copy rate info to last desc for later processing */
#ifdef AH_NEED_DESC_SWAP
	ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
	ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
	ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
	ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif

	return AH_TRUE;
}

#ifdef AH_NEED_DESC_SWAP
/* Swap transmit descriptor */
static __inline void
ar5416SwapTxDesc(struct ath_desc *ds)
{
	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif

/*
 * Processing of HW TX descriptor.
 */
HAL_STATUS
ar5416ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

#ifdef AH_NEED_DESC_SWAP
	if ((ds_txstatus[9] & __bswap32(AR_TxDone)) == 0)
		return HAL_EINPROGRESS;
	ar5416SwapTxDesc(ds);
#else
	if ((ds_txstatus[9] & AR_TxDone) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ds_txstatus[9], AR_SeqNum);
	ts->ts_tstamp = AR_SendTimestamp(ds_txstatus);

	ts->ts_status = 0;
	if (ds_txstatus[1] & AR_ExcessiveRetries)
		ts->ts_status |= HAL_TXERR_XRETRY;
	if (ds_txstatus[1] & AR_Filtered)
		ts->ts_status |= HAL_TXERR_FILT;
	if (ds_txstatus[1] & AR_FIFOUnderrun)
		ts->ts_status |= HAL_TXERR_FIFO;
	if (ds_txstatus[9] & AR_TxOpExceeded)
		ts->ts_status |= HAL_TXERR_XTXOP;
	if (ds_txstatus[1] & AR_TxTimerExpired)
		ts->ts_status |= HAL_TXERR_TIMER_EXPIRED;

	ts->ts_flags  = 0;
	if (ds_txstatus[0] & AR_TxBaStatus) {
		ts->ts_flags |= HAL_TX_BA;
		ts->ts_ba_low = AR_BaBitmapLow(ds_txstatus);
		ts->ts_ba_high = AR_BaBitmapHigh(ds_txstatus);
	}
	if (ds->ds_ctl1 & AR_IsAggr)
		ts->ts_flags |= HAL_TX_AGGR;
	if (ds_txstatus[1] & AR_DescCfgErr)
		ts->ts_flags |= HAL_TX_DESC_CFG_ERR;
	if (ds_txstatus[1] & AR_TxDataUnderrun)
		ts->ts_flags |= HAL_TX_DATA_UNDERRUN;
	if (ds_txstatus[1] & AR_TxDelimUnderrun)
		ts->ts_flags |= HAL_TX_DELIM_UNDERRUN;

	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi =  MS(ds_txstatus[9], AR_FinalTxIdx);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ts->ts_rssi = MS(ds_txstatus[5], AR_TxRSSICombined);
	ts->ts_rssi_ctl[0] = MS(ds_txstatus[0], AR_TxRSSIAnt00);
	ts->ts_rssi_ctl[1] = MS(ds_txstatus[0], AR_TxRSSIAnt01);
	ts->ts_rssi_ctl[2] = MS(ds_txstatus[0], AR_TxRSSIAnt02);
	ts->ts_rssi_ext[0] = MS(ds_txstatus[5], AR_TxRSSIAnt10);
	ts->ts_rssi_ext[1] = MS(ds_txstatus[5], AR_TxRSSIAnt11);
	ts->ts_rssi_ext[2] = MS(ds_txstatus[5], AR_TxRSSIAnt12);
	ts->ts_evm0 = AR_TxEVM0(ds_txstatus);
	ts->ts_evm1 = AR_TxEVM1(ds_txstatus);
	ts->ts_evm2 = AR_TxEVM2(ds_txstatus);

	ts->ts_shortretry = MS(ds_txstatus[1], AR_RTSFailCnt);
	ts->ts_longretry = MS(ds_txstatus[1], AR_DataFailCnt);
	/*
	 * The retry count holds the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fix up the retry count by adding in the try counts for
	 * each series that was fully processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
		/* FALLTHROUGH */
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
		/* FALLTHROUGH */
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}

	/*
	 * These fields are not used. Zero these to preserve compatibility
	 * with existing drivers.
	 */
	ts->ts_virtcol = MS(ads->ds_ctl1, AR_VirtRetryCnt);
	ts->ts_antenna = 0; /* We don't switch antennas on Owl */

	/* handle tx trigger level changes internally */
	if ((ts->ts_status & HAL_TXERR_FIFO) ||
	    (ts->ts_flags & (HAL_TX_DATA_UNDERRUN | HAL_TX_DELIM_UNDERRUN)))
		ar5212UpdateTxTrigLevel(ah, AH_TRUE);

	return HAL_OK;
}

HAL_BOOL
ar5416SetGlobalTxTimeout(struct ath_hal *ah, u_int tu)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	if (tu > 0xFFFF) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad global tx timeout %u\n",
		    __func__, tu);
		/* restore default handling */
		ahp->ah_globaltxtimeout = (u_int) -1;
		return AH_FALSE;
	}
	OS_REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
	ahp->ah_globaltxtimeout = tu;
	return AH_TRUE;
}

u_int
ar5416GetGlobalTxTimeout(struct ath_hal *ah)
{
	return MS(OS_REG_READ(ah, AR_GTXTO), AR_GTXTO_TIMEOUT_LIMIT);
}

void
ar5416Set11nRateScenario(struct ath_hal *ah, struct ath_desc *ds,
        u_int durUpdateEn, u_int rtsctsRate,
	HAL_11N_RATE_SERIES series[], u_int nseries, u_int flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t ds_ctl0;

	HALASSERT(nseries == 4);
	(void)nseries;

	/*
	 * XXX since the upper layers don't know the current chainmask
	 * XXX setup, just override their decisions here.
	 * XXX The upper layers need to be taught this!
	 */
	if (series[0].Tries != 0)
		series[0].ChSel = AH5416(ah)->ah_tx_chainmask;
	if (series[1].Tries != 0)
		series[1].ChSel = AH5416(ah)->ah_tx_chainmask;
	if (series[2].Tries != 0)
		series[2].ChSel = AH5416(ah)->ah_tx_chainmask;
	if (series[3].Tries != 0)
		series[3].ChSel = AH5416(ah)->ah_tx_chainmask;

	/*
	 * At most one of the RTS and CTS enables should be set.
	 * If a frame has both set, just do RTS protection -
	 * that's enough to satisfy legacy protection.
	 */
	if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & HAL_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		     | set11nTries(series, 1)
		     | set11nTries(series, 2)
		     | set11nTries(series, 3)
		     | (durUpdateEn ? AR_DurUpdateEn : 0);

	ads->ds_ctl3 = set11nRate(series, 0)
		     | set11nRate(series, 1)
		     | set11nRate(series, 2)
		     | set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		     | set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		     | set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		     | set11nRateFlags(series, 1)
		     | set11nRateFlags(series, 2)
		     | set11nRateFlags(series, 3)
		     | SM(rtsctsRate, AR_RTSCTSRate);
}

void
ar5416Set11nAggrMiddle(struct ath_hal *ah, struct ath_desc *ds, u_int numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ads->ds_ctl6 &= ~AR_PadDelim;
	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 &= ~AR_AggrLen;

	/*
	 * Clear the TxDone status here, may need to change
	 * func name to reflect this
	 */
	ds_txstatus[9] &= ~AR_TxDone;
}

void
ar5416Clr11nAggr(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
	ads->ds_ctl6 &= ~AR_PadDelim;
	ads->ds_ctl6 &= ~AR_AggrLen;
}

void
ar5416Set11nBurstDuration(struct ath_hal *ah, struct ath_desc *ds,
                                                  u_int burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}

/*
 * Retrieve the rate table from the given TX completion descriptor
 */
HAL_BOOL
ar5416GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
{
	const struct ar5416_desc *ads = AR5416DESC_CONST(ds0);

	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);

	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);

	return AH_TRUE;
}


/*
 * TX queue management routines - AR5416 and later chipsets
 */

/*
 * Allocate and initialize a tx DCU/QCU combination.
 */
int
ar5416SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_PSPOLL:
		q = 1;				/* lowest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* next highest priority */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5212ResetTxQueue */
	return q;
}

/*
 * Update the h/w interrupt registers to reflect a tx q's configuration.
 */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	OS_REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
	);
	OS_REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
	);
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}

/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
 * Assumes:
 *  phwChannel has been set to point to the current channel
 */
HAL_BOOL
ar5416ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, value, qmisc, dmisc;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IEEE80211_IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

	/* Enable exponential backoff window */
	dmisc |= AR_D_MISC_BKOFF_PERSISTENCE;

	/*
	 * The chip reset default is to use a DCU backoff threshold of 0x2.
	 * Restore this when programming the DCU MISC register.
	 */
	dmisc |= 0x2;

	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}
	if (qi->tqi_readyTime) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
			| AR_Q_RDYTIMECFG_ENA);
	}

	OS_REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled ASAP.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
			    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fill in type-dependent bits.  Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_BEACON_USE
		      |  AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      |  AR_D_MISC_BEACON_USE
		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
		/*
		 * Don't enable AR_Q_MISC_RDYTIME_EXP_POLICY any longer;
		 * there is an issue with the CAB queue not properly
		 * refreshing the TX descriptor if the TXE clear setting
		 * is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_CBR_INCR_DIS1
		      |  AR_Q_MISC_CBR_INCR_DIS0;

		if (!qi->tqi_readyTime) {
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
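			/*
			 * The ready time register takes microseconds;
			 * the beacon interval and the response/backoff
			 * times are in TU, hence the multiply by 1024.
			 */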
			value = (ahp->ah_beaconInterval
				- (ah->ah_config.ah_sw_beacon_response_time -
					ah->ah_config.ah_dma_beacon_response_time)
				- ah->ah_config.ah_additional_swba_backoff) * 1024;
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q), value | AR_Q_RDYTIMECFG_ENA);
		}
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	case HAL_TX_QUEUE_PSPOLL:
		qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		break;
	case HAL_TX_QUEUE_UAPSD:
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	default:			/* NB: silence compiler */
		break;
	}

	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
			     | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
