1/*-
2 * SPDX-License-Identifier: ISC
3 *
4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2002-2008 Atheros Communications, Inc.
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19#include "opt_ah.h"
20
21#include "ah.h"
22#include "ah_internal.h"
23#include "ah_desc.h"
24
25#include "ar5212/ar5212.h"
26#include "ar5212/ar5212reg.h"
27#include "ar5212/ar5212desc.h"
28#include "ar5212/ar5212phy.h"
29#ifdef AH_SUPPORT_5311
30#include "ar5212/ar5311reg.h"
31#endif
32
33#ifdef AH_NEED_DESC_SWAP
34static void ar5212SwapTxDesc(struct ath_desc *ds);
35#endif
36
37/*
38 * Update Tx FIFO trigger level.
39 *
40 * Set bIncTrigLevel to TRUE to increase the trigger level.
41 * Set bIncTrigLevel to FALSE to decrease the trigger level.
42 *
43 * Returns TRUE if the trigger level was updated
44 */
45HAL_BOOL
46ar5212UpdateTxTrigLevel(struct ath_hal *ah, HAL_BOOL bIncTrigLevel)
47{
48	struct ath_hal_5212 *ahp = AH5212(ah);
49	uint32_t txcfg, curLevel, newLevel;
50	HAL_INT omask;
51
52	if (ahp->ah_txTrigLev >= ahp->ah_maxTxTrigLev)
53		return AH_FALSE;
54
55	/*
56	 * Disable interrupts while futzing with the fifo level.
57	 */
58	omask = ath_hal_setInterrupts(ah, ahp->ah_maskReg &~ HAL_INT_GLOBAL);
59
60	txcfg = OS_REG_READ(ah, AR_TXCFG);
61	curLevel = MS(txcfg, AR_FTRIG);
62	newLevel = curLevel;
63	if (bIncTrigLevel) {		/* increase the trigger level */
64		if (curLevel < ahp->ah_maxTxTrigLev)
65			newLevel++;
66	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
67		newLevel--;
68	if (newLevel != curLevel)
69		/* Update the trigger level */
70		OS_REG_WRITE(ah, AR_TXCFG,
71			(txcfg &~ AR_FTRIG) | SM(newLevel, AR_FTRIG));
72
73	ahp->ah_txTrigLev = newLevel;
74
75	/* re-enable chip interrupts */
76	ath_hal_setInterrupts(ah, omask);
77
78	return (newLevel != curLevel);
79}
80
81/*
82 * Set the properties of the tx queue with the parameters
83 * from qInfo.
84 */
85HAL_BOOL
86ar5212SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo)
87{
88	struct ath_hal_5212 *ahp = AH5212(ah);
89	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
90
91	if (q >= pCap->halTotalQueues) {
92		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
93		    __func__, q);
94		return AH_FALSE;
95	}
96	return ath_hal_setTxQProps(ah, &ahp->ah_txq[q], qInfo);
97}
98
99/*
100 * Return the properties for the specified tx queue.
101 */
102HAL_BOOL
103ar5212GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo)
104{
105	struct ath_hal_5212 *ahp = AH5212(ah);
106	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
107
108	if (q >= pCap->halTotalQueues) {
109		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
110		    __func__, q);
111		return AH_FALSE;
112	}
113	return ath_hal_getTxQProps(ah, qInfo, &ahp->ah_txq[q]);
114}
115
116/*
117 * Allocate and initialize a tx DCU/QCU combination.
118 */
119int
120ar5212SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
121	const HAL_TXQ_INFO *qInfo)
122{
123	struct ath_hal_5212 *ahp = AH5212(ah);
124	HAL_TX_QUEUE_INFO *qi;
125	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
126	int q, defqflags;
127
128	/* by default enable OK+ERR+DESC+URN interrupts */
129	defqflags = HAL_TXQ_TXOKINT_ENABLE
130		  | HAL_TXQ_TXERRINT_ENABLE
131		  | HAL_TXQ_TXDESCINT_ENABLE
132		  | HAL_TXQ_TXURNINT_ENABLE;
133	/* XXX move queue assignment to driver */
134	switch (type) {
135	case HAL_TX_QUEUE_BEACON:
136		q = pCap->halTotalQueues-1;	/* highest priority */
137		defqflags |= HAL_TXQ_DBA_GATED
138		       | HAL_TXQ_CBR_DIS_QEMPTY
139		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
140		       | HAL_TXQ_BACKOFF_DISABLE;
141		break;
142	case HAL_TX_QUEUE_CAB:
143		q = pCap->halTotalQueues-2;	/* next highest priority */
144		defqflags |= HAL_TXQ_DBA_GATED
145		       | HAL_TXQ_CBR_DIS_QEMPTY
146		       | HAL_TXQ_CBR_DIS_BEMPTY
147		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
148		       | HAL_TXQ_BACKOFF_DISABLE;
149		break;
150	case HAL_TX_QUEUE_UAPSD:
151		q = pCap->halTotalQueues-3;	/* nextest highest priority */
152		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
153			HALDEBUG(ah, HAL_DEBUG_ANY,
154			    "%s: no available UAPSD tx queue\n", __func__);
155			return -1;
156		}
157		break;
158	case HAL_TX_QUEUE_DATA:
159		for (q = 0; q < pCap->halTotalQueues; q++)
160			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
161				break;
162		if (q == pCap->halTotalQueues) {
163			HALDEBUG(ah, HAL_DEBUG_ANY,
164			    "%s: no available tx queue\n", __func__);
165			return -1;
166		}
167		break;
168	default:
169		HALDEBUG(ah, HAL_DEBUG_ANY,
170		    "%s: bad tx queue type %u\n", __func__, type);
171		return -1;
172	}
173
174	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);
175
176	qi = &ahp->ah_txq[q];
177	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
178		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
179		    __func__, q);
180		return -1;
181	}
182	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
183	qi->tqi_type = type;
184	if (qInfo == AH_NULL) {
185		qi->tqi_qflags = defqflags;
186		qi->tqi_aifs = INIT_AIFS;
187		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
188		qi->tqi_cwmax = INIT_CWMAX;
189		qi->tqi_shretry = INIT_SH_RETRY;
190		qi->tqi_lgretry = INIT_LG_RETRY;
191		qi->tqi_physCompBuf = 0;
192	} else {
193		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
194		(void) ar5212SetTxQueueProps(ah, q, qInfo);
195	}
196	/* NB: must be followed by ar5212ResetTxQueue */
197	return q;
198}
199
200/*
201 * Update the h/w interrupt registers to reflect a tx q's configuration.
202 */
203static void
204setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
205{
206	struct ath_hal_5212 *ahp = AH5212(ah);
207
208	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
209	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
210	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
211	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
212	    ahp->ah_txUrnInterruptMask);
213
214	OS_REG_WRITE(ah, AR_IMR_S0,
215		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
216		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
217	);
218	OS_REG_WRITE(ah, AR_IMR_S1,
219		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
220		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
221	);
222	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
223		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
224}
225
226/*
227 * Free a tx DCU/QCU combination.
228 */
229HAL_BOOL
230ar5212ReleaseTxQueue(struct ath_hal *ah, u_int q)
231{
232	struct ath_hal_5212 *ahp = AH5212(ah);
233	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
234	HAL_TX_QUEUE_INFO *qi;
235
236	if (q >= pCap->halTotalQueues) {
237		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
238		    __func__, q);
239		return AH_FALSE;
240	}
241	qi = &ahp->ah_txq[q];
242	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
243		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
244		    __func__, q);
245		return AH_FALSE;
246	}
247
248	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: release queue %u\n", __func__, q);
249
250	qi->tqi_type = HAL_TX_QUEUE_INACTIVE;
251	ahp->ah_txOkInterruptMask &= ~(1 << q);
252	ahp->ah_txErrInterruptMask &= ~(1 << q);
253	ahp->ah_txDescInterruptMask &= ~(1 << q);
254	ahp->ah_txEolInterruptMask &= ~(1 << q);
255	ahp->ah_txUrnInterruptMask &= ~(1 << q);
256	setTxQInterrupts(ah, qi);
257
258	return AH_TRUE;
259}
260
261/*
262 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
263 * Assumes:
264 *  phwChannel has been set to point to the current channel
265 */
266#define	TU_TO_USEC(_tu)		((_tu) << 10)
267HAL_BOOL
268ar5212ResetTxQueue(struct ath_hal *ah, u_int q)
269{
270	struct ath_hal_5212 *ahp = AH5212(ah);
271	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
272	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
273	HAL_TX_QUEUE_INFO *qi;
274	uint32_t cwMin, chanCwMin, qmisc, dmisc;
275
276	if (q >= pCap->halTotalQueues) {
277		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
278		    __func__, q);
279		return AH_FALSE;
280	}
281	qi = &ahp->ah_txq[q];
282	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
283		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
284		    __func__, q);
285		return AH_TRUE;		/* XXX??? */
286	}
287
288	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);
289
290	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
291		/*
292		 * Select cwmin according to channel type.
293		 * NB: chan can be NULL during attach
294		 */
295		if (chan && IEEE80211_IS_CHAN_B(chan))
296			chanCwMin = INIT_CWMIN_11B;
297		else
298			chanCwMin = INIT_CWMIN;
299		/* make sure that the CWmin is of the form (2^n - 1) */
300		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
301			;
302	} else
303		cwMin = qi->tqi_cwmin;
304
305	/* set cwMin/Max and AIFS values */
306	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
307		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
308		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
309		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
310
311	/* Set retry limit values */
312	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
313		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
314		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
315		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
316		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
317	);
318
319	/* NB: always enable early termination on the QCU */
320	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
321	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);
322
323	/* NB: always enable DCU to wait for next fragment from QCU */
324	dmisc = AR_D_MISC_FRAG_WAIT_EN;
325
326#ifdef AH_SUPPORT_5311
327	if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU) {
328		/* Configure DCU to use the global sequence count */
329		dmisc |= AR5311_D_MISC_SEQ_NUM_CONTROL;
330	}
331#endif
332	/* multiqueue support */
333	if (qi->tqi_cbrPeriod) {
334		OS_REG_WRITE(ah, AR_QCBRCFG(q),
335			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
336			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
337		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
338		if (qi->tqi_cbrOverflowLimit)
339			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
340	}
341	if (qi->tqi_readyTime) {
342		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
343			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
344			| AR_Q_RDYTIMECFG_ENA);
345	}
346
347	OS_REG_WRITE(ah, AR_DCHNTIME(q),
348		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
349		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
350
351	if (qi->tqi_readyTime &&
352	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
353		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
354	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
355		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
356	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
357		/*
358		 * These are meangingful only when not scheduled asap.
359		 */
360		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
361			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
362		else
363			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
364		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
365			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
366		else
367			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
368	}
369
370	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
371		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
372	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
373		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
374	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
375		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
376			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
377	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
378		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
379			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
380	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
381		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
382			    AR_D_MISC_VIR_COL_HANDLING);
383	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
384		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;
385
386	/*
387	 * Fillin type-dependent bits.  Most of this can be
388	 * removed by specifying the queue parameters in the
389	 * driver; it's here for backwards compatibility.
390	 */
391	switch (qi->tqi_type) {
392	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
393		qmisc |= AR_Q_MISC_FSP_DBA_GATED
394		      |  AR_Q_MISC_BEACON_USE
395		      |  AR_Q_MISC_CBR_INCR_DIS1;
396
397		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
398			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
399		      |  AR_D_MISC_BEACON_USE
400		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
401		break;
402	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
403		/*
404		 * No longer Enable AR_Q_MISC_RDYTIME_EXP_POLICY,
405		 * There is an issue with the CAB Queue
406		 * not properly refreshing the Tx descriptor if
407		 * the TXE clear setting is used.
408		 */
409		qmisc |= AR_Q_MISC_FSP_DBA_GATED
410		      |  AR_Q_MISC_CBR_INCR_DIS1
411		      |  AR_Q_MISC_CBR_INCR_DIS0;
412
413		if (qi->tqi_readyTime) {
414			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
415			    "%s: using tqi_readyTime\n", __func__);
416			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
417			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
418			    AR_Q_RDYTIMECFG_ENA);
419		} else {
420			int value;
421			/*
422			 * NB: don't set default ready time if driver
423			 * has explicitly specified something.  This is
424			 * here solely for backwards compatibility.
425			 */
426			/*
427			 * XXX for now, hard-code a CAB interval of 70%
428			 * XXX of the total beacon interval.
429			 */
430
431			value = (ahp->ah_beaconInterval * 70 / 100)
432				- (ah->ah_config.ah_sw_beacon_response_time -
433				+ ah->ah_config.ah_dma_beacon_response_time)
434				- ah->ah_config.ah_additional_swba_backoff;
435			/*
436			 * XXX Ensure it isn't too low - nothing lower
437			 * XXX than 10 TU
438			 */
439			if (value < 10)
440				value = 10;
441			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
442			    "%s: defaulting to rdytime = %d uS\n",
443			    __func__, value);
444			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
445			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
446			    AR_Q_RDYTIMECFG_ENA);
447		}
448		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
449			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
450		break;
451	default:			/* NB: silence compiler */
452		break;
453	}
454
455	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
456	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);
457
458	/* Setup compression scratchpad buffer */
459	/*
460	 * XXX: calling this asynchronously to queue operation can
461	 *      cause unexpected behavior!!!
462	 */
463	if (qi->tqi_physCompBuf) {
464		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
465			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
466		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
467		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
468		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
469		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
470			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
471			     | AR_Q_MISC_QCU_COMP_EN);
472	}
473
474	/*
475	 * Always update the secondary interrupt mask registers - this
476	 * could be a new queue getting enabled in a running system or
477	 * hw getting re-initialized during a reset!
478	 *
479	 * Since we don't differentiate between tx interrupts corresponding
480	 * to individual queues - secondary tx mask regs are always unmasked;
481	 * tx interrupts are enabled/disabled for all queues collectively
482	 * using the primary mask reg
483	 */
484	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
485		ahp->ah_txOkInterruptMask |= 1 << q;
486	else
487		ahp->ah_txOkInterruptMask &= ~(1 << q);
488	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
489		ahp->ah_txErrInterruptMask |= 1 << q;
490	else
491		ahp->ah_txErrInterruptMask &= ~(1 << q);
492	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
493		ahp->ah_txDescInterruptMask |= 1 << q;
494	else
495		ahp->ah_txDescInterruptMask &= ~(1 << q);
496	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
497		ahp->ah_txEolInterruptMask |= 1 << q;
498	else
499		ahp->ah_txEolInterruptMask &= ~(1 << q);
500	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
501		ahp->ah_txUrnInterruptMask |= 1 << q;
502	else
503		ahp->ah_txUrnInterruptMask &= ~(1 << q);
504	setTxQInterrupts(ah, qi);
505
506	return AH_TRUE;
507}
508#undef	TU_TO_USEC
509
510/*
511 * Get the TXDP for the specified queue
512 */
513uint32_t
514ar5212GetTxDP(struct ath_hal *ah, u_int q)
515{
516	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
517	return OS_REG_READ(ah, AR_QTXDP(q));
518}
519
520/*
521 * Set the TxDP for the specified queue
522 */
523HAL_BOOL
524ar5212SetTxDP(struct ath_hal *ah, u_int q, uint32_t txdp)
525{
526	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
527	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
528
529	/*
530	 * Make sure that TXE is deasserted before setting the TXDP.  If TXE
531	 * is still asserted, setting TXDP will have no effect.
532	 */
533	HALASSERT((OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) == 0);
534
535	OS_REG_WRITE(ah, AR_QTXDP(q), txdp);
536
537	return AH_TRUE;
538}
539
540/*
541 * Set Transmit Enable bits for the specified queue
542 */
543HAL_BOOL
544ar5212StartTxDma(struct ath_hal *ah, u_int q)
545{
546	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
547
548	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
549
550	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);
551
552	/* Check to be sure we're not enabling a q that has its TXD bit set. */
553	HALASSERT((OS_REG_READ(ah, AR_Q_TXD) & (1 << q)) == 0);
554
555	OS_REG_WRITE(ah, AR_Q_TXE, 1 << q);
556	return AH_TRUE;
557}
558
559/*
560 * Return the number of pending frames or 0 if the specified
561 * queue is stopped.
562 */
563uint32_t
564ar5212NumTxPending(struct ath_hal *ah, u_int q)
565{
566	uint32_t npend;
567
568	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
569	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
570
571	npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
572	if (npend == 0) {
573		/*
574		 * Pending frame count (PFC) can momentarily go to zero
575		 * while TXE remains asserted.  In other words a PFC of
576		 * zero is not sufficient to say that the queue has stopped.
577		 */
578		if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q))
579			npend = 1;		/* arbitrarily return 1 */
580	}
581	return npend;
582}
583
584/*
585 * Stop transmit on the specified queue
586 */
587HAL_BOOL
588ar5212StopTxDma(struct ath_hal *ah, u_int q)
589{
590	u_int i;
591	u_int wait;
592
593	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
594
595	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
596
597	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
598	for (i = 1000; i != 0; i--) {
599		if (ar5212NumTxPending(ah, q) == 0)
600			break;
601		OS_DELAY(100);        /* XXX get actual value */
602	}
603#ifdef AH_DEBUG
604	if (i == 0) {
605		HALDEBUG(ah, HAL_DEBUG_ANY,
606		    "%s: queue %u DMA did not stop in 100 msec\n", __func__, q);
607		HALDEBUG(ah, HAL_DEBUG_ANY,
608		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
609		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
610		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
611		HALDEBUG(ah, HAL_DEBUG_ANY,
612		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
613		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
614		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
615		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
616	}
617#endif /* AH_DEBUG */
618
619	/* 2413+ and up can kill packets at the PCU level */
620	if (ar5212NumTxPending(ah, q) &&
621	    (IS_2413(ah) || IS_5413(ah) || IS_2425(ah) || IS_2417(ah))) {
622		uint32_t tsfLow, j;
623
624		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
625		    "%s: Num of pending TX Frames %d on Q %d\n",
626		    __func__, ar5212NumTxPending(ah, q), q);
627
628		/* Kill last PCU Tx Frame */
629		/* TODO - save off and restore current values of Q1/Q2? */
630		for (j = 0; j < 2; j++) {
631			tsfLow = OS_REG_READ(ah, AR_TSF_L32);
632			OS_REG_WRITE(ah, AR_QUIET2, SM(100, AR_QUIET2_QUIET_PER) |
633				     SM(10, AR_QUIET2_QUIET_DUR));
634			OS_REG_WRITE(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE |
635				     SM(tsfLow >> 10, AR_QUIET1_NEXT_QUIET));
636			if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) {
637				break;
638			}
639			HALDEBUG(ah, HAL_DEBUG_ANY,
640			    "%s: TSF moved while trying to set quiet time "
641			    "TSF: 0x%08x\n", __func__, tsfLow);
642			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
643		}
644
645		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
646
647		/* Allow the quiet mechanism to do its work */
648		OS_DELAY(200);
649		OS_REG_CLR_BIT(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE);
650
651		/* Give at least 1 millisec more to wait */
652		wait = 100;
653
654		/* Verify all transmit is dead */
655		while (ar5212NumTxPending(ah, q)) {
656			if ((--wait) == 0) {
657				HALDEBUG(ah, HAL_DEBUG_ANY,
658				    "%s: Failed to stop Tx DMA in %d msec after killing last frame\n",
659				    __func__, wait);
660				break;
661			}
662			OS_DELAY(10);
663		}
664
665		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
666	}
667
668	OS_REG_WRITE(ah, AR_Q_TXD, 0);
669	return (i != 0);
670}
671
672/*
673 * Descriptor Access Functions
674 */
675
676#define	VALID_PKT_TYPES \
677	((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
678	 (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
679	 (1<<HAL_PKT_TYPE_BEACON))
680#define	isValidPktType(_t)	((1<<(_t)) & VALID_PKT_TYPES)
681#define	VALID_TX_RATES \
682	((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
683	 (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
684	 (1<<0x1d)|(1<<0x18)|(1<<0x1c))
685#define	isValidTxRate(_r)	((1<<(_r)) & VALID_TX_RATES)
686
687HAL_BOOL
688ar5212SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
689	u_int pktLen,
690	u_int hdrLen,
691	HAL_PKT_TYPE type,
692	u_int txPower,
693	u_int txRate0, u_int txTries0,
694	u_int keyIx,
695	u_int antMode,
696	u_int flags,
697	u_int rtsctsRate,
698	u_int rtsctsDuration,
699	u_int compicvLen,
700	u_int compivLen,
701	u_int comp)
702{
703#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
704	struct ar5212_desc *ads = AR5212DESC(ds);
705	struct ath_hal_5212 *ahp = AH5212(ah);
706
707	(void) hdrLen;
708
709	HALASSERT(txTries0 != 0);
710	HALASSERT(isValidPktType(type));
711	HALASSERT(isValidTxRate(txRate0));
712	HALASSERT((flags & RTSCTS) != RTSCTS);
713	/* XXX validate antMode */
714
715        txPower = (txPower + ahp->ah_txPowerIndexOffset );
716        if(txPower > 63)  txPower=63;
717
718	ads->ds_ctl0 = (pktLen & AR_FrameLen)
719		     | (txPower << AR_XmitPower_S)
720		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
721		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClearDestMask : 0)
722		     | SM(antMode, AR_AntModeXmit)
723		     | (flags & HAL_TXDESC_INTREQ ? AR_TxInterReq : 0)
724		     ;
725	ads->ds_ctl1 = (type << AR_FrmType_S)
726		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
727                     | (comp << AR_CompProc_S)
728                     | (compicvLen << AR_CompICVLen_S)
729                     | (compivLen << AR_CompIVLen_S)
730                     ;
731	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
732		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEna : 0)
733		     ;
734	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
735		     ;
736	if (keyIx != HAL_TXKEYIX_INVALID) {
737		/* XXX validate key index */
738		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
739		ads->ds_ctl0 |= AR_DestIdxValid;
740	}
741	if (flags & RTSCTS) {
742		if (!isValidTxRate(rtsctsRate)) {
743			HALDEBUG(ah, HAL_DEBUG_ANY,
744			    "%s: invalid rts/cts rate 0x%x\n",
745			    __func__, rtsctsRate);
746			return AH_FALSE;
747		}
748		/* XXX validate rtsctsDuration */
749		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
750			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSCTSEnable : 0)
751			     ;
752		ads->ds_ctl2 |= SM(rtsctsDuration, AR_RTSCTSDuration);
753		ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
754	}
755	return AH_TRUE;
756#undef RTSCTS
757}
758
759HAL_BOOL
760ar5212SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
761	u_int txRate1, u_int txTries1,
762	u_int txRate2, u_int txTries2,
763	u_int txRate3, u_int txTries3)
764{
765	struct ar5212_desc *ads = AR5212DESC(ds);
766
767	if (txTries1) {
768		HALASSERT(isValidTxRate(txRate1));
769		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1)
770			     |  AR_DurUpdateEna
771			     ;
772		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
773	}
774	if (txTries2) {
775		HALASSERT(isValidTxRate(txRate2));
776		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2)
777			     |  AR_DurUpdateEna
778			     ;
779		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
780	}
781	if (txTries3) {
782		HALASSERT(isValidTxRate(txRate3));
783		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3)
784			     |  AR_DurUpdateEna
785			     ;
786		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
787	}
788	return AH_TRUE;
789}
790
/*
 * Request a TXDESC interrupt for this frame by setting the
 * interrupt-request bit in the (possibly byte-swapped) descriptor.
 */
void
ar5212IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
	/* Descriptor is stored byte-swapped; swap the bit to match. */
	ads->ds_ctl0 |= __bswap32(AR_TxInterReq);
#else
	ads->ds_ctl0 |= AR_TxInterReq;
#endif
}
802
/*
 * Fill in the DMA portion of a tx descriptor: buffer address, segment
 * length, and the More/chaining bits for multi-descriptor frames.
 * For non-first descriptors the rate/try control words are copied from
 * the first descriptor (ds0) so completion processing can find them.
 */
HAL_BOOL
ar5212FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int qcuId,
	u_int descId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5212_desc *ads = AR5212DESC(ds);
	uint32_t segLen = segLenList[0];

	/* Segment length must fit the buffer-length field. */
	HALASSERT((segLen &~ AR_BufLen) == 0);

	ds->ds_data = bufAddrList[0];

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_More);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 * NB: ds_ctl0 keeps only the interrupt-request bit.
		 */
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxInterReq;
		ads->ds_ctl2 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl0 = AR5212DESC_CONST(ds0)->ds_ctl0 & AR_TxInterReq;
		ads->ds_ctl2 = AR5212DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5212DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxInterReq;
#else
		ads->ds_ctl0 = AR5212DESC_CONST(ds0)->ds_ctl0 & AR_TxInterReq;
#endif
		ads->ds_ctl1 = segLen | AR_More;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* Clear status so completion doesn't see a stale Done bit. */
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	return AH_TRUE;
}
856
857#ifdef AH_NEED_DESC_SWAP
858/* Swap transmit descriptor */
859static __inline void
860ar5212SwapTxDesc(struct ath_desc *ds)
861{
862	ds->ds_data = __bswap32(ds->ds_data);
863        ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
864        ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
865        ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
866        ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
867        ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
868        ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
869}
870#endif
871
872/*
873 * Processing of HW TX descriptor.
874 */
875HAL_STATUS
876ar5212ProcTxDesc(struct ath_hal *ah,
877	struct ath_desc *ds, struct ath_tx_status *ts)
878{
879	struct ar5212_desc *ads = AR5212DESC(ds);
880
881#ifdef AH_NEED_DESC_SWAP
882	if ((ads->ds_txstatus1 & __bswap32(AR_Done)) == 0)
883                return HAL_EINPROGRESS;
884
885	ar5212SwapTxDesc(ds);
886#else
887	if ((ads->ds_txstatus1 & AR_Done) == 0)
888		return HAL_EINPROGRESS;
889#endif
890
891	/* Update software copies of the HW status */
892	ts->ts_seqnum = MS(ads->ds_txstatus1, AR_SeqNum);
893	ts->ts_tstamp = MS(ads->ds_txstatus0, AR_SendTimestamp);
894	ts->ts_status = 0;
895	if ((ads->ds_txstatus0 & AR_FrmXmitOK) == 0) {
896		if (ads->ds_txstatus0 & AR_ExcessiveRetries)
897			ts->ts_status |= HAL_TXERR_XRETRY;
898		if (ads->ds_txstatus0 & AR_Filtered)
899			ts->ts_status |= HAL_TXERR_FILT;
900		if (ads->ds_txstatus0 & AR_FIFOUnderrun)
901			ts->ts_status |= HAL_TXERR_FIFO;
902	}
903	/*
904	 * Extract the transmit rate used and mark the rate as
905	 * ``alternate'' if it wasn't the series 0 rate.
906	 */
907	ts->ts_finaltsi = MS(ads->ds_txstatus1, AR_FinalTSIndex);
908	switch (ts->ts_finaltsi) {
909	case 0:
910		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
911		break;
912	case 1:
913		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
914		break;
915	case 2:
916		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
917		break;
918	case 3:
919		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
920		break;
921	}
922	ts->ts_rssi = MS(ads->ds_txstatus1, AR_AckSigStrength);
923	ts->ts_shortretry = MS(ads->ds_txstatus0, AR_RTSFailCnt);
924	ts->ts_longretry = MS(ads->ds_txstatus0, AR_DataFailCnt);
925	/*
926	 * The retry count has the number of un-acked tries for the
927	 * final series used.  When doing multi-rate retry we must
928	 * fixup the retry count by adding in the try counts for
929	 * each series that was fully-processed.  Beware that this
930	 * takes values from the try counts in the final descriptor.
931	 * These are not required by the hardware.  We assume they
932	 * are placed there by the driver as otherwise we have no
933	 * access and the driver can't do the calculation because it
934	 * doesn't know the descriptor format.
935	 */
936	switch (ts->ts_finaltsi) {
937	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
938	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
939	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
940	}
941	ts->ts_virtcol = MS(ads->ds_txstatus0, AR_VirtCollCnt);
942	ts->ts_antenna = (ads->ds_txstatus1 & AR_XmitAtenna ? 2 : 1);
943
944	return HAL_OK;
945}
946
947/*
948 * Determine which tx queues need interrupt servicing.
949 */
950void
951ar5212GetTxIntrQueue(struct ath_hal *ah, uint32_t *txqs)
952{
953	struct ath_hal_5212 *ahp = AH5212(ah);
954	*txqs &= ahp->ah_intrTxqs;
955	ahp->ah_intrTxqs &= ~(*txqs);
956}
957
958/*
959 * Retrieve the rate table from the given TX completion descriptor
960 */
961HAL_BOOL
962ar5212GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
963{
964	const struct ar5212_desc *ads = AR5212DESC_CONST(ds0);
965
966	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
967	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
968	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
969	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);
970
971	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
972	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
973	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
974	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);
975
976	return AH_TRUE;
977}
978
979void
980ar5212SetTxDescLink(struct ath_hal *ah, void *ds, uint32_t link)
981{
982	struct ar5212_desc *ads = AR5212DESC(ds);
983
984	ads->ds_link = link;
985}
986
987void
988ar5212GetTxDescLink(struct ath_hal *ah, void *ds, uint32_t *link)
989{
990	struct ar5212_desc *ads = AR5212DESC(ds);
991
992	*link = ads->ds_link;
993}
994
995void
996ar5212GetTxDescLinkPtr(struct ath_hal *ah, void *ds, uint32_t **linkptr)
997{
998	struct ar5212_desc *ads = AR5212DESC(ds);
999
1000	*linkptr = &ads->ds_link;
1001}
1002