/*
 * Copyright (c) 2010 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "htc.h"

/******/
/* TX */
/******/

#define ATH9K_HTC_INIT_TXQ(subtype) do {			\
		qi.tqi_subtype = subtype;			\
		qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;		\
		qi.tqi_physCompBuf = 0;				\
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |	\
			TXQ_FLAG_TXDESCINT_ENABLE;		\
	} while (0)

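/*
 * Map a mac80211 queue index (0 = VO, 1 = VI, 2 = BE, 3 = BK) to the
 * hardware queue number recorded in hwq_map by ath9k_htc_txq_setup().
 * Unknown indices fall back to the best-effort queue.
 */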
int get_hw_qnum(u16 queue, int *hwq_map)
{
	switch (queue) {
	case 0:
		return hwq_map[WME_AC_VO];
	case 1:
		return hwq_map[WME_AC_VI];
	case 2:
		return hwq_map[WME_AC_BE];
	case 3:
		return hwq_map[WME_AC_BK];
	default:
		return hwq_map[WME_AC_BE];
	}
}

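/*
 * Update the EDCA parameters of an already initialized hardware queue:
 * read back the current queue properties, overlay the values supplied
 * by mac80211 and push them to the hardware, resetting the queue on
 * success. Returns -EIO if the hardware rejects the new parameters.
 */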
int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
		       struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hw *ah = priv->ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	ath9k_hw_get_txq_props(ah, qnum, &qi);

	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin / 2;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
			  "Unable to update hardware queue %u!\n", qnum);
		error = -EIO;
	} else {
		ath9k_hw_resettxqueue(ah, qnum);
	}

	return error;
}

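/*
 * Prepare an outgoing frame for the target firmware: prepend the
 * HTC-specific descriptor (tx_frame_hdr for data frames, tx_mgmt_hdr
 * for everything else), pick the HTC endpoint that matches the WMM
 * queue the frame was mapped to, and hand it to the HTC layer.
 */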
int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = tx_info->control.sta;
	struct ath9k_htc_sta *ista;
	struct ath9k_htc_tx_ctl tx_ctl;
	enum htc_endpoint_id epid;
	u16 qnum;
	__le16 fc;
	u8 *tx_fhdr;
	u8 sta_idx, vif_idx;

	hdr = (struct ieee80211_hdr *) skb->data;
	fc = hdr->frame_control;

	if (tx_info->control.vif &&
			(struct ath9k_htc_vif *) tx_info->control.vif->drv_priv)
		vif_idx = ((struct ath9k_htc_vif *)
				tx_info->control.vif->drv_priv)->index;
	else
		vif_idx = priv->nvifs;

	if (sta) {
		ista = (struct ath9k_htc_sta *) sta->drv_priv;
		sta_idx = ista->index;
	} else {
		sta_idx = 0;
	}

	memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));

	if (ieee80211_is_data(fc)) {
		struct tx_frame_hdr tx_hdr;
		u8 *qc;

		memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));

		tx_hdr.node_idx = sta_idx;
		tx_hdr.vif_idx = vif_idx;

		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
			tx_ctl.type = ATH9K_HTC_AMPDU;
			tx_hdr.data_type = ATH9K_HTC_AMPDU;
		} else {
			tx_ctl.type = ATH9K_HTC_NORMAL;
			tx_hdr.data_type = ATH9K_HTC_NORMAL;
		}

		if (ieee80211_is_data_qos(fc)) {
			qc = ieee80211_get_qos_ctl(hdr);
			tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		}

		/* Check for RTS protection */
		if (priv->hw->wiphy->rts_threshold != (u32) -1)
			if (skb->len > priv->hw->wiphy->rts_threshold)
				tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;

		/* CTS-to-self */
		if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
		    (priv->op_flags & OP_PROTECT_ENABLE))
			tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;

		tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
		if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
			tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
		else
			tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

		tx_fhdr = skb_push(skb, sizeof(tx_hdr));
		memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));

		qnum = skb_get_queue_mapping(skb);

		switch (qnum) {
		case 0:
			TX_QSTAT_INC(WME_AC_VO);
			epid = priv->data_vo_ep;
			break;
		case 1:
			TX_QSTAT_INC(WME_AC_VI);
			epid = priv->data_vi_ep;
			break;
		case 2:
			TX_QSTAT_INC(WME_AC_BE);
			epid = priv->data_be_ep;
			break;
		case 3:
		default:
			TX_QSTAT_INC(WME_AC_BK);
			epid = priv->data_bk_ep;
			break;
		}
	} else {
		struct tx_mgmt_hdr mgmt_hdr;

		memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));

		tx_ctl.type = ATH9K_HTC_NORMAL;

		mgmt_hdr.node_idx = sta_idx;
		mgmt_hdr.vif_idx = vif_idx;
		mgmt_hdr.tidno = 0;
		mgmt_hdr.flags = 0;

		mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
		if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
			mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
		else
			mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;

		tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
		memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
		epid = priv->mgmt_ep;
	}

	return htc_send(priv->htc, skb, epid, &tx_ctl);
}

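/*
 * Returns true if a block-ack session has not yet been started for
 * this TID (tid_state is still AGGR_STOP), telling the TX completion
 * path that it may initiate aggregation.
 */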
static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv,
				    struct ath9k_htc_sta *ista, u8 tid)
{
	bool ret = false;

	spin_lock_bh(&priv->tx_lock);
	if ((tid < ATH9K_HTC_MAX_TID) && (ista->tid_state[tid] == AGGR_STOP))
		ret = true;
	spin_unlock_bh(&priv->tx_lock);

	return ret;
}

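/*
 * TX completion tasklet: drain priv->tx_queue, report the status of
 * each frame to mac80211 and, for QoS data sent to an HT station,
 * start a block-ack session on first use of a TID. Stopped mac80211
 * queues are woken once the backlog has been processed.
 */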
void ath9k_tx_tasklet(unsigned long data)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
	struct ieee80211_sta *sta;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb = NULL;
	__le16 fc;

	while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {

		hdr = (struct ieee80211_hdr *) skb->data;
		fc = hdr->frame_control;
		tx_info = IEEE80211_SKB_CB(skb);

		memset(&tx_info->status, 0, sizeof(tx_info->status));

		rcu_read_lock();

		sta = ieee80211_find_sta(priv->vif, hdr->addr1);
		if (!sta) {
			rcu_read_unlock();
			ieee80211_tx_status(priv->hw, skb);
			continue;
		}

		/* Check if we need to start aggregation */

		if (sta && conf_is_ht(&priv->hw->conf) &&
		    !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
			if (ieee80211_is_data_qos(fc)) {
				u8 *qc, tid;
				struct ath9k_htc_sta *ista;

				qc = ieee80211_get_qos_ctl(hdr);
				tid = qc[0] & 0xf;
				ista = (struct ath9k_htc_sta *)sta->drv_priv;

				if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
					ieee80211_start_tx_ba_session(sta, tid);
					spin_lock_bh(&priv->tx_lock);
					ista->tid_state[tid] = AGGR_PROGRESS;
					spin_unlock_bh(&priv->tx_lock);
				}
			}
		}

		rcu_read_unlock();

		/* Send status to mac80211 */
		ieee80211_tx_status(priv->hw, skb);
	}

	/* Wake TX queues if needed */
	spin_lock_bh(&priv->tx_lock);
	if (priv->tx_queues_stop) {
		priv->tx_queues_stop = false;
		spin_unlock_bh(&priv->tx_lock);
		ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
			  "Waking up TX queues\n");
		ieee80211_wake_queues(priv->hw);
		return;
	}
	spin_unlock_bh(&priv->tx_lock);
}

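/*
 * TX completion callback invoked by the HTC layer. The driver-specific
 * header that was pushed in ath9k_htc_tx_start() is stripped, the ACK
 * status is recorded and the skb is queued for ath9k_tx_tasklet().
 */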
void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
		    enum htc_endpoint_id ep_id, bool txok)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ieee80211_tx_info *tx_info;

	if (!skb)
		return;

	if (ep_id == priv->mgmt_ep) {
		skb_pull(skb, sizeof(struct tx_mgmt_hdr));
	} else if ((ep_id == priv->data_bk_ep) ||
		   (ep_id == priv->data_be_ep) ||
		   (ep_id == priv->data_vi_ep) ||
		   (ep_id == priv->data_vo_ep)) {
		skb_pull(skb, sizeof(struct tx_frame_hdr));
	} else {
		ath_print(common, ATH_DBG_FATAL,
			  "Unsupported TX EPID: %d\n", ep_id);
		dev_kfree_skb_any(skb);
		return;
	}

	tx_info = IEEE80211_SKB_CB(skb);

	if (txok)
		tx_info->flags |= IEEE80211_TX_STAT_ACK;

	skb_queue_tail(&priv->tx_queue, skb);
	tasklet_schedule(&priv->tx_tasklet);
}

int ath9k_tx_init(struct ath9k_htc_priv *priv)
{
	skb_queue_head_init(&priv->tx_queue);
	return 0;
}

void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
{

}

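/*
 * Allocate a hardware data queue for the given WMM access category
 * (subtype) and remember the mapping in priv->hwq_map so that
 * get_hw_qnum() can translate mac80211 queue numbers later on.
 */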
bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info qi;
	int qnum;

	memset(&qi, 0, sizeof(qi));
	ATH9K_HTC_INIT_TXQ(subtype);

	qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
	if (qnum == -1)
		return false;

	if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
		ath_print(common, ATH_DBG_FATAL,
			  "qnum %u out of range, max %u!\n",
			  qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
		ath9k_hw_releasetxqueue(ah, qnum);
		return false;
	}

	priv->hwq_map[subtype] = qnum;
	return true;
}

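/* Set up the CAB (content-after-beacon) queue used for buffered multicast. */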
int ath9k_htc_cabq_setup(struct ath9k_htc_priv *priv)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	ATH9K_HTC_INIT_TXQ(0);

	return ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_CAB, &qi);
}

/******/
/* RX */
/******/

/*
 * Calculate the RX filter to be set in the HW.
 */
u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	struct ath_hw *ah = priv->ah;
	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((ah->opmode != NL80211_IFTYPE_AP) &&
	     (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (priv->rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if (conf_is_ht(&priv->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	return rfilt;

#undef RX_FILTER_PRESERVE
}

/*
 * Recv initialization for opmode change.
 */
static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath9k_htc_calcrxfilter(priv);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

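/*
 * Host-side RX start: enable reception, program the RX filter, BSSID
 * mask, opmode and multicast filter, and reset the RSSI averaging state.
 */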
void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
	ath9k_hw_rxena(priv->ah);
	ath9k_htc_opmode_init(priv);
	ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING));
	priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}

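/*
 * Translate the hardware rate code into a mac80211 rate index.
 * A set 0x80 bit marks an HT (MCS) rate; legacy rates are looked up
 * in the bitrate table of the current band, with short-preamble
 * variants flagged accordingly.
 */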
static void ath9k_process_rate(struct ieee80211_hw *hw,
			       struct ieee80211_rx_status *rxs,
			       u8 rx_rate, u8 rs_flags)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	if (rx_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_rate & 0x7f;
		return;
	}

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_rate) {
			rxs->rate_idx = i;
			return;
		}
		if (sband->bitrates[i].hw_value_short == rx_rate) {
			rxs->rate_idx = i;
			rxs->flag |= RX_FLAG_SHORTPRE;
			return;
		}
	}

}

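/*
 * Convert a frame received from the target into something mac80211 can
 * consume: validate and strip the HTC RX status header, remove the
 * 802.11 header padding, weed out error frames, and fill rx_status
 * with crypto, rate, RSSI and timestamp information. Returns false if
 * the frame should be dropped.
 */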
static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
			     struct ath9k_htc_rxbuf *rxbuf,
			     struct ieee80211_rx_status *rx_status)

{
	struct ieee80211_hdr *hdr;
	struct ieee80211_hw *hw = priv->hw;
	struct sk_buff *skb = rxbuf->skb;
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath_htc_rx_status *rxstatus;
	int hdrlen, padpos, padsize;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;
	__le16 fc;

	if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
		ath_print(common, ATH_DBG_FATAL,
			  "Corrupted RX frame, dropping\n");
		goto rx_next;
	}

	rxstatus = (struct ath_htc_rx_status *)skb->data;

	if (be16_to_cpu(rxstatus->rs_datalen) -
	    (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "Corrupted RX data len, dropping "
			  "(dlen: %d, skblen: %d)\n",
			  rxstatus->rs_datalen, skb->len);
		goto rx_next;
	}

	/* Get the RX status information */
	memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
	skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);

	padpos = ath9k_cmn_padpos(fc);

	padsize = padpos & 3;
	if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));

	if (rxbuf->rxstatus.rs_status != 0) {
		if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
		} else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rxbuf->rxstatus.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (rxbuf->rxstatus.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
		u8 keyix;
		keyix = rxbuf->rxstatus.rs_keyix;
		if (keyix != ATH9K_RXKEYIX_INVALID) {
			rx_status->flag |= RX_FLAG_DECRYPTED;
		} else if (ieee80211_has_protected(fc) &&
			   skb->len >= hdrlen + 4) {
			keyix = skb->data[hdrlen + 3] >> 6;
			if (test_bit(keyix, common->keymap))
				rx_status->flag |= RX_FLAG_DECRYPTED;
		}
	}

	ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
			   rxbuf->rxstatus.rs_flags);

	if (priv->op_flags & OP_ASSOCIATED) {
		if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
		    !rxbuf->rxstatus.rs_moreaggr)
			ATH_RSSI_LPF(priv->rx.last_rssi,
				     rxbuf->rxstatus.rs_rssi);

		last_rssi = priv->rx.last_rssi;

		if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
			rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
							     ATH_RSSI_EP_MULTIPLIER);

		if (rxbuf->rxstatus.rs_rssi < 0)
			rxbuf->rxstatus.rs_rssi = 0;

		if (ieee80211_is_beacon(fc))
			priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
	}

	rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
	rx_status->antenna = rxbuf->rxstatus.rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return true;

rx_next:
	return false;
}

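/*
 * RX tasklet: walk the buffers that ath9k_htc_rxep() marked as
 * in_process, run ath9k_rx_prepare() on each and pass the resulting
 * frames to mac80211, recycling the buffers afterwards.
 */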
void ath9k_rx_tasklet(unsigned long data)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
	struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb;
	unsigned long flags;
	struct ieee80211_hdr *hdr;

	do {
		spin_lock_irqsave(&priv->rx.rxbuflock, flags);
		list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
			if (tmp_buf->in_process) {
				rxbuf = tmp_buf;
				break;
			}
		}

		if (rxbuf == NULL) {
			spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
			break;
		}

		if (!rxbuf->skb)
			goto requeue;

		if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
			dev_kfree_skb_any(rxbuf->skb);
			goto requeue;
		}

		memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
		       sizeof(struct ieee80211_rx_status));
		skb = rxbuf->skb;
		hdr = (struct ieee80211_hdr *) skb->data;

		if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
				ieee80211_queue_work(priv->hw, &priv->ps_work);

		spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);

		ieee80211_rx(priv->hw, skb);

		spin_lock_irqsave(&priv->rx.rxbuflock, flags);
requeue:
		rxbuf->in_process = false;
		rxbuf->skb = NULL;
		list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
		rxbuf = NULL;
		spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
	} while (1);

}

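/*
 * RX callback from the HTC layer. A free buffer from priv->rx.rxbuf is
 * claimed, the received skb is attached to it and the RX tasklet is
 * scheduled; if no buffer is available the frame is dropped.
 */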
void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
		    enum htc_endpoint_id ep_id)
{
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;

	spin_lock(&priv->rx.rxbuflock);
	list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
		if (!tmp_buf->in_process) {
			rxbuf = tmp_buf;
			break;
		}
	}
	spin_unlock(&priv->rx.rxbuflock);

	if (rxbuf == NULL) {
		ath_print(common, ATH_DBG_ANY,
			  "No free RX buffer\n");
		goto err;
	}

	spin_lock(&priv->rx.rxbuflock);
	rxbuf->skb = skb;
	rxbuf->in_process = true;
	spin_unlock(&priv->rx.rxbuflock);

	tasklet_schedule(&priv->rx_tasklet);
	return;
err:
	dev_kfree_skb_any(skb);
}

void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
{
	struct ath9k_htc_rxbuf *rxbuf, *tbuf;

	list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
		list_del(&rxbuf->list);
		if (rxbuf->skb)
			dev_kfree_skb_any(rxbuf->skb);
		kfree(rxbuf);
	}
}

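/*
 * Allocate the pool of ATH9K_HTC_RXBUF receive buffers and initialize
 * the RX list and lock. On allocation failure everything allocated so
 * far is freed again via ath9k_rx_cleanup().
 */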
int ath9k_rx_init(struct ath9k_htc_priv *priv)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_rxbuf *rxbuf;
	int i = 0;

	INIT_LIST_HEAD(&priv->rx.rxbuf);
	spin_lock_init(&priv->rx.rxbuflock);

	for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
		rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
		if (rxbuf == NULL) {
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to allocate RX buffers\n");
			goto err;
		}
		list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
	}

	return 0;

err:
	ath9k_rx_cleanup(priv);
	return -ENOMEM;
}