1/*
2 * Atheros AR9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, Christian Lamparter <chunkeey@web.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING.  If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 *    Permission to use, copy, modify, and/or distribute this software for any
28 *    purpose with or without fee is hereby granted, provided that the above
29 *    copyright notice and this permission notice appear in all copies.
30 *
31 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/etherdevice.h>
44#include <net/mac80211.h>
45#include "ar9170.h"
46#include "hw.h"
47#include "cmd.h"
48
49static int modparam_nohwcrypt;
50module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
51MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
52
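/*
 * Note: hw_value packs the hardware rate code into the low nibble and a
 * tx-power index into bits 4-5; ar9170_tx_prepare_phy() unpacks it again
 * with (hw_value & 0xF) and (hw_value & 0x30) >> 4.
 */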
53#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
54	.bitrate	= (_bitrate),			\
55	.flags		= (_flags),			\
56	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
57}
58
59static struct ieee80211_rate __ar9170_ratetable[] = {
60	RATE(10, 0, 0, 0),
61	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
62	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
63	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
64	RATE(60, 0xb, 0, 0),
65	RATE(90, 0xf, 0, 0),
66	RATE(120, 0xa, 0, 0),
67	RATE(180, 0xe, 0, 0),
68	RATE(240, 0x9, 0, 0),
69	RATE(360, 0xd, 1, 0),
70	RATE(480, 0x8, 2, 0),
71	RATE(540, 0xc, 3, 0),
72};
73#undef RATE
74
75#define ar9170_g_ratetable	(__ar9170_ratetable + 0)
76#define ar9170_g_ratetable_size	12
77#define ar9170_a_ratetable	(__ar9170_ratetable + 4)
78#define ar9170_a_ratetable_size	8
79
80/*
81 * NB: The hw_value is used as an index into the ar9170_phy_freq_params
82 *     array in phy.c so that we don't have to do frequency lookups!
83 */
84#define CHAN(_freq, _idx) {		\
85	.center_freq	= (_freq),	\
86	.hw_value	= (_idx),	\
87	.max_power	= 18,	\
88}
89
90static struct ieee80211_channel ar9170_2ghz_chantable[] = {
91	CHAN(2412,  0),
92	CHAN(2417,  1),
93	CHAN(2422,  2),
94	CHAN(2427,  3),
95	CHAN(2432,  4),
96	CHAN(2437,  5),
97	CHAN(2442,  6),
98	CHAN(2447,  7),
99	CHAN(2452,  8),
100	CHAN(2457,  9),
101	CHAN(2462, 10),
102	CHAN(2467, 11),
103	CHAN(2472, 12),
104	CHAN(2484, 13),
105};
106
107static struct ieee80211_channel ar9170_5ghz_chantable[] = {
108	CHAN(4920, 14),
109	CHAN(4940, 15),
110	CHAN(4960, 16),
111	CHAN(4980, 17),
112	CHAN(5040, 18),
113	CHAN(5060, 19),
114	CHAN(5080, 20),
115	CHAN(5180, 21),
116	CHAN(5200, 22),
117	CHAN(5220, 23),
118	CHAN(5240, 24),
119	CHAN(5260, 25),
120	CHAN(5280, 26),
121	CHAN(5300, 27),
122	CHAN(5320, 28),
123	CHAN(5500, 29),
124	CHAN(5520, 30),
125	CHAN(5540, 31),
126	CHAN(5560, 32),
127	CHAN(5580, 33),
128	CHAN(5600, 34),
129	CHAN(5620, 35),
130	CHAN(5640, 36),
131	CHAN(5660, 37),
132	CHAN(5680, 38),
133	CHAN(5700, 39),
134	CHAN(5745, 40),
135	CHAN(5765, 41),
136	CHAN(5785, 42),
137	CHAN(5805, 43),
138	CHAN(5825, 44),
139	CHAN(5170, 45),
140	CHAN(5190, 46),
141	CHAN(5210, 47),
142	CHAN(5230, 48),
143};
144#undef CHAN
145
146#define AR9170_HT_CAP							\
147{									\
148	.ht_supported	= true,						\
149	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
150			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
151			  IEEE80211_HT_CAP_SGI_40 |			\
152			  IEEE80211_HT_CAP_GRN_FLD |			\
153			  IEEE80211_HT_CAP_DSSSCCK40 |			\
154			  IEEE80211_HT_CAP_SM_PS,			\
155	.ampdu_factor	= 3,						\
156	.ampdu_density	= 6,						\
157	.mcs		= {						\
158		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
159		.rx_highest = cpu_to_le16(300),				\
160		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
161	},								\
162}
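/*
 * Editor's note: ampdu_factor 3 advertises a maximum A-MPDU length of
 * 2^(13 + 3) - 1 = 65535 bytes, and ampdu_density 6 a minimum MPDU
 * start spacing of 8 us.
 */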
163
164static struct ieee80211_supported_band ar9170_band_2GHz = {
165	.channels	= ar9170_2ghz_chantable,
166	.n_channels	= ARRAY_SIZE(ar9170_2ghz_chantable),
167	.bitrates	= ar9170_g_ratetable,
168	.n_bitrates	= ar9170_g_ratetable_size,
169	.ht_cap		= AR9170_HT_CAP,
170};
171
172static struct ieee80211_supported_band ar9170_band_5GHz = {
173	.channels	= ar9170_5ghz_chantable,
174	.n_channels	= ARRAY_SIZE(ar9170_5ghz_chantable),
175	.bitrates	= ar9170_a_ratetable,
176	.n_bitrates	= ar9170_a_ratetable_size,
177	.ht_cap		= AR9170_HT_CAP,
178};
179
180static void ar9170_tx(struct ar9170 *ar);
181
182static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
183{
184	return le16_to_cpu(hdr->seq_ctrl) >> 4;
185}
186
187static inline u16 ar9170_get_seq(struct sk_buff *skb)
188{
189	struct ar9170_tx_control *txc = (void *) skb->data;
190	return ar9170_get_seq_h((void *) txc->frame_data);
191}
192
193#ifdef AR9170_QUEUE_DEBUG
194static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
195{
196	struct ar9170_tx_control *txc = (void *) skb->data;
197	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
198	struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
199	struct ieee80211_hdr *hdr = (void *) txc->frame_data;
200
201	wiphy_debug(ar->hw->wiphy,
202		    "=> FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
203		    "mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
204		    skb, skb_get_queue_mapping(skb),
205		    ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
206		    le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
207		    jiffies_to_msecs(arinfo->timeout - jiffies));
208}
209
210static void __ar9170_dump_txqueue(struct ar9170 *ar,
211				struct sk_buff_head *queue)
212{
213	struct sk_buff *skb;
214	int i = 0;
215
216	printk(KERN_DEBUG "---[ cut here ]---\n");
217	wiphy_debug(ar->hw->wiphy, "%d entries in queue.\n",
218		    skb_queue_len(queue));
219
220	skb_queue_walk(queue, skb) {
221		printk(KERN_DEBUG "index:%d =>\n", i++);
222		ar9170_print_txheader(ar, skb);
223	}
224	if (i != skb_queue_len(queue))
225		printk(KERN_DEBUG "WARNING: queue frame counter "
226		       "mismatch %d != %d\n", skb_queue_len(queue), i);
227	printk(KERN_DEBUG "---[ end ]---\n");
228}
229#endif /* AR9170_QUEUE_DEBUG */
230
231#ifdef AR9170_QUEUE_DEBUG
232static void ar9170_dump_txqueue(struct ar9170 *ar,
233				struct sk_buff_head *queue)
234{
235	unsigned long flags;
236
237	spin_lock_irqsave(&queue->lock, flags);
238	__ar9170_dump_txqueue(ar, queue);
239	spin_unlock_irqrestore(&queue->lock, flags);
240}
241#endif /* AR9170_QUEUE_DEBUG */
242
243#ifdef AR9170_QUEUE_STOP_DEBUG
244static void __ar9170_dump_txstats(struct ar9170 *ar)
245{
246	int i;
247
248	wiphy_debug(ar->hw->wiphy, "QoS queue stats\n");
249
250	for (i = 0; i < __AR9170_NUM_TXQ; i++)
251		wiphy_debug(ar->hw->wiphy,
252			    "queue:%d limit:%d len:%d waitack:%d stopped:%d\n",
253			    i, ar->tx_stats[i].limit, ar->tx_stats[i].len,
254			    skb_queue_len(&ar->tx_status[i]),
255			    ieee80211_queue_stopped(ar->hw, i));
256}
257#endif /* AR9170_QUEUE_STOP_DEBUG */
258
259/* the caller must guarantee exclusive access to the _bin_ queue. */
260static void ar9170_recycle_expired(struct ar9170 *ar,
261				   struct sk_buff_head *queue,
262				   struct sk_buff_head *bin)
263{
264	struct sk_buff *skb, *old = NULL;
265	unsigned long flags;
266
267	spin_lock_irqsave(&queue->lock, flags);
268	while ((skb = skb_peek(queue))) {
269		struct ieee80211_tx_info *txinfo;
270		struct ar9170_tx_info *arinfo;
271
272		txinfo = IEEE80211_SKB_CB(skb);
273		arinfo = (void *) txinfo->rate_driver_data;
274
275		if (time_is_before_jiffies(arinfo->timeout)) {
276#ifdef AR9170_QUEUE_DEBUG
277			wiphy_debug(ar->hw->wiphy,
278				    "[%ld > %ld] frame expired => recycle\n",
279				    jiffies, arinfo->timeout);
280			ar9170_print_txheader(ar, skb);
281#endif /* AR9170_QUEUE_DEBUG */
282			__skb_unlink(skb, queue);
283			__skb_queue_tail(bin, skb);
284		} else {
285			break;
286		}
287
288		if (unlikely(old == skb)) {
289			/* bail out - queue is shot. */
290
291			WARN_ON(1);
292			break;
293		}
294		old = skb;
295	}
296	spin_unlock_irqrestore(&queue->lock, flags);
297}
298
299static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
300				    u16 tx_status)
301{
302	struct ieee80211_tx_info *txinfo;
303	unsigned int retries = 0;
304
305	txinfo = IEEE80211_SKB_CB(skb);
306	ieee80211_tx_info_clear_status(txinfo);
307
308	switch (tx_status) {
309	case AR9170_TX_STATUS_RETRY:
310		retries = 2;
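		/* fall through: a retried frame still counts as ACKed */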
311	case AR9170_TX_STATUS_COMPLETE:
312		txinfo->flags |= IEEE80211_TX_STAT_ACK;
313		break;
314
315	case AR9170_TX_STATUS_FAILED:
316		retries = ar->hw->conf.long_frame_max_tx_count;
317		break;
318
319	default:
320		wiphy_err(ar->hw->wiphy,
321			  "invalid tx_status response (%x)\n", tx_status);
322		break;
323	}
324
325	txinfo->status.rates[0].count = retries + 1;
326	skb_pull(skb, sizeof(struct ar9170_tx_control));
327	ieee80211_tx_status_irqsafe(ar->hw, skb);
328}
329
330void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
331{
332	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
333	struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
334	unsigned int queue = skb_get_queue_mapping(skb);
335	unsigned long flags;
336
337	spin_lock_irqsave(&ar->tx_stats_lock, flags);
338	ar->tx_stats[queue].len--;
339
340	if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
341#ifdef AR9170_QUEUE_STOP_DEBUG
342		wiphy_debug(ar->hw->wiphy, "wake queue %d\n", queue);
343		__ar9170_dump_txstats(ar);
344#endif /* AR9170_QUEUE_STOP_DEBUG */
345		ieee80211_wake_queue(ar->hw, queue);
346	}
347	spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
348
349	if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
350		ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
351	} else {
352		arinfo->timeout = jiffies +
353			  msecs_to_jiffies(AR9170_TX_TIMEOUT);
354
355		skb_queue_tail(&ar->tx_status[queue], skb);
356	}
357
358	if (!ar->tx_stats[queue].len &&
359	    !skb_queue_empty(&ar->tx_pending[queue])) {
360		ar9170_tx(ar);
361	}
362}
363
364static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
365					     const u8 *mac,
366					     struct sk_buff_head *queue,
367					     const u32 rate)
368{
369	unsigned long flags;
370	struct sk_buff *skb;
371
372	/*
373	 * Unfortunately, the firmware does not tell us which (queued) frame
374	 * this transmission status report belongs to.
375	 *
376	 * So we have to make risky guesses - with the scarce information
377	 * the firmware provided (-> destination MAC, and phy_control) -
378	 * and hope that we picked the right one...
379	 */
380
381	spin_lock_irqsave(&queue->lock, flags);
382	skb_queue_walk(queue, skb) {
383		struct ar9170_tx_control *txc = (void *) skb->data;
384		struct ieee80211_hdr *hdr = (void *) txc->frame_data;
385		u32 r;
386
387		if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
388#ifdef AR9170_QUEUE_DEBUG
389			wiphy_debug(ar->hw->wiphy,
390				    "skip frame => DA %pM != %pM\n",
391				    mac, ieee80211_get_DA(hdr));
392			ar9170_print_txheader(ar, skb);
393#endif /* AR9170_QUEUE_DEBUG */
394			continue;
395		}
396
397		r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
398		    AR9170_TX_PHY_MCS_SHIFT;
399
400		if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
401#ifdef AR9170_QUEUE_DEBUG
402			wiphy_debug(ar->hw->wiphy,
403				    "skip frame => rate %d != %d\n", rate, r);
404			ar9170_print_txheader(ar, skb);
405#endif /* AR9170_QUEUE_DEBUG */
406			continue;
407		}
408
409		__skb_unlink(skb, queue);
410		spin_unlock_irqrestore(&queue->lock, flags);
411		return skb;
412	}
413
414#ifdef AR9170_QUEUE_DEBUG
415	wiphy_err(ar->hw->wiphy,
416		  "ESS:[%pM] does not have any outstanding frames in queue.\n",
417		  mac);
418	__ar9170_dump_txqueue(ar, queue);
419#endif /* AR9170_QUEUE_DEBUG */
420	spin_unlock_irqrestore(&queue->lock, flags);
421
422	return NULL;
423}
424
425/*
426 * This worker tries to keep the tx_status queues maintained, so we
427 * can guarantee that incoming tx_status reports are actually for a
428 * pending frame.
429 */
430
431static void ar9170_tx_janitor(struct work_struct *work)
432{
433	struct ar9170 *ar = container_of(work, struct ar9170,
434					 tx_janitor.work);
435	struct sk_buff_head waste;
436	unsigned int i;
437	bool resched = false;
438
439	if (unlikely(!IS_STARTED(ar)))
440		return ;
441
442	skb_queue_head_init(&waste);
443
444	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
445#ifdef AR9170_QUEUE_DEBUG
446		wiphy_debug(ar->hw->wiphy, "garbage collector scans queue:%d\n",
447			    i);
448		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
449		ar9170_dump_txqueue(ar, &ar->tx_status[i]);
450#endif /* AR9170_QUEUE_DEBUG */
451
452		ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
453		ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
454		skb_queue_purge(&waste);
455
456		if (!skb_queue_empty(&ar->tx_status[i]) ||
457		    !skb_queue_empty(&ar->tx_pending[i]))
458			resched = true;
459	}
460
461	if (!resched)
462		return;
463
464	ieee80211_queue_delayed_work(ar->hw,
465				     &ar->tx_janitor,
466				     msecs_to_jiffies(AR9170_JANITOR_DELAY));
467}
468
469void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
470{
471	struct ar9170_cmd_response *cmd = (void *) buf;
472
473	if ((cmd->type & 0xc0) != 0xc0) {
474		ar->callback_cmd(ar, len, buf);
475		return;
476	}
477
478	/* hardware event handlers */
479	switch (cmd->type) {
480	case 0xc1: {
481		/*
482		 * TX status notification:
483		 * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
484		 *
485		 * XX always 81
486		 * YY always 00
487		 * M1-M6 is the MAC address
488		 * R1-R4 is the transmit rate
489		 * S1-S2 is the transmit status
490		 */
491
492		struct sk_buff *skb;
493		u32 phy = le32_to_cpu(cmd->tx_status.rate);
494		u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
495			AR9170_TX_PHY_QOS_SHIFT;
496#ifdef AR9170_QUEUE_DEBUG
497		wiphy_debug(ar->hw->wiphy,
498			    "recv tx_status for %pm, p:%08x, q:%d\n",
499			    cmd->tx_status.dst, phy, q);
500#endif /* AR9170_QUEUE_DEBUG */
501
502		skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
503					    &ar->tx_status[q],
504					    AR9170_TX_INVALID_RATE);
505		if (unlikely(!skb))
506			return ;
507
508		ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
509		break;
510		}
511
512	case 0xc0:
513		/*
514		 * pre-TBTT event
515		 */
516		if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
517			ieee80211_queue_work(ar->hw, &ar->beacon_work);
518		break;
519
520	case 0xc2:
521		/*
522		 * (IBSS) beacon send notification
523		 * bytes: 04 c2 XX YY B4 B3 B2 B1
524		 *
525		 * XX always 80
526		 * YY always 00
527		 * B1-B4 "should" be the number of sent out beacons.
528		 */
529		break;
530
531	case 0xc3:
532		/* End of Atim Window */
533		break;
534
535	case 0xc4:
536		/* BlockACK bitmap */
537		break;
538
539	case 0xc5:
540		/* BlockACK events */
541		break;
542
543	case 0xc6:
544		/* Watchdog Interrupt */
545		break;
546
547	case 0xc9:
548		/* retransmission issue / SIFS/EIFS collision ?! */
549		break;
550
551	/* firmware debug */
552	case 0xca:
553		printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
554				(char *)buf + 4);
555		break;
556	case 0xcb:
557		len -= 4;
558
559		switch (len) {
560		case 1:
561			printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
562				*((char *)buf + 4));
563			break;
564		case 2:
565			printk(KERN_DEBUG "ar9170 FW: u16: %#.4x\n",
566				le16_to_cpup((__le16 *)((char *)buf + 4)));
567			break;
568		case 4:
569			printk(KERN_DEBUG "ar9170 FW: u32: %#.8x\n",
570				le32_to_cpup((__le32 *)((char *)buf + 4)));
571			break;
572		case 8:
573			printk(KERN_DEBUG "ar9170 FW: u64: %#.16lx\n",
574				(unsigned long)le64_to_cpup(
575						(__le64 *)((char *)buf + 4)));
576			break;
577		}
578		break;
579	case 0xcc:
580		print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
581				     (char *)buf + 4, len - 4);
582		break;
583
584	default:
585		pr_info("received unhandled event %x\n", cmd->type);
586		print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
587		break;
588	}
589}
590
591static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
592{
593	memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head));
594	ar->rx_mpdu.has_plcp = false;
595}
596
597int ar9170_nag_limiter(struct ar9170 *ar)
598{
599	bool print_message;
600
601	/*
602	 * we expect all sorts of errors in promiscuous mode.
603	 * don't bother with it, it's OK!
604	 */
605	if (ar->sniffer_enabled)
606		return false;
607
608	/*
609	 * only go for frequent errors! The hardware tends to
610	 * do some stupid thing once in a while under load, in
611	 * noisy environments or just for fun!
612	 */
613	if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit())
614		print_message = true;
615	else
616		print_message = false;
617
618	/* reset threshold for "once in a while" */
619	ar->bad_hw_nagger = jiffies + HZ / 4;
620	return print_message;
621}
622
623static int ar9170_rx_mac_status(struct ar9170 *ar,
624				struct ar9170_rx_head *head,
625				struct ar9170_rx_macstatus *mac,
626				struct ieee80211_rx_status *status)
627{
628	u8 error, decrypt;
629
630	BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
631	BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
632
633	error = mac->error;
634	if (error & AR9170_RX_ERROR_MMIC) {
635		status->flag |= RX_FLAG_MMIC_ERROR;
636		error &= ~AR9170_RX_ERROR_MMIC;
637	}
638
639	if (error & AR9170_RX_ERROR_PLCP) {
640		status->flag |= RX_FLAG_FAILED_PLCP_CRC;
641		error &= ~AR9170_RX_ERROR_PLCP;
642
643		if (!(ar->filter_state & FIF_PLCPFAIL))
644			return -EINVAL;
645	}
646
647	if (error & AR9170_RX_ERROR_FCS) {
648		status->flag |= RX_FLAG_FAILED_FCS_CRC;
649		error &= ~AR9170_RX_ERROR_FCS;
650
651		if (!(ar->filter_state & FIF_FCSFAIL))
652			return -EINVAL;
653	}
654
655	decrypt = ar9170_get_decrypt_type(mac);
656	if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
657	    decrypt != AR9170_ENC_ALG_NONE)
658		status->flag |= RX_FLAG_DECRYPTED;
659
660	/* ignore wrong RA errors */
661	error &= ~AR9170_RX_ERROR_WRONG_RA;
662
663	if (error & AR9170_RX_ERROR_DECRYPT) {
664		error &= ~AR9170_RX_ERROR_DECRYPT;
665		/*
666		 * Rx decryption is done in place,
667		 * the original data is lost anyway.
668		 */
669
670		return -EINVAL;
671	}
672
673	/* drop any other error frames */
674	if (unlikely(error)) {
675		/* TODO: update netdevice's RX dropped/errors statistics */
676
677		if (ar9170_nag_limiter(ar))
678			wiphy_debug(ar->hw->wiphy,
679				    "received frame with suspicious error code (%#x).\n",
680				    error);
681
682		return -EINVAL;
683	}
684
685	status->band = ar->channel->band;
686	status->freq = ar->channel->center_freq;
687
688	switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) {
689	case AR9170_RX_STATUS_MODULATION_CCK:
690		if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
691			status->flag |= RX_FLAG_SHORTPRE;
692		switch (head->plcp[0]) {
693		case 0x0a:
694			status->rate_idx = 0;
695			break;
696		case 0x14:
697			status->rate_idx = 1;
698			break;
699		case 0x37:
700			status->rate_idx = 2;
701			break;
702		case 0x6e:
703			status->rate_idx = 3;
704			break;
705		default:
706			if (ar9170_nag_limiter(ar))
707				wiphy_err(ar->hw->wiphy,
708					  "invalid plcp cck rate (%x).\n",
709					  head->plcp[0]);
710			return -EINVAL;
711		}
712		break;
713
714	case AR9170_RX_STATUS_MODULATION_DUPOFDM:
715	case AR9170_RX_STATUS_MODULATION_OFDM:
716		switch (head->plcp[0] & 0xf) {
717		case 0xb:
718			status->rate_idx = 0;
719			break;
720		case 0xf:
721			status->rate_idx = 1;
722			break;
723		case 0xa:
724			status->rate_idx = 2;
725			break;
726		case 0xe:
727			status->rate_idx = 3;
728			break;
729		case 0x9:
730			status->rate_idx = 4;
731			break;
732		case 0xd:
733			status->rate_idx = 5;
734			break;
735		case 0x8:
736			status->rate_idx = 6;
737			break;
738		case 0xc:
739			status->rate_idx = 7;
740			break;
741		default:
742			if (ar9170_nag_limiter(ar))
743				wiphy_err(ar->hw->wiphy,
744					  "invalid plcp ofdm rate (%x).\n",
745					  head->plcp[0]);
746			return -EINVAL;
747		}
748		if (status->band == IEEE80211_BAND_2GHZ)
749			status->rate_idx += 4;
750		break;
751
752	case AR9170_RX_STATUS_MODULATION_HT:
753		if (head->plcp[3] & 0x80)
754			status->flag |= RX_FLAG_40MHZ;
755		if (head->plcp[6] & 0x80)
756			status->flag |= RX_FLAG_SHORT_GI;
757
758		status->rate_idx = clamp(head->plcp[6] & 0x7f, 0, 75);
759		status->flag |= RX_FLAG_HT;
760		break;
761
762	default:
763		if (ar9170_nag_limiter(ar))
764			wiphy_err(ar->hw->wiphy, "invalid modulation\n");
765		return -EINVAL;
766	}
767
768	return 0;
769}
770
771static void ar9170_rx_phy_status(struct ar9170 *ar,
772				 struct ar9170_rx_phystatus *phy,
773				 struct ieee80211_rx_status *status)
774{
775	int i;
776
777	BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
778
779	for (i = 0; i < 3; i++)
780		if (phy->rssi[i] != 0x80)
781			status->antenna |= BIT(i);
782
783	/* post-process RSSI */
784	for (i = 0; i < 7; i++)
785		if (phy->rssi[i] & 0x80)
786			phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
787
788	/* TODO: we could do something with phy_errors */
789	status->signal = ar->noise[0] + phy->rssi_combined;
790}
791
792static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
793{
794	struct sk_buff *skb;
795	int reserved = 0;
796	struct ieee80211_hdr *hdr = (void *) buf;
797
798	if (ieee80211_is_data_qos(hdr->frame_control)) {
799		u8 *qc = ieee80211_get_qos_ctl(hdr);
800		reserved += NET_IP_ALIGN;
801
802		if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
803			reserved += NET_IP_ALIGN;
804	}
805
806	if (ieee80211_has_a4(hdr->frame_control))
807		reserved += NET_IP_ALIGN;
808
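	/*
	 * Editor's note: 32 bytes of base headroom are reserved; NET_IP_ALIGN
	 * is added only when an odd number of the 2-byte adjustments above
	 * accumulated, which keeps the copied payload aligned.
	 */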
809	reserved = 32 + (reserved & NET_IP_ALIGN);
810
811	skb = dev_alloc_skb(len + reserved);
812	if (likely(skb)) {
813		skb_reserve(skb, reserved);
814		memcpy(skb_put(skb, len), buf, len);
815	}
816
817	return skb;
818}
819
820/*
821 * If the frame alignment is right (or the kernel has
822 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
823 * is only a single MPDU in the USB frame, then we could
824 * submit to mac80211 the SKB directly. However, since
825 * there may be multiple packets in one SKB in stream
826 * mode, and we need to observe the proper ordering,
827 * this is non-trivial.
828 */
829
830static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
831{
832	struct ar9170_rx_head *head;
833	struct ar9170_rx_macstatus *mac;
834	struct ar9170_rx_phystatus *phy = NULL;
835	struct ieee80211_rx_status status;
836	struct sk_buff *skb;
837	int mpdu_len;
838
839	if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac))))
840		return ;
841
842	/* Received MPDU */
843	mpdu_len = len - sizeof(*mac);
844
845	mac = (void *)(buf + mpdu_len);
846	if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
847		/* this frame is too damaged and can't be used - drop it */
848
849		return ;
850	}
851
852	switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) {
853	case AR9170_RX_STATUS_MPDU_FIRST:
854		/* first mpdu packet has the plcp header */
855		if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
856			head = (void *) buf;
857			memcpy(&ar->rx_mpdu.plcp, (void *) buf,
858			       sizeof(struct ar9170_rx_head));
859
860			mpdu_len -= sizeof(struct ar9170_rx_head);
861			buf += sizeof(struct ar9170_rx_head);
862			ar->rx_mpdu.has_plcp = true;
863		} else {
864			if (ar9170_nag_limiter(ar))
865				wiphy_err(ar->hw->wiphy,
866					  "plcp info is clipped.\n");
867			return ;
868		}
869		break;
870
871	case AR9170_RX_STATUS_MPDU_LAST:
872		/* the last mpdu has an extra tail with phy status information */
873
874		if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
875			mpdu_len -= sizeof(struct ar9170_rx_phystatus);
876			phy = (void *)(buf + mpdu_len);
877		} else {
878			if (ar9170_nag_limiter(ar))
879				wiphy_err(ar->hw->wiphy,
880					  "frame tail is clipped.\n");
881			return ;
882		}
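		/*
		 * fall through: the last MPDU is then processed like a middle
		 * one, reusing the PLCP header saved from the first MPDU.
		 */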
883
884	case AR9170_RX_STATUS_MPDU_MIDDLE:
885		/* middle mpdus are just data */
886		if (unlikely(!ar->rx_mpdu.has_plcp)) {
887			if (!ar9170_nag_limiter(ar))
888				return ;
889
890			wiphy_err(ar->hw->wiphy,
891				  "rx stream did not start with a first_mpdu frame tag.\n");
892
893			return ;
894		}
895
896		head = &ar->rx_mpdu.plcp;
897		break;
898
899	case AR9170_RX_STATUS_MPDU_SINGLE:
900		/* single mpdu - has plcp (head) and phy status (tail) */
901		head = (void *) buf;
902
903		mpdu_len -= sizeof(struct ar9170_rx_head);
904		mpdu_len -= sizeof(struct ar9170_rx_phystatus);
905
906		buf += sizeof(struct ar9170_rx_head);
907		phy = (void *)(buf + mpdu_len);
908		break;
909
910	default:
911		BUG_ON(1);
912		break;
913	}
914
915	if (unlikely(mpdu_len < FCS_LEN))
916		return ;
917
918	memset(&status, 0, sizeof(status));
919	if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status)))
920		return ;
921
922	if (phy)
923		ar9170_rx_phy_status(ar, phy, &status);
924
925	skb = ar9170_rx_copy_data(buf, mpdu_len);
926	if (likely(skb)) {
927		memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
928		ieee80211_rx_irqsafe(ar->hw, skb);
929	}
930}
931
932void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
933{
934	unsigned int i, tlen, resplen, wlen = 0, clen = 0;
935	u8 *tbuf, *respbuf;
936
937	tbuf = skb->data;
938	tlen = skb->len;
939
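	/*
	 * Each chunk in the rx stream starts with a 4 byte header: a 16-bit
	 * little-endian payload length followed by the 0x4e00 stream tag.
	 * The payload itself is padded up to a 4 byte boundary.
	 */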
940	while (tlen >= 4) {
941		clen = tbuf[1] << 8 | tbuf[0];
942		wlen = ALIGN(clen, 4);
943
944		/* check if this stream has a valid tag. */
945		if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
946			/*
947			 * TODO: handle the highly unlikely event that the
948			 * corrupted stream has the TAG at the right position.
949			 */
950
951			/* check if the frame can be repaired. */
952			if (!ar->rx_failover_missing) {
953				/* this is no "short read". */
954				if (ar9170_nag_limiter(ar)) {
955					wiphy_err(ar->hw->wiphy,
956						  "missing tag!\n");
957					goto err_telluser;
958				} else
959					goto err_silent;
960			}
961
962			if (ar->rx_failover_missing > tlen) {
963				if (ar9170_nag_limiter(ar)) {
964					wiphy_err(ar->hw->wiphy,
965						  "possible multi stream corruption!\n");
966					goto err_telluser;
967				} else
968					goto err_silent;
969			}
970
971			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
972			ar->rx_failover_missing -= tlen;
973
974			if (ar->rx_failover_missing <= 0) {
975				/*
976				 * nested ar9170_rx call!
977				 * termination is guaranteed, even when the
978				 * combined frame also has an element with
979				 * a bad tag.
980				 */
981
982				ar->rx_failover_missing = 0;
983				ar9170_rx(ar, ar->rx_failover);
984
985				skb_reset_tail_pointer(ar->rx_failover);
986				skb_trim(ar->rx_failover, 0);
987			}
988
989			return ;
990		}
991
992		/* check if stream is clipped */
993		if (wlen > tlen - 4) {
994			if (ar->rx_failover_missing) {
995				/* TODO: handle double stream corruption. */
996				if (ar9170_nag_limiter(ar)) {
997					wiphy_err(ar->hw->wiphy,
998						  "double rx stream corruption!\n");
999					goto err_telluser;
1000				} else
1001					goto err_silent;
1002			}
1003
1004			/*
1005			 * save the incomplete data set.
1006			 * the firmware will resend the missing bits when
1007			 * the rx descriptor comes around again.
1008			 */
1009
1010			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
1011			ar->rx_failover_missing = clen - tlen;
1012			return ;
1013		}
1014		resplen = clen;
1015		respbuf = tbuf + 4;
1016		tbuf += wlen + 4;
1017		tlen -= wlen + 4;
1018
1019		i = 0;
1020
1021		/* weird thing, but this is the same in the original driver */
1022		while (resplen > 2 && i < 12 &&
1023		       respbuf[0] == 0xff && respbuf[1] == 0xff) {
1024			i += 2;
1025			resplen -= 2;
1026			respbuf += 2;
1027		}
1028
1029		if (resplen < 4)
1030			continue;
1031
1032		/* found the 6 * 0xffff marker? */
1033		if (i == 12)
1034			ar9170_handle_command_response(ar, respbuf, resplen);
1035		else
1036			ar9170_handle_mpdu(ar, respbuf, clen);
1037	}
1038
1039	if (tlen) {
1040		if (net_ratelimit())
1041			wiphy_err(ar->hw->wiphy,
1042				  "%d bytes of unprocessed data left in rx stream!\n",
1043				  tlen);
1044
1045		goto err_telluser;
1046	}
1047
1048	return ;
1049
1050err_telluser:
1051	wiphy_err(ar->hw->wiphy,
1052		  "damaged RX stream data [want:%d, data:%d, rx:%d, pending:%d ]\n",
1053		  clen, wlen, tlen, ar->rx_failover_missing);
1054
1055	if (ar->rx_failover_missing)
1056		print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
1057				     ar->rx_failover->data,
1058				     ar->rx_failover->len);
1059
1060	print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
1061			     skb->data, skb->len);
1062
1063	wiphy_err(ar->hw->wiphy,
1064		  "If you see this message frequently, please check your hardware and cables.\n");
1065
1066err_silent:
1067	if (ar->rx_failover_missing) {
1068		skb_reset_tail_pointer(ar->rx_failover);
1069		skb_trim(ar->rx_failover, 0);
1070		ar->rx_failover_missing = 0;
1071	}
1072}
1073
1074#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
1075do {									\
1076	queue.aifs = ai_fs;						\
1077	queue.cw_min = cwmin;						\
1078	queue.cw_max = cwmax;						\
1079	queue.txop = _txop;						\
1080} while (0)
1081
1082static int ar9170_op_start(struct ieee80211_hw *hw)
1083{
1084	struct ar9170 *ar = hw->priv;
1085	int err, i;
1086
1087	mutex_lock(&ar->mutex);
1088
1089	/* reinitialize queues statistics */
1090	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
1091	for (i = 0; i < __AR9170_NUM_TXQ; i++)
1092		ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
1093
1094	/* reset QoS defaults */
1095	AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023,  0); /* BEST EFFORT*/
1096	AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023,  0); /* BACKGROUND */
1097	AR9170_FILL_QUEUE(ar->edcf[2], 2, 7,    15, 94); /* VIDEO */
1098	AR9170_FILL_QUEUE(ar->edcf[3], 2, 3,     7, 47); /* VOICE */
1099	AR9170_FILL_QUEUE(ar->edcf[4], 2, 3,     7,  0); /* SPECIAL */
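	/*
	 * Editor's note: the TXOP values above are in units of 32 us,
	 * i.e. 94 -> ~3.0 ms for video and 47 -> ~1.5 ms for voice.
	 */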
1100
1101	/* set sane AMPDU defaults */
1102	ar->global_ampdu_density = 6;
1103	ar->global_ampdu_factor = 3;
1104
1105	ar->bad_hw_nagger = jiffies;
1106
1107	err = ar->open(ar);
1108	if (err)
1109		goto out;
1110
1111	err = ar9170_init_mac(ar);
1112	if (err)
1113		goto out;
1114
1115	err = ar9170_set_qos(ar);
1116	if (err)
1117		goto out;
1118
1119	err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
1120	if (err)
1121		goto out;
1122
1123	err = ar9170_init_rf(ar);
1124	if (err)
1125		goto out;
1126
1127	/* start DMA */
1128	err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
1129	if (err)
1130		goto out;
1131
1132	ar->state = AR9170_STARTED;
1133
1134out:
1135	mutex_unlock(&ar->mutex);
1136	return err;
1137}
1138
1139static void ar9170_op_stop(struct ieee80211_hw *hw)
1140{
1141	struct ar9170 *ar = hw->priv;
1142	unsigned int i;
1143
1144	if (IS_STARTED(ar))
1145		ar->state = AR9170_IDLE;
1146
1147	cancel_delayed_work_sync(&ar->tx_janitor);
1148#ifdef CONFIG_AR9170_LEDS
1149	cancel_delayed_work_sync(&ar->led_work);
1150#endif
1151	cancel_work_sync(&ar->beacon_work);
1152
1153	mutex_lock(&ar->mutex);
1154
1155	if (IS_ACCEPTING_CMD(ar)) {
1156		ar9170_set_leds_state(ar, 0);
1157
1158		/* stop DMA */
1159		ar9170_write_reg(ar, 0x1c3d30, 0);
1160		ar->stop(ar);
1161	}
1162
1163	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1164		skb_queue_purge(&ar->tx_pending[i]);
1165		skb_queue_purge(&ar->tx_status[i]);
1166	}
1167
1168	mutex_unlock(&ar->mutex);
1169}
1170
1171static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1172{
1173	struct ieee80211_hdr *hdr;
1174	struct ar9170_tx_control *txc;
1175	struct ieee80211_tx_info *info;
1176	struct ieee80211_tx_rate *txrate;
1177	struct ar9170_tx_info *arinfo;
1178	unsigned int queue = skb_get_queue_mapping(skb);
1179	u16 keytype = 0;
1180	u16 len, icv = 0;
1181
1182	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1183
1184	hdr = (void *)skb->data;
1185	info = IEEE80211_SKB_CB(skb);
1186	len = skb->len;
1187
1188	txc = (void *)skb_push(skb, sizeof(*txc));
1189
1190	if (info->control.hw_key) {
1191		icv = info->control.hw_key->icv_len;
1192
1193		switch (info->control.hw_key->alg) {
1194		case ALG_WEP:
1195			keytype = AR9170_TX_MAC_ENCR_RC4;
1196			break;
1197		case ALG_TKIP:
1198			keytype = AR9170_TX_MAC_ENCR_RC4;
1199			break;
1200		case ALG_CCMP:
1201			keytype = AR9170_TX_MAC_ENCR_AES;
1202			break;
1203		default:
1204			WARN_ON(1);
1205			goto err_out;
1206		}
1207	}
1208
1209	/* Length */
1210	txc->length = cpu_to_le16(len + icv + 4);
1211
1212	txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
1213				       AR9170_TX_MAC_BACKOFF);
1214	txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
1215					AR9170_TX_MAC_QOS_SHIFT);
1216	txc->mac_control |= cpu_to_le16(keytype);
1217	txc->phy_control = cpu_to_le32(0);
1218
1219	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1220		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
1221
1222	txrate = &info->control.rates[0];
1223	if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
1224		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
1225	else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
1226		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
1227
1228	arinfo = (void *)info->rate_driver_data;
1229	arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
1230
1231	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
1232	     (is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
1233		/*
1234		 * WARNING:
1235		 * Putting the QoS queue bits into unexplored territory is
1236		 * certainly not elegant.
1237		 *
1238		 * In my defense: This idea provides a reasonable way to
1239		 * smuggle valuable information to the tx_status callback.
1240		 * Also, the idea behind this bit-abuse came straight from
1241		 * the original driver code.
1242		 */
1243
1244		txc->phy_control |=
1245			cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
1246
1247		txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1248	}
1249
1250	return 0;
1251
1252err_out:
1253	skb_pull(skb, sizeof(*txc));
1254	return -EINVAL;
1255}
1256
1257static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
1258{
1259	struct ar9170_tx_control *txc;
1260	struct ieee80211_tx_info *info;
1261	struct ieee80211_rate *rate = NULL;
1262	struct ieee80211_tx_rate *txrate;
1263	u32 power, chains;
1264
1265	txc = (void *) skb->data;
1266	info = IEEE80211_SKB_CB(skb);
1267	txrate = &info->control.rates[0];
1268
1269	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
1270		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
1271
1272	if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1273		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
1274
1275	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1276		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
1277	/* this works because 40 MHz is 2 and dup is 3 */
1278	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
1279		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);
1280
1281	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
1282		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
1283
1284	if (txrate->flags & IEEE80211_TX_RC_MCS) {
1285		u32 r = txrate->idx;
1286		u8 *txpower;
1287
1288		/* heavy clip control */
1289		txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
1290
1291		r <<= AR9170_TX_PHY_MCS_SHIFT;
1292		BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
1293
1294		txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
1295		txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
1296
1297		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
1298			if (info->band == IEEE80211_BAND_5GHZ)
1299				txpower = ar->power_5G_ht40;
1300			else
1301				txpower = ar->power_2G_ht40;
1302		} else {
1303			if (info->band == IEEE80211_BAND_5GHZ)
1304				txpower = ar->power_5G_ht20;
1305			else
1306				txpower = ar->power_2G_ht20;
1307		}
1308
1309		power = txpower[(txrate->idx) & 7];
1310	} else {
1311		u8 *txpower;
1312		u32 mod;
1313		u32 phyrate;
1314		u8 idx = txrate->idx;
1315
1316		if (info->band != IEEE80211_BAND_2GHZ) {
1317			idx += 4;
1318			txpower = ar->power_5G_leg;
1319			mod = AR9170_TX_PHY_MOD_OFDM;
1320		} else {
1321			if (idx < 4) {
1322				txpower = ar->power_2G_cck;
1323				mod = AR9170_TX_PHY_MOD_CCK;
1324			} else {
1325				mod = AR9170_TX_PHY_MOD_OFDM;
1326				txpower = ar->power_2G_ofdm;
1327			}
1328		}
1329
1330		rate = &__ar9170_ratetable[idx];
1331
1332		phyrate = rate->hw_value & 0xF;
1333		power = txpower[(rate->hw_value & 0x30) >> 4];
1334		phyrate <<= AR9170_TX_PHY_MCS_SHIFT;
1335
1336		txc->phy_control |= cpu_to_le32(mod);
1337		txc->phy_control |= cpu_to_le32(phyrate);
1338	}
1339
1340	power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
1341	power &= AR9170_TX_PHY_TX_PWR_MASK;
1342	txc->phy_control |= cpu_to_le32(power);
1343
1344	/* set TX chains */
1345	if (ar->eeprom.tx_mask == 1) {
1346		chains = AR9170_TX_PHY_TXCHAIN_1;
1347	} else {
1348		chains = AR9170_TX_PHY_TXCHAIN_2;
1349
1350		/* >= 36M legacy OFDM - use only one chain */
1351		if (rate && rate->bitrate >= 360)
1352			chains = AR9170_TX_PHY_TXCHAIN_1;
1353	}
1354	txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
1355}
1356
1357static void ar9170_tx(struct ar9170 *ar)
1358{
1359	struct sk_buff *skb;
1360	unsigned long flags;
1361	struct ieee80211_tx_info *info;
1362	struct ar9170_tx_info *arinfo;
1363	unsigned int i, frames, frames_failed, remaining_space;
1364	int err;
1365	bool schedule_garbagecollector = false;
1366
1367	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
1368
1369	if (unlikely(!IS_STARTED(ar)))
1370		return ;
1371
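	/*
	 * remaining_space caps the total number of frames handed to the
	 * transport in one pass, shared across all tx queues.
	 */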
1372	remaining_space = AR9170_TX_MAX_PENDING;
1373
1374	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
1375		spin_lock_irqsave(&ar->tx_stats_lock, flags);
1376		frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
1377			     skb_queue_len(&ar->tx_pending[i]));
1378
1379		if (remaining_space < frames) {
1380#ifdef AR9170_QUEUE_DEBUG
1381			wiphy_debug(ar->hw->wiphy,
1382				    "tx quota reached queue:%d, "
1383				    "remaining slots:%d, needed:%d\n",
1384				    i, remaining_space, frames);
1385#endif /* AR9170_QUEUE_DEBUG */
1386			frames = remaining_space;
1387		}
1388
1389		ar->tx_stats[i].len += frames;
1390		ar->tx_stats[i].count += frames;
1391		if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
1392#ifdef AR9170_QUEUE_DEBUG
1393			wiphy_debug(ar->hw->wiphy, "queue %d full\n", i);
1394			wiphy_debug(ar->hw->wiphy, "stuck frames: ===>\n");
1395			ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1396			ar9170_dump_txqueue(ar, &ar->tx_status[i]);
1397#endif /* AR9170_QUEUE_DEBUG */
1398
1399#ifdef AR9170_QUEUE_STOP_DEBUG
1400			wiphy_debug(ar->hw->wiphy, "stop queue %d\n", i);
1401			__ar9170_dump_txstats(ar);
1402#endif /* AR9170_QUEUE_STOP_DEBUG */
1403			ieee80211_stop_queue(ar->hw, i);
1404		}
1405
1406		spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1407
1408		if (!frames)
1409			continue;
1410
1411		frames_failed = 0;
1412		while (frames) {
1413			skb = skb_dequeue(&ar->tx_pending[i]);
1414			if (unlikely(!skb)) {
1415				frames_failed += frames;
1416				frames = 0;
1417				break;
1418			}
1419
1420			info = IEEE80211_SKB_CB(skb);
1421			arinfo = (void *) info->rate_driver_data;
1422
1423			/* TODO: cancel stuck frames */
1424			arinfo->timeout = jiffies +
1425					  msecs_to_jiffies(AR9170_TX_TIMEOUT);
1426
1427#ifdef AR9170_QUEUE_DEBUG
1428			wiphy_debug(ar->hw->wiphy, "send frame q:%d =>\n", i);
1429			ar9170_print_txheader(ar, skb);
1430#endif /* AR9170_QUEUE_DEBUG */
1431
1432			err = ar->tx(ar, skb);
1433			if (unlikely(err)) {
1434				frames_failed++;
1435				dev_kfree_skb_any(skb);
1436			} else {
1437				remaining_space--;
1438				schedule_garbagecollector = true;
1439			}
1440
1441			frames--;
1442		}
1443
1444#ifdef AR9170_QUEUE_DEBUG
1445		wiphy_debug(ar->hw->wiphy,
1446			    "ar9170_tx report for queue %d\n", i);
1447
1448		wiphy_debug(ar->hw->wiphy,
1449			    "unprocessed pending frames left:\n");
1450		ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1451#endif /* AR9170_QUEUE_DEBUG */
1452
1453		if (unlikely(frames_failed)) {
1454#ifdef AR9170_QUEUE_DEBUG
1455			wiphy_debug(ar->hw->wiphy,
1456				    "frames failed %d =>\n", frames_failed);
1457#endif /* AR9170_QUEUE_DEBUG */
1458
1459			spin_lock_irqsave(&ar->tx_stats_lock, flags);
1460			ar->tx_stats[i].len -= frames_failed;
1461			ar->tx_stats[i].count -= frames_failed;
1462#ifdef AR9170_QUEUE_STOP_DEBUG
1463			wiphy_debug(ar->hw->wiphy, "wake queue %d\n", i);
1464			__ar9170_dump_txstats(ar);
1465#endif /* AR9170_QUEUE_STOP_DEBUG */
1466			ieee80211_wake_queue(ar->hw, i);
1467			spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
1468		}
1469	}
1470
1471	if (!schedule_garbagecollector)
1472		return;
1473
1474	ieee80211_queue_delayed_work(ar->hw,
1475				     &ar->tx_janitor,
1476				     msecs_to_jiffies(AR9170_JANITOR_DELAY));
1477}
1478
1479int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1480{
1481	struct ar9170 *ar = hw->priv;
1482	struct ieee80211_tx_info *info;
1483	unsigned int queue;
1484
1485	if (unlikely(!IS_STARTED(ar)))
1486		goto err_free;
1487
1488	if (unlikely(ar9170_tx_prepare(ar, skb)))
1489		goto err_free;
1490
1491	queue = skb_get_queue_mapping(skb);
1492	info = IEEE80211_SKB_CB(skb);
1493	ar9170_tx_prepare_phy(ar, skb);
1494	skb_queue_tail(&ar->tx_pending[queue], skb);
1495
1496	ar9170_tx(ar);
1497	return NETDEV_TX_OK;
1498
1499err_free:
1500	dev_kfree_skb_any(skb);
1501	return NETDEV_TX_OK;
1502}
1503
1504static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1505				   struct ieee80211_vif *vif)
1506{
1507	struct ar9170 *ar = hw->priv;
1508	struct ath_common *common = &ar->common;
1509	int err = 0;
1510
1511	mutex_lock(&ar->mutex);
1512
1513	if (ar->vif) {
1514		err = -EBUSY;
1515		goto unlock;
1516	}
1517
1518	ar->vif = vif;
1519	memcpy(common->macaddr, vif->addr, ETH_ALEN);
1520
1521	if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
1522		ar->rx_software_decryption = true;
1523		ar->disable_offload = true;
1524	}
1525
1526	ar->cur_filter = 0;
1527	err = ar9170_update_frame_filter(ar, AR9170_MAC_REG_FTF_DEFAULTS);
1528	if (err)
1529		goto unlock;
1530
1531	err = ar9170_set_operating_mode(ar);
1532
1533unlock:
1534	mutex_unlock(&ar->mutex);
1535	return err;
1536}
1537
1538static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
1539				       struct ieee80211_vif *vif)
1540{
1541	struct ar9170 *ar = hw->priv;
1542
1543	mutex_lock(&ar->mutex);
1544	ar->vif = NULL;
1545	ar9170_update_frame_filter(ar, 0);
1546	ar9170_set_beacon_timers(ar);
1547	dev_kfree_skb(ar->beacon);
1548	ar->beacon = NULL;
1549	ar->sniffer_enabled = false;
1550	ar->rx_software_decryption = false;
1551	ar9170_set_operating_mode(ar);
1552	mutex_unlock(&ar->mutex);
1553}
1554
1555static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
1556{
1557	struct ar9170 *ar = hw->priv;
1558	int err = 0;
1559
1560	mutex_lock(&ar->mutex);
1561
1562	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
1563		/* TODO */
1564		err = 0;
1565	}
1566
1567	if (changed & IEEE80211_CONF_CHANGE_PS) {
1568		/* TODO */
1569		err = 0;
1570	}
1571
1572	if (changed & IEEE80211_CONF_CHANGE_POWER) {
1573		/* TODO */
1574		err = 0;
1575	}
1576
1577	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
1578		/*
1579		 * is it long_frame_max_tx_count or short_frame_max_tx_count?
1580		 */
1581
1582		err = ar9170_set_hwretry_limit(ar,
1583			ar->hw->conf.long_frame_max_tx_count);
1584		if (err)
1585			goto out;
1586	}
1587
1588	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1589
1590		/* adjust slot time for 5 GHz */
1591		err = ar9170_set_slot_time(ar);
1592		if (err)
1593			goto out;
1594
1595		err = ar9170_set_dyn_sifs_ack(ar);
1596		if (err)
1597			goto out;
1598
1599		err = ar9170_set_channel(ar, hw->conf.channel,
1600				AR9170_RFI_NONE,
1601				nl80211_to_ar9170(hw->conf.channel_type));
1602		if (err)
1603			goto out;
1604	}
1605
1606out:
1607	mutex_unlock(&ar->mutex);
1608	return err;
1609}
1610
1611static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
1612				       struct netdev_hw_addr_list *mc_list)
1613{
1614	u64 mchash;
1615	struct netdev_hw_addr *ha;
1616
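	/*
	 * The multicast filter is a 64-bit hash: each address sets the bit
	 * selected by the top six bits of its last byte, so the broadcast
	 * address ff:ff:ff:ff:ff:ff maps to bit 63.
	 */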
1617	/* always get broadcast frames */
1618	mchash = 1ULL << (0xff >> 2);
1619
1620	netdev_hw_addr_list_for_each(ha, mc_list)
1621		mchash |= 1ULL << (ha->addr[5] >> 2);
1622
1623	return mchash;
1624}
1625
1626static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
1627				       unsigned int changed_flags,
1628				       unsigned int *new_flags,
1629				       u64 multicast)
1630{
1631	struct ar9170 *ar = hw->priv;
1632
1633	if (unlikely(!IS_ACCEPTING_CMD(ar)))
1634		return ;
1635
1636	mutex_lock(&ar->mutex);
1637
1638	/* mask supported flags */
1639	*new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
1640		      FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL;
1641	ar->filter_state = *new_flags;
1642	/*
1643	 * We can support more by setting the sniffer bit and
1644	 * then checking the error flags, later.
1645	 */
1646
1647	if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI)
1648		multicast = ~0ULL;
1649
1650	if (multicast != ar->cur_mc_hash)
1651		ar9170_update_multicast(ar, multicast);
1652
1653	if (changed_flags & FIF_CONTROL) {
1654		u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
1655			     AR9170_MAC_REG_FTF_RTS |
1656			     AR9170_MAC_REG_FTF_CTS |
1657			     AR9170_MAC_REG_FTF_ACK |
1658			     AR9170_MAC_REG_FTF_CFE |
1659			     AR9170_MAC_REG_FTF_CFE_ACK;
1660
1661		if (*new_flags & FIF_CONTROL)
1662			filter |= ar->cur_filter;
1663		else
1664			filter &= (~ar->cur_filter);
1665
1666		ar9170_update_frame_filter(ar, filter);
1667	}
1668
1669	if (changed_flags & FIF_PROMISC_IN_BSS) {
1670		ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
1671		ar9170_set_operating_mode(ar);
1672	}
1673
1674	mutex_unlock(&ar->mutex);
1675}
1676
1677
1678static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
1679				       struct ieee80211_vif *vif,
1680				       struct ieee80211_bss_conf *bss_conf,
1681				       u32 changed)
1682{
1683	struct ar9170 *ar = hw->priv;
1684	struct ath_common *common = &ar->common;
1685	int err = 0;
1686
1687	mutex_lock(&ar->mutex);
1688
1689	if (changed & BSS_CHANGED_BSSID) {
1690		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1691		err = ar9170_set_operating_mode(ar);
1692		if (err)
1693			goto out;
1694	}
1695
1696	if (changed & BSS_CHANGED_BEACON_ENABLED)
1697		ar->enable_beacon = bss_conf->enable_beacon;
1698
1699	if (changed & BSS_CHANGED_BEACON) {
1700		err = ar9170_update_beacon(ar);
1701		if (err)
1702			goto out;
1703	}
1704
1705	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1706		       BSS_CHANGED_BEACON_INT)) {
1707		err = ar9170_set_beacon_timers(ar);
1708		if (err)
1709			goto out;
1710	}
1711
1712	if (changed & BSS_CHANGED_ASSOC) {
1713#ifndef CONFIG_AR9170_LEDS
1714		/* enable assoc LED. */
1715		err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
1716#endif /* CONFIG_AR9170_LEDS */
1717	}
1718
1719	if (changed & BSS_CHANGED_HT) {
1720		/* TODO */
1721		err = 0;
1722	}
1723
1724	if (changed & BSS_CHANGED_ERP_SLOT) {
1725		err = ar9170_set_slot_time(ar);
1726		if (err)
1727			goto out;
1728	}
1729
1730	if (changed & BSS_CHANGED_BASIC_RATES) {
1731		err = ar9170_set_basic_rates(ar);
1732		if (err)
1733			goto out;
1734	}
1735
1736out:
1737	mutex_unlock(&ar->mutex);
1738}
1739
1740static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
1741{
1742	struct ar9170 *ar = hw->priv;
1743	int err;
1744	u64 tsf;
1745#define NR 3
1746	static const u32 addr[NR] = { AR9170_MAC_REG_TSF_H,
1747				    AR9170_MAC_REG_TSF_L,
1748				    AR9170_MAC_REG_TSF_H };
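	/*
	 * The 64-bit TSF is read as high-low-high; the loop below retries
	 * until both reads of the high word agree, so a rollover of the low
	 * word between the reads cannot produce a torn value.
	 */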
1749	u32 val[NR];
1750	int loops = 0;
1751
1752	mutex_lock(&ar->mutex);
1753
1754	while (loops++ < 10) {
1755		err = ar9170_read_mreg(ar, NR, addr, val);
1756		if (err || val[0] == val[2])
1757			break;
1758	}
1759
1760	mutex_unlock(&ar->mutex);
1761
1762	if (WARN_ON(err))
1763		return 0;
1764	tsf = val[0];
1765	tsf = (tsf << 32) | val[1];
1766	return tsf;
1767#undef NR
1768}
1769
1770static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1771			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
1772			  struct ieee80211_key_conf *key)
1773{
1774	struct ar9170 *ar = hw->priv;
1775	int err = 0, i;
1776	u8 ktype;
1777
1778	if ((!ar->vif) || (ar->disable_offload))
1779		return -EOPNOTSUPP;
1780
1781	switch (key->alg) {
1782	case ALG_WEP:
1783		if (key->keylen == WLAN_KEY_LEN_WEP40)
1784			ktype = AR9170_ENC_ALG_WEP64;
1785		else
1786			ktype = AR9170_ENC_ALG_WEP128;
1787		break;
1788	case ALG_TKIP:
1789		ktype = AR9170_ENC_ALG_TKIP;
1790		break;
1791	case ALG_CCMP:
1792		ktype = AR9170_ENC_ALG_AESCCMP;
1793		break;
1794	default:
1795		return -EOPNOTSUPP;
1796	}
1797
1798	mutex_lock(&ar->mutex);
1799	if (cmd == SET_KEY) {
1800		if (unlikely(!IS_STARTED(ar))) {
1801			err = -EOPNOTSUPP;
1802			goto out;
1803		}
1804
1805		/* group keys need all-zeroes address */
1806		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1807			sta = NULL;
1808
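		/*
		 * Key slots 0-63 hold pairwise keys, tracked in the usedkeys
		 * bitmap; group keys live above that at 64 + keyidx.
		 */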
1809		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
1810			for (i = 0; i < 64; i++)
1811				if (!(ar->usedkeys & BIT(i)))
1812					break;
1813			if (i == 64) {
1814				ar->rx_software_decryption = true;
1815				ar9170_set_operating_mode(ar);
1816				err = -ENOSPC;
1817				goto out;
1818			}
1819		} else {
1820			i = 64 + key->keyidx;
1821		}
1822
1823		key->hw_key_idx = i;
1824
1825		err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
1826					key->key, min_t(u8, 16, key->keylen));
1827		if (err)
1828			goto out;
1829
1830		if (key->alg == ALG_TKIP) {
1831			err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
1832						ktype, 1, key->key + 16, 16);
1833			if (err)
1834				goto out;
1835
1836			/*
1837			 * the hardware is not capable of generating the MMIC
1838			 * for fragmented frames!
1839			 */
1840			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1841		}
1842
1843		if (i < 64)
1844			ar->usedkeys |= BIT(i);
1845
1846		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1847	} else {
1848		if (unlikely(!IS_STARTED(ar))) {
1849			/* The device is gone... together with the key ;-) */
1850			err = 0;
1851			goto out;
1852		}
1853
1854		err = ar9170_disable_key(ar, key->hw_key_idx);
1855		if (err)
1856			goto out;
1857
1858		if (key->hw_key_idx < 64) {
1859			ar->usedkeys &= ~BIT(key->hw_key_idx);
1860		} else {
1861			err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
1862						AR9170_ENC_ALG_NONE, 0,
1863						NULL, 0);
1864			if (err)
1865				goto out;
1866
1867			if (key->alg == ALG_TKIP) {
1868				err = ar9170_upload_key(ar, key->hw_key_idx,
1869							NULL,
1870							AR9170_ENC_ALG_NONE, 1,
1871							NULL, 0);
1872				if (err)
1873					goto out;
1874			}
1875
1876		}
1877	}
1878
1879	ar9170_regwrite_begin(ar);
1880	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
1881	ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
1882	ar9170_regwrite_finish();
1883	err = ar9170_regwrite_result();
1884
1885out:
1886	mutex_unlock(&ar->mutex);
1887
1888	return err;
1889}
1890
1891static int ar9170_get_stats(struct ieee80211_hw *hw,
1892			    struct ieee80211_low_level_stats *stats)
1893{
1894	struct ar9170 *ar = hw->priv;
1895	u32 val;
1896	int err;
1897
1898	mutex_lock(&ar->mutex);
1899	err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
1900	ar->stats.dot11ACKFailureCount += val;
1901
1902	memcpy(stats, &ar->stats, sizeof(*stats));
1903	mutex_unlock(&ar->mutex);
1904
1905	return 0;
1906}
1907
1908static int ar9170_get_survey(struct ieee80211_hw *hw, int idx,
1909				struct survey_info *survey)
1910{
1911	struct ar9170 *ar = hw->priv;
1912	struct ieee80211_conf *conf = &hw->conf;
1913
1914	if (idx != 0)
1915		return -ENOENT;
1916
1917	/* TODO: update noise value, e.g. call ar9170_set_channel */
1918
1919	survey->channel = conf->channel;
1920	survey->filled = SURVEY_INFO_NOISE_DBM;
1921	survey->noise = ar->noise[0];
1922
1923	return 0;
1924}
1925
1926static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1927			  const struct ieee80211_tx_queue_params *param)
1928{
1929	struct ar9170 *ar = hw->priv;
1930	int ret;
1931
1932	mutex_lock(&ar->mutex);
1933	if (queue < __AR9170_NUM_TXQ) {
1934		memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
1935		       param, sizeof(*param));
1936
1937		ret = ar9170_set_qos(ar);
1938	} else {
1939		ret = -EINVAL;
1940	}
1941
1942	mutex_unlock(&ar->mutex);
1943	return ret;
1944}
1945
1946static int ar9170_ampdu_action(struct ieee80211_hw *hw,
1947			       struct ieee80211_vif *vif,
1948			       enum ieee80211_ampdu_mlme_action action,
1949			       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
1950{
1951	switch (action) {
1952	case IEEE80211_AMPDU_RX_START:
1953	case IEEE80211_AMPDU_RX_STOP:
1954		/* Handled by firmware */
1955		break;
1956
1957	default:
1958		return -EOPNOTSUPP;
1959	}
1960
1961	return 0;
1962}
1963
1964static const struct ieee80211_ops ar9170_ops = {
1965	.start			= ar9170_op_start,
1966	.stop			= ar9170_op_stop,
1967	.tx			= ar9170_op_tx,
1968	.add_interface		= ar9170_op_add_interface,
1969	.remove_interface	= ar9170_op_remove_interface,
1970	.config			= ar9170_op_config,
1971	.prepare_multicast	= ar9170_op_prepare_multicast,
1972	.configure_filter	= ar9170_op_configure_filter,
1973	.conf_tx		= ar9170_conf_tx,
1974	.bss_info_changed	= ar9170_op_bss_info_changed,
1975	.get_tsf		= ar9170_op_get_tsf,
1976	.set_key		= ar9170_set_key,
1977	.get_stats		= ar9170_get_stats,
1978	.get_survey		= ar9170_get_survey,
1979	.ampdu_action		= ar9170_ampdu_action,
1980};
1981
1982void *ar9170_alloc(size_t priv_size)
1983{
1984	struct ieee80211_hw *hw;
1985	struct ar9170 *ar;
1986	struct sk_buff *skb;
1987	int i;
1988
1989	/*
1990	 * this buffer is used for rx stream reconstruction.
1991	 * Under heavy load this device (or the transport layer?)
1992	 * tends to split the streams into separate rx descriptors.
1993	 */
1994
1995	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1996	if (!skb)
1997		goto err_nomem;
1998
1999	hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
2000	if (!hw)
2001		goto err_nomem;
2002
2003	ar = hw->priv;
2004	ar->hw = hw;
2005	ar->rx_failover = skb;
2006
2007	mutex_init(&ar->mutex);
2008	spin_lock_init(&ar->cmdlock);
2009	spin_lock_init(&ar->tx_stats_lock);
2010	for (i = 0; i < __AR9170_NUM_TXQ; i++) {
2011		skb_queue_head_init(&ar->tx_status[i]);
2012		skb_queue_head_init(&ar->tx_pending[i]);
2013	}
2014	ar9170_rx_reset_rx_mpdu(ar);
2015	INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
2016	INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
2017
2018	/* all hw supports 2.4 GHz, so set channel to 1 by default */
2019	ar->channel = &ar9170_2ghz_chantable[0];
2020
2021	/* first part of wiphy init */
2022	ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2023					 BIT(NL80211_IFTYPE_WDS) |
2024					 BIT(NL80211_IFTYPE_ADHOC);
2025	ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
2026			 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2027			 IEEE80211_HW_SIGNAL_DBM;
2028
2029	ar->hw->queues = __AR9170_NUM_TXQ;
2030	ar->hw->extra_tx_headroom = 8;
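	/*
	 * Editor's note: the extra headroom leaves room for the
	 * ar9170_tx_control descriptor that ar9170_tx_prepare() pushes in
	 * front of each frame.
	 */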
2031
2032	ar->hw->max_rates = 1;
2033	ar->hw->max_rate_tries = 3;
2034
2035	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
2036		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
2037
2038	return ar;
2039
2040err_nomem:
2041	kfree_skb(skb);
2042	return ERR_PTR(-ENOMEM);
2043}
2044
2045static int ar9170_read_eeprom(struct ar9170 *ar)
2046{
2047#define RW	8	/* number of words to read at once */
2048#define RB	(sizeof(u32) * RW)
2049	struct ath_regulatory *regulatory = &ar->common.regulatory;
2050	u8 *eeprom = (void *)&ar->eeprom;
2051	u8 *addr = ar->eeprom.mac_address;
2052	__le32 offsets[RW];
2053	unsigned int rx_streams, tx_streams, tx_params = 0;
2054	int i, j, err, bands = 0;
2055
2056	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
2057
2058	BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
2059#ifndef __CHECKER__
2060	/* don't want to handle trailing remains */
2061	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
2062#endif
2063
2064	for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
2065		for (j = 0; j < RW; j++)
2066			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
2067						 RB * i + 4 * j);
2068
2069		err = ar->exec_cmd(ar, AR9170_CMD_RREG,
2070				   RB, (u8 *) &offsets,
2071				   RB, eeprom + RB * i);
2072		if (err)
2073			return err;
2074	}
2075
2076#undef RW
2077#undef RB
2078
2079	if (ar->eeprom.length == cpu_to_le16(0xFFFF))
2080		return -ENODATA;
2081
2082	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
2083		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
2084		bands++;
2085	}
2086	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
2087		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
2088		bands++;
2089	}
2090
2091	rx_streams = hweight8(ar->eeprom.rx_mask);
2092	tx_streams = hweight8(ar->eeprom.tx_mask);
2093
2094	if (rx_streams != tx_streams)
2095		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
2096
2097	if (tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)
2098		tx_params = (tx_streams - 1) <<
2099			    IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2100
2101	ar9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
2102	ar9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
2103
2104	if (bands == 2)
2105		ar->hw->channel_change_time = 135 * 1000;
2106	else
2107		ar->hw->channel_change_time = 80 * 1000;
2108
2109	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
2110	regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
2111
2112	/* second part of wiphy init */
2113	SET_IEEE80211_PERM_ADDR(ar->hw, addr);
2114
2115	return bands ? 0 : -EINVAL;
2116}
2117
2118static int ar9170_reg_notifier(struct wiphy *wiphy,
2119			struct regulatory_request *request)
2120{
2121	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
2122	struct ar9170 *ar = hw->priv;
2123
2124	return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
2125}
2126
2127int ar9170_register(struct ar9170 *ar, struct device *pdev)
2128{
2129	struct ath_regulatory *regulatory = &ar->common.regulatory;
2130	int err;
2131
2132	/* try to read EEPROM, init MAC addr */
2133	err = ar9170_read_eeprom(ar);
2134	if (err)
2135		goto err_out;
2136
2137	err = ath_regd_init(regulatory, ar->hw->wiphy,
2138			    ar9170_reg_notifier);
2139	if (err)
2140		goto err_out;
2141
2142	err = ieee80211_register_hw(ar->hw);
2143	if (err)
2144		goto err_out;
2145
2146	if (!ath_is_world_regd(regulatory))
2147		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2148
2149	err = ar9170_init_leds(ar);
2150	if (err)
2151		goto err_unreg;
2152
2153#ifdef CONFIG_AR9170_LEDS
2154	err = ar9170_register_leds(ar);
2155	if (err)
2156		goto err_unreg;
2157#endif /* CONFIG_AR9170_LEDS */
2158
2159	dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
2160		 wiphy_name(ar->hw->wiphy));
2161
2162	ar->registered = true;
2163	return 0;
2164
2165err_unreg:
2166	ieee80211_unregister_hw(ar->hw);
2167
2168err_out:
2169	return err;
2170}
2171
2172void ar9170_unregister(struct ar9170 *ar)
2173{
2174	if (ar->registered) {
2175#ifdef CONFIG_AR9170_LEDS
2176		ar9170_unregister_leds(ar);
2177#endif /* CONFIG_AR9170_LEDS */
2178
2179		ieee80211_unregister_hw(ar->hw);
2180	}
2181
2182	kfree_skb(ar->rx_failover);
2183	mutex_destroy(&ar->mutex);
2184}
2185