// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

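/*
 * Tell the firmware which WEP default key index to use: for AP vifs the
 * key is set on the broadcast link, for STA vifs on the station link.
 */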
static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

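/*
 * Reserve a free Tx descriptor id for this skb and remember the mapping,
 * or return -EBUSY when all descriptors are already in flight.
 */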
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

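/*
 * Release a descriptor id taken by wl1271_alloc_tx_id. If the descriptor
 * pool was completely exhausted, clearing WL1271_FLAG_FW_TX_BUSY here lets
 * the Tx path resume.
 */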
void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);

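/*
 * Called on the AP Tx path for each frame about to be sent: only auth
 * responses matter here, as they mark a station that is still connecting
 * and must be protected from premature FW deauthentication.
 */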
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct wl12xx_vif *wlvif,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (!ieee80211_is_auth(hdr->frame_control))
		return;

	/*
	 * Add the station to the known list before transmitting the
	 * authentication response. This way it won't get de-authed by the FW
	 * when transmitting too soon.
	 */
	wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);

	/*
	 * ROC for 1 second on the AP channel for completing the connection.
	 * Note the ROC will be continued by the update_sta_state callbacks
	 * once the station reaches the associated state.
	 */
	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
	wlvif->pending_auth_reply_time = jiffies;
	cancel_delayed_work(&wlvif->pending_auth_complete_work);
	ieee80211_queue_delayed_work(wl->hw,
				&wlvif->pending_auth_complete_work,
				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}

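/*
 * Check whether a link that the FW reports as being in power-save should
 * be moved into driver-level PS, based on how many of its packets the FW
 * is currently holding.
 */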
static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);

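/*
 * AP-mode hlid resolution: a known station's own link, the broadcast link
 * for multicast frames, the global link for other frames, or the system
 * link while the AP has not started yet.
 */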
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}

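/*
 * Resolve the destination hlid for any vif type: AP vifs delegate to the
 * AP helper above, off-channel frames use the device link, and everything
 * else goes out on the station link.
 */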
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *control;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;
	}

	return wlvif->sta.hlid;
}

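/*
 * Pad a packet length to what the bus/firmware combination requires:
 * WL1271_TX_ALIGN_TO normally, or a full bus block when the blocksize
 * alignment quirk is set without last-frame padding.
 */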
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);

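/*
 * Reserve firmware memory blocks and a descriptor id for one skb. Returns
 * -EAGAIN when the aggregation buffer is full (the caller flushes it and
 * retries) and -EBUSY when the FW is out of blocks or descriptor ids.
 */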
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = skb_push(skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/*
		 * If the FW was empty before, arm the Tx watchdog. Also do
		 * this on the first Tx after resume, as we always cancel the
		 * watchdog on suspend.
		 */
		if (wl->tx_allocated_blocks == total_blocks ||
		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

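/*
 * Fill in the hardware Tx descriptor that wl1271_tx_allocate prepended to
 * the skb: lifetime, queue/tid, session id, rate policy, attributes and
 * the checksum/length fields.
 */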
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time */
	hosttime = (ktime_get_boottime_ns() >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different from the one set in the
		 * join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];

		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * If the packet is a data packet, send it with the AP rate
		 * policies (EAPOLs are an exception); otherwise use the
		 * default basic rates.
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	/* send EAPOL frames as voice */
	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned.  The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (e.g. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}

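/*
 * Translate a mac80211 rate bitmap (legacy rates first, MCS bits starting
 * at HW_HT_RATES_OFFSET) into the firmware's CONF_HW_BIT_RATE_* mask.
 */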
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum nl80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rate indications are on bits 16-31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

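/*
 * Restart any per-vif queue that was stopped on the high watermark once
 * its pending packet count has dropped back to the low watermark.
 */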
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
			    wlvif->tx_queue_count[i] <=
					WL1271_TX_QUEUE_LOW_WATERMARK)
				/* firmware buffer has space, restart queues */
				wlcore_wake_queue(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}

static int wlcore_select_ac(struct wl1271 *wl)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the fewest allocated packets
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (wl->tx_queue_count[ac] &&
		    wl->tx_allocated_pkts[ac] < min_pkts) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	return q;
}

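/*
 * Pop one skb from a link's per-AC queue, keeping the global and per-vif
 * pending counters in sync under wl_lock.
 */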
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{
	struct sk_buff *skb;
	unsigned long flags;

	skb = skb_dequeue(&lnk->tx_queue[q]);
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		if (lnk->wlvif) {
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

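/*
 * Dequeue from a link only if the FW considers it high priority. As a
 * side effect, remember the first non-empty low-priority link so the
 * caller can fall back to it.
 */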
static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];

	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;

		return NULL;
	}

	return wlcore_lnk_dequeue(wl, lnk, ac);
}

static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
						    u8 ac, u8 *hlid,
						    u8 *low_prio_hlid)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < wl->num_links; i++) {
		h = (start_hlid + i) % wl->num_links;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
						   low_prio_hlid);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	*hlid = wlvif->last_tx_hlid;
	return skb;
}

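/*
 * Pick the next skb to transmit: select an AC, then round-robin over vifs
 * and their links, preferring high-priority links, then the system link,
 * then any low-priority link, and finally the pending dummy packet.
 */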
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;

			wl->last_wlvif = wlvif;
			break;
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}

	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it. */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;
	}

out:
	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

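/*
 * Return an skb to the head of its link queue (e.g. when it could not be
 * aggregated) and rewind the round-robin pointer so the same packet is
 * dequeued first on the next pass.
 */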
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
				      wl->num_links;
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	if (wlvif)
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, wl->num_links) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

/*
 * Returns failure values only in case of failed bus ops within this
 * function. wl1271_prepare_tx_frame retvals won't be returned in order to
 * avoid triggering recovery by higher layers when not necessary.
 * If a FW command fails within wl1271_prepare_tx_frame, a recovery will be
 * queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame can occur
 * and are legitimate, so don't propagate them. -EINVAL will emit a WARNING
 * within the prepare_tx_frame code, but there's nothing we should do about
 * those either.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
					     wl->aggr_buf, buf_offset, true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					     buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
					     wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}

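/*
 * Workqueue entry point for Tx: takes wl->mutex, resumes the device via
 * runtime PM, runs the locked Tx path and queues recovery on bus errors.
 */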
void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = pm_runtime_resume_and_get(wl->dev);
	if (ret < 0)
		goto out;

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}

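/*
 * Handle a single Tx result descriptor: translate the FW status into
 * mac80211 tx_info, strip the private headers and hand the skb back to
 * the stack through the deferred queue.
 */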
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);

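/*
 * Drop every skb still queued on one link, reporting each to mac80211 as
 * not acked, then fix up the pending counters and wake queues that fell
 * below the low watermark.
 */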
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			u8 hlid = i;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < wl->num_links; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout, start_time;
	int i;

	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);
		goto out;
	}

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));

		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);
		msleep(20);
		mutex_lock(&wl->mutex);

		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
			goto out_wake;
		}
	}

	wl1271_warning("Unable to flush all TX buffers, "
		       "timed out (timeout %d ms)",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);

	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < wl->num_links; i++)
		wl1271_tx_reset_link_queues(wl, i);

out_wake:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);
out:
	mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);

u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

	return BIT(__ffs(rate_set));
}
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);

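/*
 * Record a stop reason for one mac80211 queue; the queue is actually
 * stopped only on the first reason set, later reasons just accumulate in
 * the per-queue bitmask.
 */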
void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	bool stopped = !!wl->queue_stop_reasons[hwq];

	/* queue should not be stopped for this reason */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, hwq);
}

void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not be clear for this reason */
	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (wl->queue_stop_reasons[hwq])
		goto out;

	ieee80211_wake_queue(wl->hw, hwq);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));

	/* use the global version to make sure queues of vifs that mac80211
	 * knows about but we don't are stopped as well.
	 */
	ieee80211_stop_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));

	/* use the global version to make sure queues of vifs that mac80211
	 * knows about but we don't are woken up as well.
	 */
	ieee80211_wake_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

1282
1283bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
1284				       struct wl12xx_vif *wlvif, u8 queue,
1285				       enum wlcore_queue_stop_reason reason)
1286{
1287	unsigned long flags;
1288	bool stopped;
1289
1290	spin_lock_irqsave(&wl->wl_lock, flags);
1291	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
1292							   reason);
1293	spin_unlock_irqrestore(&wl->wl_lock, flags);
1294
1295	return stopped;
1296}
1297
1298bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
1299				       struct wl12xx_vif *wlvif, u8 queue,
1300				       enum wlcore_queue_stop_reason reason)
1301{
1302	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
1303
1304	assert_spin_locked(&wl->wl_lock);
1305	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
1306}
1307
1308bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1309				    u8 queue)
1310{
1311	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
1312
1313	assert_spin_locked(&wl->wl_lock);
1314	return !!wl->queue_stop_reasons[hwq];
1315}
1316