if_ath.c (190526) -> if_ath.c (190571)
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
- 31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 190526 2009-03-29 17:59:14Z sam $");
+ 31__FBSDID("$FreeBSD: head/sys/dev/ath/if_ath.c 190571 2009-03-30 19:23:49Z sam $");
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
40#include "opt_inet.h"
41#include "opt_ath.h"
+ 42#include "opt_wlan.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysctl.h>
47#include <sys/mbuf.h>
48#include <sys/malloc.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/errno.h>
55#include <sys/callout.h>
56#include <sys/bus.h>
57#include <sys/endian.h>
58#include <sys/kthread.h>
59#include <sys/taskqueue.h>
60#include <sys/priv.h>
61
62#include <machine/bus.h>
63
64#include <net/if.h>
65#include <net/if_dl.h>
66#include <net/if_media.h>
67#include <net/if_types.h>
68#include <net/if_arp.h>
69#include <net/ethernet.h>
70#include <net/if_llc.h>
71
72#include <net80211/ieee80211_var.h>
73#include <net80211/ieee80211_regdomain.h>
- 73#ifdef ATH_SUPPORT_TDMA
+ 74#ifdef IEEE80211_SUPPORT_TDMA
75#include <net80211/ieee80211_tdma.h>
76#endif
77
78#include <net/bpf.h>
79
80#ifdef INET
81#include <netinet/in.h>
82#include <netinet/if_ether.h>
83#endif
84
85#include <dev/ath/if_athvar.h>
86#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
87
88#ifdef ATH_TX99_DIAG
89#include <dev/ath/ath_tx99/ath_tx99.h>
90#endif
91
92/*
 93 * ATH_BCBUF determines the number of vaps that can transmit
 94 * beacons and also (currently) the number of vaps that can
95 * have unique mac addresses/bssid. When staggering beacons
96 * 4 is probably a good max as otherwise the beacons become
97 * very closely spaced and there is limited time for cab q traffic
98 * to go out. You can burst beacons instead but that is not good
99 * for stations in power save and at some point you really want
100 * another radio (and channel).
101 *
102 * The limit on the number of mac addresses is tied to our use of
103 * the U/L bit and tracking addresses in a byte; it would be
104 * worthwhile to allow more for applications like proxy sta.
105 */
106CTASSERT(ATH_BCBUF <= 8);
107
108/* unaligned little endian access */
109#define LE_READ_2(p) \
110 ((u_int16_t) \
111 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8)))
112#define LE_READ_4(p) \
113 ((u_int32_t) \
114 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \
115 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
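/*
 * [Editor's illustrative sketch -- not part of either revision.]
 * LE_READ_2/LE_READ_4 assemble little-endian fields one octet at a
 * time, so they are alignment-safe and independent of host byte order.
 * A hypothetical vendor-IE check built on them (WPA_OUI/WPA_OUI_TYPE
 * are the net80211 constants from ieee80211.h):
 *
 *	static __inline int
 *	iswpaoui(const u_int8_t *frm)
 *	{
 *		return frm[1] > 3 && LE_READ_4(frm+2) ==
 *		    ((WPA_OUI_TYPE<<24)|WPA_OUI);
 *	}
 */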
116
117static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
118 const char name[IFNAMSIZ], int unit, int opmode,
119 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
120 const uint8_t mac[IEEE80211_ADDR_LEN]);
121static void ath_vap_delete(struct ieee80211vap *);
122static void ath_init(void *);
123static void ath_stop_locked(struct ifnet *);
124static void ath_stop(struct ifnet *);
125static void ath_start(struct ifnet *);
126static int ath_reset(struct ifnet *);
127static int ath_reset_vap(struct ieee80211vap *, u_long);
128static int ath_media_change(struct ifnet *);
129static void ath_watchdog(void *);
130static int ath_ioctl(struct ifnet *, u_long, caddr_t);
131static void ath_fatal_proc(void *, int);
132static void ath_bmiss_vap(struct ieee80211vap *);
133static void ath_bmiss_proc(void *, int);
134static int ath_keyset(struct ath_softc *, const struct ieee80211_key *,
135 struct ieee80211_node *);
136static int ath_key_alloc(struct ieee80211vap *,
137 struct ieee80211_key *,
138 ieee80211_keyix *, ieee80211_keyix *);
139static int ath_key_delete(struct ieee80211vap *,
140 const struct ieee80211_key *);
141static int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
142 const u_int8_t mac[IEEE80211_ADDR_LEN]);
143static void ath_key_update_begin(struct ieee80211vap *);
144static void ath_key_update_end(struct ieee80211vap *);
145static void ath_update_mcast(struct ifnet *);
146static void ath_update_promisc(struct ifnet *);
147static void ath_mode_init(struct ath_softc *);
148static void ath_setslottime(struct ath_softc *);
149static void ath_updateslot(struct ifnet *);
150static int ath_beaconq_setup(struct ath_hal *);
151static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
152static void ath_beacon_update(struct ieee80211vap *, int item);
153static void ath_beacon_setup(struct ath_softc *, struct ath_buf *);
154static void ath_beacon_proc(void *, int);
155static struct ath_buf *ath_beacon_generate(struct ath_softc *,
156 struct ieee80211vap *);
157static void ath_bstuck_proc(void *, int);
158static void ath_beacon_return(struct ath_softc *, struct ath_buf *);
159static void ath_beacon_free(struct ath_softc *);
160static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
161static void ath_descdma_cleanup(struct ath_softc *sc,
162 struct ath_descdma *, ath_bufhead *);
163static int ath_desc_alloc(struct ath_softc *);
164static void ath_desc_free(struct ath_softc *);
165static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
166 const uint8_t [IEEE80211_ADDR_LEN]);
167static void ath_node_free(struct ieee80211_node *);
168static void ath_node_getsignal(const struct ieee80211_node *,
169 int8_t *, int8_t *);
170static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
171static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
172 int subtype, int rssi, int noise, u_int32_t rstamp);
173static void ath_setdefantenna(struct ath_softc *, u_int);
174static void ath_rx_proc(void *, int);
175static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
176static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
177static int ath_tx_setup(struct ath_softc *, int, int);
178static int ath_wme_update(struct ieee80211com *);
179static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
180static void ath_tx_cleanup(struct ath_softc *);
181static void ath_freetx(struct mbuf *);
182static int ath_tx_start(struct ath_softc *, struct ieee80211_node *,
183 struct ath_buf *, struct mbuf *);
184static void ath_tx_proc_q0(void *, int);
185static void ath_tx_proc_q0123(void *, int);
186static void ath_tx_proc(void *, int);
187static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
188static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
189static void ath_draintxq(struct ath_softc *);
190static void ath_stoprecv(struct ath_softc *);
191static int ath_startrecv(struct ath_softc *);
192static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
193static void ath_scan_start(struct ieee80211com *);
194static void ath_scan_end(struct ieee80211com *);
195static void ath_set_channel(struct ieee80211com *);
196static void ath_calibrate(void *);
197static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
198static void ath_setup_stationkey(struct ieee80211_node *);
199static void ath_newassoc(struct ieee80211_node *, int);
200static int ath_setregdomain(struct ieee80211com *,
201 struct ieee80211_regdomain *, int,
202 struct ieee80211_channel []);
203static void ath_getradiocaps(struct ieee80211com *, int, int *,
204 struct ieee80211_channel []);
205static int ath_getchannels(struct ath_softc *);
206static void ath_led_event(struct ath_softc *, int);
207
208static int ath_rate_setup(struct ath_softc *, u_int mode);
209static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
210
211static void ath_sysctlattach(struct ath_softc *);
212static int ath_raw_xmit(struct ieee80211_node *,
213 struct mbuf *, const struct ieee80211_bpf_params *);
214static void ath_bpfattach(struct ath_softc *);
215static void ath_announce(struct ath_softc *);
216
- 216#ifdef ATH_SUPPORT_TDMA
+ 217#ifdef IEEE80211_SUPPORT_TDMA
218static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
219 u_int32_t bintval);
220static void ath_tdma_bintvalsetup(struct ath_softc *sc,
221 const struct ieee80211_tdma_state *tdma);
222static void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
223static void ath_tdma_update(struct ieee80211_node *ni,
224 const struct ieee80211_tdma_param *tdma, int);
225static void ath_tdma_beacon_send(struct ath_softc *sc,
226 struct ieee80211vap *vap);
227
228static __inline void
229ath_hal_setcca(struct ath_hal *ah, int ena)
230{
231 /*
232 * NB: fill me in; this is not provided by default because disabling
 233 * CCA in most locales violates regulatory rules.
234 */
235}
236
237static __inline int
238ath_hal_getcca(struct ath_hal *ah)
239{
240 u_int32_t diag;
241 if (ath_hal_getcapability(ah, HAL_CAP_DIAG, 0, &diag) != HAL_OK)
242 return 1;
243 return ((diag & 0x500000) == 0);
244}
245
246#define TDMA_EP_MULTIPLIER (1<<10) /* pow2 to optimize out * and / */
247#define TDMA_LPF_LEN 6
248#define TDMA_DUMMY_MARKER 0x127
249#define TDMA_EP_MUL(x, mul) ((x) * (mul))
250#define TDMA_IN(x) (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
251#define TDMA_LPF(x, y, len) \
252 ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
253#define TDMA_SAMPLE(x, y) do { \
254 x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \
255} while (0)
256#define TDMA_EP_RND(x,mul) \
257 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
258#define TDMA_AVG(x) TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
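/*
 * [Editor's worked example of the fixed-point filter above; not in
 * the original source.] Samples are scaled by TDMA_EP_MULTIPLIER
 * (1024) on the way in so the running average keeps ~10 fractional
 * bits, TDMA_DUMMY_MARKER makes the first sample load the filter
 * directly, and TDMA_AVG rounds back to integer units:
 *
 *	u_int32_t avg = TDMA_DUMMY_MARKER;
 *	TDMA_SAMPLE(avg, 30);	// avg = 30*1024 = 30720 (first sample)
 *	TDMA_SAMPLE(avg, 36);	// avg = (5*30720 + 36*1024)/6 = 31744
 *	// TDMA_AVG(avg) == 31; history is weighted 5:1 over new samples
 */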
- 258#endif /* ATH_SUPPORT_TDMA */
+ 259#endif /* IEEE80211_SUPPORT_TDMA */
260
261SYSCTL_DECL(_hw_ath);
262
263/* XXX validate sysctl values */
264static int ath_longcalinterval = 30; /* long cals every 30 secs */
265SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
266 0, "long chip calibration interval (secs)");
267static int ath_shortcalinterval = 100; /* short cals every 100 ms */
268SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
269 0, "short chip calibration interval (msecs)");
270static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */
271SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
272 0, "reset chip calibration results (secs)");
273
274static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
275SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
276 0, "rx buffers allocated");
277TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
278static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
279SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
280 0, "tx buffers allocated");
281TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
282
283static int ath_bstuck_threshold = 4; /* max missed beacons */
284SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
285 0, "max missed beacon xmits before chip reset");
286
287#ifdef ATH_DEBUG
288enum {
289 ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
290 ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
291 ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */
292 ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
293 ATH_DEBUG_RATE = 0x00000010, /* rate control */
294 ATH_DEBUG_RESET = 0x00000020, /* reset processing */
295 ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */
296 ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */
297 ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */
298 ATH_DEBUG_INTR = 0x00001000, /* ISR */
299 ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */
300 ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */
301 ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */
302 ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */
303 ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */
304 ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */
305 ATH_DEBUG_NODE = 0x00080000, /* node management */
306 ATH_DEBUG_LED = 0x00100000, /* led management */
307 ATH_DEBUG_FF = 0x00200000, /* fast frames */
308 ATH_DEBUG_DFS = 0x00400000, /* DFS processing */
309 ATH_DEBUG_TDMA = 0x00800000, /* TDMA processing */
310 ATH_DEBUG_TDMA_TIMER = 0x01000000, /* TDMA timer processing */
311 ATH_DEBUG_REGDOMAIN = 0x02000000, /* regulatory processing */
312 ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */
313 ATH_DEBUG_ANY = 0xffffffff
314};
315static int ath_debug = 0;
316SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
317 0, "control debugging printfs");
318TUNABLE_INT("hw.ath.debug", &ath_debug);
319
320#define IFF_DUMPPKTS(sc, m) \
321 ((sc->sc_debug & (m)) || \
322 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
323#define DPRINTF(sc, m, fmt, ...) do { \
324 if (sc->sc_debug & (m)) \
325 printf(fmt, __VA_ARGS__); \
326} while (0)
327#define KEYPRINTF(sc, ix, hk, mac) do { \
328 if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \
329 ath_keyprint(sc, __func__, ix, hk, mac); \
330} while (0)
331static void ath_printrxbuf(struct ath_softc *, const struct ath_buf *bf,
332 u_int ix, int);
333static void ath_printtxbuf(struct ath_softc *, const struct ath_buf *bf,
334 u_int qnum, u_int ix, int done);
335#else
336#define IFF_DUMPPKTS(sc, m) \
337 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
338#define DPRINTF(sc, m, fmt, ...) do { \
339 (void) sc; \
340} while (0)
341#define KEYPRINTF(sc, k, ix, mac) do { \
342 (void) sc; \
343} while (0)
344#endif
345
346MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
347
348int
349ath_attach(u_int16_t devid, struct ath_softc *sc)
350{
351 struct ifnet *ifp;
352 struct ieee80211com *ic;
353 struct ath_hal *ah = NULL;
354 HAL_STATUS status;
355 int error = 0, i;
356 u_int wmodes;
357 uint8_t macaddr[IEEE80211_ADDR_LEN];
358
359 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
360
361 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
362 if (ifp == NULL) {
363 device_printf(sc->sc_dev, "can not if_alloc()\n");
364 error = ENOSPC;
365 goto bad;
366 }
367 ic = ifp->if_l2com;
368
369 /* set these up early for if_printf use */
370 if_initname(ifp, device_get_name(sc->sc_dev),
371 device_get_unit(sc->sc_dev));
372
373 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
374 if (ah == NULL) {
375 if_printf(ifp, "unable to attach hardware; HAL status %u\n",
376 status);
377 error = ENXIO;
378 goto bad;
379 }
380 sc->sc_ah = ah;
381 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
382#ifdef ATH_DEBUG
383 sc->sc_debug = ath_debug;
384#endif
385
386 /*
387 * Check if the MAC has multi-rate retry support.
 388 * We do this by trying to set up a fake extended
 389 * descriptor. MACs that don't have support will
 390 * return false w/o doing anything. MACs that do
 391 * support it will return true w/o doing anything.
392 */
393 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
394
395 /*
396 * Check if the device has hardware counters for PHY
397 * errors. If so we need to enable the MIB interrupt
398 * so we can act on stat triggers.
399 */
400 if (ath_hal_hwphycounters(ah))
401 sc->sc_needmib = 1;
402
403 /*
404 * Get the hardware key cache size.
405 */
406 sc->sc_keymax = ath_hal_keycachesize(ah);
407 if (sc->sc_keymax > ATH_KEYMAX) {
408 if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
409 ATH_KEYMAX, sc->sc_keymax);
410 sc->sc_keymax = ATH_KEYMAX;
411 }
412 /*
413 * Reset the key cache since some parts do not
414 * reset the contents on initial power up.
415 */
416 for (i = 0; i < sc->sc_keymax; i++)
417 ath_hal_keyreset(ah, i);
418
419 /*
420 * Collect the default channel list.
421 */
422 error = ath_getchannels(sc);
423 if (error != 0)
424 goto bad;
425
426 /*
427 * Setup rate tables for all potential media types.
428 */
429 ath_rate_setup(sc, IEEE80211_MODE_11A);
430 ath_rate_setup(sc, IEEE80211_MODE_11B);
431 ath_rate_setup(sc, IEEE80211_MODE_11G);
432 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
433 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
434 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
435 ath_rate_setup(sc, IEEE80211_MODE_11NA);
436 ath_rate_setup(sc, IEEE80211_MODE_11NG);
437 ath_rate_setup(sc, IEEE80211_MODE_HALF);
438 ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
439
440 /* NB: setup here so ath_rate_update is happy */
441 ath_setcurmode(sc, IEEE80211_MODE_11A);
442
443 /*
444 * Allocate tx+rx descriptors and populate the lists.
445 */
446 error = ath_desc_alloc(sc);
447 if (error != 0) {
448 if_printf(ifp, "failed to allocate descriptors: %d\n", error);
449 goto bad;
450 }
451 callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
452 callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);
453
454 ATH_TXBUF_LOCK_INIT(sc);
455
456 sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
457 taskqueue_thread_enqueue, &sc->sc_tq);
458 taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
459 "%s taskq", ifp->if_xname);
460
461 TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
462 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
463 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
464
465 /*
466 * Allocate hardware transmit queues: one queue for
467 * beacon frames and one data queue for each QoS
 468 * priority. Note that the hal handles resetting
469 * these queues at the needed time.
470 *
471 * XXX PS-Poll
472 */
473 sc->sc_bhalq = ath_beaconq_setup(ah);
474 if (sc->sc_bhalq == (u_int) -1) {
475 if_printf(ifp, "unable to setup a beacon xmit queue!\n");
476 error = EIO;
477 goto bad2;
478 }
479 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
480 if (sc->sc_cabq == NULL) {
481 if_printf(ifp, "unable to setup CAB xmit queue!\n");
482 error = EIO;
483 goto bad2;
484 }
 485 /* NB: ensure BK queue is the lowest priority h/w queue */
486 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
487 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
488 ieee80211_wme_acnames[WME_AC_BK]);
489 error = EIO;
490 goto bad2;
491 }
492 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
493 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
494 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
495 /*
496 * Not enough hardware tx queues to properly do WME;
497 * just punt and assign them all to the same h/w queue.
498 * We could do a better job of this if, for example,
499 * we allocate queues when we switch from station to
500 * AP mode.
501 */
502 if (sc->sc_ac2q[WME_AC_VI] != NULL)
503 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
504 if (sc->sc_ac2q[WME_AC_BE] != NULL)
505 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
506 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
507 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
508 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
509 }
510
511 /*
 512 * Special case certain configurations. Note that the
 513 * CAB queue is handled specially by these, so don't
 514 * include it when checking the txq setup mask.
515 */
516 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
517 case 0x01:
518 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
519 break;
520 case 0x0f:
521 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
522 break;
523 default:
524 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
525 break;
526 }
527
528 /*
529 * Setup rate control. Some rate control modules
 530 * call back to change the antenna state so expose
531 * the necessary entry points.
532 * XXX maybe belongs in struct ath_ratectrl?
533 */
534 sc->sc_setdefantenna = ath_setdefantenna;
535 sc->sc_rc = ath_rate_attach(sc);
536 if (sc->sc_rc == NULL) {
537 error = EIO;
538 goto bad2;
539 }
540
541 sc->sc_blinking = 0;
542 sc->sc_ledstate = 1;
543 sc->sc_ledon = 0; /* low true */
544 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
545 callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
546 /*
547 * Auto-enable soft led processing for IBM cards and for
548 * 5211 minipci cards. Users can also manually enable/disable
549 * support with a sysctl.
550 */
551 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
552 if (sc->sc_softled) {
553 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
554 HAL_GPIO_MUX_MAC_NETWORK_LED);
555 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
556 }
557
558 ifp->if_softc = sc;
559 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
560 ifp->if_start = ath_start;
561 ifp->if_watchdog = NULL;
562 ifp->if_ioctl = ath_ioctl;
563 ifp->if_init = ath_init;
564 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
565 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
566 IFQ_SET_READY(&ifp->if_snd);
567
568 ic->ic_ifp = ifp;
569 /* XXX not right but it's not used anywhere important */
570 ic->ic_phytype = IEEE80211_T_OFDM;
571 ic->ic_opmode = IEEE80211_M_STA;
572 ic->ic_caps =
573 IEEE80211_C_STA /* station mode */
574 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
575 | IEEE80211_C_HOSTAP /* hostap mode */
576 | IEEE80211_C_MONITOR /* monitor mode */
577 | IEEE80211_C_AHDEMO /* adhoc demo mode */
578 | IEEE80211_C_WDS /* 4-address traffic works */
579 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
580 | IEEE80211_C_SHSLOT /* short slot time supported */
581 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
582 | IEEE80211_C_BGSCAN /* capable of bg scanning */
583 | IEEE80211_C_TXFRAG /* handle tx frags */
584 ;
585 /*
586 * Query the hal to figure out h/w crypto support.
587 */
588 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
589 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
590 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
591 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
592 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
593 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
594 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
595 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
596 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
597 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
598 /*
599 * Check if h/w does the MIC and/or whether the
600 * separate key cache entries are required to
601 * handle both tx+rx MIC keys.
602 */
603 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
604 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
605 /*
606 * If the h/w supports storing tx+rx MIC keys
607 * in one cache slot automatically enable use.
608 */
609 if (ath_hal_hastkipsplit(ah) ||
610 !ath_hal_settkipsplit(ah, AH_FALSE))
611 sc->sc_splitmic = 1;
612 /*
613 * If the h/w can do TKIP MIC together with WME then
614 * we use it; otherwise we force the MIC to be done
615 * in software by the net80211 layer.
616 */
617 if (ath_hal_haswmetkipmic(ah))
618 sc->sc_wmetkipmic = 1;
619 }
620 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
621 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
622 /*
623 * Mark key cache slots associated with global keys
624 * as in use. If we knew TKIP was not to be used we
625 * could leave the +32, +64, and +32+64 slots free.
626 */
627 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
628 setbit(sc->sc_keymap, i);
629 setbit(sc->sc_keymap, i+64);
630 if (sc->sc_splitmic) {
631 setbit(sc->sc_keymap, i+32);
632 setbit(sc->sc_keymap, i+32+64);
633 }
634 }
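	/*
	 * [Editor's note, inferred from the loop above; not in the
	 * original.] Each global key index i reserves slot i plus
	 * companion slot i+64, and on split-mic parts also i+32 and
	 * i+32+64 (the extra TKIP MIC key slots). With
	 * IEEE80211_WEP_NKID == 4 that marks slots 0-3 and 64-67 as in
	 * use, plus 32-35 and 96-99 when sc_splitmic is set.
	 */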
635 /*
636 * TPC support can be done either with a global cap or
637 * per-packet support. The latter is not available on
638 * all parts. We're a bit pedantic here as all parts
639 * support a global cap.
640 */
641 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
642 ic->ic_caps |= IEEE80211_C_TXPMGT;
643
644 /*
645 * Mark WME capability only if we have sufficient
646 * hardware queues to do proper priority scheduling.
647 */
648 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
649 ic->ic_caps |= IEEE80211_C_WME;
650 /*
651 * Check for misc other capabilities.
652 */
653 if (ath_hal_hasbursting(ah))
654 ic->ic_caps |= IEEE80211_C_BURST;
655 sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
656 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
657 if (ath_hal_hasfastframes(ah))
658 ic->ic_caps |= IEEE80211_C_FF;
659 wmodes = ath_hal_getwirelessmodes(ah);
660 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
661 ic->ic_caps |= IEEE80211_C_TURBOP;
- 661#ifdef ATH_SUPPORT_TDMA
+ 662#ifdef IEEE80211_SUPPORT_TDMA
662 if (ath_hal_macversion(ah) > 0x78) {
663 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
664 ic->ic_tdma_update = ath_tdma_update;
665 }
666#endif
667 /*
668 * Indicate we need the 802.11 header padded to a
669 * 32-bit boundary for 4-address and QoS frames.
670 */
671 ic->ic_flags |= IEEE80211_F_DATAPAD;
672
673 /*
674 * Query the hal about antenna support.
675 */
676 sc->sc_defant = ath_hal_getdefantenna(ah);
677
678 /*
679 * Not all chips have the VEOL support we want to
680 * use with IBSS beacons; check here for it.
681 */
682 sc->sc_hasveol = ath_hal_hasveol(ah);
683
684 /* get mac address from hardware */
685 ath_hal_getmac(ah, macaddr);
686 if (sc->sc_hasbmask)
687 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
688
689 /* NB: used to size node table key mapping array */
690 ic->ic_max_keyix = sc->sc_keymax;
691 /* call MI attach routine. */
692 ieee80211_ifattach(ic, macaddr);
693 ic->ic_setregdomain = ath_setregdomain;
694 ic->ic_getradiocaps = ath_getradiocaps;
695 sc->sc_opmode = HAL_M_STA;
696
697 /* override default methods */
698 ic->ic_newassoc = ath_newassoc;
699 ic->ic_updateslot = ath_updateslot;
700 ic->ic_wme.wme_update = ath_wme_update;
701 ic->ic_vap_create = ath_vap_create;
702 ic->ic_vap_delete = ath_vap_delete;
703 ic->ic_raw_xmit = ath_raw_xmit;
704 ic->ic_update_mcast = ath_update_mcast;
705 ic->ic_update_promisc = ath_update_promisc;
706 ic->ic_node_alloc = ath_node_alloc;
707 sc->sc_node_free = ic->ic_node_free;
708 ic->ic_node_free = ath_node_free;
709 ic->ic_node_getsignal = ath_node_getsignal;
710 ic->ic_scan_start = ath_scan_start;
711 ic->ic_scan_end = ath_scan_end;
712 ic->ic_set_channel = ath_set_channel;
713
714 ath_bpfattach(sc);
715 /*
716 * Setup dynamic sysctl's now that country code and
717 * regdomain are available from the hal.
718 */
719 ath_sysctlattach(sc);
720
721 if (bootverbose)
722 ieee80211_announce(ic);
723 ath_announce(sc);
724 return 0;
725bad2:
726 ath_tx_cleanup(sc);
727 ath_desc_free(sc);
728bad:
729 if (ah)
730 ath_hal_detach(ah);
731 if (ifp != NULL)
732 if_free(ifp);
733 sc->sc_invalid = 1;
734 return error;
735}
736
737int
738ath_detach(struct ath_softc *sc)
739{
740 struct ifnet *ifp = sc->sc_ifp;
741
742 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
743 __func__, ifp->if_flags);
744
745 /*
746 * NB: the order of these is important:
747 * o stop the chip so no more interrupts will fire
748 * o call the 802.11 layer before detaching the hal to
 749 * ensure callbacks into the driver to delete global
750 * key cache entries can be handled
751 * o free the taskqueue which drains any pending tasks
752 * o reclaim the bpf tap now that we know nothing will use
753 * it (e.g. rx processing from the task q thread)
754 * o reclaim the tx queue data structures after calling
755 * the 802.11 layer as we'll get called back to reclaim
756 * node state and potentially want to use them
757 * o to cleanup the tx queues the hal is called, so detach
758 * it last
759 * Other than that, it's straightforward...
760 */
761 ath_stop(ifp);
762 ieee80211_ifdetach(ifp->if_l2com);
763 taskqueue_free(sc->sc_tq);
764 bpfdetach(ifp);
765#ifdef ATH_TX99_DIAG
766 if (sc->sc_tx99 != NULL)
767 sc->sc_tx99->detach(sc->sc_tx99);
768#endif
769 ath_rate_detach(sc->sc_rc);
770 ath_desc_free(sc);
771 ath_tx_cleanup(sc);
772 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
773 if_free(ifp);
774
775 return 0;
776}
777
778/*
779 * MAC address handling for multiple BSS on the same radio.
780 * The first vap uses the MAC address from the EEPROM. For
 781 * subsequent vaps we set the U/L bit (bit 1) in the MAC
782 * address and use the next six bits as an index.
783 */
784static void
785assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
786{
787 int i;
788
789 if (clone && sc->sc_hasbmask) {
790 /* NB: we only do this if h/w supports multiple bssid */
791 for (i = 0; i < 8; i++)
792 if ((sc->sc_bssidmask & (1<<i)) == 0)
793 break;
794 if (i != 0)
795 mac[0] |= (i << 2)|0x2;
796 } else
797 i = 0;
798 sc->sc_bssidmask |= 1<<i;
799 sc->sc_hwbssidmask[0] &= ~mac[0];
800 if (i == 0)
801 sc->sc_nbssid0++;
802}
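/*
 * [Editor's illustrative example; the address is hypothetical.] If the
 * EEPROM MAC is 00:11:22:33:44:55, the first vap takes it unchanged
 * (i == 0). A second cloned vap finds i == 1 free and ORs in
 * (1<<2)|0x2, yielding 06:11:22:33:44:55: the 0x2 (U/L) bit marks the
 * address locally administered, bits 2-4 carry the slot index, and
 * reclaim_address() recovers that index as mac[0] >> 2.
 */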
803
804static void
805reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
806{
807 int i = mac[0] >> 2;
808 uint8_t mask;
809
810 if (i != 0 || --sc->sc_nbssid0 == 0) {
811 sc->sc_bssidmask &= ~(1<<i);
812 /* recalculate bssid mask from remaining addresses */
813 mask = 0xff;
814 for (i = 1; i < 8; i++)
815 if (sc->sc_bssidmask & (1<<i))
816 mask &= ~((i<<2)|0x2);
817 sc->sc_hwbssidmask[0] |= mask;
818 }
819}
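/*
 * [Editor's worked example; not in the original.] Suppose cloned vaps
 * with indices 1 (mac[0] bits 0x06) and 2 (0x0a) exist and index 2 is
 * reclaimed: the loop rebuilds mask = 0xff & ~0x06 = 0xf9 from the
 * surviving index and ORs it back into sc_hwbssidmask[0], restoring
 * hardware matching on the bits no longer varied by any active
 * address.
 */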
820
821/*
822 * Assign a beacon xmit slot. We try to space out
823 * assignments so when beacons are staggered the
824 * traffic coming out of the cab q has maximal time
825 * to go out before the next beacon is scheduled.
826 */
827static int
828assign_bslot(struct ath_softc *sc)
829{
830 u_int slot, free;
831
832 free = 0;
833 for (slot = 0; slot < ATH_BCBUF; slot++)
834 if (sc->sc_bslot[slot] == NULL) {
835 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
836 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
837 return slot;
838 free = slot;
839 /* NB: keep looking for a double slot */
840 }
841 return free;
842}
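/*
 * [Editor's hypothetical example; not in the original.] With
 * ATH_BCBUF == 4 and only slot 0 occupied, the loop returns slot 2
 * first, since both of its neighbors (1 and 3) are free; that keeps
 * staggered beacons maximally separated, leaving 1 and 3 as
 * single-spaced fallbacks for later vaps.
 */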
843
844static struct ieee80211vap *
845ath_vap_create(struct ieee80211com *ic,
846 const char name[IFNAMSIZ], int unit, int opmode, int flags,
847 const uint8_t bssid[IEEE80211_ADDR_LEN],
848 const uint8_t mac0[IEEE80211_ADDR_LEN])
849{
850 struct ath_softc *sc = ic->ic_ifp->if_softc;
851 struct ath_vap *avp;
852 struct ieee80211vap *vap;
853 uint8_t mac[IEEE80211_ADDR_LEN];
854 int ic_opmode, needbeacon, error;
855
856 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
857 M_80211_VAP, M_WAITOK | M_ZERO);
858 needbeacon = 0;
859 IEEE80211_ADDR_COPY(mac, mac0);
860
861 ATH_LOCK(sc);
862 switch (opmode) {
863 case IEEE80211_M_STA:
864 if (sc->sc_nstavaps != 0) { /* XXX only 1 sta for now */
865 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
866 goto bad;
867 }
868 if (sc->sc_nvaps) {
869 /*
870 * When there are multiple vaps we must fall
871 * back to s/w beacon miss handling.
872 */
873 flags |= IEEE80211_CLONE_NOBEACONS;
874 }
875 if (flags & IEEE80211_CLONE_NOBEACONS)
876 ic_opmode = IEEE80211_M_HOSTAP;
877 else
878 ic_opmode = opmode;
879 break;
880 case IEEE80211_M_IBSS:
881 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
882 device_printf(sc->sc_dev,
883 "only 1 ibss vap supported\n");
884 goto bad;
885 }
886 ic_opmode = opmode;
887 needbeacon = 1;
888 break;
889 case IEEE80211_M_AHDEMO:
663 if (ath_hal_macversion(ah) > 0x78) {
664 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
665 ic->ic_tdma_update = ath_tdma_update;
666 }
667#endif
668 /*
669 * Indicate we need the 802.11 header padded to a
670 * 32-bit boundary for 4-address and QoS frames.
671 */
672 ic->ic_flags |= IEEE80211_F_DATAPAD;
673
674 /*
675 * Query the hal about antenna support.
676 */
677 sc->sc_defant = ath_hal_getdefantenna(ah);
678
679 /*
680 * Not all chips have the VEOL support we want to
681 * use with IBSS beacons; check here for it.
682 */
683 sc->sc_hasveol = ath_hal_hasveol(ah);
684
685 /* get mac address from hardware */
686 ath_hal_getmac(ah, macaddr);
687 if (sc->sc_hasbmask)
688 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
689
690 /* NB: used to size node table key mapping array */
691 ic->ic_max_keyix = sc->sc_keymax;
692 /* call MI attach routine. */
693 ieee80211_ifattach(ic, macaddr);
694 ic->ic_setregdomain = ath_setregdomain;
695 ic->ic_getradiocaps = ath_getradiocaps;
696 sc->sc_opmode = HAL_M_STA;
697
698 /* override default methods */
699 ic->ic_newassoc = ath_newassoc;
700 ic->ic_updateslot = ath_updateslot;
701 ic->ic_wme.wme_update = ath_wme_update;
702 ic->ic_vap_create = ath_vap_create;
703 ic->ic_vap_delete = ath_vap_delete;
704 ic->ic_raw_xmit = ath_raw_xmit;
705 ic->ic_update_mcast = ath_update_mcast;
706 ic->ic_update_promisc = ath_update_promisc;
707 ic->ic_node_alloc = ath_node_alloc;
708 sc->sc_node_free = ic->ic_node_free;
709 ic->ic_node_free = ath_node_free;
710 ic->ic_node_getsignal = ath_node_getsignal;
711 ic->ic_scan_start = ath_scan_start;
712 ic->ic_scan_end = ath_scan_end;
713 ic->ic_set_channel = ath_set_channel;
714
715 ath_bpfattach(sc);
716 /*
717 * Setup dynamic sysctl's now that country code and
718 * regdomain are available from the hal.
719 */
720 ath_sysctlattach(sc);
721
722 if (bootverbose)
723 ieee80211_announce(ic);
724 ath_announce(sc);
725 return 0;
726bad2:
727 ath_tx_cleanup(sc);
728 ath_desc_free(sc);
729bad:
730 if (ah)
731 ath_hal_detach(ah);
732 if (ifp != NULL)
733 if_free(ifp);
734 sc->sc_invalid = 1;
735 return error;
736}
737
738int
739ath_detach(struct ath_softc *sc)
740{
741 struct ifnet *ifp = sc->sc_ifp;
742
743 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
744 __func__, ifp->if_flags);
745
746 /*
747 * NB: the order of these is important:
748 * o stop the chip so no more interrupts will fire
749 * o call the 802.11 layer before detaching the hal to
750 * insure callbacks into the driver to delete global
751 * key cache entries can be handled
752 * o free the taskqueue which drains any pending tasks
753 * o reclaim the bpf tap now that we know nothing will use
754 * it (e.g. rx processing from the task q thread)
755 * o reclaim the tx queue data structures after calling
756 * the 802.11 layer as we'll get called back to reclaim
757 * node state and potentially want to use them
758 * o to cleanup the tx queues the hal is called, so detach
759 * it last
760 * Other than that, it's straightforward...
761 */
762 ath_stop(ifp);
763 ieee80211_ifdetach(ifp->if_l2com);
764 taskqueue_free(sc->sc_tq);
765 bpfdetach(ifp);
766#ifdef ATH_TX99_DIAG
767 if (sc->sc_tx99 != NULL)
768 sc->sc_tx99->detach(sc->sc_tx99);
769#endif
770 ath_rate_detach(sc->sc_rc);
771 ath_desc_free(sc);
772 ath_tx_cleanup(sc);
773 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
774 if_free(ifp);
775
776 return 0;
777}
778
779/*
780 * MAC address handling for multiple BSS on the same radio.
781 * The first vap uses the MAC address from the EEPROM. For
782 * subsequent vap's we set the U/L bit (bit 1) in the MAC
783 * address and use the next six bits as an index.
784 */
785static void
786assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
787{
788 int i;
789
790 if (clone && sc->sc_hasbmask) {
791 /* NB: we only do this if h/w supports multiple bssid */
792 for (i = 0; i < 8; i++)
793 if ((sc->sc_bssidmask & (1<<i)) == 0)
794 break;
795 if (i != 0)
796 mac[0] |= (i << 2)|0x2;
797 } else
798 i = 0;
799 sc->sc_bssidmask |= 1<<i;
800 sc->sc_hwbssidmask[0] &= ~mac[0];
801 if (i == 0)
802 sc->sc_nbssid0++;
803}
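/*
 * Worked example for assign_address() -- the EEPROM address below is
 * made up for illustration.  With a base address of 00:11:22:33:44:55
 * the first vap keeps it unchanged (i = 0).  A second cloned vap finds
 * index i = 1 free, so mac[0] |= (1 << 2) | 0x2 gives 06:11:22:33:44:55:
 * 0x2 is the U/L (locally administered) bit and bits 2-4 carry the
 * index.  Clearing the bits of mac[0] in sc_hwbssidmask[0] tells the
 * hardware to treat those bit positions as wildcards when matching
 * the BSSID, so frames for either address are accepted.
 */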
804
805static void
806reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
807{
808 int i = mac[0] >> 2;
809 uint8_t mask;
810
811 if (i != 0 || --sc->sc_nbssid0 == 0) {
812 sc->sc_bssidmask &= ~(1<<i);
813 /* recalculate bssid mask from remaining addresses */
814 mask = 0xff;
815 for (i = 1; i < 8; i++)
816 if (sc->sc_bssidmask & (1<<i))
817 mask &= ~((i<<2)|0x2);
818 sc->sc_hwbssidmask[0] |= mask;
819 }
820}
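/*
 * Worked example for reclaim_address() -- the state is hypothetical.
 * Suppose vaps exist at index 0 and index 1 and the i = 1 vap goes
 * away: sc_bssidmask drops to 0x01, the loop over i = 1..7 finds no
 * index still in use, mask stays 0xff, and sc_hwbssidmask[0] |= 0xff
 * restores exact matching on the first address byte.  If instead an
 * i = 2 vap remained, mask would become 0xff & ~((2 << 2) | 0x2) = 0xf5,
 * leaving only the bits that still differ between addresses wildcarded.
 */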
821
822/*
823 * Assign a beacon xmit slot. We try to space out
824 * assignments so when beacons are staggered the
825 * traffic coming out of the cab q has maximal time
826 * to go out before the next beacon is scheduled.
827 */
828static int
829assign_bslot(struct ath_softc *sc)
830{
831 u_int slot, free;
832
833 free = 0;
834 for (slot = 0; slot < ATH_BCBUF; slot++)
835 if (sc->sc_bslot[slot] == NULL) {
836 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
837 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
838 return slot;
839 free = slot;
840 /* NB: keep looking for a double slot */
841 }
842 return free;
843}
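/*
 * Worked example for assign_bslot(), assuming the usual ATH_BCBUF of 4:
 * with only slot 0 occupied, slot 1 is passed over because its neighbor
 * 0 is busy, and slot 2 is returned since both neighbors (1 and 3) are
 * free -- maximal spacing for staggered beacons.  Note the unsigned
 * wraparound in (slot-1)%ATH_BCBUF: for slot 0 it yields ATH_BCBUF-1,
 * which is only correct because ATH_BCBUF is a power of two.
 */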
844
845static struct ieee80211vap *
846ath_vap_create(struct ieee80211com *ic,
847 const char name[IFNAMSIZ], int unit, int opmode, int flags,
848 const uint8_t bssid[IEEE80211_ADDR_LEN],
849 const uint8_t mac0[IEEE80211_ADDR_LEN])
850{
851 struct ath_softc *sc = ic->ic_ifp->if_softc;
852 struct ath_vap *avp;
853 struct ieee80211vap *vap;
854 uint8_t mac[IEEE80211_ADDR_LEN];
855 int ic_opmode, needbeacon, error;
856
857 avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
858 M_80211_VAP, M_WAITOK | M_ZERO);
859 needbeacon = 0;
860 IEEE80211_ADDR_COPY(mac, mac0);
861
862 ATH_LOCK(sc);
863 switch (opmode) {
864 case IEEE80211_M_STA:
865 if (sc->sc_nstavaps != 0) { /* XXX only 1 sta for now */
866 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
867 goto bad;
868 }
869 if (sc->sc_nvaps) {
870 /*
871 * When there are multiple vaps we must fall
872 * back to s/w beacon miss handling.
873 */
874 flags |= IEEE80211_CLONE_NOBEACONS;
875 }
876 if (flags & IEEE80211_CLONE_NOBEACONS)
877 ic_opmode = IEEE80211_M_HOSTAP;
878 else
879 ic_opmode = opmode;
880 break;
881 case IEEE80211_M_IBSS:
882 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
883 device_printf(sc->sc_dev,
884 "only 1 ibss vap supported\n");
885 goto bad;
886 }
887 ic_opmode = opmode;
888 needbeacon = 1;
889 break;
890 case IEEE80211_M_AHDEMO:
890#ifdef ATH_SUPPORT_TDMA
891#ifdef IEEE80211_SUPPORT_TDMA
892 if (flags & IEEE80211_CLONE_TDMA) {
893 needbeacon = 1;
894 flags |= IEEE80211_CLONE_NOBEACONS;
895 }
896 /* fall thru... */
897#endif
898 case IEEE80211_M_MONITOR:
899 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
900 /* XXX not right for monitor mode */
901 ic_opmode = ic->ic_opmode;
902 } else
903 ic_opmode = opmode;
904 break;
905 case IEEE80211_M_HOSTAP:
906 needbeacon = 1;
907 /* fall thru... */
908 case IEEE80211_M_WDS:
909 if (sc->sc_nvaps && ic->ic_opmode == IEEE80211_M_STA) {
910 device_printf(sc->sc_dev,
911 "wds not supported in sta mode\n");
912 goto bad;
913 }
914 if (opmode == IEEE80211_M_WDS) {
915 /*
916 * Silently remove any request for a unique
917 * bssid; WDS vap's always share the local
918 * mac address.
919 */
920 flags &= ~IEEE80211_CLONE_BSSID;
921 }
922 ic_opmode = IEEE80211_M_HOSTAP;
923 break;
924 default:
925 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
926 goto bad;
927 }
928 /*
929 * Check that a beacon buffer is available; the code below assumes it.
930 */
 931	if (needbeacon && STAILQ_EMPTY(&sc->sc_bbuf)) {
932 device_printf(sc->sc_dev, "no beacon buffer available\n");
933 goto bad;
934 }
935
936 /* STA, AHDEMO? */
937 if (opmode == IEEE80211_M_HOSTAP) {
938 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
939 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
940 }
941
942 vap = &avp->av_vap;
943 /* XXX can't hold mutex across if_alloc */
944 ATH_UNLOCK(sc);
945 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
946 bssid, mac);
947 ATH_LOCK(sc);
948 if (error != 0) {
949 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
950 __func__, error);
951 goto bad2;
952 }
953
954 /* h/w crypto support */
955 vap->iv_key_alloc = ath_key_alloc;
956 vap->iv_key_delete = ath_key_delete;
957 vap->iv_key_set = ath_key_set;
958 vap->iv_key_update_begin = ath_key_update_begin;
959 vap->iv_key_update_end = ath_key_update_end;
960
961 /* override various methods */
962 avp->av_recv_mgmt = vap->iv_recv_mgmt;
963 vap->iv_recv_mgmt = ath_recv_mgmt;
964 vap->iv_reset = ath_reset_vap;
965 vap->iv_update_beacon = ath_beacon_update;
966 avp->av_newstate = vap->iv_newstate;
967 vap->iv_newstate = ath_newstate;
968 avp->av_bmiss = vap->iv_bmiss;
969 vap->iv_bmiss = ath_bmiss_vap;
970
971 avp->av_bslot = -1;
972 if (needbeacon) {
973 /*
974 * Allocate beacon state and setup the q for buffered
975 * multicast frames. We know a beacon buffer is
976 * available because we checked above.
977 */
978 avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
979 STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
980 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
981 /*
982 * Assign the vap to a beacon xmit slot. As above
983 * this cannot fail to find a free one.
984 */
985 avp->av_bslot = assign_bslot(sc);
986 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
987 ("beacon slot %u not empty", avp->av_bslot));
988 sc->sc_bslot[avp->av_bslot] = vap;
989 sc->sc_nbcnvaps++;
990 }
991 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
992 /*
 993	 * Multiple vaps are to transmit beacons and we
994 * have h/w support for TSF adjusting; enable
995 * use of staggered beacons.
996 */
997 sc->sc_stagbeacons = 1;
998 }
999 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1000 }
1001
1002 ic->ic_opmode = ic_opmode;
1003 if (opmode != IEEE80211_M_WDS) {
1004 sc->sc_nvaps++;
1005 if (opmode == IEEE80211_M_STA)
1006 sc->sc_nstavaps++;
1007 }
1008 switch (ic_opmode) {
1009 case IEEE80211_M_IBSS:
1010 sc->sc_opmode = HAL_M_IBSS;
1011 break;
1012 case IEEE80211_M_STA:
1013 sc->sc_opmode = HAL_M_STA;
1014 break;
1015 case IEEE80211_M_AHDEMO:
1015#ifdef ATH_SUPPORT_TDMA
1016#ifdef IEEE80211_SUPPORT_TDMA
1017 if (vap->iv_caps & IEEE80211_C_TDMA) {
1018 sc->sc_tdma = 1;
1019 /* NB: disable tsf adjust */
1020 sc->sc_stagbeacons = 0;
1021 }
1022 /*
1023 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1024 * just ap mode.
1025 */
1026 /* fall thru... */
1027#endif
1028 case IEEE80211_M_HOSTAP:
1029 sc->sc_opmode = HAL_M_HOSTAP;
1030 break;
1031 case IEEE80211_M_MONITOR:
1032 sc->sc_opmode = HAL_M_MONITOR;
1033 break;
1034 default:
1035 /* XXX should not happen */
1036 break;
1037 }
1038 if (sc->sc_hastsfadd) {
1039 /*
1040 * Configure whether or not TSF adjust should be done.
1041 */
1042 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1043 }
1044 if (flags & IEEE80211_CLONE_NOBEACONS) {
1045 /*
1046 * Enable s/w beacon miss handling.
1047 */
1048 sc->sc_swbmiss = 1;
1049 }
1050 ATH_UNLOCK(sc);
1051
1052 /* complete setup */
1053 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1054 return vap;
1055bad2:
1056 reclaim_address(sc, mac);
1057 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1058bad:
1059 free(avp, M_80211_VAP);
1060 ATH_UNLOCK(sc);
1061 return NULL;
1062}
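/*
 * Usage note (typical invocation, not taken from this file): this
 * entry point is installed as ic_vap_create above and is reached when
 * a wlan interface is cloned, e.g.
 *	ifconfig wlan0 create wlandev ath0 wlanmode hostap
 * A second hostap vap cloned the same way receives a locally
 * administered BSSID via assign_address().
 */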
1063
1064static void
1065ath_vap_delete(struct ieee80211vap *vap)
1066{
1067 struct ieee80211com *ic = vap->iv_ic;
1068 struct ifnet *ifp = ic->ic_ifp;
1069 struct ath_softc *sc = ifp->if_softc;
1070 struct ath_hal *ah = sc->sc_ah;
1071 struct ath_vap *avp = ATH_VAP(vap);
1072
1073 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1074 /*
1075 * Quiesce the hardware while we remove the vap. In
1076 * particular we need to reclaim all references to
1077 * the vap state by any frames pending on the tx queues.
1078 */
1079 ath_hal_intrset(ah, 0); /* disable interrupts */
1080 ath_draintxq(sc); /* stop xmit side */
1081 ath_stoprecv(sc); /* stop recv side */
1082 }
1083
1084 ieee80211_vap_detach(vap);
1085 ATH_LOCK(sc);
1086 /*
1087 * Reclaim beacon state. Note this must be done before
1088 * the vap instance is reclaimed as we may have a reference
1089 * to it in the buffer for the beacon frame.
1090 */
1091 if (avp->av_bcbuf != NULL) {
1092 if (avp->av_bslot != -1) {
1093 sc->sc_bslot[avp->av_bslot] = NULL;
1094 sc->sc_nbcnvaps--;
1095 }
1096 ath_beacon_return(sc, avp->av_bcbuf);
1097 avp->av_bcbuf = NULL;
1098 if (sc->sc_nbcnvaps == 0) {
1099 sc->sc_stagbeacons = 0;
1100 if (sc->sc_hastsfadd)
1101 ath_hal_settsfadjust(sc->sc_ah, 0);
1102 }
1103 /*
1104 * Reclaim any pending mcast frames for the vap.
1105 */
1106 ath_tx_draintxq(sc, &avp->av_mcastq);
1107 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
1108 }
1109 /*
1110 * Update bookkeeping.
1111 */
1112 if (vap->iv_opmode == IEEE80211_M_STA) {
1113 sc->sc_nstavaps--;
1114 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1115 sc->sc_swbmiss = 0;
1116 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
1117 reclaim_address(sc, vap->iv_myaddr);
1118 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1119 }
1120 if (vap->iv_opmode != IEEE80211_M_WDS)
1121 sc->sc_nvaps--;
1121#ifdef ATH_SUPPORT_TDMA
1122#ifdef IEEE80211_SUPPORT_TDMA
1123 /* TDMA operation ceases when the last vap is destroyed */
1124 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1125 sc->sc_tdma = 0;
1126 sc->sc_swbmiss = 0;
1127 }
1128#endif
1129 ATH_UNLOCK(sc);
1130 free(avp, M_80211_VAP);
1131
1132 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1133 /*
1134 * Restart rx+tx machines if still running (RUNNING will
1135 * be reset if we just destroyed the last vap).
1136 */
1137 if (ath_startrecv(sc) != 0)
1138 if_printf(ifp, "%s: unable to restart recv logic\n",
1139 __func__);
1140 if (sc->sc_beacons)
1141 ath_beacon_config(sc, NULL);
1142 ath_hal_intrset(ah, sc->sc_imask);
1143 }
1144}
1145
1146void
1147ath_suspend(struct ath_softc *sc)
1148{
1149 struct ifnet *ifp = sc->sc_ifp;
1150 struct ieee80211com *ic = ifp->if_l2com;
1151
1152 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1153 __func__, ifp->if_flags);
1154
1155 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1156 if (ic->ic_opmode == IEEE80211_M_STA)
1157 ath_stop(ifp);
1158 else
1159 ieee80211_suspend_all(ic);
1160 /*
1161 * NB: don't worry about putting the chip in low power
1162 * mode; pci will power off our socket on suspend and
1163 * cardbus detaches the device.
1164 */
1165}
1166
1167/*
1168 * Reset the key cache since some parts do not reset the
1169 * contents on resume. First we clear all entries, then
1170 * re-load keys that the 802.11 layer assumes are setup
1171 * in h/w.
1172 */
1173static void
1174ath_reset_keycache(struct ath_softc *sc)
1175{
1176 struct ifnet *ifp = sc->sc_ifp;
1177 struct ieee80211com *ic = ifp->if_l2com;
1178 struct ath_hal *ah = sc->sc_ah;
1179 int i;
1180
1181 for (i = 0; i < sc->sc_keymax; i++)
1182 ath_hal_keyreset(ah, i);
1183 ieee80211_crypto_reload_keys(ic);
1184}
1185
1186void
1187ath_resume(struct ath_softc *sc)
1188{
1189 struct ifnet *ifp = sc->sc_ifp;
1190 struct ieee80211com *ic = ifp->if_l2com;
1191 struct ath_hal *ah = sc->sc_ah;
1192 HAL_STATUS status;
1193
1194 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1195 __func__, ifp->if_flags);
1196
1197 /*
1198 * Must reset the chip before we reload the
1199 * keycache as we were powered down on suspend.
1200 */
1201 ath_hal_reset(ah, sc->sc_opmode,
1202 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1203 AH_FALSE, &status);
1204 ath_reset_keycache(sc);
1205 if (sc->sc_resume_up) {
1206 if (ic->ic_opmode == IEEE80211_M_STA) {
1207 ath_init(sc);
1208 ieee80211_beacon_miss(ic);
1209 } else
1210 ieee80211_resume_all(ic);
1211 }
1212 if (sc->sc_softled) {
1213 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
1214 HAL_GPIO_MUX_MAC_NETWORK_LED);
1215 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
1216 }
1217}
1218
1219void
1220ath_shutdown(struct ath_softc *sc)
1221{
1222 struct ifnet *ifp = sc->sc_ifp;
1223
1224 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1225 __func__, ifp->if_flags);
1226
1227 ath_stop(ifp);
1228 /* NB: no point powering down chip as we're about to reboot */
1229}
1230
1231/*
1232 * Interrupt handler. Most of the actual processing is deferred.
1233 */
1234void
1235ath_intr(void *arg)
1236{
1237 struct ath_softc *sc = arg;
1238 struct ifnet *ifp = sc->sc_ifp;
1239 struct ath_hal *ah = sc->sc_ah;
1240 HAL_INT status;
1241
1242 if (sc->sc_invalid) {
1243 /*
1244 * The hardware is not ready/present, don't touch anything.
1245 * Note this can happen early on if the IRQ is shared.
1246 */
1247 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1248 return;
1249 }
1250 if (!ath_hal_intrpend(ah)) /* shared irq, not for us */
1251 return;
1252 if ((ifp->if_flags & IFF_UP) == 0 ||
1253 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1254 HAL_INT status;
1255
1256 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1257 __func__, ifp->if_flags);
1258 ath_hal_getisr(ah, &status); /* clear ISR */
1259 ath_hal_intrset(ah, 0); /* disable further intr's */
1260 return;
1261 }
1262 /*
1263 * Figure out the reason(s) for the interrupt. Note
1264 * that the hal returns a pseudo-ISR that may include
1265 * bits we haven't explicitly enabled so we mask the
1266 * value to insure we only process bits we requested.
1267 */
1268 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1269 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1270 status &= sc->sc_imask; /* discard unasked for bits */
1271 if (status & HAL_INT_FATAL) {
1272 sc->sc_stats.ast_hardware++;
1273 ath_hal_intrset(ah, 0); /* disable intr's until reset */
1274 ath_fatal_proc(sc, 0);
1275 } else {
1276 if (status & HAL_INT_SWBA) {
1277 /*
1278 * Software beacon alert--time to send a beacon.
1279 * Handle beacon transmission directly; deferring
1280 * this is too slow to meet timing constraints
1281 * under load.
1282 */
1282#ifdef ATH_SUPPORT_TDMA
1283#ifdef IEEE80211_SUPPORT_TDMA
1284 if (sc->sc_tdma) {
1285 if (sc->sc_tdmaswba == 0) {
1286 struct ieee80211com *ic = ifp->if_l2com;
1287 struct ieee80211vap *vap =
1288 TAILQ_FIRST(&ic->ic_vaps);
1289 ath_tdma_beacon_send(sc, vap);
1290 sc->sc_tdmaswba =
1291 vap->iv_tdma->tdma_bintval;
1292 } else
1293 sc->sc_tdmaswba--;
1294 } else
1295#endif
1296 ath_beacon_proc(sc, 0);
1297 }
1298 if (status & HAL_INT_RXEOL) {
1299 /*
1300 * NB: the hardware should re-read the link when
1301 * RXE bit is written, but it doesn't work at
1302 * least on older hardware revs.
1303 */
1304 sc->sc_stats.ast_rxeol++;
1305 sc->sc_rxlink = NULL;
1306 }
1307 if (status & HAL_INT_TXURN) {
1308 sc->sc_stats.ast_txurn++;
1309 /* bump tx trigger level */
1310 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1311 }
1312 if (status & HAL_INT_RX)
1313 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1314 if (status & HAL_INT_TX)
1315 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1316 if (status & HAL_INT_BMISS) {
1317 sc->sc_stats.ast_bmiss++;
1318 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1319 }
1320 if (status & HAL_INT_MIB) {
1321 sc->sc_stats.ast_mib++;
1322 /*
1323 * Disable interrupts until we service the MIB
1324 * interrupt; otherwise it will continue to fire.
1325 */
1326 ath_hal_intrset(ah, 0);
1327 /*
1328 * Let the hal handle the event. We assume it will
1329 * clear whatever condition caused the interrupt.
1330 */
1331 ath_hal_mibevent(ah, &sc->sc_halstats);
1332 ath_hal_intrset(ah, sc->sc_imask);
1333 }
1334 if (status & HAL_INT_RXORN) {
1335 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1336 sc->sc_stats.ast_rxorn++;
1337 }
1338 }
1339}
1340
1341static void
1342ath_fatal_proc(void *arg, int pending)
1343{
1344 struct ath_softc *sc = arg;
1345 struct ifnet *ifp = sc->sc_ifp;
1346 u_int32_t *state;
1347 u_int32_t len;
1348 void *sp;
1349
1350 if_printf(ifp, "hardware error; resetting\n");
1351 /*
1352 * Fatal errors are unrecoverable. Typically these
1353 * are caused by DMA errors. Collect h/w state from
1354 * the hal so we can diagnose what's going on.
1355 */
1356 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1357 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1358 state = sp;
1359 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1360 state[0], state[1] , state[2], state[3],
1361 state[4], state[5]);
1362 }
1363 ath_reset(ifp);
1364}
1365
1366static void
1367ath_bmiss_vap(struct ieee80211vap *vap)
1368{
1369 /*
1370 * Workaround phantom bmiss interrupts by sanity-checking
1371 * the time of our last rx'd frame. If it is within the
1372 * beacon miss interval then ignore the interrupt. If it's
1373 * truly a bmiss we'll get another interrupt soon and that'll
1374 * be dispatched up for processing. Note this applies only
1375 * for h/w beacon miss events.
1376 */
1377 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1378 struct ifnet *ifp = vap->iv_ic->ic_ifp;
1379 struct ath_softc *sc = ifp->if_softc;
1380 u_int64_t lastrx = sc->sc_lastrx;
1381 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
1382 u_int bmisstimeout =
1383 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
1384
1385 DPRINTF(sc, ATH_DEBUG_BEACON,
1386 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1387 __func__, (unsigned long long) tsf,
1388 (unsigned long long)(tsf - lastrx),
1389 (unsigned long long) lastrx, bmisstimeout);
1390
1391 if (tsf - lastrx <= bmisstimeout) {
1392 sc->sc_stats.ast_bmiss_phantom++;
1393 return;
1394 }
1395 }
1396 ATH_VAP(vap)->av_bmiss(vap);
1397}
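/*
 * Example of the timeout arithmetic above (values hypothetical): with
 * iv_bmissthreshold = 10 and a beacon interval (ni_intval) of 100 TU,
 * bmisstimeout = 10 * 100 * 1024 = 1024000 us, i.e. roughly one
 * second; the 1024 factor converts TU to microseconds.  A bmiss
 * interrupt that arrives while tsf - lastrx is below this bound is
 * counted as phantom and ignored.
 */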
1398
1399static int
1400ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1401{
1402 uint32_t rsize;
1403 void *sp;
1404
 1405	if (!ath_hal_getdiagstate(ah, 32, &mask, sizeof(mask), &sp, &rsize))
1406 return 0;
1407 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1408 *hangs = *(uint32_t *)sp;
1409 return 1;
1410}
1411
1412static void
1413ath_bmiss_proc(void *arg, int pending)
1414{
1415 struct ath_softc *sc = arg;
1416 struct ifnet *ifp = sc->sc_ifp;
1417 uint32_t hangs;
1418
1419 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1420
1421 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
 1422	if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1423 ath_reset(ifp);
1424 } else
1425 ieee80211_beacon_miss(ifp->if_l2com);
1426}
1427
1428/*
 1429 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
1430 * calcs together with WME. If necessary disable the crypto
1431 * hardware and mark the 802.11 state so keys will be setup
1432 * with the MIC work done in software.
1433 */
1434static void
1435ath_settkipmic(struct ath_softc *sc)
1436{
1437 struct ifnet *ifp = sc->sc_ifp;
1438 struct ieee80211com *ic = ifp->if_l2com;
1439
1440 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1441 if (ic->ic_flags & IEEE80211_F_WME) {
1442 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1443 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1444 } else {
1445 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1446 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1447 }
1448 }
1449}
1450
1451static void
1452ath_init(void *arg)
1453{
1454 struct ath_softc *sc = (struct ath_softc *) arg;
1455 struct ifnet *ifp = sc->sc_ifp;
1456 struct ieee80211com *ic = ifp->if_l2com;
1457 struct ath_hal *ah = sc->sc_ah;
1458 HAL_STATUS status;
1459
1460 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1461 __func__, ifp->if_flags);
1462
1463 ATH_LOCK(sc);
1464 /*
1465 * Stop anything previously setup. This is safe
1466 * whether this is the first time through or not.
1467 */
1468 ath_stop_locked(ifp);
1469
1470 /*
1471 * The basic interface to setting the hardware in a good
1472 * state is ``reset''. On return the hardware is known to
1473 * be powered up and with interrupts disabled. This must
1474 * be followed by initialization of the appropriate bits
1475 * and then setup of the interrupt mask.
1476 */
1477 ath_settkipmic(sc);
1478 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
1479 if_printf(ifp, "unable to reset hardware; hal status %u\n",
1480 status);
1481 ATH_UNLOCK(sc);
1482 return;
1483 }
1484 ath_chan_change(sc, ic->ic_curchan);
1485
1486 /*
1487 * Likewise this is set during reset so update
1488 * state cached in the driver.
1489 */
1490 sc->sc_diversity = ath_hal_getdiversity(ah);
1491 sc->sc_lastlongcal = 0;
1492 sc->sc_resetcal = 1;
1493 sc->sc_lastcalreset = 0;
1494
1495 /*
1496 * Setup the hardware after reset: the key cache
1497 * is filled as needed and the receive engine is
1498 * set going. Frame transmit is handled entirely
1499 * in the frame output path; there's nothing to do
1500 * here except setup the interrupt mask.
1501 */
1502 if (ath_startrecv(sc) != 0) {
1503 if_printf(ifp, "unable to start recv logic\n");
1504 ATH_UNLOCK(sc);
1505 return;
1506 }
1507
1508 /*
1509 * Enable interrupts.
1510 */
1511 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1512 | HAL_INT_RXEOL | HAL_INT_RXORN
1513 | HAL_INT_FATAL | HAL_INT_GLOBAL;
1514 /*
1515 * Enable MIB interrupts when there are hardware phy counters.
1516 * Note we only do this (at the moment) for station mode.
1517 */
1518 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1519 sc->sc_imask |= HAL_INT_MIB;
1520
1521 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1522 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
1523 ath_hal_intrset(ah, sc->sc_imask);
1524
1525 ATH_UNLOCK(sc);
1526
1527#ifdef ATH_TX99_DIAG
1528 if (sc->sc_tx99 != NULL)
1529 sc->sc_tx99->start(sc->sc_tx99);
1530 else
1531#endif
1532 ieee80211_start_all(ic); /* start all vap's */
1533}
1534
1535static void
1536ath_stop_locked(struct ifnet *ifp)
1537{
1538 struct ath_softc *sc = ifp->if_softc;
1539 struct ath_hal *ah = sc->sc_ah;
1540
1541 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1542 __func__, sc->sc_invalid, ifp->if_flags);
1543
1544 ATH_LOCK_ASSERT(sc);
1545 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1546 /*
1547 * Shutdown the hardware and driver:
1548 * reset 802.11 state machine
1549 * turn off timers
1550 * disable interrupts
1551 * turn off the radio
1552 * clear transmit machinery
1553 * clear receive machinery
1554 * drain and release tx queues
1555 * reclaim beacon resources
1556 * power down hardware
1557 *
1558 * Note that some of this work is not possible if the
1559 * hardware is gone (invalid).
1560 */
1561#ifdef ATH_TX99_DIAG
1562 if (sc->sc_tx99 != NULL)
1563 sc->sc_tx99->stop(sc->sc_tx99);
1564#endif
1565 callout_stop(&sc->sc_wd_ch);
1566 sc->sc_wd_timer = 0;
1567 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1568 if (!sc->sc_invalid) {
1569 if (sc->sc_softled) {
1570 callout_stop(&sc->sc_ledtimer);
1571 ath_hal_gpioset(ah, sc->sc_ledpin,
1572 !sc->sc_ledon);
1573 sc->sc_blinking = 0;
1574 }
1575 ath_hal_intrset(ah, 0);
1576 }
1577 ath_draintxq(sc);
1578 if (!sc->sc_invalid) {
1579 ath_stoprecv(sc);
1580 ath_hal_phydisable(ah);
1581 } else
1582 sc->sc_rxlink = NULL;
1583 ath_beacon_free(sc); /* XXX not needed */
1584 }
1585}
1586
1587static void
1588ath_stop(struct ifnet *ifp)
1589{
1590 struct ath_softc *sc = ifp->if_softc;
1591
1592 ATH_LOCK(sc);
1593 ath_stop_locked(ifp);
1594 ATH_UNLOCK(sc);
1595}
1596
1597/*
1598 * Reset the hardware w/o losing operational state. This is
1599 * basically a more efficient way of doing ath_stop, ath_init,
1600 * followed by state transitions to the current 802.11
1601 * operational state. Used to recover from various errors and
1602 * to reset or reload hardware state.
1603 */
1604static int
1605ath_reset(struct ifnet *ifp)
1606{
1607 struct ath_softc *sc = ifp->if_softc;
1608 struct ieee80211com *ic = ifp->if_l2com;
1609 struct ath_hal *ah = sc->sc_ah;
1610 HAL_STATUS status;
1611
1612 ath_hal_intrset(ah, 0); /* disable interrupts */
1613 ath_draintxq(sc); /* stop xmit side */
1614 ath_stoprecv(sc); /* stop recv side */
1615 ath_settkipmic(sc); /* configure TKIP MIC handling */
1616 /* NB: indicate channel change so we do a full reset */
1617 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
1618 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
1619 __func__, status);
1620 sc->sc_diversity = ath_hal_getdiversity(ah);
1621 if (ath_startrecv(sc) != 0) /* restart recv */
1622 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
1623 /*
1624 * We may be doing a reset in response to an ioctl
1625 * that changes the channel so update any state that
1626 * might change as a result.
1627 */
1628 ath_chan_change(sc, ic->ic_curchan);
1629 if (sc->sc_beacons) {
1629#ifdef ATH_SUPPORT_TDMA
1630#ifdef IEEE80211_SUPPORT_TDMA
1630 if (sc->sc_tdma)
1631 ath_tdma_config(sc, NULL);
1632 else
1633#endif
1634 ath_beacon_config(sc, NULL); /* restart beacons */
1635 }
1636 ath_hal_intrset(ah, sc->sc_imask);
1637
1638 ath_start(ifp); /* restart xmit */
1639 return 0;
1640}
1641
1642static int
1643ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
1644{
1645 struct ieee80211com *ic = vap->iv_ic;
1646 struct ifnet *ifp = ic->ic_ifp;
1647 struct ath_softc *sc = ifp->if_softc;
1648 struct ath_hal *ah = sc->sc_ah;
1649
1650 switch (cmd) {
1651 case IEEE80211_IOC_TXPOWER:
1652 /*
1653 * If per-packet TPC is enabled, then we have nothing
1654 * to do; otherwise we need to force the global limit.
1655 * All this can happen directly; no need to reset.
1656 */
1657 if (!ath_hal_gettpc(ah))
1658 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1659 return 0;
1660 }
1661 return ath_reset(ifp);
1662}
1663
1664static int
1665ath_ff_always(struct ath_txq *txq, struct ath_buf *bf)
1666{
1667 return 0;
1668}
1669
1670#if 0
1671static int
1672ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
1673{
1674 return (txq->axq_curage - bf->bf_age) < ATH_FF_STAGEMAX;
1675}
1676#endif
1677
1678/*
1679 * Flush FF staging queue.
1680 */
1681static void
1682ath_ff_stageq_flush(struct ath_softc *sc, struct ath_txq *txq,
1683 int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
1684{
1685 struct ath_buf *bf;
1686 struct ieee80211_node *ni;
1687 int pktlen, pri;
1688
1689 for (;;) {
1690 ATH_TXQ_LOCK(txq);
1691 /*
1692 * Go from the back (oldest) to front so we can
1693 * stop early based on the age of the entry.
1694 */
1695 bf = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
1696 if (bf == NULL || ath_ff_flushdonetest(txq, bf)) {
1697 ATH_TXQ_UNLOCK(txq);
1698 break;
1699 }
1700
1701 ni = bf->bf_node;
1702 pri = M_WME_GETAC(bf->bf_m);
1703 KASSERT(ATH_NODE(ni)->an_ff_buf[pri],
1704 ("no bf on staging queue %p", bf));
1705 ATH_NODE(ni)->an_ff_buf[pri] = NULL;
1706 TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
1707
1708 ATH_TXQ_UNLOCK(txq);
1709
1710 DPRINTF(sc, ATH_DEBUG_FF, "%s: flush frame, age %u\n",
1711 __func__, bf->bf_age);
1712
1713 sc->sc_stats.ast_ff_flush++;
1714
1715 /* encap and xmit */
1716 bf->bf_m = ieee80211_encap(ni, bf->bf_m);
1717 if (bf->bf_m == NULL) {
1718 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1719 "%s: discard, encapsulation failure\n",
1720 __func__);
1721 sc->sc_stats.ast_tx_encap++;
1722 goto bad;
1723 }
1724 pktlen = bf->bf_m->m_pkthdr.len; /* NB: don't reference below */
1725 if (ath_tx_start(sc, ni, bf, bf->bf_m) == 0) {
1726#if 0 /*XXX*/
1727 ifp->if_opackets++;
1728#endif
1729 continue;
1730 }
1731 bad:
1732 if (ni != NULL)
1733 ieee80211_free_node(ni);
1734 bf->bf_node = NULL;
1735 if (bf->bf_m != NULL) {
1736 m_freem(bf->bf_m);
1737 bf->bf_m = NULL;
1738 }
1739
1740 ATH_TXBUF_LOCK(sc);
1741 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1742 ATH_TXBUF_UNLOCK(sc);
1743 }
1744}
1745
1746static __inline u_int32_t
1747ath_ff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct mbuf *m)
1748{
1749 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1750 u_int32_t framelen;
1751 struct ath_buf *bf;
1752
1753 /*
1754 * Approximate the frame length to be transmitted. A swag to add
1755 * the following maximal values to the skb payload:
1756 * - 32: 802.11 encap + CRC
1757 * - 24: encryption overhead (if wep bit)
1758 * - 4 + 6: fast-frame header and padding
1759 * - 16: 2 LLC FF tunnel headers
1760 * - 14: 1 802.3 FF tunnel header (skb already accounts for 2nd)
1761 */
1762 framelen = m->m_pkthdr.len + 32 + 4 + 6 + 16 + 14;
1763 if (ic->ic_flags & IEEE80211_F_PRIVACY)
1764 framelen += 24;
1765 bf = an->an_ff_buf[M_WME_GETAC(m)];
1766 if (bf != NULL)
1767 framelen += bf->bf_m->m_pkthdr.len;
1768 return ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen,
1769 sc->sc_lastdatarix, AH_FALSE);
1770}
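/*
 * Example of the estimate above: a 1500 byte payload with privacy
 * enabled and no frame staged for this AC gives
 * 1500 + 32 + 4 + 6 + 16 + 14 + 24 = 1596 bytes handed to
 * ath_hal_computetxtime() together with the last data rate index.
 */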
1771
1772/*
1773 * Determine if a data frame may be aggregated via ff tunnelling.
1774 * Note the caller is responsible for checking if the destination
1775 * supports fast frames.
1776 *
1777 * NB: allowing EAPOL frames to be aggregated with other unicast traffic.
1778 * Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
1779 * be aggregated with other types of frames when encryption is on?
1780 *
1781 * NB: assumes lock on an_ff_buf effectively held by txq lock mechanism.
1782 */
1783static __inline int
1784ath_ff_can_aggregate(struct ath_softc *sc,
1785 struct ath_node *an, struct mbuf *m, int *flushq)
1786{
1787 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1788 struct ath_txq *txq;
1789 u_int32_t txoplimit;
1790 u_int pri;
1791
1792 *flushq = 0;
1793
1794 /*
1795 * If there is no frame to combine with and the txq has
1796 * fewer frames than the minimum required; then do not
1797 * attempt to aggregate this frame.
1798 */
1799 pri = M_WME_GETAC(m);
1800 txq = sc->sc_ac2q[pri];
1801 if (an->an_ff_buf[pri] == NULL && txq->axq_depth < sc->sc_fftxqmin)
1802 return 0;
1803 /*
1804 * When not in station mode never aggregate a multicast
1805 * frame; this insures, for example, that a combined frame
1806 * does not require multiple encryption keys when using
1807 * 802.1x/WPA.
1808 */
1809 if (ic->ic_opmode != IEEE80211_M_STA &&
1810 ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
1811 return 0;
1812 /*
1813 * Consult the max bursting interval to insure a combined
1814 * frame fits within the TxOp window.
1815 */
1816 txoplimit = IEEE80211_TXOP_TO_US(
1817 ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
1818 if (txoplimit != 0 && ath_ff_approx_txtime(sc, an, m) > txoplimit) {
1819 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1820 "%s: FF TxOp violation\n", __func__);
1821 if (an->an_ff_buf[pri] != NULL)
1822 *flushq = 1;
1823 return 0;
1824 }
1825 return 1; /* try to aggregate */
1826}
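/*
 * Example of the TxOp check above: txop limits are expressed in
 * 32 microsecond units, so IEEE80211_TXOP_TO_US() of the common
 * AC_VI default of 94 yields 3008 us.  A combined frame whose
 * estimated airtime exceeds that is not aggregated, and any staged
 * partner frame is flushed instead.
 */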
1827
1828/*
1829 * Check if the supplied frame can be partnered with an existing
1830 * or pending frame. Return a reference to any frame that should be
1831 * sent on return; otherwise return NULL.
1832 */
1833static struct mbuf *
1834ath_ff_check(struct ath_softc *sc, struct ath_txq *txq,
1835 struct ath_buf *bf, struct mbuf *m, struct ieee80211_node *ni)
1836{
1837 struct ath_node *an = ATH_NODE(ni);
1838 struct ath_buf *bfstaged;
1839 int ff_flush, pri;
1840
1841 /*
1842 * Check if the supplied frame can be aggregated.
1843 *
1844 * NB: we use the txq lock to protect references to
1845 * an->an_ff_txbuf in ath_ff_can_aggregate().
1846 */
1847 ATH_TXQ_LOCK(txq);
1848 pri = M_WME_GETAC(m);
1849 if (ath_ff_can_aggregate(sc, an, m, &ff_flush)) {
1850 bfstaged = an->an_ff_buf[pri]; /* NB: reuse outer declaration */
1851 if (bfstaged != NULL) {
1852 /*
1853 * A frame is available for partnering; remove
1854 * it, chain it to this one, and encapsulate.
1855 */
1856 an->an_ff_buf[pri] = NULL;
1857 TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1858 ATH_TXQ_UNLOCK(txq);
1859
1860 /*
1861 * Chain mbufs and add FF magic.
1862 */
1863 DPRINTF(sc, ATH_DEBUG_FF,
1864 "[%s] aggregate fast-frame, age %u\n",
1865 ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1866 m->m_nextpkt = NULL;
1867 bfstaged->bf_m->m_nextpkt = m;
1868 m = bfstaged->bf_m;
1869 bfstaged->bf_m = NULL;
1870 m->m_flags |= M_FF;
1871 /*
1872 * Release the node reference held while
1873 * the packet sat on an_ff_buf[]
1874 */
1875 bfstaged->bf_node = NULL;
1876 ieee80211_free_node(ni);
1877
1878 /*
1879 * Return bfstaged to the free list.
1880 */
1881 ATH_TXBUF_LOCK(sc);
1882 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bfstaged, bf_list);
1883 ATH_TXBUF_UNLOCK(sc);
1884
1885 return m; /* ready to go */
1886 } else {
1887 /*
1888 * No frame available, queue this frame to wait
1889 * for a partner. Note that we hold the buffer
1890 * and a reference to the node; we need the
1891 * buffer in particular so we're certain we
1892 * can flush the frame at a later time.
1893 */
1894 DPRINTF(sc, ATH_DEBUG_FF,
1895 "[%s] stage fast-frame, age %u\n",
1896 ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1897
1898 bf->bf_m = m;
1899 bf->bf_node = ni; /* NB: held reference */
1900 bf->bf_age = txq->axq_curage;
1901 an->an_ff_buf[pri] = bf;
1902 TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
1903 ATH_TXQ_UNLOCK(txq);
1904
1905 return NULL; /* consumed */
1906 }
1907 }
1908 /*
1909 * The frame could not be aggregated; it needs to be returned
1910 * to the caller for immediate transmission. In addition
1911 * we check if we should first flush a frame from the
1912 * staging queue before sending this one.
1913 *
1914 * NB: ath_ff_can_aggregate only marks ff_flush if a frame
1915 * is present to flush.
1916 */
1917 if (ff_flush) {
1918 int pktlen;
1919
1920 bfstaged = an->an_ff_buf[pri];
1921 an->an_ff_buf[pri] = NULL;
1922 TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1923 ATH_TXQ_UNLOCK(txq);
1924
1925 DPRINTF(sc, ATH_DEBUG_FF, "[%s] flush staged frame\n",
1926 ether_sprintf(an->an_node.ni_macaddr));
1927
1928 /* encap and xmit */
1929 bfstaged->bf_m = ieee80211_encap(ni, bfstaged->bf_m);
1930 if (bfstaged->bf_m == NULL) {
1931 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1932 "%s: discard, encap failure\n", __func__);
1933 sc->sc_stats.ast_tx_encap++;
1934 goto ff_flushbad;
1935 }
1936 pktlen = bfstaged->bf_m->m_pkthdr.len;
1937 if (ath_tx_start(sc, ni, bfstaged, bfstaged->bf_m)) {
1938 DPRINTF(sc, ATH_DEBUG_XMIT,
1939 "%s: discard, xmit failure\n", __func__);
1940 ff_flushbad:
1941 /*
1942 * Unable to transmit frame that was on the staging
1943 * queue. Reclaim the node reference and other
1944 * resources.
1945 */
1946 if (ni != NULL)
1947 ieee80211_free_node(ni);
1948 bfstaged->bf_node = NULL;
1949 if (bfstaged->bf_m != NULL) {
1950 m_freem(bfstaged->bf_m);
1951 bfstaged->bf_m = NULL;
1952 }
1953
1954 ATH_TXBUF_LOCK(sc);
1955 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bfstaged, bf_list);
1956 ATH_TXBUF_UNLOCK(sc);
1957 } else {
1958#if 0
1959 ifp->if_opackets++;
1960#endif
1961 }
1962 } else {
1963 if (an->an_ff_buf[pri] != NULL) {
1964 /*
1965 * XXX: out-of-order condition only occurs for AP
1966 * mode and multicast. There may be no valid way
1967 * to get this condition.
1968 */
1969 DPRINTF(sc, ATH_DEBUG_FF, "[%s] out-of-order frame\n",
1970 ether_sprintf(an->an_node.ni_macaddr));
1971 /* XXX stat */
1972 }
1973 ATH_TXQ_UNLOCK(txq);
1974 }
1975 return m;
1976}
1977
1978static struct ath_buf *
1979_ath_getbuf_locked(struct ath_softc *sc)
1980{
1981 struct ath_buf *bf;
1982
1983 ATH_TXBUF_LOCK_ASSERT(sc);
1984
1985 bf = STAILQ_FIRST(&sc->sc_txbuf);
1986 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
1987 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1988 else
1989 bf = NULL;
1990 if (bf == NULL) {
1991 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
1992 STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
1993 "out of xmit buffers" : "xmit buffer busy");
1994 sc->sc_stats.ast_tx_nobuf++;
1995 }
1996 return bf;
1997}
1998
1999static struct ath_buf *
2000ath_getbuf(struct ath_softc *sc)
2001{
2002 struct ath_buf *bf;
2003
2004 ATH_TXBUF_LOCK(sc);
2005 bf = _ath_getbuf_locked(sc);
2006 if (bf == NULL) {
2007 struct ifnet *ifp = sc->sc_ifp;
2008
2009 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2010 sc->sc_stats.ast_tx_qstop++;
2011 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2012 }
2013 ATH_TXBUF_UNLOCK(sc);
2014 return bf;
2015}
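#if 0
	/*
	 * Typical caller pattern (editor's sketch, not driver code):
	 * a NULL return means the free list is empty or the head
	 * buffer is still busy; ath_getbuf has already set
	 * IFF_DRV_OACTIVE, so the caller simply stops draining
	 * if_snd and retries once completed frames return buffers.
	 */
	bf = ath_getbuf(sc);
	if (bf == NULL)
		break;			/* cf. ath_start() below */
#endif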
2016
2017/*
2018 * Cleanup driver resources when we run out of buffers
2019 * while processing fragments; return the tx buffers
2020 * allocated and drop node references.
2021 */
2022static void
2023ath_txfrag_cleanup(struct ath_softc *sc,
2024 ath_bufhead *frags, struct ieee80211_node *ni)
2025{
2026 struct ath_buf *bf, *next;
2027
2028 ATH_TXBUF_LOCK_ASSERT(sc);
2029
2030 STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
2031 /* NB: bf assumed clean */
2032 STAILQ_REMOVE_HEAD(frags, bf_list);
2033 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2034 ieee80211_node_decref(ni);
2035 }
2036}
2037
2038/*
2039 * Setup xmit of a fragmented frame. Allocate a buffer
2040 * for each frag and bump the node reference count to
2041 * reflect the held reference to be setup by ath_tx_start.
2042 */
2043static int
2044ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
2045 struct mbuf *m0, struct ieee80211_node *ni)
2046{
2047 struct mbuf *m;
2048 struct ath_buf *bf;
2049
2050 ATH_TXBUF_LOCK(sc);
2051 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
2052 bf = _ath_getbuf_locked(sc);
2053 if (bf == NULL) { /* out of buffers, cleanup */
2054 ath_txfrag_cleanup(sc, frags, ni);
2055 break;
2056 }
2057 ieee80211_node_incref(ni);
2058 STAILQ_INSERT_TAIL(frags, bf, bf_list);
2059 }
2060 ATH_TXBUF_UNLOCK(sc);
2061
2062 return !STAILQ_EMPTY(frags);
2063}
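/*
 * Editor's note: fragment chains arrive linked through m_nextpkt,
 * e.g.
 *
 *	m0 -> frag1 -> frag2 -> NULL	(via m_nextpkt)
 *
 * The loop above reserves one ath_buf per fragment after the first
 * (m0 uses the buffer the caller already holds) and bumps the node
 * refcount once per fragment so each ath_tx_start owns a reference.
 */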
2064
2065static void
2066ath_start(struct ifnet *ifp)
2067{
2068 struct ath_softc *sc = ifp->if_softc;
2069 struct ieee80211_node *ni;
2070 struct ath_buf *bf;
2071 struct mbuf *m, *next;
2072 struct ath_txq *txq;
2073 ath_bufhead frags;
2074 int pri;
2075
2076 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
2077 return;
2078 for (;;) {
2079 /*
2080 * Grab a TX buffer and associated resources.
2081 */
2082 bf = ath_getbuf(sc);
2083 if (bf == NULL)
2084 break;
2085
2086 IFQ_DEQUEUE(&ifp->if_snd, m);
2087 if (m == NULL) {
2088 ATH_TXBUF_LOCK(sc);
2089 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2090 ATH_TXBUF_UNLOCK(sc);
2091 break;
2092 }
2093 STAILQ_INIT(&frags);
2094 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2095 pri = M_WME_GETAC(m);
2096 txq = sc->sc_ac2q[pri];
2097 if (IEEE80211_ATH_CAP(ni->ni_vap, ni, IEEE80211_NODE_FF)) {
2098 /*
2099 * Check queue length; if too deep drop this
2100 * frame (tail drop considered good).
2101 */
2102 if (txq->axq_depth >= sc->sc_fftxqmax) {
2103 DPRINTF(sc, ATH_DEBUG_FF,
2104 "[%s] tail drop on q %u depth %u\n",
2105 ether_sprintf(ni->ni_macaddr),
2106 txq->axq_qnum, txq->axq_depth);
2107 sc->sc_stats.ast_tx_qfull++;
2108 m_freem(m);
2109 goto reclaim;
2110 }
2111 m = ath_ff_check(sc, txq, bf, m, ni);
2112 if (m == NULL) {
2113 /* NB: ni ref & bf held on stageq */
2114 continue;
2115 }
2116 }
2117 ifp->if_opackets++;
2118 /*
2119 * Encapsulate the packet in prep for transmission.
2120 */
2121 m = ieee80211_encap(ni, m);
2122 if (m == NULL) {
2123 DPRINTF(sc, ATH_DEBUG_XMIT,
2124 "%s: encapsulation failure\n", __func__);
2125 sc->sc_stats.ast_tx_encap++;
2126 goto bad;
2127 }
2128 /*
2129 * Check for fragmentation. If this frame
2130 * has been broken up verify we have enough
2131 * buffers to send all the fragments so all
2132 * go out or none...
2133 */
2134 if ((m->m_flags & M_FRAG) &&
2135 !ath_txfrag_setup(sc, &frags, m, ni)) {
2136 DPRINTF(sc, ATH_DEBUG_XMIT,
2137 "%s: out of txfrag buffers\n", __func__);
2138 sc->sc_stats.ast_tx_nofrag++;
2139 ath_freetx(m);
2140 goto bad;
2141 }
2142 nextfrag:
2143 /*
2144 * Pass the frame to the h/w for transmission.
2145 * Fragmented frames have each frag chained together
2146 * with m_nextpkt. We know there are sufficient ath_buf's
2147 * to send all the frags because of work done by
2148 * ath_txfrag_setup. We leave m_nextpkt set while
2149 * calling ath_tx_start so it can use it to extend the
2150 * tx duration to cover the subsequent frag and
2151 * so it can reclaim all the mbufs in case of an error;
2152 * ath_tx_start clears m_nextpkt once it commits to
2153 * handing the frame to the hardware.
2154 */
2155 next = m->m_nextpkt;
2156 if (ath_tx_start(sc, ni, bf, m)) {
2157 bad:
2158 ifp->if_oerrors++;
2159 reclaim:
2160 bf->bf_m = NULL;
2161 bf->bf_node = NULL;
2162 ATH_TXBUF_LOCK(sc);
2163 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2164 ath_txfrag_cleanup(sc, &frags, ni);
2165 ATH_TXBUF_UNLOCK(sc);
2166 if (ni != NULL)
2167 ieee80211_free_node(ni);
2168 continue;
2169 }
2170 if (next != NULL) {
2171 /*
2172 * Beware of state changing between frags.
2173 * XXX check sta power-save state?
2174 */
2175 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
2176 DPRINTF(sc, ATH_DEBUG_XMIT,
2177 "%s: flush fragmented packet, state %s\n",
2178 __func__,
2179 ieee80211_state_name[ni->ni_vap->iv_state]);
2180 ath_freetx(next);
2181 goto reclaim;
2182 }
2183 m = next;
2184 bf = STAILQ_FIRST(&frags);
2185 KASSERT(bf != NULL, ("no buf for txfrag"));
2186 STAILQ_REMOVE_HEAD(&frags, bf_list);
2187 goto nextfrag;
2188 }
2189
2190 sc->sc_wd_timer = 5;
2191#if 0
2192 /*
2193 * Flush stale frames from the fast-frame staging queue.
2194 */
2195 if (ic->ic_opmode != IEEE80211_M_STA)
2196 ath_ff_stageq_flush(sc, txq, ath_ff_ageflushtestdone);
2197#endif
2198 }
2199}
2200
2201static int
2202ath_media_change(struct ifnet *ifp)
2203{
2204 int error = ieee80211_media_change(ifp);
2205 /* NB: only the fixed rate can change and that doesn't need a reset */
2206 return (error == ENETRESET ? 0 : error);
2207}
2208
2209#ifdef ATH_DEBUG
2210static void
2211ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
2212 const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
2213{
2214 static const char *ciphers[] = {
2215 "WEP",
2216 "AES-OCB",
2217 "AES-CCM",
2218 "CKIP",
2219 "TKIP",
2220 "CLR",
2221 };
2222 int i, n;
2223
2224 printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
2225 for (i = 0, n = hk->kv_len; i < n; i++)
2226 printf("%02x", hk->kv_val[i]);
2227 printf(" mac %s", ether_sprintf(mac));
2228 if (hk->kv_type == HAL_CIPHER_TKIP) {
2229 printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
2230 for (i = 0; i < sizeof(hk->kv_mic); i++)
2231 printf("%02x", hk->kv_mic[i]);
2232 if (!sc->sc_splitmic) {
2233 printf(" txmic ");
2234 for (i = 0; i < sizeof(hk->kv_txmic); i++)
2235 printf("%02x", hk->kv_txmic[i]);
2236 }
2237 }
2238 printf("\n");
2239}
2240#endif
2241
2242/*
2243 * Set a TKIP key into the hardware. This handles the
2244 * potential distribution of key state to multiple key
2245 * cache slots for TKIP.
2246 */
2247static int
2248ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
2249 HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
2250{
2251#define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
2252 static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
2253 struct ath_hal *ah = sc->sc_ah;
2254
2255 KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
2256 ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
2257 if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
2258 if (sc->sc_splitmic) {
2259 /*
2260 * TX key goes at first index, RX key at the rx index.
2261 * The hal handles the MIC keys at index+64.
2262 */
2263 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
2264 KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
2265 if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
2266 return 0;
2267
2268 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
2269 KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
2270 /* XXX delete tx key on failure? */
2271 return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
2272 } else {
2273 /*
2274 * Room for both TX+RX MIC keys in one key cache
2275 * slot, just set key at the first index; the hal
2276 * will handle the rest.
2277 */
2278 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
2279 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
2280 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2281 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2282 }
2283 } else if (k->wk_flags & IEEE80211_KEY_XMIT) {
2284 if (sc->sc_splitmic) {
2285 /*
2286 * NB: must pass MIC key in expected location when
2287 * the keycache only holds one MIC key per entry.
2288 */
2289 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic));
2290 } else
2291 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
2292 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2293 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2294 } else if (k->wk_flags & IEEE80211_KEY_RECV) {
2295 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
2296 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2297 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2298 }
2299 return 0;
2300#undef IEEE80211_KEY_XR
2301}
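/*
 * Editor's sketch of the key cache layout handled above for
 * split-MIC (sc_splitmic) parts:
 *
 *	slot i		TX key		slot i+64	TX MIC
 *	slot i+32	RX key		slot i+32+64	RX MIC
 *
 * Non-split parts keep a single entry at slot i holding the key
 * plus both MICs, with the hal managing the MIC state at i+64.
 */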
2302
2303/*
2304 * Set a net80211 key into the hardware. This handles the
2305 * potential distribution of key state to multiple key
2306 * cache slots for TKIP with hardware MIC support.
2307 */
2308static int
2309ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
2310 struct ieee80211_node *bss)
2311{
2312#define N(a) (sizeof(a)/sizeof(a[0]))
2313 static const u_int8_t ciphermap[] = {
2314 HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
2315 HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
2316 HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
2317 HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
2318 (u_int8_t) -1, /* 4 is not allocated */
2319 HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
2320 HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
2321 };
2322 struct ath_hal *ah = sc->sc_ah;
2323 const struct ieee80211_cipher *cip = k->wk_cipher;
2324 u_int8_t gmac[IEEE80211_ADDR_LEN];
2325 const u_int8_t *mac;
2326 HAL_KEYVAL hk;
2327
2328 memset(&hk, 0, sizeof(hk));
2329 /*
2330 * Software crypto uses a "clear key" so non-crypto
2331 * state kept in the key cache is maintained and
2332 * so that rx frames have an entry to match.
2333 */
2334 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2335 KASSERT(cip->ic_cipher < N(ciphermap),
2336 ("invalid cipher type %u", cip->ic_cipher));
2337 hk.kv_type = ciphermap[cip->ic_cipher];
2338 hk.kv_len = k->wk_keylen;
2339 memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
2340 } else
2341 hk.kv_type = HAL_CIPHER_CLR;
2342
2343 if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
2344 /*
2345 * Group keys on hardware that supports multicast frame
2346 * key search use a mac that is the sender's address with
2347 * the high bit set instead of the app-specified address.
2348 */
2349 IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
2350 gmac[0] |= 0x80;
2351 mac = gmac;
2352 } else
2353 mac = k->wk_macaddr;
2354
2355 if (hk.kv_type == HAL_CIPHER_TKIP &&
2356 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2357 return ath_keyset_tkip(sc, k, &hk, mac);
2358 } else {
2359 KEYPRINTF(sc, k->wk_keyix, &hk, mac);
2360 return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
2361 }
2362#undef N
2363}
2364
2365/*
2366 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2367 * each key, one for decrypt/encrypt and the other for the MIC.
2368 */
2369static u_int16_t
2370key_alloc_2pair(struct ath_softc *sc,
2371 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2372{
2373#define N(a) (sizeof(a)/sizeof(a[0]))
2374 u_int i, keyix;
2375
2376 KASSERT(sc->sc_splitmic, ("key cache !split"));
2377 /* XXX could optimize */
2378 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2379 u_int8_t b = sc->sc_keymap[i];
2380 if (b != 0xff) {
2381 /*
2382 * One or more slots in this byte are free.
2383 */
2384 keyix = i*NBBY;
2385 while (b & 1) {
2386 again:
2387 keyix++;
2388 b >>= 1;
2389 }
2390 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
2391 if (isset(sc->sc_keymap, keyix+32) ||
2392 isset(sc->sc_keymap, keyix+64) ||
2393 isset(sc->sc_keymap, keyix+32+64)) {
2394 /* full pair unavailable */
2395 /* XXX statistic */
2396 if (keyix == (i+1)*NBBY) {
2397 /* no slots were appropriate, advance */
2398 continue;
2399 }
2400 goto again;
2401 }
2402 setbit(sc->sc_keymap, keyix);
2403 setbit(sc->sc_keymap, keyix+64);
2404 setbit(sc->sc_keymap, keyix+32);
2405 setbit(sc->sc_keymap, keyix+32+64);
2406 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2407 "%s: key pair %u,%u %u,%u\n",
2408 __func__, keyix, keyix+64,
2409 keyix+32, keyix+32+64);
2410 *txkeyix = keyix;
2411 *rxkeyix = keyix+32;
2412 return 1;
2413 }
2414 }
2415 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2416 return 0;
2417#undef N
2418}
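/*
 * Worked example (editor's sketch, hypothetical keymap contents):
 * if sc_keymap[0] == 0x07 the scan above starts at keyix 0, skips
 * the three set bits and lands on keyix 3.  The pair is usable
 * only if bits 3+32, 3+64 and 3+32+64 are also clear; all four
 * bits are then set together so the 2-pair is reserved atomically
 * with respect to subsequent allocations.
 */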
2419
2420/*
2421 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2422 * each key, one for decrypt/encrypt and the other for the MIC.
2423 */
2424static u_int16_t
2425key_alloc_pair(struct ath_softc *sc,
2426 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2427{
2428#define N(a) (sizeof(a)/sizeof(a[0]))
2429 u_int i, keyix;
2430
2431 KASSERT(!sc->sc_splitmic, ("key cache split"));
2432 /* XXX could optimize */
2433 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2434 u_int8_t b = sc->sc_keymap[i];
2435 if (b != 0xff) {
2436 /*
2437 * One or more slots in this byte are free.
2438 */
2439 keyix = i*NBBY;
2440 while (b & 1) {
2441 again:
2442 keyix++;
2443 b >>= 1;
2444 }
2445 if (isset(sc->sc_keymap, keyix+64)) {
2446 /* full pair unavailable */
2447 /* XXX statistic */
2448 if (keyix == (i+1)*NBBY) {
2449 /* no slots were appropriate, advance */
2450 continue;
2451 }
2452 goto again;
2453 }
2454 setbit(sc->sc_keymap, keyix);
2455 setbit(sc->sc_keymap, keyix+64);
2456 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2457 "%s: key pair %u,%u\n",
2458 __func__, keyix, keyix+64);
2459 *txkeyix = *rxkeyix = keyix;
2460 return 1;
2461 }
2462 }
2463 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2464 return 0;
2465#undef N
2466}
2467
2468/*
2469 * Allocate a single key cache slot.
2470 */
2471static int
2472key_alloc_single(struct ath_softc *sc,
2473 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2474{
2475#define N(a) (sizeof(a)/sizeof(a[0]))
2476 u_int i, keyix;
2477
2478 /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
2479 for (i = 0; i < N(sc->sc_keymap); i++) {
2480 u_int8_t b = sc->sc_keymap[i];
2481 if (b != 0xff) {
2482 /*
2483 * One or more slots are free.
2484 */
2485 keyix = i*NBBY;
2486 while (b & 1)
2487 keyix++, b >>= 1;
2488 setbit(sc->sc_keymap, keyix);
2489 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
2490 __func__, keyix);
2491 *txkeyix = *rxkeyix = keyix;
2492 return 1;
2493 }
2494 }
2495 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
2496 return 0;
2497#undef N
2498}
2499
2500/*
2501 * Allocate one or more key cache slots for a unicast key. The
2502 * key itself is needed only to identify the cipher. For hardware
2503 * TKIP with split cipher+MIC keys we allocate two key cache slot
2504 * pairs so that we can setup separate TX and RX MIC keys. Note
2505 * that the MIC key for a TKIP key at slot i is assumed by the
2506 * hardware to be at slot i+64. This limits TKIP keys to the first
2507 * 64 entries.
2508 */
2509static int
2510ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
2511 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
2512{
2513 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2514
2515 /*
2516 * Group key allocation must be handled specially for
2517 * parts that do not support multicast key cache search
2518 * functionality. For those parts the key id must match
2519 * the h/w key index so lookups find the right key. On
2520 * parts w/ the key search facility we install the sender's
2521 * mac address (with the high bit set) and let the hardware
2522 * find the key w/o using the key id. This is preferred as
2523 * it permits us to support multiple users for adhoc and/or
2524 * multi-station operation.
2525 */
2526 if (k->wk_keyix != IEEE80211_KEYIX_NONE || /* global key */
2527 ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey)) {
2528 if (!(&vap->iv_nw_keys[0] <= k &&
2529 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
2530 /* should not happen */
2531 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2532 "%s: bogus group key\n", __func__);
2533 return 0;
2534 }
2535 /*
2536 * XXX we pre-allocate the global keys so we
2537 * have no way to check if they've already been allocated.
2538 */
2539 *keyix = *rxkeyix = k - vap->iv_nw_keys;
2540 return 1;
2541 }
2542
2543 /*
2544 * We allocate two pair for TKIP when using the h/w to do
2545 * the MIC. For everything else, including software crypto,
2546 * we allocate a single entry. Note that s/w crypto requires
2547 * a pass-through slot on the 5211 and 5212. The 5210 does
2548 * not support pass-through cache entries and we map all
2549 * those requests to slot 0.
2550 */
2551 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
2552 return key_alloc_single(sc, keyix, rxkeyix);
2553 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
2554 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2555 if (sc->sc_splitmic)
2556 return key_alloc_2pair(sc, keyix, rxkeyix);
2557 else
2558 return key_alloc_pair(sc, keyix, rxkeyix);
2559 } else {
2560 return key_alloc_single(sc, keyix, rxkeyix);
2561 }
2562}
2563
2564/*
2565 * Delete an entry in the key cache allocated by ath_key_alloc.
2566 */
2567static int
2568ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
2569{
2570 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2571 struct ath_hal *ah = sc->sc_ah;
2572 const struct ieee80211_cipher *cip = k->wk_cipher;
2573 u_int keyix = k->wk_keyix;
2574
2575 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
2576
2577 ath_hal_keyreset(ah, keyix);
2578 /*
2579 * Handle split tx/rx keying required for TKIP with h/w MIC.
2580 */
2581 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2582 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
2583 ath_hal_keyreset(ah, keyix+32); /* RX key */
2584 if (keyix >= IEEE80211_WEP_NKID) {
2585 /*
2586 * Don't touch keymap entries for global keys so
2587 * they are never considered for dynamic allocation.
2588 */
2589 clrbit(sc->sc_keymap, keyix);
2590 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2591 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2592 clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */
2593 if (sc->sc_splitmic) {
2594 /* +32 for RX key, +32+64 for RX key MIC */
2595 clrbit(sc->sc_keymap, keyix+32);
2596 clrbit(sc->sc_keymap, keyix+32+64);
2597 }
2598 }
2599 }
2600 return 1;
2601}
2602
2603/*
2604 * Set the key cache contents for the specified key. Key cache
2605 * slot(s) must already have been allocated by ath_key_alloc.
2606 */
2607static int
2608ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
2609 const u_int8_t mac[IEEE80211_ADDR_LEN])
2610{
2611 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2612
2613 return ath_keyset(sc, k, vap->iv_bss);
2614}
2615
2616/*
2617 * Block/unblock tx+rx processing while a key change is done.
2618 * We assume the caller serializes key management operations
2619 * so we only need to worry about synchronization with other
2620 * uses that originate in the driver.
2621 */
2622static void
2623ath_key_update_begin(struct ieee80211vap *vap)
2624{
2625 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2626 struct ath_softc *sc = ifp->if_softc;
2627
2628 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2629 taskqueue_block(sc->sc_tq);
2630 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2631}
2632
2633static void
2634ath_key_update_end(struct ieee80211vap *vap)
2635{
2636 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2637 struct ath_softc *sc = ifp->if_softc;
2638
2639 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2640 IF_UNLOCK(&ifp->if_snd);
2641 taskqueue_unblock(sc->sc_tq);
2642}
2643
2644/*
2645 * Calculate the receive filter according to the
2646 * operating mode and state:
2647 *
2648 * o always accept unicast, broadcast, and multicast traffic
2649 * o accept PHY error frames when hardware doesn't have MIB support
2650 * to count and we need them for ANI (sta mode only until recently)
2651 * and we are not scanning (ANI is disabled)
2652 * NB: older hals add rx filter bits out of sight and we need to
2653 * blindly preserve them
2654 * o probe request frames are accepted only when operating in
2655 * hostap, adhoc, or monitor modes
2656 * o enable promiscuous mode
2657 * - when in monitor mode
2658 * - if interface marked PROMISC (assumes bridge setting is filtered)
2659 * o accept beacons:
2660 * - when operating in station mode for collecting rssi data when
2661 * the station is otherwise quiet, or
2662 * - when operating in adhoc mode so the 802.11 layer creates
2663 * node table entries for peers,
2664 * - when scanning
2665 * - when doing s/w beacon miss (e.g. for ap+sta)
2666 * - when operating in ap mode in 11g to detect overlapping bss that
2667 * require protection
2668 * o accept control frames:
2669 * - when in monitor mode
2670 * XXX BAR frames for 11n
2671 * XXX HT protection for 11n
2672 */
2673static u_int32_t
2674ath_calcrxfilter(struct ath_softc *sc)
2675{
2676 struct ifnet *ifp = sc->sc_ifp;
2677 struct ieee80211com *ic = ifp->if_l2com;
2678 u_int32_t rfilt;
2679
2680 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2681 if (!sc->sc_needmib && !sc->sc_scanning)
2682 rfilt |= HAL_RX_FILTER_PHYERR;
2683 if (ic->ic_opmode != IEEE80211_M_STA)
2684 rfilt |= HAL_RX_FILTER_PROBEREQ;
2685 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
2686 rfilt |= HAL_RX_FILTER_PROM;
2687 if (ic->ic_opmode == IEEE80211_M_STA ||
2688 ic->ic_opmode == IEEE80211_M_IBSS ||
2689 sc->sc_swbmiss || sc->sc_scanning)
2690 rfilt |= HAL_RX_FILTER_BEACON;
2691 /*
2692 * NB: We don't recalculate the rx filter when
2693 * ic_protmode changes; otherwise we could do
2694 * this only when ic_protmode != NONE.
2695 */
2696 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
2697 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
2698 rfilt |= HAL_RX_FILTER_BEACON;
2699 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2700 rfilt |= HAL_RX_FILTER_CONTROL;
2701 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2702 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
2703 return rfilt;
2704}
2705
2706static void
2707ath_update_promisc(struct ifnet *ifp)
2708{
2709 struct ath_softc *sc = ifp->if_softc;
2710 u_int32_t rfilt;
2711
2712 /* configure rx filter */
2713 rfilt = ath_calcrxfilter(sc);
2714 ath_hal_setrxfilter(sc->sc_ah, rfilt);
2715
2716 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2717}
2718
2719static void
2720ath_update_mcast(struct ifnet *ifp)
2721{
2722 struct ath_softc *sc = ifp->if_softc;
2723 u_int32_t mfilt[2];
2724
2725 /* calculate and install multicast filter */
2726 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2727 struct ifmultiaddr *ifma;
2728 /*
2729 * Merge multicast addresses to form the hardware filter.
2730 */
2731 mfilt[0] = mfilt[1] = 0;
2732 IF_ADDR_LOCK(ifp); /* XXX need some fiddling to remove? */
2733 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2734 caddr_t dl;
2735 u_int32_t val;
2736 u_int8_t pos;
2737
2738 /* calculate XOR of eight 6bit values */
2739 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2740 val = LE_READ_4(dl + 0);
2741 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2742 val = LE_READ_4(dl + 3);
2743 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2744 pos &= 0x3f;
2745 mfilt[pos / 32] |= (1 << (pos % 32));
2746 }
2747 IF_ADDR_UNLOCK(ifp);
2748 } else
2749 mfilt[0] = mfilt[1] = ~0;
2750 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2751 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2752 __func__, mfilt[0], mfilt[1]);
2753}
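/*
 * Editor's note on the hash above: each 48-bit group address is
 * folded into a 6-bit filter index by XOR-ing eight 6-bit slices
 * of two overlapping little-endian words, conceptually
 *
 *	pos = (xor6(LE_READ_4(dl)) ^ xor6(LE_READ_4(dl + 3))) & 0x3f
 *
 * (xor6 is hypothetical shorthand for the shift/XOR chain), after
 * which bit pos%32 of word mfilt[pos/32] is set.  ALLMULTI simply
 * writes ~0 into both filter words.
 */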
2754
2755static void
2756ath_mode_init(struct ath_softc *sc)
2757{
2758 struct ifnet *ifp = sc->sc_ifp;
2759 struct ath_hal *ah = sc->sc_ah;
2760 u_int32_t rfilt;
2761
2762 /* configure rx filter */
2763 rfilt = ath_calcrxfilter(sc);
2764 ath_hal_setrxfilter(ah, rfilt);
2765
2766 /* configure operational mode */
2767 ath_hal_setopmode(ah);
2768
2769 /* handle any link-level address change */
2770 ath_hal_setmac(ah, IF_LLADDR(ifp));
2771
2772 /* calculate and install multicast filter */
2773 ath_update_mcast(ifp);
2774}
2775
2776/*
2777 * Set the slot time based on the current setting.
2778 */
2779static void
2780ath_setslottime(struct ath_softc *sc)
2781{
2782 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2783 struct ath_hal *ah = sc->sc_ah;
2784 u_int usec;
2785
2786 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2787 usec = 13;
2788 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2789 usec = 21;
2790 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2791 /* honor short/long slot time only in 11g */
2792 /* XXX shouldn't honor on pure g or turbo g channel */
2793 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2794 usec = HAL_SLOT_TIME_9;
2795 else
2796 usec = HAL_SLOT_TIME_20;
2797 } else
2798 usec = HAL_SLOT_TIME_9;
2799
2800 DPRINTF(sc, ATH_DEBUG_RESET,
2801 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2802 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2803 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2804
2805 ath_hal_setslottime(ah, usec);
2806 sc->sc_updateslot = OK;
2807}
2808
2809/*
2810 * Callback from the 802.11 layer to update the
2811 * slot time based on the current setting.
2812 */
2813static void
2814ath_updateslot(struct ifnet *ifp)
2815{
2816 struct ath_softc *sc = ifp->if_softc;
2817 struct ieee80211com *ic = ifp->if_l2com;
2818
2819 /*
2820 * When not coordinating the BSS, change the hardware
2821 * immediately. For other modes of operation we defer the change
2822 * until beacon updates have propagated to the stations.
2823 */
2824 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2825 sc->sc_updateslot = UPDATE;
2826 else
2827 ath_setslottime(sc);
2828}
2829
2830/*
2831 * Setup a h/w transmit queue for beacons.
2832 */
2833static int
2834ath_beaconq_setup(struct ath_hal *ah)
2835{
2836 HAL_TXQ_INFO qi;
2837
2838 memset(&qi, 0, sizeof(qi));
2839 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2840 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2841 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2842 /* NB: for dynamic turbo, don't enable any other interrupts */
2843 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2844 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2845}
2846
2847/*
2848 * Setup the transmit queue parameters for the beacon queue.
2849 */
2850static int
2851ath_beaconq_config(struct ath_softc *sc)
2852{
2853#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
2854 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2855 struct ath_hal *ah = sc->sc_ah;
2856 HAL_TXQ_INFO qi;
2857
2858 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2859 if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
2860 /*
2861 * Always burst out beacon and CAB traffic.
2862 */
2863 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2864 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2865 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2866 } else {
2867 struct wmeParams *wmep =
2868 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2869 /*
2870 * Adhoc mode; the important thing is to use 2x cwmin.
2871 */
2872 qi.tqi_aifs = wmep->wmep_aifsn;
2873 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2874 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2875 }
2876
2877 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2878 device_printf(sc->sc_dev, "unable to update parameters for "
2879 "beacon hardware queue!\n");
2880 return 0;
2881 } else {
2882 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2883 return 1;
2884 }
2885#undef ATH_EXPONENT_TO_VALUE
2886}
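/*
 * Worked example (editor's sketch, hypothetical WME parameters):
 * ATH_EXPONENT_TO_VALUE(4) = (1 << 4) - 1 = 15, so an adhoc BE
 * logcwmin of 4 yields tqi_cwmin = 2 * 15 = 30 above, giving the
 * beacon queue the doubled contention window the comment calls for.
 */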
2887
2888/*
2889 * Allocate and setup an initial beacon frame.
2890 */
2891static int
2892ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2893{
2894 struct ieee80211vap *vap = ni->ni_vap;
2895 struct ath_vap *avp = ATH_VAP(vap);
2896 struct ath_buf *bf;
2897 struct mbuf *m;
2898 int error;
2899
2900 bf = avp->av_bcbuf;
2901 if (bf->bf_m != NULL) {
2902 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2903 m_freem(bf->bf_m);
2904 bf->bf_m = NULL;
2905 }
2906 if (bf->bf_node != NULL) {
2907 ieee80211_free_node(bf->bf_node);
2908 bf->bf_node = NULL;
2909 }
2910
2911 /*
2912 * NB: the beacon data buffer must be 32-bit aligned;
2913 * we assume the mbuf routines will return us something
2914 * with this alignment (perhaps should assert).
2915 */
2916 m = ieee80211_beacon_alloc(ni, &avp->av_boff);
2917 if (m == NULL) {
2918 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2919 sc->sc_stats.ast_be_nombuf++;
2920 return ENOMEM;
2921 }
2922 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2923 bf->bf_segs, &bf->bf_nseg,
2924 BUS_DMA_NOWAIT);
2925 if (error != 0) {
2926 device_printf(sc->sc_dev,
2927 "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
2928 __func__, error);
2929 m_freem(m);
2930 return error;
2931 }
2932
2933 /*
2934 * Calculate a TSF adjustment factor required for staggered
2935 * beacons. Note that we assume the format of the beacon
2936 * frame leaves the tstamp field immediately following the
2937 * header.
2938 */
2939 if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2940 uint64_t tsfadjust;
2941 struct ieee80211_frame *wh;
2942
2943 /*
2944 * The beacon interval is in TU's; the TSF is in usecs.
2945 * We figure out how many TU's to add to align the timestamp
2946 * then convert to TSF units and handle byte swapping before
2947 * inserting it in the frame. The hardware will then add this
2948 * each time a beacon frame is sent. Note that we align vaps
2949 * 1..N and leave vap 0 untouched. This means vap 0 has a
2950 * timestamp in one beacon interval while the others get a
2951 * timestamp aligned to the next interval.
2952 */
2953 tsfadjust = ni->ni_intval *
2954 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2955 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
2956
2957 DPRINTF(sc, ATH_DEBUG_BEACON,
2958 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2959 __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2960 avp->av_bslot, ni->ni_intval,
2961 (long long unsigned) le64toh(tsfadjust));
2962
2963 wh = mtod(m, struct ieee80211_frame *);
2964 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2965 }
2966 bf->bf_m = m;
2967 bf->bf_node = ieee80211_ref_node(ni);
2968
2969 return 0;
2970}
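/*
 * Worked example (editor's sketch; assumes the default of 4 beacon
 * slots, ATH_BCBUF == 4): for av_bslot 1 and a 100 TU interval,
 *
 *	tsfadjust = 100 * (4 - 1) / 4 = 75 TU = 75 << 10 usec = 76800
 *
 * so this vap's timestamp is advanced 75 TU relative to vap 0's,
 * aligning it to the next interval boundary as described above.
 */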
2971
2972/*
2973 * Setup the beacon frame for transmit.
2974 */
2975static void
2976ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2977{
2978#define USE_SHPREAMBLE(_ic) \
2979 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2980 == IEEE80211_F_SHPREAMBLE)
2981 struct ieee80211_node *ni = bf->bf_node;
2982 struct ieee80211com *ic = ni->ni_ic;
2983 struct mbuf *m = bf->bf_m;
2984 struct ath_hal *ah = sc->sc_ah;
2985 struct ath_desc *ds;
2986 int flags, antenna;
2987 const HAL_RATE_TABLE *rt;
2988 u_int8_t rix, rate;
2989
2990 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2991 __func__, m, m->m_len);
2992
2993 /* setup descriptors */
2994 ds = bf->bf_desc;
2995
2996 flags = HAL_TXDESC_NOACK;
2997 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2998 ds->ds_link = bf->bf_daddr; /* self-linked */
2999 flags |= HAL_TXDESC_VEOL;
3000 /*
3001 * Let hardware handle antenna switching.
3002 */
3003 antenna = sc->sc_txantenna;
3004 } else {
3005 ds->ds_link = 0;
3006 /*
3007 * Switch antenna every 4 beacons.
3008 * XXX assumes two antennas
3009 */
3010 if (sc->sc_txantenna != 0)
3011 antenna = sc->sc_txantenna;
3012 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
3013 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
3014 else
3015 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
3016 }
3017
3018 KASSERT(bf->bf_nseg == 1,
3019 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
3020 ds->ds_data = bf->bf_segs[0].ds_addr;
3021 /*
3022 * Calculate rate code.
3023 * XXX everything at min xmit rate
3024 */
3025 rix = 0;
3026 rt = sc->sc_currates;
3027 rate = rt->info[rix].rateCode;
3028 if (USE_SHPREAMBLE(ic))
3029 rate |= rt->info[rix].shortPreamble;
3030 ath_hal_setuptxdesc(ah, ds
3031 , m->m_len + IEEE80211_CRC_LEN /* frame length */
3032 , sizeof(struct ieee80211_frame)/* header length */
3033 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
3034 , ni->ni_txpower /* txpower XXX */
3035 , rate, 1 /* series 0 rate/tries */
3036 , HAL_TXKEYIX_INVALID /* no encryption */
3037 , antenna /* antenna mode */
3038 , flags /* no ack, veol for beacons */
3039 , 0 /* rts/cts rate */
3040 , 0 /* rts/cts duration */
3041 );
3042 /* NB: beacon's BufLen must be a multiple of 4 bytes */
3043 ath_hal_filltxdesc(ah, ds
3044 , roundup(m->m_len, 4) /* buffer length */
3045 , AH_TRUE /* first segment */
3046 , AH_TRUE /* last segment */
3047 , ds /* first descriptor */
3048 );
3049#if 0
3050 ath_desc_swap(ds);
3051#endif
3052#undef USE_SHPREAMBLE
3053}
3054
3055static void
3056ath_beacon_update(struct ieee80211vap *vap, int item)
3057{
3058 struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
3059
3060 setbit(bo->bo_flags, item);
3061}
3062
3063/*
3064 * Append the contents of src to dst; both queues
3065 * are assumed to be locked.
3066 */
3067static void
3068ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
3069{
3070 STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
3071 dst->axq_link = src->axq_link;
3072 src->axq_link = NULL;
3073 dst->axq_depth += src->axq_depth;
3074 src->axq_depth = 0;
3075}
3076
3077/*
3078 * Transmit a beacon frame at SWBA. Dynamic updates to the
3079 * frame contents are done as needed and the slot time is
3080 * also adjusted based on current state.
3081 */
3082static void
3083ath_beacon_proc(void *arg, int pending)
3084{
3085 struct ath_softc *sc = arg;
3086 struct ath_hal *ah = sc->sc_ah;
3087 struct ieee80211vap *vap;
3088 struct ath_buf *bf;
3089 int slot, otherant;
3090 uint32_t bfaddr;
3091
3092 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
3093 __func__, pending);
3094 /*
3095 * Check if the previous beacon has gone out. If
3096 * not, don't try to post another; skip this period
3097 * and wait for the next. Missed beacons indicate
3098 * a problem and should not occur. If we miss too
3099 * many consecutive beacons, reset the device.
3100 */
3101 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
3102 sc->sc_bmisscount++;
3103 DPRINTF(sc, ATH_DEBUG_BEACON,
3104 "%s: missed %u consecutive beacons\n",
3105 __func__, sc->sc_bmisscount);
3106 if (sc->sc_bmisscount >= ath_bstuck_threshold)
3107 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
3108 return;
3109 }
3110 if (sc->sc_bmisscount != 0) {
3111 DPRINTF(sc, ATH_DEBUG_BEACON,
3112 "%s: resume beacon xmit after %u misses\n",
3113 __func__, sc->sc_bmisscount);
3114 sc->sc_bmisscount = 0;
3115 }
3116
3117 if (sc->sc_stagbeacons) { /* staggered beacons */
3118 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3119 uint32_t tsftu;
3120
3121 tsftu = ath_hal_gettsf32(ah) >> 10;
3122 /* XXX lintval */
3123 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
3124 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
3125 bfaddr = 0;
3126 if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
3127 bf = ath_beacon_generate(sc, vap);
3128 if (bf != NULL)
3129 bfaddr = bf->bf_daddr;
3130 }
3131 } else { /* burst'd beacons */
3132 uint32_t *bflink = &bfaddr;
3133
3134 for (slot = 0; slot < ATH_BCBUF; slot++) {
3135 vap = sc->sc_bslot[slot];
3136 if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
3137 bf = ath_beacon_generate(sc, vap);
3138 if (bf != NULL) {
3139 *bflink = bf->bf_daddr;
3140 bflink = &bf->bf_desc->ds_link;
3141 }
3142 }
3143 }
3144 *bflink = 0; /* terminate list */
3145 }
3146
3147 /*
3148 * Handle slot time change when a non-ERP station joins/leaves
3149 * an 11g network. The 802.11 layer notifies us via callback,
3150 * we mark updateslot, then wait one beacon before effecting
3151 * the change. This gives associated stations at least one
3152 * beacon interval to note the state change.
3153 */
3154 /* XXX locking */
3155 if (sc->sc_updateslot == UPDATE) {
3156 sc->sc_updateslot = COMMIT; /* commit next beacon */
3157 sc->sc_slotupdate = slot;
3158 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
3159 ath_setslottime(sc); /* commit change to h/w */
3160
3161 /*
3162 * Check recent per-antenna transmit statistics and flip
3163 * the default antenna if noticeably more frames went out
3164 * on the non-default antenna.
3165 * XXX assumes 2 antennae
3166 */
3167 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
3168 otherant = sc->sc_defant & 1 ? 2 : 1;
3169 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
3170 ath_setdefantenna(sc, otherant);
3171 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
3172 }
3173
3174 if (bfaddr != 0) {
3175 /*
3176 * Stop any current dma and put the new frame on the queue.
3177 * This should never fail since we check above that no frames
3178 * are still pending on the queue.
3179 */
3180 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
3181 DPRINTF(sc, ATH_DEBUG_ANY,
3182 "%s: beacon queue %u did not stop?\n",
3183 __func__, sc->sc_bhalq);
3184 }
3185 /* NB: cabq traffic should already be queued and primed */
3186 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
3187 ath_hal_txstart(ah, sc->sc_bhalq);
3188
3189 sc->sc_stats.ast_be_xmit++;
3190 }
3191}
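/*
 * Worked example (editor's sketch; assumes ATH_BCBUF == 4 and a
 * 100 TU beacon interval): the staggered case above computes
 *
 *	slot = ((tsftu % 100) * 4) / 100
 *
 * which advances one slot every 25 TU, so each occupied bslot has
 * its beacon generated once per interval, a quarter interval apart.
 */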
3192
3193static struct ath_buf *
3194ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
3195{
3196 struct ath_vap *avp = ATH_VAP(vap);
3197 struct ath_txq *cabq = sc->sc_cabq;
3198 struct ath_buf *bf;
3199 struct mbuf *m;
3200 int nmcastq, error;
3201
3202 KASSERT(vap->iv_state == IEEE80211_S_RUN,
3203 ("not running, state %d", vap->iv_state));
3204 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3205
3206 /*
3207 * Update dynamic beacon contents. If this returns
3208 * non-zero then we need to remap the memory because
3209 * the beacon frame changed size (probably because
3210 * of the TIM bitmap).
3211 */
3212 bf = avp->av_bcbuf;
3213 m = bf->bf_m;
3214 nmcastq = avp->av_mcastq.axq_depth;
3215 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
3216 /* XXX too conservative? */
3217 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3218 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3219 bf->bf_segs, &bf->bf_nseg,
3220 BUS_DMA_NOWAIT);
3221 if (error != 0) {
3222 if_printf(vap->iv_ifp,
3223 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3224 __func__, error);
3225 return NULL;
3226 }
3227 }
3228 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
3229 DPRINTF(sc, ATH_DEBUG_BEACON,
3230 "%s: cabq did not drain, mcastq %u cabq %u\n",
3231 __func__, nmcastq, cabq->axq_depth);
3232 sc->sc_stats.ast_cabq_busy++;
3233 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
3234 /*
3235 * CABQ traffic from a previous vap is still pending.
3236 * We must drain the q before this beacon frame goes
3237 * out as otherwise this vap's stations will get cab
3238 * frames from a different vap.
3239 * XXX could be slow causing us to miss DBA
3240 */
3241 ath_tx_draintxq(sc, cabq);
3242 }
3243 }
3244 ath_beacon_setup(sc, bf);
3245 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3246
3247 /*
3248 * Enable the CAB queue before the beacon queue to
3249 * ensure cab frames are triggered by this beacon.
3250 */
3251 if (avp->av_boff.bo_tim[4] & 1) {
3252 struct ath_hal *ah = sc->sc_ah;
3253
3254 /* NB: only at DTIM */
3255 ATH_TXQ_LOCK(cabq);
3256 ATH_TXQ_LOCK(&avp->av_mcastq);
3257 if (nmcastq) {
3258 struct ath_buf *bfm;
3259
3260 /*
3261 * Move frames from the s/w mcast q to the h/w cab q.
3262 * XXX MORE_DATA bit
3263 */
3264 bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
3265 if (cabq->axq_link != NULL) {
3266 *cabq->axq_link = bfm->bf_daddr;
3267 } else
3268 ath_hal_puttxbuf(ah, cabq->axq_qnum,
3269 bfm->bf_daddr);
3270 ath_txqmove(cabq, &avp->av_mcastq);
3271
3272 sc->sc_stats.ast_cabq_xmit += nmcastq;
3273 }
3274 /* NB: gated by beacon so safe to start here */
3275 ath_hal_txstart(ah, cabq->axq_qnum);
3276 ATH_TXQ_UNLOCK(cabq);
3277 ATH_TXQ_UNLOCK(&avp->av_mcastq);
3278 }
3279 return bf;
3280}
3281
3282static void
3283ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
3284{
3285 struct ath_vap *avp = ATH_VAP(vap);
3286 struct ath_hal *ah = sc->sc_ah;
3287 struct ath_buf *bf;
3288 struct mbuf *m;
3289 int error;
3290
3291 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3292
3293 /*
3294 * Update dynamic beacon contents. If this returns
3295 * non-zero then we need to remap the memory because
3296 * the beacon frame changed size (probably because
3297 * of the TIM bitmap).
3298 */
3299 bf = avp->av_bcbuf;
3300 m = bf->bf_m;
3301 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
3302 /* XXX too conservative? */
3303 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3304 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3305 bf->bf_segs, &bf->bf_nseg,
3306 BUS_DMA_NOWAIT);
3307 if (error != 0) {
3308 if_printf(vap->iv_ifp,
3309 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3310 __func__, error);
3311 return;
3312 }
3313 }
3314 ath_beacon_setup(sc, bf);
3315 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3316
3317 /* NB: caller is known to have already stopped tx dma */
3318 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
3319 ath_hal_txstart(ah, sc->sc_bhalq);
3320}
3321
3322/*
3323 * Reset the hardware after detecting beacons have stopped.
3324 */
3325static void
3326ath_bstuck_proc(void *arg, int pending)
3327{
3328 struct ath_softc *sc = arg;
3329 struct ifnet *ifp = sc->sc_ifp;
3330
3331 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3332 sc->sc_bmisscount);
3333 sc->sc_stats.ast_bstuck++;
3334 ath_reset(ifp);
3335}
3336
3337/*
3338 * Reclaim beacon resources and return buffer to the pool.
3339 */
3340static void
3341ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3342{
3343
3344 if (bf->bf_m != NULL) {
3345 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3346 m_freem(bf->bf_m);
3347 bf->bf_m = NULL;
3348 }
3349 if (bf->bf_node != NULL) {
3350 ieee80211_free_node(bf->bf_node);
3351 bf->bf_node = NULL;
3352 }
3353 STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3354}
3355
3356/*
3357 * Reclaim beacon resources.
3358 */
3359static void
3360ath_beacon_free(struct ath_softc *sc)
3361{
3362 struct ath_buf *bf;
3363
3364 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3365 if (bf->bf_m != NULL) {
3366 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3367 m_freem(bf->bf_m);
3368 bf->bf_m = NULL;
3369 }
3370 if (bf->bf_node != NULL) {
3371 ieee80211_free_node(bf->bf_node);
3372 bf->bf_node = NULL;
3373 }
3374 }
3375}
3376
3377/*
3378 * Configure the beacon and sleep timers.
3379 *
3380 * When operating as an AP this resets the TSF and sets
3381 * up the hardware to notify us when we need to issue beacons.
3382 *
3383 * When operating in station mode this sets up the beacon
3384 * timers according to the timestamp of the last received
3385 * beacon and the current TSF, configures PCF and DTIM
3386 * handling, programs the sleep registers so the hardware
3387 * will wakeup in time to receive beacons, and configures
3388 * the beacon miss handling so we'll receive a BMISS
3389 * interrupt when we stop seeing beacons from the AP
3390 * we've associated with.
3391 */
3392static void
3393ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
3394{
3395#define TSF_TO_TU(_h,_l) \
3396 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
3397#define FUDGE 2
3398 struct ath_hal *ah = sc->sc_ah;
3399 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3400 struct ieee80211_node *ni;
3401 u_int32_t nexttbtt, intval, tsftu;
3402 u_int64_t tsf;
3403
3404 if (vap == NULL)
3405 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
3406 ni = vap->iv_bss;
3407
3408 /* extract tstamp from last beacon and convert to TU */
3409 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
3410 LE_READ_4(ni->ni_tstamp.data));
3411 if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
3412 /*
3413 * For multi-bss ap support beacons are either staggered
3414 * evenly over N slots or burst together. For the former
3415 * arrange for the SWBA to be delivered for each slot.
3416 * Slots that are not occupied will generate nothing.
3417 */
3418 /* NB: the beacon interval is kept internally in TU's */
3419 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3420 if (sc->sc_stagbeacons)
3421 intval /= ATH_BCBUF;
3422 } else {
3423 /* NB: the beacon interval is kept internally in TU's */
3424 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3425 }
3426 if (nexttbtt == 0) /* e.g. for ap mode */
3427 nexttbtt = intval;
3428 else if (intval) /* NB: can be 0 for monitor mode */
3429 nexttbtt = roundup(nexttbtt, intval);
3430 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
3431 __func__, nexttbtt, intval, ni->ni_intval);
3432 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
3433 HAL_BEACON_STATE bs;
3434 int dtimperiod, dtimcount;
3435 int cfpperiod, cfpcount;
3436
3437 /*
3438 * Setup dtim and cfp parameters according to
3439 * last beacon we received (which may be none).
3440 */
3441 dtimperiod = ni->ni_dtim_period;
3442 if (dtimperiod <= 0) /* NB: 0 if not known */
3443 dtimperiod = 1;
3444 dtimcount = ni->ni_dtim_count;
3445 if (dtimcount >= dtimperiod) /* NB: sanity check */
3446 dtimcount = 0; /* XXX? */
3447 cfpperiod = 1; /* NB: no PCF support yet */
3448 cfpcount = 0;
3449 /*
3450 * Pull nexttbtt forward to reflect the current
3451 * TSF and calculate dtim+cfp state for the result.
3452 */
3453 tsf = ath_hal_gettsf64(ah);
3454 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3455 do {
3456 nexttbtt += intval;
3457 if (--dtimcount < 0) {
3458 dtimcount = dtimperiod - 1;
3459 if (--cfpcount < 0)
3460 cfpcount = cfpperiod - 1;
3461 }
3462 } while (nexttbtt < tsftu);
3463 memset(&bs, 0, sizeof(bs));
3464 bs.bs_intval = intval;
3465 bs.bs_nexttbtt = nexttbtt;
3466 bs.bs_dtimperiod = dtimperiod*intval;
3467 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3468 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3469 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3470 bs.bs_cfpmaxduration = 0;
3471#if 0
3472 /*
3473 * The 802.11 layer records the offset to the DTIM
3474 * bitmap while receiving beacons; use it here to
3475 * enable h/w detection of our AID being marked in
3476 * the bitmap vector (to indicate frames for us are
3477 * pending at the AP).
3478 * XXX do DTIM handling in s/w to WAR old h/w bugs
3479 * XXX enable based on h/w rev for newer chips
3480 */
3481 bs.bs_timoffset = ni->ni_timoff;
3482#endif
3483 /*
3484 * Calculate the number of consecutive beacons to miss
3485 * before taking a BMISS interrupt.
3486 * Note that we clamp the result to at most 10 beacons.
3487 */
3488 bs.bs_bmissthreshold = vap->iv_bmissthreshold;
3489 if (bs.bs_bmissthreshold > 10)
3490 bs.bs_bmissthreshold = 10;
3491 else if (bs.bs_bmissthreshold <= 0)
3492 bs.bs_bmissthreshold = 1;
3493
3494 /*
3495 * Calculate sleep duration. The configuration is
3496 * given in ms. We ensure a multiple of the beacon
3497 * period is used. Also, if the sleep duration is
3498 * greater than the DTIM period then it makes sense
3499 * to make it a multiple of that.
3500 *
3501 * XXX fixed at 100ms
3502 */
3503 bs.bs_sleepduration =
3504 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3505 if (bs.bs_sleepduration > bs.bs_dtimperiod)
3506 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
3507
3508 DPRINTF(sc, ATH_DEBUG_BEACON,
3509 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3510 , __func__
3511 , tsf, tsftu
3512 , bs.bs_intval
3513 , bs.bs_nexttbtt
3514 , bs.bs_dtimperiod
3515 , bs.bs_nextdtim
3516 , bs.bs_bmissthreshold
3517 , bs.bs_sleepduration
3518 , bs.bs_cfpperiod
3519 , bs.bs_cfpmaxduration
3520 , bs.bs_cfpnext
3521 , bs.bs_timoffset
3522 );
3523 ath_hal_intrset(ah, 0);
3524 ath_hal_beacontimers(ah, &bs);
3525 sc->sc_imask |= HAL_INT_BMISS;
3526 ath_hal_intrset(ah, sc->sc_imask);
3527 } else {
3528 ath_hal_intrset(ah, 0);
3529 if (nexttbtt == intval)
3530 intval |= HAL_BEACON_RESET_TSF;
3531 if (ic->ic_opmode == IEEE80211_M_IBSS) {
3532 /*
3533 * In IBSS mode enable the beacon timers but only
3534 * enable SWBA interrupts if we need to manually
3535 * prepare beacon frames. Otherwise we use a
3536 * self-linked tx descriptor and let the hardware
3537 * deal with things.
3538 */
3539 intval |= HAL_BEACON_ENA;
3540 if (!sc->sc_hasveol)
3541 sc->sc_imask |= HAL_INT_SWBA;
3542 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3543 /*
3544 * Pull nexttbtt forward to reflect
3545 * the current TSF.
3546 */
3547 tsf = ath_hal_gettsf64(ah);
3548 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3549 do {
3550 nexttbtt += intval;
3551 } while (nexttbtt < tsftu);
3552 }
3553 ath_beaconq_config(sc);
3554 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
3555 /*
3556 * In AP mode we enable the beacon timers and
3557 * SWBA interrupts to prepare beacon frames.
3558 */
3559 intval |= HAL_BEACON_ENA;
3560 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
3561 ath_beaconq_config(sc);
3562 }
3563 ath_hal_beaconinit(ah, nexttbtt, intval);
3564 sc->sc_bmisscount = 0;
3565 ath_hal_intrset(ah, sc->sc_imask);
3566 /*
3567 * When using a self-linked beacon descriptor in
3568 * ibss mode load it once here.
3569 */
3570 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
3571 ath_beacon_start_adhoc(sc, vap);
3572 }
3573 sc->sc_syncbeacon = 0;
3574#undef FUDGE
3575#undef TSF_TO_TU
3576}
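/*
 * Worked example (editor's sketch, hypothetical values): with a
 * 100 TU beacon interval and a last-beacon timestamp of 12345 TU,
 * roundup(12345, 100) = 12400 puts nexttbtt one interval past the
 * stamped beacon.  TSF_TO_TU folds the 64-bit usec TSF into 1024
 * usec TUs, and the station-mode do/while above keeps adding
 * intval (walking the dtim/cfp counters) until nexttbtt is beyond
 * TSF_TO_TU(tsf) + FUDGE, i.e. safely in the future.
 */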
3577
3578static void
3579ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3580{
3581 bus_addr_t *paddr = (bus_addr_t*) arg;
3582 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3583 *paddr = segs->ds_addr;
3584}
3585
3586static int
3587ath_descdma_setup(struct ath_softc *sc,
3588 struct ath_descdma *dd, ath_bufhead *head,
3589 const char *name, int nbuf, int ndesc)
3590{
3591#define DS2PHYS(_dd, _ds) \
3592 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3593 struct ifnet *ifp = sc->sc_ifp;
3594 struct ath_desc *ds;
3595 struct ath_buf *bf;
3596 int i, bsize, error;
3597
3598 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3599 __func__, name, nbuf, ndesc);
3600
3601 dd->dd_name = name;
3602 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
3603
3604 /*
3605 * Setup DMA descriptor area.
3606 */
3607 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
3608 PAGE_SIZE, 0, /* alignment, bounds */
3609 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3610 BUS_SPACE_MAXADDR, /* highaddr */
3611 NULL, NULL, /* filter, filterarg */
3612 dd->dd_desc_len, /* maxsize */
3613 1, /* nsegments */
3614 dd->dd_desc_len, /* maxsegsize */
3615 BUS_DMA_ALLOCNOW, /* flags */
3616 NULL, /* lockfunc */
3617 NULL, /* lockarg */
3618 &dd->dd_dmat);
3619 if (error != 0) {
3620 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3621 return error;
3622 }
3623
3624 /* allocate descriptors */
3625 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3626 if (error != 0) {
3627 if_printf(ifp, "unable to create dmamap for %s descriptors, "
3628 "error %u\n", dd->dd_name, error);
3629 goto fail0;
3630 }
3631
3632 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3633 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3634 &dd->dd_dmamap);
3635 if (error != 0) {
3636 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3637 "error %u\n", nbuf * ndesc, dd->dd_name, error);
3638 goto fail1;
3639 }
3640
3641 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3642 dd->dd_desc, dd->dd_desc_len,
3643 ath_load_cb, &dd->dd_desc_paddr,
3644 BUS_DMA_NOWAIT);
3645 if (error != 0) {
3646 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3647 dd->dd_name, error);
3648 goto fail2;
3649 }
3650
3651 ds = dd->dd_desc;
3652 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3653 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3654 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3655
3656 /* allocate rx buffers */
3657 bsize = sizeof(struct ath_buf) * nbuf;
3658 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3659 if (bf == NULL) {
3660 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3661 dd->dd_name, bsize);
3662 		error = ENOMEM; goto fail3;
3663 }
3664 dd->dd_bufptr = bf;
3665
3666 STAILQ_INIT(head);
3667 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3668 bf->bf_desc = ds;
3669 bf->bf_daddr = DS2PHYS(dd, ds);
3670 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3671 &bf->bf_dmamap);
3672 if (error != 0) {
3673 if_printf(ifp, "unable to create dmamap for %s "
3674 "buffer %u, error %u\n", dd->dd_name, i, error);
3675 ath_descdma_cleanup(sc, dd, head);
3676 return error;
3677 }
3678 STAILQ_INSERT_TAIL(head, bf, bf_list);
3679 }
3680 return 0;
3681fail3:
3682 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3683fail2:
3684 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3685fail1:
3686 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3687fail0:
3688 bus_dma_tag_destroy(dd->dd_dmat);
3689 memset(dd, 0, sizeof(*dd));
3690 return error;
3691#undef DS2PHYS
3692}
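
/*
 * Layout sketch for the setup above (illustrative): all descriptors
 * come from one contiguous, coherent DMA allocation, so DS2PHYS is
 * plain pointer arithmetic,
 *
 *	paddr(ds) = dd_desc_paddr + ((caddr_t)ds - (caddr_t)dd_desc)
 *
 * and buffer i fronts the run of ndesc descriptors starting at
 * dd_desc + i*ndesc; e.g. nbuf = 40, ndesc = 10 yields 40 ath_buf
 * entries each owning 10 hardware descriptors in the single map.
 */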
3693
3694static void
3695ath_descdma_cleanup(struct ath_softc *sc,
3696 struct ath_descdma *dd, ath_bufhead *head)
3697{
3698 struct ath_buf *bf;
3699 struct ieee80211_node *ni;
3700
3701 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3702 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3703 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3704 bus_dma_tag_destroy(dd->dd_dmat);
3705
3706 STAILQ_FOREACH(bf, head, bf_list) {
3707 if (bf->bf_m) {
3708 m_freem(bf->bf_m);
3709 bf->bf_m = NULL;
3710 }
3711 if (bf->bf_dmamap != NULL) {
3712 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3713 bf->bf_dmamap = NULL;
3714 }
3715 ni = bf->bf_node;
3716 bf->bf_node = NULL;
3717 if (ni != NULL) {
3718 /*
3719 * Reclaim node reference.
3720 */
3721 ieee80211_free_node(ni);
3722 }
3723 }
3724
3725 STAILQ_INIT(head);
3726 free(dd->dd_bufptr, M_ATHDEV);
3727 memset(dd, 0, sizeof(*dd));
3728}
3729
3730static int
3731ath_desc_alloc(struct ath_softc *sc)
3732{
3733 int error;
3734
3735 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3736 "rx", ath_rxbuf, 1);
3737 if (error != 0)
3738 return error;
3739
3740 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3741 "tx", ath_txbuf, ATH_TXDESC);
3742 if (error != 0) {
3743 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3744 return error;
3745 }
3746
3747 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3748 "beacon", ATH_BCBUF, 1);
3749 if (error != 0) {
3750 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3751 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3752 return error;
3753 }
3754 return 0;
3755}
3756
3757static void
3758ath_desc_free(struct ath_softc *sc)
3759{
3760
3761 if (sc->sc_bdma.dd_desc_len != 0)
3762 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3763 if (sc->sc_txdma.dd_desc_len != 0)
3764 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3765 if (sc->sc_rxdma.dd_desc_len != 0)
3766 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3767}
3768
3769static struct ieee80211_node *
3770ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3771{
3772 struct ieee80211com *ic = vap->iv_ic;
3773 struct ath_softc *sc = ic->ic_ifp->if_softc;
3774 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3775 struct ath_node *an;
3776
3777 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3778 if (an == NULL) {
3779 /* XXX stat+msg */
3780 return NULL;
3781 }
3782 ath_rate_node_init(sc, an);
3783
3784 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3785 return &an->an_node;
3786}
3787
3788static void
3789ath_node_free(struct ieee80211_node *ni)
3790{
3791 struct ieee80211com *ic = ni->ni_ic;
3792 struct ath_softc *sc = ic->ic_ifp->if_softc;
3793
3794 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3795
3796 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3797 sc->sc_node_free(ni);
3798}
3799
3800static void
3801ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3802{
3803 struct ieee80211com *ic = ni->ni_ic;
3804 struct ath_softc *sc = ic->ic_ifp->if_softc;
3805 struct ath_hal *ah = sc->sc_ah;
3806
3807 *rssi = ic->ic_node_getrssi(ni);
3808 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3809 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3810 else
3811 *noise = -95; /* nominally correct */
3812}
3813
3814static int
3815ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
3816{
3817 struct ath_hal *ah = sc->sc_ah;
3818 int error;
3819 struct mbuf *m;
3820 struct ath_desc *ds;
3821
3822 m = bf->bf_m;
3823 if (m == NULL) {
3824 /*
3825 * NB: by assigning a page to the rx dma buffer we
3826 * implicitly satisfy the Atheros requirement that
3827 * this buffer be cache-line-aligned and sized to be
3828 		 * a multiple of the cache line size. Not doing this
3829 * causes weird stuff to happen (for the 5210 at least).
3830 */
3831 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3832 if (m == NULL) {
3833 DPRINTF(sc, ATH_DEBUG_ANY,
3834 "%s: no mbuf/cluster\n", __func__);
3835 sc->sc_stats.ast_rx_nombuf++;
3836 return ENOMEM;
3837 }
3838 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
3839
3840 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
3841 bf->bf_dmamap, m,
3842 bf->bf_segs, &bf->bf_nseg,
3843 BUS_DMA_NOWAIT);
3844 if (error != 0) {
3845 DPRINTF(sc, ATH_DEBUG_ANY,
3846 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
3847 __func__, error);
3848 sc->sc_stats.ast_rx_busdma++;
3849 m_freem(m);
3850 return error;
3851 }
3852 KASSERT(bf->bf_nseg == 1,
3853 ("multi-segment packet; nseg %u", bf->bf_nseg));
3854 bf->bf_m = m;
3855 }
3856 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
3857
3858 /*
3859 * Setup descriptors. For receive we always terminate
3860 * the descriptor list with a self-linked entry so we'll
3861 * not get overrun under high load (as can happen with a
3862 * 5212 when ANI processing enables PHY error frames).
3863 *
3864 	 * To ensure the last descriptor is self-linked we create
3865 * each descriptor as self-linked and add it to the end. As
3866 * each additional descriptor is added the previous self-linked
3867 * entry is ``fixed'' naturally. This should be safe even
3868 * if DMA is happening. When processing RX interrupts we
3869 * never remove/process the last, self-linked, entry on the
3870 	 * descriptor list. This ensures the hardware always has
3871 * someplace to write a new frame.
3872 */
3873 ds = bf->bf_desc;
3874 ds->ds_link = bf->bf_daddr; /* link to self */
3875 ds->ds_data = bf->bf_segs[0].ds_addr;
3876 ath_hal_setuprxdesc(ah, ds
3877 , m->m_len /* buffer size */
3878 , 0
3879 );
3880
3881 if (sc->sc_rxlink != NULL)
3882 *sc->sc_rxlink = bf->bf_daddr;
3883 sc->sc_rxlink = &ds->ds_link;
3884 return 0;
3885}
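
/*
 * Picture of the rx list maintained above (illustrative): each new
 * descriptor is created self-linked and appended, which naturally
 * "fixes" the previous tail, so only the final entry points at
 * itself:
 *
 *	desc0 -> desc1 -> ... -> descN --+
 *	                           ^     |
 *	                           +-----+
 *
 * ath_rx_proc never processes the self-linked tail, so the DMA
 * engine always has somewhere to write; a frame landing in the
 * tail slot is simply not processed until the list is extended.
 */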
3886
3887/*
3888 * Extend 15-bit time stamp from rx descriptor to
3889 * a full 64-bit TSF using the specified TSF.
3890 */
3891static __inline u_int64_t
3892ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
3893{
3894 if ((tsf & 0x7fff) < rstamp)
3895 tsf -= 0x8000;
3896 return ((tsf &~ 0x7fff) | rstamp);
3897}
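
/*
 * Worked example (illustrative): with a current tsf of 0x18010
 * (low 15 bits 0x0010) and a descriptor stamp rstamp of 0x7ff0,
 * 0x0010 < 0x7ff0 means the 15-bit stamp wrapped after the frame
 * arrived, so tsf is stepped back one epoch (0x18010 - 0x8000 =
 * 0x10010) and the stamp spliced in: (0x10010 & ~0x7fff) | 0x7ff0
 * = 0x17ff0, the full TSF at which the frame was received.
 */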
3898
3899/*
3900 * Intercept management frames to collect beacon rssi data
3901 * and to do ibss merges.
3902 */
3903static void
3904ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
3905 int subtype, int rssi, int noise, u_int32_t rstamp)
3906{
3907 struct ieee80211vap *vap = ni->ni_vap;
3908 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
3909
3910 /*
3911 * Call up first so subsequent work can use information
3912 * potentially stored in the node (e.g. for ibss merge).
3913 */
3914 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, noise, rstamp);
3915 switch (subtype) {
3916 case IEEE80211_FC0_SUBTYPE_BEACON:
3917 /* update rssi statistics for use by the hal */
3918 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
3919 if (sc->sc_syncbeacon &&
3920 ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
3921 /*
3922 * Resync beacon timers using the tsf of the beacon
3923 * frame we just received.
3924 */
3925 ath_beacon_config(sc, vap);
3926 }
3927 /* fall thru... */
3928 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3929 if (vap->iv_opmode == IEEE80211_M_IBSS &&
3930 vap->iv_state == IEEE80211_S_RUN) {
3931 u_int64_t tsf = ath_extend_tsf(rstamp,
3932 ath_hal_gettsf64(sc->sc_ah));
3933 /*
3934 * Handle ibss merge as needed; check the tsf on the
3935 * frame before attempting the merge. The 802.11 spec
3936 			 * says the station should change its bssid to match
3937 * the oldest station with the same ssid, where oldest
3938 * is determined by the tsf. Note that hardware
3939 * reconfiguration happens through callback to
3940 * ath_newstate as the state machine will go from
3941 * RUN -> RUN when this happens.
3942 */
3943 if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
3944 DPRINTF(sc, ATH_DEBUG_STATE,
3945 "ibss merge, rstamp %u tsf %ju "
3946 "tstamp %ju\n", rstamp, (uintmax_t)tsf,
3947 (uintmax_t)ni->ni_tstamp.tsf);
3948 (void) ieee80211_ibss_merge(ni);
3949 }
3950 }
3951 break;
3952 }
3953}
3954
3955/*
3956 * Set the default antenna.
3957 */
3958static void
3959ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3960{
3961 struct ath_hal *ah = sc->sc_ah;
3962
3963 /* XXX block beacon interrupts */
3964 ath_hal_setdefantenna(ah, antenna);
3965 if (sc->sc_defant != antenna)
3966 sc->sc_stats.ast_ant_defswitch++;
3967 sc->sc_defant = antenna;
3968 sc->sc_rxotherant = 0;
3969}
3970
3971static int
3972ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
3973 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
3974{
3975#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20)
3976#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U)
3977#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D)
3978#define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
3979 struct ath_softc *sc = ifp->if_softc;
3980 const HAL_RATE_TABLE *rt;
3981 uint8_t rix;
3982
3983 /*
3984 * Discard anything shorter than an ack or cts.
3985 */
3986 if (m->m_pkthdr.len < IEEE80211_ACK_LEN) {
3987 DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n",
3988 __func__, m->m_pkthdr.len);
3989 sc->sc_stats.ast_rx_tooshort++;
3990 return 0;
3991 }
3992 rt = sc->sc_currates;
3993 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
3994 rix = rt->rateCodeToIndex[rs->rs_rate];
3995 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
3996 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
3997#ifdef AH_SUPPORT_AR5416
3998 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
3999 if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */
4000 struct ieee80211com *ic = ifp->if_l2com;
4001
4002 if ((rs->rs_flags & HAL_RX_2040) == 0)
4003 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
4004 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
4005 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
4006 else
4007 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
4008 if ((rs->rs_flags & HAL_RX_GI) == 0)
4009 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
4010 }
4011#endif
4012 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
4013 if (rs->rs_status & HAL_RXERR_CRC)
4014 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4015 /* XXX propagate other error flags from descriptor */
4016 sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf;
4017 sc->sc_rx_th.wr_antnoise = nf;
4018 sc->sc_rx_th.wr_antenna = rs->rs_antenna;
4019
4020 bpf_mtap2(ifp->if_bpf, &sc->sc_rx_th, sc->sc_rx_th_len, m);
4021
4022 return 1;
4023#undef CHAN_HT
4024#undef CHAN_HT20
4025#undef CHAN_HT40U
4026#undef CHAN_HT40D
4027}
4028
4029static void
4030ath_handle_micerror(struct ieee80211com *ic,
4031 struct ieee80211_frame *wh, int keyix)
4032{
4033 struct ieee80211_node *ni;
4034
4035 /* XXX recheck MIC to deal w/ chips that lie */
4036 /* XXX discard MIC errors on !data frames */
4037 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
4038 if (ni != NULL) {
4039 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
4040 ieee80211_free_node(ni);
4041 }
4042}
4043
4044static void
4045ath_rx_proc(void *arg, int npending)
4046{
4047#define PA2DESC(_sc, _pa) \
4048 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
4049 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
4050 struct ath_softc *sc = arg;
4051 struct ath_buf *bf;
4052 struct ifnet *ifp = sc->sc_ifp;
4053 struct ieee80211com *ic = ifp->if_l2com;
4054 struct ath_hal *ah = sc->sc_ah;
4055 struct ath_desc *ds;
4056 struct ath_rx_status *rs;
4057 struct mbuf *m;
4058 struct ieee80211_node *ni;
4059 int len, type, ngood;
4060 u_int phyerr;
4061 HAL_STATUS status;
4062 int16_t nf;
4063 u_int64_t tsf;
4064
4065 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
4066 ngood = 0;
4067 nf = ath_hal_getchannoise(ah, sc->sc_curchan);
4068 sc->sc_stats.ast_rx_noise = nf;
4069 tsf = ath_hal_gettsf64(ah);
4070 do {
4071 bf = STAILQ_FIRST(&sc->sc_rxbuf);
4072 if (bf == NULL) { /* NB: shouldn't happen */
4073 if_printf(ifp, "%s: no buffer!\n", __func__);
4074 break;
4075 }
4076 m = bf->bf_m;
4077 if (m == NULL) { /* NB: shouldn't happen */
4078 /*
4079 * If mbuf allocation failed previously there
4080 * will be no mbuf; try again to re-populate it.
4081 */
4082 /* XXX make debug msg */
4083 if_printf(ifp, "%s: no mbuf!\n", __func__);
4084 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
4085 goto rx_next;
4086 }
4087 ds = bf->bf_desc;
4088 if (ds->ds_link == bf->bf_daddr) {
4089 /* NB: never process the self-linked entry at the end */
4090 break;
4091 }
4092 /* XXX sync descriptor memory */
4093 /*
4094 * Must provide the virtual address of the current
4095 * descriptor, the physical address, and the virtual
4096 * address of the next descriptor in the h/w chain.
4097 * This allows the HAL to look ahead to see if the
4098 * hardware is done with a descriptor by checking the
4099 * done bit in the following descriptor and the address
4100 * of the current descriptor the DMA engine is working
4101 * on. All this is necessary because of our use of
4102 * a self-linked list to avoid rx overruns.
4103 */
4104 rs = &bf->bf_status.ds_rxstat;
4105 status = ath_hal_rxprocdesc(ah, ds,
4106 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
4107#ifdef ATH_DEBUG
4108 if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
4109 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
4110#endif
4111 if (status == HAL_EINPROGRESS)
4112 break;
4113 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
4114 if (rs->rs_status != 0) {
4115 if (rs->rs_status & HAL_RXERR_CRC)
4116 sc->sc_stats.ast_rx_crcerr++;
4117 if (rs->rs_status & HAL_RXERR_FIFO)
4118 sc->sc_stats.ast_rx_fifoerr++;
4119 if (rs->rs_status & HAL_RXERR_PHY) {
4120 sc->sc_stats.ast_rx_phyerr++;
4121 phyerr = rs->rs_phyerr & 0x1f;
4122 sc->sc_stats.ast_rx_phy[phyerr]++;
4123 goto rx_error; /* NB: don't count in ierrors */
4124 }
4125 if (rs->rs_status & HAL_RXERR_DECRYPT) {
4126 /*
4127 * Decrypt error. If the error occurred
4128 * because there was no hardware key, then
4129 * let the frame through so the upper layers
4130 * can process it. This is necessary for 5210
4131 * parts which have no way to setup a ``clear''
4132 * key cache entry.
4133 *
4134 * XXX do key cache faulting
4135 */
4136 if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
4137 goto rx_accept;
4138 sc->sc_stats.ast_rx_badcrypt++;
4139 }
4140 if (rs->rs_status & HAL_RXERR_MIC) {
4141 sc->sc_stats.ast_rx_badmic++;
4142 /*
4143 * Do minimal work required to hand off
4144 				 * the 802.11 header for notification.
4145 */
4146 /* XXX frag's and qos frames */
4147 len = rs->rs_datalen;
4148 if (len >= sizeof (struct ieee80211_frame)) {
4149 bus_dmamap_sync(sc->sc_dmat,
4150 bf->bf_dmamap,
4151 BUS_DMASYNC_POSTREAD);
4152 ath_handle_micerror(ic,
4153 mtod(m, struct ieee80211_frame *),
4154 sc->sc_splitmic ?
4155 rs->rs_keyix-32 : rs->rs_keyix);
4156 }
4157 }
4158 ifp->if_ierrors++;
4159rx_error:
4160 /*
4161 * Cleanup any pending partial frame.
4162 */
4163 if (sc->sc_rxpending != NULL) {
4164 m_freem(sc->sc_rxpending);
4165 sc->sc_rxpending = NULL;
4166 }
4167 /*
4168 * When a tap is present pass error frames
4169 * that have been requested. By default we
4170 * pass decrypt+mic errors but others may be
4171 * interesting (e.g. crc).
4172 */
4173 if (bpf_peers_present(ifp->if_bpf) &&
4174 (rs->rs_status & sc->sc_monpass)) {
4175 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4176 BUS_DMASYNC_POSTREAD);
4177 /* NB: bpf needs the mbuf length setup */
4178 len = rs->rs_datalen;
4179 m->m_pkthdr.len = m->m_len = len;
4180 (void) ath_rx_tap(ifp, m, rs, tsf, nf);
4181 }
4182 			/* XXX pass MIC errors up for s/w recalculation */
4183 goto rx_next;
4184 }
4185rx_accept:
4186 /*
4187 * Sync and unmap the frame. At this point we're
4188 * committed to passing the mbuf somewhere so clear
4189 * bf_m; this means a new mbuf must be allocated
4190 * when the rx descriptor is setup again to receive
4191 * another frame.
4192 */
4193 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4194 BUS_DMASYNC_POSTREAD);
4195 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4196 bf->bf_m = NULL;
4197
4198 len = rs->rs_datalen;
4199 m->m_len = len;
4200
4201 if (rs->rs_more) {
4202 /*
4203 * Frame spans multiple descriptors; save
4204 * it for the next completed descriptor, it
4205 * will be used to construct a jumbogram.
4206 */
4207 if (sc->sc_rxpending != NULL) {
4208 /* NB: max frame size is currently 2 clusters */
4209 sc->sc_stats.ast_rx_toobig++;
4210 m_freem(sc->sc_rxpending);
4211 }
4212 m->m_pkthdr.rcvif = ifp;
4213 m->m_pkthdr.len = len;
4214 sc->sc_rxpending = m;
4215 goto rx_next;
4216 } else if (sc->sc_rxpending != NULL) {
4217 /*
4218 * This is the second part of a jumbogram,
4219 * chain it to the first mbuf, adjust the
4220 * frame length, and clear the rxpending state.
4221 */
4222 sc->sc_rxpending->m_next = m;
4223 sc->sc_rxpending->m_pkthdr.len += len;
4224 m = sc->sc_rxpending;
4225 sc->sc_rxpending = NULL;
4226 } else {
4227 /*
4228 * Normal single-descriptor receive; setup
4229 * the rcvif and packet length.
4230 */
4231 m->m_pkthdr.rcvif = ifp;
4232 m->m_pkthdr.len = len;
4233 }
4234
4235 ifp->if_ipackets++;
4236 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
4237
4238 if (bpf_peers_present(ifp->if_bpf) &&
4239 !ath_rx_tap(ifp, m, rs, tsf, nf)) {
4240 m_freem(m); /* XXX reclaim */
4241 goto rx_next;
4242 }
4243
4244 /*
4245 * From this point on we assume the frame is at least
4246 * as large as ieee80211_frame_min; verify that.
4247 */
4248 if (len < IEEE80211_MIN_LEN) {
4249 DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
4250 __func__, len);
4251 sc->sc_stats.ast_rx_tooshort++;
4252 m_freem(m);
4253 goto rx_next;
4254 }
4255
4256 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
4257 const HAL_RATE_TABLE *rt = sc->sc_currates;
4258 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
4259
4260 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
4261 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
4262 }
4263
4264 m_adj(m, -IEEE80211_CRC_LEN);
4265
4266 /*
4267 * Locate the node for sender, track state, and then
4268 * pass the (referenced) node up to the 802.11 layer
4269 * for its use.
4270 */
4271 ni = ieee80211_find_rxnode_withkey(ic,
4272 mtod(m, const struct ieee80211_frame_min *),
4273 rs->rs_keyix == HAL_RXKEYIX_INVALID ?
4274 IEEE80211_KEYIX_NONE : rs->rs_keyix);
4275 if (ni != NULL) {
4276 /*
4277 * Sending station is known, dispatch directly.
4278 */
1631 if (sc->sc_tdma)
1632 ath_tdma_config(sc, NULL);
1633 else
1634#endif
1635 ath_beacon_config(sc, NULL); /* restart beacons */
1636 }
1637 ath_hal_intrset(ah, sc->sc_imask);
1638
1639 ath_start(ifp); /* restart xmit */
1640 return 0;
1641}
1642
1643static int
1644ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
1645{
1646 struct ieee80211com *ic = vap->iv_ic;
1647 struct ifnet *ifp = ic->ic_ifp;
1648 struct ath_softc *sc = ifp->if_softc;
1649 struct ath_hal *ah = sc->sc_ah;
1650
1651 switch (cmd) {
1652 case IEEE80211_IOC_TXPOWER:
1653 /*
1654 * If per-packet TPC is enabled, then we have nothing
1655 * to do; otherwise we need to force the global limit.
1656 * All this can happen directly; no need to reset.
1657 */
1658 if (!ath_hal_gettpc(ah))
1659 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1660 return 0;
1661 }
1662 return ath_reset(ifp);
1663}
1664
1665static int
1666ath_ff_always(struct ath_txq *txq, struct ath_buf *bf)
1667{
1668 return 0;
1669}
1670
1671#if 0
1672static int
1673ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
1674{
1675 return (txq->axq_curage - bf->bf_age) < ATH_FF_STAGEMAX;
1676}
1677#endif
1678
1679/*
1680 * Flush FF staging queue.
1681 */
1682static void
1683ath_ff_stageq_flush(struct ath_softc *sc, struct ath_txq *txq,
1684 int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
1685{
1686 struct ath_buf *bf;
1687 struct ieee80211_node *ni;
1688 int pktlen, pri;
1689
1690 for (;;) {
1691 ATH_TXQ_LOCK(txq);
1692 /*
1693 * Go from the back (oldest) to front so we can
1694 * stop early based on the age of the entry.
1695 */
1696 bf = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
1697 if (bf == NULL || ath_ff_flushdonetest(txq, bf)) {
1698 ATH_TXQ_UNLOCK(txq);
1699 break;
1700 }
1701
1702 ni = bf->bf_node;
1703 pri = M_WME_GETAC(bf->bf_m);
1704 KASSERT(ATH_NODE(ni)->an_ff_buf[pri],
1705 ("no bf on staging queue %p", bf));
1706 ATH_NODE(ni)->an_ff_buf[pri] = NULL;
1707 TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
1708
1709 ATH_TXQ_UNLOCK(txq);
1710
1711 DPRINTF(sc, ATH_DEBUG_FF, "%s: flush frame, age %u\n",
1712 __func__, bf->bf_age);
1713
1714 sc->sc_stats.ast_ff_flush++;
1715
1716 /* encap and xmit */
1717 bf->bf_m = ieee80211_encap(ni, bf->bf_m);
1718 if (bf->bf_m == NULL) {
1719 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1720 "%s: discard, encapsulation failure\n",
1721 __func__);
1722 sc->sc_stats.ast_tx_encap++;
1723 goto bad;
1724 }
1725 pktlen = bf->bf_m->m_pkthdr.len; /* NB: don't reference below */
1726 if (ath_tx_start(sc, ni, bf, bf->bf_m) == 0) {
1727#if 0 /*XXX*/
1728 ifp->if_opackets++;
1729#endif
1730 continue;
1731 }
1732 bad:
1733 if (ni != NULL)
1734 ieee80211_free_node(ni);
1735 bf->bf_node = NULL;
1736 if (bf->bf_m != NULL) {
1737 m_freem(bf->bf_m);
1738 bf->bf_m = NULL;
1739 }
1740
1741 ATH_TXBUF_LOCK(sc);
1742 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1743 ATH_TXBUF_UNLOCK(sc);
1744 }
1745}
1746
1747static __inline u_int32_t
1748ath_ff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct mbuf *m)
1749{
1750 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1751 u_int32_t framelen;
1752 struct ath_buf *bf;
1753
1754 /*
1755 * Approximate the frame length to be transmitted. A swag to add
1756 	 * the following maximal values to the mbuf payload:
1757 * - 32: 802.11 encap + CRC
1758 * - 24: encryption overhead (if wep bit)
1759 * - 4 + 6: fast-frame header and padding
1760 * - 16: 2 LLC FF tunnel headers
1761 	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
1762 */
1763 framelen = m->m_pkthdr.len + 32 + 4 + 6 + 16 + 14;
1764 if (ic->ic_flags & IEEE80211_F_PRIVACY)
1765 framelen += 24;
1766 bf = an->an_ff_buf[M_WME_GETAC(m)];
1767 if (bf != NULL)
1768 framelen += bf->bf_m->m_pkthdr.len;
1769 return ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen,
1770 sc->sc_lastdatarix, AH_FALSE);
1771}
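
/*
 * Illustrative arithmetic for the estimate above: a 1500-byte
 * payload on a vap with privacy enabled and a 1400-byte partner
 * already staged gives framelen = 1500 + 32 + 4 + 6 + 16 + 14 +
 * 24 + 1400 = 2996 bytes, which ath_hal_computetxtime() then
 * converts to airtime at the last data rate used (sc_lastdatarix).
 */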
1772
1773/*
1774 * Determine if a data frame may be aggregated via ff tunnelling.
1775 * Note the caller is responsible for checking if the destination
1776 * supports fast frames.
1777 *
1778  * NB: EAPOL frames are allowed to be aggregated with other unicast
1779  *     traffic. Do 802.1x EAPOL frames proceed in the clear? If so,
1780  *     they could not be aggregated with encrypted frames.
1781 *
1782 * NB: assumes lock on an_ff_buf effectively held by txq lock mechanism.
1783 */
1784static __inline int
1785ath_ff_can_aggregate(struct ath_softc *sc,
1786 struct ath_node *an, struct mbuf *m, int *flushq)
1787{
1788 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
1789 struct ath_txq *txq;
1790 u_int32_t txoplimit;
1791 u_int pri;
1792
1793 *flushq = 0;
1794
1795 /*
1796 * If there is no frame to combine with and the txq has
1797 	 * fewer frames than the minimum required, then do not
1798 * attempt to aggregate this frame.
1799 */
1800 pri = M_WME_GETAC(m);
1801 txq = sc->sc_ac2q[pri];
1802 if (an->an_ff_buf[pri] == NULL && txq->axq_depth < sc->sc_fftxqmin)
1803 return 0;
1804 /*
1805 * When not in station mode never aggregate a multicast
1806 	 * frame; this ensures, for example, that a combined frame
1807 * does not require multiple encryption keys when using
1808 * 802.1x/WPA.
1809 */
1810 if (ic->ic_opmode != IEEE80211_M_STA &&
1811 ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
1812 return 0;
1813 /*
1814 	 * Consult the max bursting interval to ensure a combined
1815 * frame fits within the TxOp window.
1816 */
1817 txoplimit = IEEE80211_TXOP_TO_US(
1818 ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
1819 if (txoplimit != 0 && ath_ff_approx_txtime(sc, an, m) > txoplimit) {
1820 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1821 "%s: FF TxOp violation\n", __func__);
1822 if (an->an_ff_buf[pri] != NULL)
1823 *flushq = 1;
1824 return 0;
1825 }
1826 return 1; /* try to aggregate */
1827}
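
/*
 * Example of the TxOp check above (illustrative, assuming the
 * standard 11a/g WME default txopLimit of 94 for AC_VI, carried
 * in 32 usec units): IEEE80211_TXOP_TO_US(94) = 94 * 32 = 3008
 * usec, so an aggregate whose estimated airtime exceeds ~3 ms is
 * refused and any staged partner frame is flushed instead.
 */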
1828
1829/*
1830 * Check if the supplied frame can be partnered with an existing
1831 * or pending frame. Return a reference to any frame that should be
1832 * sent on return; otherwise return NULL.
1833 */
1834static struct mbuf *
1835ath_ff_check(struct ath_softc *sc, struct ath_txq *txq,
1836 struct ath_buf *bf, struct mbuf *m, struct ieee80211_node *ni)
1837{
1838 struct ath_node *an = ATH_NODE(ni);
1839 struct ath_buf *bfstaged;
1840 int ff_flush, pri;
1841
1842 /*
1843 * Check if the supplied frame can be aggregated.
1844 *
1845 * NB: we use the txq lock to protect references to
1846 	 * an->an_ff_buf in ath_ff_can_aggregate().
1847 */
1848 ATH_TXQ_LOCK(txq);
1849 pri = M_WME_GETAC(m);
1850 if (ath_ff_can_aggregate(sc, an, m, &ff_flush)) {
1851 		bfstaged = an->an_ff_buf[pri];
1852 if (bfstaged != NULL) {
1853 /*
1854 * A frame is available for partnering; remove
1855 * it, chain it to this one, and encapsulate.
1856 */
1857 an->an_ff_buf[pri] = NULL;
1858 TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1859 ATH_TXQ_UNLOCK(txq);
1860
1861 /*
1862 * Chain mbufs and add FF magic.
1863 */
1864 DPRINTF(sc, ATH_DEBUG_FF,
1865 "[%s] aggregate fast-frame, age %u\n",
1866 ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1867 m->m_nextpkt = NULL;
1868 bfstaged->bf_m->m_nextpkt = m;
1869 m = bfstaged->bf_m;
1870 bfstaged->bf_m = NULL;
1871 m->m_flags |= M_FF;
1872 /*
1873 * Release the node reference held while
1874 * the packet sat on an_ff_buf[]
1875 */
1876 bfstaged->bf_node = NULL;
1877 ieee80211_free_node(ni);
1878
1879 /*
1880 * Return bfstaged to the free list.
1881 */
1882 ATH_TXBUF_LOCK(sc);
1883 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bfstaged, bf_list);
1884 ATH_TXBUF_UNLOCK(sc);
1885
1886 return m; /* ready to go */
1887 } else {
1888 /*
1889 * No frame available, queue this frame to wait
1890 * for a partner. Note that we hold the buffer
1891 * and a reference to the node; we need the
1892 * buffer in particular so we're certain we
1893 * can flush the frame at a later time.
1894 */
1895 DPRINTF(sc, ATH_DEBUG_FF,
1896 "[%s] stage fast-frame, age %u\n",
1897 ether_sprintf(ni->ni_macaddr), txq->axq_curage);
1898
1899 bf->bf_m = m;
1900 bf->bf_node = ni; /* NB: held reference */
1901 bf->bf_age = txq->axq_curage;
1902 an->an_ff_buf[pri] = bf;
1903 TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
1904 ATH_TXQ_UNLOCK(txq);
1905
1906 return NULL; /* consumed */
1907 }
1908 }
1909 /*
1910 * Frame could not be aggregated, it needs to be returned
1911 * to the caller for immediate transmission. In addition
1912 * we check if we should first flush a frame from the
1913 * staging queue before sending this one.
1914 *
1915 * NB: ath_ff_can_aggregate only marks ff_flush if a frame
1916 * is present to flush.
1917 */
1918 if (ff_flush) {
1919 int pktlen;
1920
1921 bfstaged = an->an_ff_buf[pri];
1922 an->an_ff_buf[pri] = NULL;
1923 TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
1924 ATH_TXQ_UNLOCK(txq);
1925
1926 DPRINTF(sc, ATH_DEBUG_FF, "[%s] flush staged frame\n",
1927 ether_sprintf(an->an_node.ni_macaddr));
1928
1929 /* encap and xmit */
1930 bfstaged->bf_m = ieee80211_encap(ni, bfstaged->bf_m);
1931 if (bfstaged->bf_m == NULL) {
1932 DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
1933 "%s: discard, encap failure\n", __func__);
1934 sc->sc_stats.ast_tx_encap++;
1935 goto ff_flushbad;
1936 }
1937 pktlen = bfstaged->bf_m->m_pkthdr.len;
1938 if (ath_tx_start(sc, ni, bfstaged, bfstaged->bf_m)) {
1939 DPRINTF(sc, ATH_DEBUG_XMIT,
1940 "%s: discard, xmit failure\n", __func__);
1941 ff_flushbad:
1942 /*
1943 * Unable to transmit frame that was on the staging
1944 * queue. Reclaim the node reference and other
1945 * resources.
1946 */
1947 if (ni != NULL)
1948 ieee80211_free_node(ni);
1949 bfstaged->bf_node = NULL;
1950 if (bfstaged->bf_m != NULL) {
1951 m_freem(bfstaged->bf_m);
1952 bfstaged->bf_m = NULL;
1953 }
1954
1955 ATH_TXBUF_LOCK(sc);
1956 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bfstaged, bf_list);
1957 ATH_TXBUF_UNLOCK(sc);
1958 } else {
1959#if 0
1960 ifp->if_opackets++;
1961#endif
1962 }
1963 } else {
1964 if (an->an_ff_buf[pri] != NULL) {
1965 /*
1966 * XXX: out-of-order condition only occurs for AP
1967 * mode and multicast. There may be no valid way
1968 * to get this condition.
1969 */
1970 DPRINTF(sc, ATH_DEBUG_FF, "[%s] out-of-order frame\n",
1971 ether_sprintf(an->an_node.ni_macaddr));
1972 /* XXX stat */
1973 }
1974 ATH_TXQ_UNLOCK(txq);
1975 }
1976 return m;
1977}
1978
1979static struct ath_buf *
1980_ath_getbuf_locked(struct ath_softc *sc)
1981{
1982 struct ath_buf *bf;
1983
1984 ATH_TXBUF_LOCK_ASSERT(sc);
1985
1986 bf = STAILQ_FIRST(&sc->sc_txbuf);
1987 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
1988 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1989 else
1990 bf = NULL;
1991 if (bf == NULL) {
1992 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
1993 STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
1994 "out of xmit buffers" : "xmit buffer busy");
1995 sc->sc_stats.ast_tx_nobuf++;
1996 }
1997 return bf;
1998}
1999
2000static struct ath_buf *
2001ath_getbuf(struct ath_softc *sc)
2002{
2003 struct ath_buf *bf;
2004
2005 ATH_TXBUF_LOCK(sc);
2006 bf = _ath_getbuf_locked(sc);
2007 if (bf == NULL) {
2008 struct ifnet *ifp = sc->sc_ifp;
2009
2010 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2011 sc->sc_stats.ast_tx_qstop++;
2012 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2013 }
2014 ATH_TXBUF_UNLOCK(sc);
2015 return bf;
2016}
2017
2018/*
2019 * Cleanup driver resources when we run out of buffers
2020 * while processing fragments; return the tx buffers
2021 * allocated and drop node references.
2022 */
2023static void
2024ath_txfrag_cleanup(struct ath_softc *sc,
2025 ath_bufhead *frags, struct ieee80211_node *ni)
2026{
2027 struct ath_buf *bf, *next;
2028
2029 ATH_TXBUF_LOCK_ASSERT(sc);
2030
2031 STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
2032 /* NB: bf assumed clean */
2033 STAILQ_REMOVE_HEAD(frags, bf_list);
2034 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2035 ieee80211_node_decref(ni);
2036 }
2037}
2038
2039/*
2040 * Setup xmit of a fragmented frame. Allocate a buffer
2041 * for each frag and bump the node reference count to
2042 * reflect the held reference to be setup by ath_tx_start.
2043 */
2044static int
2045ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
2046 struct mbuf *m0, struct ieee80211_node *ni)
2047{
2048 struct mbuf *m;
2049 struct ath_buf *bf;
2050
2051 ATH_TXBUF_LOCK(sc);
2052 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
2053 bf = _ath_getbuf_locked(sc);
2054 if (bf == NULL) { /* out of buffers, cleanup */
2055 ath_txfrag_cleanup(sc, frags, ni);
2056 break;
2057 }
2058 ieee80211_node_incref(ni);
2059 STAILQ_INSERT_TAIL(frags, bf, bf_list);
2060 }
2061 ATH_TXBUF_UNLOCK(sc);
2062
2063 return !STAILQ_EMPTY(frags);
2064}
2065
2066static void
2067ath_start(struct ifnet *ifp)
2068{
2069 struct ath_softc *sc = ifp->if_softc;
2070 struct ieee80211_node *ni;
2071 struct ath_buf *bf;
2072 struct mbuf *m, *next;
2073 struct ath_txq *txq;
2074 ath_bufhead frags;
2075 int pri;
2076
2077 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
2078 return;
2079 for (;;) {
2080 /*
2081 * Grab a TX buffer and associated resources.
2082 */
2083 bf = ath_getbuf(sc);
2084 if (bf == NULL)
2085 break;
2086
2087 IFQ_DEQUEUE(&ifp->if_snd, m);
2088 if (m == NULL) {
2089 ATH_TXBUF_LOCK(sc);
2090 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2091 ATH_TXBUF_UNLOCK(sc);
2092 break;
2093 }
2094 STAILQ_INIT(&frags);
2095 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2096 pri = M_WME_GETAC(m);
2097 txq = sc->sc_ac2q[pri];
2098 if (IEEE80211_ATH_CAP(ni->ni_vap, ni, IEEE80211_NODE_FF)) {
2099 /*
2100 * Check queue length; if too deep drop this
2101 * frame (tail drop considered good).
2102 */
2103 if (txq->axq_depth >= sc->sc_fftxqmax) {
2104 DPRINTF(sc, ATH_DEBUG_FF,
2105 "[%s] tail drop on q %u depth %u\n",
2106 ether_sprintf(ni->ni_macaddr),
2107 txq->axq_qnum, txq->axq_depth);
2108 sc->sc_stats.ast_tx_qfull++;
2109 m_freem(m);
2110 goto reclaim;
2111 }
2112 m = ath_ff_check(sc, txq, bf, m, ni);
2113 if (m == NULL) {
2114 /* NB: ni ref & bf held on stageq */
2115 continue;
2116 }
2117 }
2118 ifp->if_opackets++;
2119 /*
2120 * Encapsulate the packet in prep for transmission.
2121 */
2122 m = ieee80211_encap(ni, m);
2123 if (m == NULL) {
2124 DPRINTF(sc, ATH_DEBUG_XMIT,
2125 "%s: encapsulation failure\n", __func__);
2126 sc->sc_stats.ast_tx_encap++;
2127 goto bad;
2128 }
2129 /*
2130 * Check for fragmentation. If this frame
2131 * has been broken up verify we have enough
2132 * buffers to send all the fragments so all
2133 * go out or none...
2134 */
2135 if ((m->m_flags & M_FRAG) &&
2136 !ath_txfrag_setup(sc, &frags, m, ni)) {
2137 DPRINTF(sc, ATH_DEBUG_XMIT,
2138 "%s: out of txfrag buffers\n", __func__);
2139 sc->sc_stats.ast_tx_nofrag++;
2140 ath_freetx(m);
2141 goto bad;
2142 }
2143 nextfrag:
2144 /*
2145 * Pass the frame to the h/w for transmission.
2146 * Fragmented frames have each frag chained together
2147 * with m_nextpkt. We know there are sufficient ath_buf's
2148 * to send all the frags because of work done by
2149 * ath_txfrag_setup. We leave m_nextpkt set while
2150 * calling ath_tx_start so it can use it to extend the
2151 		 * tx duration to cover the subsequent frag and
2152 * so it can reclaim all the mbufs in case of an error;
2153 * ath_tx_start clears m_nextpkt once it commits to
2154 * handing the frame to the hardware.
2155 */
2156 next = m->m_nextpkt;
2157 if (ath_tx_start(sc, ni, bf, m)) {
2158 bad:
2159 ifp->if_oerrors++;
2160 reclaim:
2161 bf->bf_m = NULL;
2162 bf->bf_node = NULL;
2163 ATH_TXBUF_LOCK(sc);
2164 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
2165 ath_txfrag_cleanup(sc, &frags, ni);
2166 ATH_TXBUF_UNLOCK(sc);
2167 if (ni != NULL)
2168 ieee80211_free_node(ni);
2169 continue;
2170 }
2171 if (next != NULL) {
2172 /*
2173 * Beware of state changing between frags.
2174 * XXX check sta power-save state?
2175 */
2176 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
2177 DPRINTF(sc, ATH_DEBUG_XMIT,
2178 "%s: flush fragmented packet, state %s\n",
2179 __func__,
2180 ieee80211_state_name[ni->ni_vap->iv_state]);
2181 ath_freetx(next);
2182 goto reclaim;
2183 }
2184 m = next;
2185 bf = STAILQ_FIRST(&frags);
2186 KASSERT(bf != NULL, ("no buf for txfrag"));
2187 STAILQ_REMOVE_HEAD(&frags, bf_list);
2188 goto nextfrag;
2189 }
2190
2191 sc->sc_wd_timer = 5;
2192#if 0
2193 /*
2194 * Flush stale frames from the fast-frame staging queue.
2195 */
2196 if (ic->ic_opmode != IEEE80211_M_STA)
2197 ath_ff_stageq_flush(sc, txq, ath_ff_ageflushtestdone);
2198#endif
2199 }
2200}
2201
2202static int
2203ath_media_change(struct ifnet *ifp)
2204{
2205 int error = ieee80211_media_change(ifp);
2206 /* NB: only the fixed rate can change and that doesn't need a reset */
2207 return (error == ENETRESET ? 0 : error);
2208}
2209
2210#ifdef ATH_DEBUG
2211static void
2212ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
2213 const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
2214{
2215 static const char *ciphers[] = {
2216 "WEP",
2217 "AES-OCB",
2218 "AES-CCM",
2219 "CKIP",
2220 "TKIP",
2221 "CLR",
2222 };
2223 int i, n;
2224
2225 printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
2226 for (i = 0, n = hk->kv_len; i < n; i++)
2227 printf("%02x", hk->kv_val[i]);
2228 printf(" mac %s", ether_sprintf(mac));
2229 if (hk->kv_type == HAL_CIPHER_TKIP) {
2230 printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
2231 for (i = 0; i < sizeof(hk->kv_mic); i++)
2232 printf("%02x", hk->kv_mic[i]);
2233 if (!sc->sc_splitmic) {
2234 printf(" txmic ");
2235 for (i = 0; i < sizeof(hk->kv_txmic); i++)
2236 printf("%02x", hk->kv_txmic[i]);
2237 }
2238 }
2239 printf("\n");
2240}
2241#endif
2242
2243/*
2244 * Set a TKIP key into the hardware. This handles the
2245 * potential distribution of key state to multiple key
2246 * cache slots for TKIP.
2247 */
2248static int
2249ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
2250 HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
2251{
2252#define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
2253 static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
2254 struct ath_hal *ah = sc->sc_ah;
2255
2256 KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
2257 ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
2258 if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
2259 if (sc->sc_splitmic) {
2260 /*
2261 * TX key goes at first index, RX key at the rx index.
2262 * The hal handles the MIC keys at index+64.
2263 */
2264 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
2265 KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
2266 if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
2267 return 0;
2268
2269 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
2270 KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
2271 /* XXX delete tx key on failure? */
2272 return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
2273 } else {
2274 /*
2275 * Room for both TX+RX MIC keys in one key cache
2276 * slot, just set key at the first index; the hal
2277 * will handle the rest.
2278 */
2279 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
2280 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
2281 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2282 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2283 }
2284 } else if (k->wk_flags & IEEE80211_KEY_XMIT) {
2285 if (sc->sc_splitmic) {
2286 /*
2287 * NB: must pass MIC key in expected location when
2288 * the keycache only holds one MIC key per entry.
2289 */
2290 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic));
2291 } else
2292 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
2293 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2294 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2295 } else if (k->wk_flags & IEEE80211_KEY_RECV) {
2296 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
2297 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2298 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2299 }
2300 return 0;
2301#undef IEEE80211_KEY_XR
2302}
2303
2304/*
2305 * Set a net80211 key into the hardware. This handles the
2306 * potential distribution of key state to multiple key
2307 * cache slots for TKIP with hardware MIC support.
2308 */
2309static int
2310ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
2311 struct ieee80211_node *bss)
2312{
2313#define N(a) (sizeof(a)/sizeof(a[0]))
2314 static const u_int8_t ciphermap[] = {
2315 HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
2316 HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
2317 HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
2318 HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
2319 (u_int8_t) -1, /* 4 is not allocated */
2320 HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
2321 HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
2322 };
2323 struct ath_hal *ah = sc->sc_ah;
2324 const struct ieee80211_cipher *cip = k->wk_cipher;
2325 u_int8_t gmac[IEEE80211_ADDR_LEN];
2326 const u_int8_t *mac;
2327 HAL_KEYVAL hk;
2328
2329 memset(&hk, 0, sizeof(hk));
2330 /*
2331 * Software crypto uses a "clear key" so non-crypto
2332 	 * state kept in the key cache is maintained and
2333 * so that rx frames have an entry to match.
2334 */
2335 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2336 KASSERT(cip->ic_cipher < N(ciphermap),
2337 ("invalid cipher type %u", cip->ic_cipher));
2338 hk.kv_type = ciphermap[cip->ic_cipher];
2339 hk.kv_len = k->wk_keylen;
2340 memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
2341 } else
2342 hk.kv_type = HAL_CIPHER_CLR;
2343
2344 if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
2345 /*
2346 * Group keys on hardware that supports multicast frame
2347 * key search use a mac that is the sender's address with
2348 * the high bit set instead of the app-specified address.
2349 */
2350 IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
2351 gmac[0] |= 0x80;
2352 mac = gmac;
2353 } else
2354 mac = k->wk_macaddr;
2355
2356 if (hk.kv_type == HAL_CIPHER_TKIP &&
2357 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2358 return ath_keyset_tkip(sc, k, &hk, mac);
2359 } else {
2360 KEYPRINTF(sc, k->wk_keyix, &hk, mac);
2361 return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
2362 }
2363#undef N
2364}
2365
2366/*
2367 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2368 * each key, one for decrypt/encrypt and the other for the MIC.
2369 */
2370static u_int16_t
2371key_alloc_2pair(struct ath_softc *sc,
2372 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2373{
2374#define N(a) (sizeof(a)/sizeof(a[0]))
2375 u_int i, keyix;
2376
2377 KASSERT(sc->sc_splitmic, ("key cache !split"));
2378 /* XXX could optimize */
2379 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2380 u_int8_t b = sc->sc_keymap[i];
2381 if (b != 0xff) {
2382 /*
2383 * One or more slots in this byte are free.
2384 */
2385 keyix = i*NBBY;
2386 while (b & 1) {
2387 again:
2388 keyix++;
2389 b >>= 1;
2390 }
2391 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
2392 if (isset(sc->sc_keymap, keyix+32) ||
2393 isset(sc->sc_keymap, keyix+64) ||
2394 isset(sc->sc_keymap, keyix+32+64)) {
2395 /* full pair unavailable */
2396 /* XXX statistic */
2397 if (keyix == (i+1)*NBBY) {
2398 /* no slots were appropriate, advance */
2399 continue;
2400 }
2401 goto again;
2402 }
2403 setbit(sc->sc_keymap, keyix);
2404 setbit(sc->sc_keymap, keyix+64);
2405 setbit(sc->sc_keymap, keyix+32);
2406 setbit(sc->sc_keymap, keyix+32+64);
2407 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2408 "%s: key pair %u,%u %u,%u\n",
2409 __func__, keyix, keyix+64,
2410 keyix+32, keyix+32+64);
2411 *txkeyix = keyix;
2412 *rxkeyix = keyix+32;
2413 return 1;
2414 }
2415 }
2416 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2417 return 0;
2418#undef N
2419}
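
/*
 * Resulting key cache layout for one split-mic allocation above
 * (illustrative), for a base index keyix:
 *
 *	keyix		TX cipher key	keyix+64	TX MIC key
 *	keyix+32	RX cipher key	keyix+32+64	RX MIC key
 *
 * Since all four slots must exist, TKIP keys on split-mic parts
 * are effectively confined to base indices below 32.
 */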
2420
2421/*
2422 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2423 * each key, one for decrypt/encrypt and the other for the MIC.
2424 */
2425static u_int16_t
2426key_alloc_pair(struct ath_softc *sc,
2427 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2428{
2429#define N(a) (sizeof(a)/sizeof(a[0]))
2430 u_int i, keyix;
2431
2432 KASSERT(!sc->sc_splitmic, ("key cache split"));
2433 /* XXX could optimize */
2434 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2435 u_int8_t b = sc->sc_keymap[i];
2436 if (b != 0xff) {
2437 /*
2438 * One or more slots in this byte are free.
2439 */
2440 keyix = i*NBBY;
2441 while (b & 1) {
2442 again:
2443 keyix++;
2444 b >>= 1;
2445 }
2446 if (isset(sc->sc_keymap, keyix+64)) {
2447 /* full pair unavailable */
2448 /* XXX statistic */
2449 if (keyix == (i+1)*NBBY) {
2450 /* no slots were appropriate, advance */
2451 continue;
2452 }
2453 goto again;
2454 }
2455 setbit(sc->sc_keymap, keyix);
2456 setbit(sc->sc_keymap, keyix+64);
2457 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2458 "%s: key pair %u,%u\n",
2459 __func__, keyix, keyix+64);
2460 *txkeyix = *rxkeyix = keyix;
2461 return 1;
2462 }
2463 }
2464 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2465 return 0;
2466#undef N
2467}
2468
2469/*
2470 * Allocate a single key cache slot.
2471 */
2472static int
2473key_alloc_single(struct ath_softc *sc,
2474 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2475{
2476#define N(a) (sizeof(a)/sizeof(a[0]))
2477 u_int i, keyix;
2478
2479 /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
2480 for (i = 0; i < N(sc->sc_keymap); i++) {
2481 u_int8_t b = sc->sc_keymap[i];
2482 if (b != 0xff) {
2483 /*
2484 * One or more slots are free.
2485 */
2486 keyix = i*NBBY;
2487 while (b & 1)
2488 keyix++, b >>= 1;
2489 setbit(sc->sc_keymap, keyix);
2490 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
2491 __func__, keyix);
2492 *txkeyix = *rxkeyix = keyix;
2493 return 1;
2494 }
2495 }
2496 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
2497 return 0;
2498#undef N
2499}
2500
2501/*
2502  * Allocate one or more key cache slots for a unicast key. The
2503 * key itself is needed only to identify the cipher. For hardware
2504 * TKIP with split cipher+MIC keys we allocate two key cache slot
2505 * pairs so that we can setup separate TX and RX MIC keys. Note
2506 * that the MIC key for a TKIP key at slot i is assumed by the
2507 * hardware to be at slot i+64. This limits TKIP keys to the first
2508 * 64 entries.
2509 */
2510static int
2511ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
2512 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
2513{
2514 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2515
2516 /*
2517 * Group key allocation must be handled specially for
2518 * parts that do not support multicast key cache search
2519 * functionality. For those parts the key id must match
2520 * the h/w key index so lookups find the right key. On
2521 * parts w/ the key search facility we install the sender's
2522 * mac address (with the high bit set) and let the hardware
2523 * find the key w/o using the key id. This is preferred as
2524 * it permits us to support multiple users for adhoc and/or
2525 * multi-station operation.
2526 */
2527 if (k->wk_keyix != IEEE80211_KEYIX_NONE || /* global key */
2528 ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey)) {
2529 if (!(&vap->iv_nw_keys[0] <= k &&
2530 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
2531 /* should not happen */
2532 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2533 "%s: bogus group key\n", __func__);
2534 return 0;
2535 }
2536 /*
2537 * XXX we pre-allocate the global keys so
2538 		 * we have no way to check if they've already been allocated.
2539 */
2540 *keyix = *rxkeyix = k - vap->iv_nw_keys;
2541 return 1;
2542 }
2543
2544 /*
2545 * We allocate two pair for TKIP when using the h/w to do
2546 * the MIC. For everything else, including software crypto,
2547 * we allocate a single entry. Note that s/w crypto requires
2548 * a pass-through slot on the 5211 and 5212. The 5210 does
2549 * not support pass-through cache entries and we map all
2550 * those requests to slot 0.
2551 */
2552 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
2553 return key_alloc_single(sc, keyix, rxkeyix);
2554 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
2555 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2556 if (sc->sc_splitmic)
2557 return key_alloc_2pair(sc, keyix, rxkeyix);
2558 else
2559 return key_alloc_pair(sc, keyix, rxkeyix);
2560 } else {
2561 return key_alloc_single(sc, keyix, rxkeyix);
2562 }
2563}
2564
2565/*
2566 * Delete an entry in the key cache allocated by ath_key_alloc.
2567 */
2568static int
2569ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
2570{
2571 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2572 struct ath_hal *ah = sc->sc_ah;
2573 const struct ieee80211_cipher *cip = k->wk_cipher;
2574 u_int keyix = k->wk_keyix;
2575
2576 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
2577
2578 ath_hal_keyreset(ah, keyix);
2579 /*
2580 * Handle split tx/rx keying required for TKIP with h/w MIC.
2581 */
2582 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2583 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
2584 ath_hal_keyreset(ah, keyix+32); /* RX key */
2585 if (keyix >= IEEE80211_WEP_NKID) {
2586 /*
2587 * Don't touch keymap entries for global keys so
2588 * they are never considered for dynamic allocation.
2589 */
2590 clrbit(sc->sc_keymap, keyix);
2591 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2592 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2593 clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */
2594 if (sc->sc_splitmic) {
2595 /* +32 for RX key, +32+64 for RX key MIC */
2596 clrbit(sc->sc_keymap, keyix+32);
2597 clrbit(sc->sc_keymap, keyix+32+64);
2598 }
2599 }
2600 }
2601 return 1;
2602}
2603
2604/*
2605 * Set the key cache contents for the specified key. Key cache
2606 * slot(s) must already have been allocated by ath_key_alloc.
2607 */
2608static int
2609ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
2610 const u_int8_t mac[IEEE80211_ADDR_LEN])
2611{
2612 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2613
2614 return ath_keyset(sc, k, vap->iv_bss);
2615}
2616
2617/*
2618 * Block/unblock tx+rx processing while a key change is done.
2619 * We assume the caller serializes key management operations
2620 * so we only need to worry about synchronization with other
2621 * uses that originate in the driver.
2622 */
2623static void
2624ath_key_update_begin(struct ieee80211vap *vap)
2625{
2626 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2627 struct ath_softc *sc = ifp->if_softc;
2628
2629 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2630 taskqueue_block(sc->sc_tq);
2631 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2632}
2633
2634static void
2635ath_key_update_end(struct ieee80211vap *vap)
2636{
2637 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2638 struct ath_softc *sc = ifp->if_softc;
2639
2640 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2641 IF_UNLOCK(&ifp->if_snd);
2642 taskqueue_unblock(sc->sc_tq);
2643}
2644
2645/*
2646 * Calculate the receive filter according to the
2647 * operating mode and state:
2648 *
2649 * o always accept unicast, broadcast, and multicast traffic
2650  *  o accept PHY error frames when the hardware has no MIB support
2651  *    to count them and we need them for ANI (sta mode only until recently)
2652 * and we are not scanning (ANI is disabled)
2653  *    NB: older hals add rx filter bits out of sight and we need to
2654 * blindly preserve them
2655 * o probe request frames are accepted only when operating in
2656 * hostap, adhoc, or monitor modes
2657 * o enable promiscuous mode
2658 * - when in monitor mode
2659 * - if interface marked PROMISC (assumes bridge setting is filtered)
2660 * o accept beacons:
2661 * - when operating in station mode for collecting rssi data when
2662 * the station is otherwise quiet, or
2663 * - when operating in adhoc mode so the 802.11 layer creates
2664 * node table entries for peers,
2665 * - when scanning
2666 * - when doing s/w beacon miss (e.g. for ap+sta)
2667 * - when operating in ap mode in 11g to detect overlapping bss that
2668 * require protection
2669 * o accept control frames:
2670 * - when in monitor mode
2671 * XXX BAR frames for 11n
2672 * XXX HT protection for 11n
2673 */
2674static u_int32_t
2675ath_calcrxfilter(struct ath_softc *sc)
2676{
2677 struct ifnet *ifp = sc->sc_ifp;
2678 struct ieee80211com *ic = ifp->if_l2com;
2679 u_int32_t rfilt;
2680
2681 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2682 if (!sc->sc_needmib && !sc->sc_scanning)
2683 rfilt |= HAL_RX_FILTER_PHYERR;
2684 if (ic->ic_opmode != IEEE80211_M_STA)
2685 rfilt |= HAL_RX_FILTER_PROBEREQ;
2686 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
2687 rfilt |= HAL_RX_FILTER_PROM;
2688 if (ic->ic_opmode == IEEE80211_M_STA ||
2689 ic->ic_opmode == IEEE80211_M_IBSS ||
2690 sc->sc_swbmiss || sc->sc_scanning)
2691 rfilt |= HAL_RX_FILTER_BEACON;
2692 /*
2693 * NB: We don't recalculate the rx filter when
2694 * ic_protmode changes; otherwise we could do
2695 * this only when ic_protmode != NONE.
2696 */
2697 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
2698 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
2699 rfilt |= HAL_RX_FILTER_BEACON;
2700 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2701 rfilt |= HAL_RX_FILTER_CONTROL;
2702 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2703 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
2704 return rfilt;
2705}
2706
2707static void
2708ath_update_promisc(struct ifnet *ifp)
2709{
2710 struct ath_softc *sc = ifp->if_softc;
2711 u_int32_t rfilt;
2712
2713 /* configure rx filter */
2714 rfilt = ath_calcrxfilter(sc);
2715 ath_hal_setrxfilter(sc->sc_ah, rfilt);
2716
2717 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2718}
2719
2720static void
2721ath_update_mcast(struct ifnet *ifp)
2722{
2723 struct ath_softc *sc = ifp->if_softc;
2724 u_int32_t mfilt[2];
2725
2726 /* calculate and install multicast filter */
2727 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2728 struct ifmultiaddr *ifma;
2729 /*
2730 * Merge multicast addresses to form the hardware filter.
2731 */
2732 mfilt[0] = mfilt[1] = 0;
2733 IF_ADDR_LOCK(ifp); /* XXX need some fiddling to remove? */
2734 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2735 caddr_t dl;
2736 u_int32_t val;
2737 u_int8_t pos;
2738
2739 /* calculate XOR of eight 6bit values */
2740 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2741 val = LE_READ_4(dl + 0);
2742 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2743 val = LE_READ_4(dl + 3);
2744 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2745 pos &= 0x3f;
2746 mfilt[pos / 32] |= (1 << (pos % 32));
2747 }
2748 IF_ADDR_UNLOCK(ifp);
2749 } else
2750 mfilt[0] = mfilt[1] = ~0;
2751 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2752 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2753 __func__, mfilt[0], mfilt[1]);
2754}
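
/*
 * The hash above folds the 48-bit address into a 6-bit index by
 * XOR-ing eight 6-bit groups: the low 24 bits of the two LE reads
 * cover bytes 0-2 and 3-5 and each half is collapsed by the
 * >>18/>>12/>>6 shifts (anything read past byte 5 is shifted out
 * or masked by the final & 0x3f).  E.g. (illustrative) the address
 * 33:33:00:00:00:01 works out to pos = 44, i.e. bit 12 of mfilt[1].
 */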
2755
2756static void
2757ath_mode_init(struct ath_softc *sc)
2758{
2759 struct ifnet *ifp = sc->sc_ifp;
2760 struct ath_hal *ah = sc->sc_ah;
2761 u_int32_t rfilt;
2762
2763 /* configure rx filter */
2764 rfilt = ath_calcrxfilter(sc);
2765 ath_hal_setrxfilter(ah, rfilt);
2766
2767 /* configure operational mode */
2768 ath_hal_setopmode(ah);
2769
2770 /* handle any link-level address change */
2771 ath_hal_setmac(ah, IF_LLADDR(ifp));
2772
2773 /* calculate and install multicast filter */
2774 ath_update_mcast(ifp);
2775}
2776
2777/*
2778 * Set the slot time based on the current setting.
2779 */
2780static void
2781ath_setslottime(struct ath_softc *sc)
2782{
2783 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2784 struct ath_hal *ah = sc->sc_ah;
2785 u_int usec;
2786
2787 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2788 usec = 13;
2789 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2790 usec = 21;
2791 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2792 /* honor short/long slot time only in 11g */
2793 /* XXX shouldn't honor on pure g or turbo g channel */
2794 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2795 usec = HAL_SLOT_TIME_9;
2796 else
2797 usec = HAL_SLOT_TIME_20;
2798 } else
2799 usec = HAL_SLOT_TIME_9;
2800
2801 DPRINTF(sc, ATH_DEBUG_RESET,
2802 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2803 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2804 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2805
2806 ath_hal_setslottime(ah, usec);
2807 sc->sc_updateslot = OK;
2808}
2809
2810/*
2811 * Callback from the 802.11 layer to update the
2812 * slot time based on the current setting.
2813 */
2814static void
2815ath_updateslot(struct ifnet *ifp)
2816{
2817 struct ath_softc *sc = ifp->if_softc;
2818 struct ieee80211com *ic = ifp->if_l2com;
2819
2820 /*
2821 * When not coordinating the BSS, change the hardware
2822 * immediately. For other operation we defer the change
2823 * until beacon updates have propagated to the stations.
2824 */
2825 if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2826 sc->sc_updateslot = UPDATE;
2827 else
2828 ath_setslottime(sc);
2829}
2830
2831/*
2832 * Setup a h/w transmit queue for beacons.
2833 */
2834static int
2835ath_beaconq_setup(struct ath_hal *ah)
2836{
2837 HAL_TXQ_INFO qi;
2838
2839 memset(&qi, 0, sizeof(qi));
2840 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2841 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2842 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2843 /* NB: for dynamic turbo, don't enable any other interrupts */
2844 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2845 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2846}
2847
2848/*
2849 * Setup the transmit queue parameters for the beacon queue.
2850 */
2851static int
2852ath_beaconq_config(struct ath_softc *sc)
2853{
2854#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
2855 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2856 struct ath_hal *ah = sc->sc_ah;
2857 HAL_TXQ_INFO qi;
2858
2859 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2860 if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
2861 /*
2862 * Always burst out beacon and CAB traffic.
2863 */
2864 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2865 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2866 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2867 } else {
2868 struct wmeParams *wmep =
2869 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2870 /*
2871		 * Adhoc mode; the important thing is to use 2x cwmin.
2872 */
2873 qi.tqi_aifs = wmep->wmep_aifsn;
2874 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2875 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2876 }
2877
2878 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2879 device_printf(sc->sc_dev, "unable to update parameters for "
2880 "beacon hardware queue!\n");
2881 return 0;
2882 } else {
2883 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2884 return 1;
2885 }
2886#undef ATH_EXPONENT_TO_VALUE
2887}
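/*
 * (Editorial note.)  ATH_EXPONENT_TO_VALUE turns the WME log2-encoded
 * contention window into a window size in slots: e.g. an illustrative
 * best-effort logcwmin of 4 yields (1<<4)-1 = 15, so the adhoc path
 * above would program tqi_cwmin = 2*15 = 30, the "2x cwmin" called
 * out in the comment.
 */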
2888
2889/*
2890 * Allocate and setup an initial beacon frame.
2891 */
2892static int
2893ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2894{
2895 struct ieee80211vap *vap = ni->ni_vap;
2896 struct ath_vap *avp = ATH_VAP(vap);
2897 struct ath_buf *bf;
2898 struct mbuf *m;
2899 int error;
2900
2901 bf = avp->av_bcbuf;
2902 if (bf->bf_m != NULL) {
2903 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2904 m_freem(bf->bf_m);
2905 bf->bf_m = NULL;
2906 }
2907 if (bf->bf_node != NULL) {
2908 ieee80211_free_node(bf->bf_node);
2909 bf->bf_node = NULL;
2910 }
2911
2912 /*
2913 * NB: the beacon data buffer must be 32-bit aligned;
2914 * we assume the mbuf routines will return us something
2915 * with this alignment (perhaps should assert).
2916 */
2917 m = ieee80211_beacon_alloc(ni, &avp->av_boff);
2918 if (m == NULL) {
2919 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2920 sc->sc_stats.ast_be_nombuf++;
2921 return ENOMEM;
2922 }
2923 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2924 bf->bf_segs, &bf->bf_nseg,
2925 BUS_DMA_NOWAIT);
2926 if (error != 0) {
2927 device_printf(sc->sc_dev,
2928 "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
2929 __func__, error);
2930 m_freem(m);
2931 return error;
2932 }
2933
2934 /*
2935 * Calculate a TSF adjustment factor required for staggered
2936 * beacons. Note that we assume the format of the beacon
2937 * frame leaves the tstamp field immediately following the
2938 * header.
2939 */
2940 if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2941 uint64_t tsfadjust;
2942 struct ieee80211_frame *wh;
2943
2944 /*
2945 * The beacon interval is in TU's; the TSF is in usecs.
2946 * We figure out how many TU's to add to align the timestamp
2947 * then convert to TSF units and handle byte swapping before
2948 * inserting it in the frame. The hardware will then add this
2949 * each time a beacon frame is sent. Note that we align vap's
2950 * 1..N and leave vap 0 untouched. This means vap 0 has a
2951 * timestamp in one beacon interval while the others get a
2952		 * timestamp aligned to the next interval.
2953 */
2954 tsfadjust = ni->ni_intval *
2955 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2956 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
2957
2958 DPRINTF(sc, ATH_DEBUG_BEACON,
2959 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2960 __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2961 avp->av_bslot, ni->ni_intval,
2962 (long long unsigned) le64toh(tsfadjust));
2963
2964 wh = mtod(m, struct ieee80211_frame *);
2965 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2966 }
2967 bf->bf_m = m;
2968 bf->bf_node = ieee80211_ref_node(ni);
2969
2970 return 0;
2971}
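/*
 * (Editorial worked example, with illustrative values.)  Assuming
 * ATH_BCBUF = 4, a beacon interval of 100 TU, and av_bslot = 1, the
 * adjustment above is
 *
 *	tsfadjust = 100 * (4 - 1) / 4 = 75 TU = 75 << 10 = 76800 usec
 *
 * stored little-endian in the tstamp field; slot 2 would get 50 TU
 * and slot 3 would get 25 TU, while slot 0 is left untouched, so each
 * vap's advertised timestamp lands on its own staggered TBTT.
 */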
2972
2973/*
2974 * Setup the beacon frame for transmit.
2975 */
2976static void
2977ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2978{
2979#define USE_SHPREAMBLE(_ic) \
2980 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2981 == IEEE80211_F_SHPREAMBLE)
2982 struct ieee80211_node *ni = bf->bf_node;
2983 struct ieee80211com *ic = ni->ni_ic;
2984 struct mbuf *m = bf->bf_m;
2985 struct ath_hal *ah = sc->sc_ah;
2986 struct ath_desc *ds;
2987 int flags, antenna;
2988 const HAL_RATE_TABLE *rt;
2989 u_int8_t rix, rate;
2990
2991 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2992 __func__, m, m->m_len);
2993
2994 /* setup descriptors */
2995 ds = bf->bf_desc;
2996
2997 flags = HAL_TXDESC_NOACK;
2998 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2999 ds->ds_link = bf->bf_daddr; /* self-linked */
3000 flags |= HAL_TXDESC_VEOL;
3001 /*
3002 * Let hardware handle antenna switching.
3003 */
3004 antenna = sc->sc_txantenna;
3005 } else {
3006 ds->ds_link = 0;
3007 /*
3008 * Switch antenna every 4 beacons.
3009		 * XXX assumes two antennas
3010 */
3011 if (sc->sc_txantenna != 0)
3012 antenna = sc->sc_txantenna;
3013 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
3014 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
3015 else
3016 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
3017 }
3018
3019 KASSERT(bf->bf_nseg == 1,
3020 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
3021 ds->ds_data = bf->bf_segs[0].ds_addr;
3022 /*
3023 * Calculate rate code.
3024 * XXX everything at min xmit rate
3025 */
3026 rix = 0;
3027 rt = sc->sc_currates;
3028 rate = rt->info[rix].rateCode;
3029 if (USE_SHPREAMBLE(ic))
3030 rate |= rt->info[rix].shortPreamble;
3031 ath_hal_setuptxdesc(ah, ds
3032 , m->m_len + IEEE80211_CRC_LEN /* frame length */
3033 , sizeof(struct ieee80211_frame)/* header length */
3034 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
3035 , ni->ni_txpower /* txpower XXX */
3036 , rate, 1 /* series 0 rate/tries */
3037 , HAL_TXKEYIX_INVALID /* no encryption */
3038 , antenna /* antenna mode */
3039 , flags /* no ack, veol for beacons */
3040 , 0 /* rts/cts rate */
3041 , 0 /* rts/cts duration */
3042 );
3043 /* NB: beacon's BufLen must be a multiple of 4 bytes */
3044 ath_hal_filltxdesc(ah, ds
3045 , roundup(m->m_len, 4) /* buffer length */
3046 , AH_TRUE /* first segment */
3047 , AH_TRUE /* last segment */
3048 , ds /* first descriptor */
3049 );
3050#if 0
3051 ath_desc_swap(ds);
3052#endif
3053#undef USE_SHPREAMBLE
3054}
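/*
 * (Editorial note.)  The "& 4" in the antenna selection above tests
 * bit 2 of the beacon transmit count, which toggles every four
 * increments, giving the sequence 1,1,1,1,2,2,2,2,... that implements
 * "switch antenna every 4 beacons" for the assumed two-antenna case.
 */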
3055
3056static void
3057ath_beacon_update(struct ieee80211vap *vap, int item)
3058{
3059 struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
3060
3061 setbit(bo->bo_flags, item);
3062}
3063
3064/*
3065 * Append the contents of src to dst; both queues
3066 * are assumed to be locked.
3067 */
3068static void
3069ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
3070{
3071 STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
3072 dst->axq_link = src->axq_link;
3073 src->axq_link = NULL;
3074 dst->axq_depth += src->axq_depth;
3075 src->axq_depth = 0;
3076}
3077
3078/*
3079 * Transmit a beacon frame at SWBA. Dynamic updates to the
3080 * frame contents are done as needed and the slot time is
3081 * also adjusted based on current state.
3082 */
3083static void
3084ath_beacon_proc(void *arg, int pending)
3085{
3086 struct ath_softc *sc = arg;
3087 struct ath_hal *ah = sc->sc_ah;
3088 struct ieee80211vap *vap;
3089 struct ath_buf *bf;
3090 int slot, otherant;
3091 uint32_t bfaddr;
3092
3093 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
3094 __func__, pending);
3095 /*
3096 * Check if the previous beacon has gone out. If
3097 * not don't try to post another, skip this period
3098 * and wait for the next. Missed beacons indicate
3099 * a problem and should not occur. If we miss too
3100	 * many consecutive beacons, reset the device.
3101 */
3102 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
3103 sc->sc_bmisscount++;
3104 DPRINTF(sc, ATH_DEBUG_BEACON,
3105 "%s: missed %u consecutive beacons\n",
3106 __func__, sc->sc_bmisscount);
3107 if (sc->sc_bmisscount >= ath_bstuck_threshold)
3108 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
3109 return;
3110 }
3111 if (sc->sc_bmisscount != 0) {
3112 DPRINTF(sc, ATH_DEBUG_BEACON,
3113 "%s: resume beacon xmit after %u misses\n",
3114 __func__, sc->sc_bmisscount);
3115 sc->sc_bmisscount = 0;
3116 }
3117
3118 if (sc->sc_stagbeacons) { /* staggered beacons */
3119 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3120 uint32_t tsftu;
3121
3122 tsftu = ath_hal_gettsf32(ah) >> 10;
3123 /* XXX lintval */
3124 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
3125 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
3126 bfaddr = 0;
3127 if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
3128 bf = ath_beacon_generate(sc, vap);
3129 if (bf != NULL)
3130 bfaddr = bf->bf_daddr;
3131 }
3132 } else { /* burst'd beacons */
3133 uint32_t *bflink = &bfaddr;
3134
3135 for (slot = 0; slot < ATH_BCBUF; slot++) {
3136 vap = sc->sc_bslot[slot];
3137 if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
3138 bf = ath_beacon_generate(sc, vap);
3139 if (bf != NULL) {
3140 *bflink = bf->bf_daddr;
3141 bflink = &bf->bf_desc->ds_link;
3142 }
3143 }
3144 }
3145 *bflink = 0; /* terminate list */
3146 }
3147
3148 /*
3149 * Handle slot time change when a non-ERP station joins/leaves
3150 * an 11g network. The 802.11 layer notifies us via callback,
3151 * we mark updateslot, then wait one beacon before effecting
3152 * the change. This gives associated stations at least one
3153 * beacon interval to note the state change.
3154 */
3155 /* XXX locking */
3156 if (sc->sc_updateslot == UPDATE) {
3157 sc->sc_updateslot = COMMIT; /* commit next beacon */
3158 sc->sc_slotupdate = slot;
3159 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
3160 ath_setslottime(sc); /* commit change to h/w */
3161
3162 /*
3163 * Check recent per-antenna transmit statistics and flip
3164 * the default antenna if noticeably more frames went out
3165 * on the non-default antenna.
3166	 * XXX assumes 2 antennas
3167 */
3168 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
3169 otherant = sc->sc_defant & 1 ? 2 : 1;
3170 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
3171 ath_setdefantenna(sc, otherant);
3172 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
3173 }
3174
3175 if (bfaddr != 0) {
3176 /*
3177 * Stop any current dma and put the new frame on the queue.
3178 * This should never fail since we check above that no frames
3179 * are still pending on the queue.
3180 */
3181 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
3182 DPRINTF(sc, ATH_DEBUG_ANY,
3183 "%s: beacon queue %u did not stop?\n",
3184 __func__, sc->sc_bhalq);
3185 }
3186 /* NB: cabq traffic should already be queued and primed */
3187 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
3188 ath_hal_txstart(ah, sc->sc_bhalq);
3189
3190 sc->sc_stats.ast_be_xmit++;
3191 }
3192}
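/*
 * (Editorial worked example, with illustrative values.)  For the
 * staggered slot calculation above, assume ATH_BCBUF = 4 and
 * ic_lintval = 100 TU.  If the 32-bit TSF reads 193536 usec then
 * tsftu = 193536 >> 10 = 189 TU and
 *
 *	slot = ((189 % 100) * 4) / 100 = (89 * 4) / 100 = 3
 *
 * so the vap in bslot (3 + 1) % 4 = 0 is beaconed; the +1 presumably
 * accounts for SWBA being delivered ahead of the TBTT it prepares.
 */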
3193
3194static struct ath_buf *
3195ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
3196{
3197 struct ath_vap *avp = ATH_VAP(vap);
3198 struct ath_txq *cabq = sc->sc_cabq;
3199 struct ath_buf *bf;
3200 struct mbuf *m;
3201 int nmcastq, error;
3202
3203 KASSERT(vap->iv_state == IEEE80211_S_RUN,
3204 ("not running, state %d", vap->iv_state));
3205 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3206
3207 /*
3208 * Update dynamic beacon contents. If this returns
3209 * non-zero then we need to remap the memory because
3210 * the beacon frame changed size (probably because
3211 * of the TIM bitmap).
3212 */
3213 bf = avp->av_bcbuf;
3214 m = bf->bf_m;
3215 nmcastq = avp->av_mcastq.axq_depth;
3216 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
3217 /* XXX too conservative? */
3218 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3219 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3220 bf->bf_segs, &bf->bf_nseg,
3221 BUS_DMA_NOWAIT);
3222 if (error != 0) {
3223 if_printf(vap->iv_ifp,
3224 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3225 __func__, error);
3226 return NULL;
3227 }
3228 }
3229 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
3230 DPRINTF(sc, ATH_DEBUG_BEACON,
3231 "%s: cabq did not drain, mcastq %u cabq %u\n",
3232 __func__, nmcastq, cabq->axq_depth);
3233 sc->sc_stats.ast_cabq_busy++;
3234 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
3235 /*
3236 * CABQ traffic from a previous vap is still pending.
3237 * We must drain the q before this beacon frame goes
3238 * out as otherwise this vap's stations will get cab
3239 * frames from a different vap.
3240 * XXX could be slow causing us to miss DBA
3241 */
3242 ath_tx_draintxq(sc, cabq);
3243 }
3244 }
3245 ath_beacon_setup(sc, bf);
3246 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3247
3248 /*
3249 * Enable the CAB queue before the beacon queue to
3250	 * ensure cab frames are triggered by this beacon.
3251 */
3252 if (avp->av_boff.bo_tim[4] & 1) {
3253 struct ath_hal *ah = sc->sc_ah;
3254
3255 /* NB: only at DTIM */
3256 ATH_TXQ_LOCK(cabq);
3257 ATH_TXQ_LOCK(&avp->av_mcastq);
3258 if (nmcastq) {
3259 struct ath_buf *bfm;
3260
3261 /*
3262 * Move frames from the s/w mcast q to the h/w cab q.
3263 * XXX MORE_DATA bit
3264 */
3265 bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
3266 if (cabq->axq_link != NULL) {
3267 *cabq->axq_link = bfm->bf_daddr;
3268 } else
3269 ath_hal_puttxbuf(ah, cabq->axq_qnum,
3270 bfm->bf_daddr);
3271 ath_txqmove(cabq, &avp->av_mcastq);
3272
3273 sc->sc_stats.ast_cabq_xmit += nmcastq;
3274 }
3275 /* NB: gated by beacon so safe to start here */
3276 ath_hal_txstart(ah, cabq->axq_qnum);
3277 ATH_TXQ_UNLOCK(cabq);
3278 ATH_TXQ_UNLOCK(&avp->av_mcastq);
3279 }
3280 return bf;
3281}
3282
3283static void
3284ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
3285{
3286 struct ath_vap *avp = ATH_VAP(vap);
3287 struct ath_hal *ah = sc->sc_ah;
3288 struct ath_buf *bf;
3289 struct mbuf *m;
3290 int error;
3291
3292 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3293
3294 /*
3295 * Update dynamic beacon contents. If this returns
3296 * non-zero then we need to remap the memory because
3297 * the beacon frame changed size (probably because
3298 * of the TIM bitmap).
3299 */
3300 bf = avp->av_bcbuf;
3301 m = bf->bf_m;
3302 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
3303 /* XXX too conservative? */
3304 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3305 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
3306 bf->bf_segs, &bf->bf_nseg,
3307 BUS_DMA_NOWAIT);
3308 if (error != 0) {
3309 if_printf(vap->iv_ifp,
3310 "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
3311 __func__, error);
3312 return;
3313 }
3314 }
3315 ath_beacon_setup(sc, bf);
3316 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3317
3318 /* NB: caller is known to have already stopped tx dma */
3319 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
3320 ath_hal_txstart(ah, sc->sc_bhalq);
3321}
3322
3323/*
3324 * Reset the hardware after detecting beacons have stopped.
3325 */
3326static void
3327ath_bstuck_proc(void *arg, int pending)
3328{
3329 struct ath_softc *sc = arg;
3330 struct ifnet *ifp = sc->sc_ifp;
3331
3332 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3333 sc->sc_bmisscount);
3334 sc->sc_stats.ast_bstuck++;
3335 ath_reset(ifp);
3336}
3337
3338/*
3339 * Reclaim beacon resources and return buffer to the pool.
3340 */
3341static void
3342ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3343{
3344
3345 if (bf->bf_m != NULL) {
3346 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3347 m_freem(bf->bf_m);
3348 bf->bf_m = NULL;
3349 }
3350 if (bf->bf_node != NULL) {
3351 ieee80211_free_node(bf->bf_node);
3352 bf->bf_node = NULL;
3353 }
3354 STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3355}
3356
3357/*
3358 * Reclaim beacon resources.
3359 */
3360static void
3361ath_beacon_free(struct ath_softc *sc)
3362{
3363 struct ath_buf *bf;
3364
3365 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3366 if (bf->bf_m != NULL) {
3367 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3368 m_freem(bf->bf_m);
3369 bf->bf_m = NULL;
3370 }
3371 if (bf->bf_node != NULL) {
3372 ieee80211_free_node(bf->bf_node);
3373 bf->bf_node = NULL;
3374 }
3375 }
3376}
3377
3378/*
3379 * Configure the beacon and sleep timers.
3380 *
3381 * When operating as an AP this resets the TSF and sets
3382 * up the hardware to notify us when we need to issue beacons.
3383 *
3384 * When operating in station mode this sets up the beacon
3385 * timers according to the timestamp of the last received
3386 * beacon and the current TSF, configures PCF and DTIM
3387 * handling, programs the sleep registers so the hardware
3388 * will wakeup in time to receive beacons, and configures
3389 * the beacon miss handling so we'll receive a BMISS
3390 * interrupt when we stop seeing beacons from the AP
3391 * we've associated with.
3392 */
3393static void
3394ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
3395{
3396#define TSF_TO_TU(_h,_l) \
3397 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
3398#define FUDGE 2
3399 struct ath_hal *ah = sc->sc_ah;
3400 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3401 struct ieee80211_node *ni;
3402 u_int32_t nexttbtt, intval, tsftu;
3403 u_int64_t tsf;
3404
3405 if (vap == NULL)
3406 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
3407 ni = vap->iv_bss;
3408
3409 /* extract tstamp from last beacon and convert to TU */
3410 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
3411 LE_READ_4(ni->ni_tstamp.data));
3412 if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
3413 /*
3414 * For multi-bss ap support beacons are either staggered
3415 * evenly over N slots or burst together. For the former
3416 * arrange for the SWBA to be delivered for each slot.
3417 * Slots that are not occupied will generate nothing.
3418 */
3419 /* NB: the beacon interval is kept internally in TU's */
3420 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3421 if (sc->sc_stagbeacons)
3422 intval /= ATH_BCBUF;
3423 } else {
3424 /* NB: the beacon interval is kept internally in TU's */
3425 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3426 }
3427 if (nexttbtt == 0) /* e.g. for ap mode */
3428 nexttbtt = intval;
3429 else if (intval) /* NB: can be 0 for monitor mode */
3430 nexttbtt = roundup(nexttbtt, intval);
3431 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
3432 __func__, nexttbtt, intval, ni->ni_intval);
3433 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
3434 HAL_BEACON_STATE bs;
3435 int dtimperiod, dtimcount;
3436 int cfpperiod, cfpcount;
3437
3438 /*
3439 * Setup dtim and cfp parameters according to
3440 * last beacon we received (which may be none).
3441 */
3442 dtimperiod = ni->ni_dtim_period;
3443 if (dtimperiod <= 0) /* NB: 0 if not known */
3444 dtimperiod = 1;
3445 dtimcount = ni->ni_dtim_count;
3446 if (dtimcount >= dtimperiod) /* NB: sanity check */
3447 dtimcount = 0; /* XXX? */
3448 cfpperiod = 1; /* NB: no PCF support yet */
3449 cfpcount = 0;
3450 /*
3451 * Pull nexttbtt forward to reflect the current
3452 * TSF and calculate dtim+cfp state for the result.
3453 */
3454 tsf = ath_hal_gettsf64(ah);
3455 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3456 do {
3457 nexttbtt += intval;
3458 if (--dtimcount < 0) {
3459 dtimcount = dtimperiod - 1;
3460 if (--cfpcount < 0)
3461 cfpcount = cfpperiod - 1;
3462 }
3463 } while (nexttbtt < tsftu);
3464 memset(&bs, 0, sizeof(bs));
3465 bs.bs_intval = intval;
3466 bs.bs_nexttbtt = nexttbtt;
3467 bs.bs_dtimperiod = dtimperiod*intval;
3468 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3469 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3470 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3471 bs.bs_cfpmaxduration = 0;
3472#if 0
3473 /*
3474 * The 802.11 layer records the offset to the DTIM
3475 * bitmap while receiving beacons; use it here to
3476 * enable h/w detection of our AID being marked in
3477 * the bitmap vector (to indicate frames for us are
3478 * pending at the AP).
3479 * XXX do DTIM handling in s/w to WAR old h/w bugs
3480 * XXX enable based on h/w rev for newer chips
3481 */
3482 bs.bs_timoffset = ni->ni_timoff;
3483#endif
3484 /*
3485 * Calculate the number of consecutive beacons to miss
3486 * before taking a BMISS interrupt.
3487 * Note that we clamp the result to at most 10 beacons.
3488 */
3489 bs.bs_bmissthreshold = vap->iv_bmissthreshold;
3490 if (bs.bs_bmissthreshold > 10)
3491 bs.bs_bmissthreshold = 10;
3492 else if (bs.bs_bmissthreshold <= 0)
3493 bs.bs_bmissthreshold = 1;
3494
3495 /*
3496 * Calculate sleep duration. The configuration is
3497		 * given in ms. We ensure a multiple of the beacon
3498		 * period is used. Also, if the sleep duration is
3499		 * greater than the DTIM period then it makes sense
3500 * to make it a multiple of that.
3501 *
3502 * XXX fixed at 100ms
3503 */
3504 bs.bs_sleepduration =
3505 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3506 if (bs.bs_sleepduration > bs.bs_dtimperiod)
3507 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
3508
3509 DPRINTF(sc, ATH_DEBUG_BEACON,
3510 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3511 , __func__
3512 , tsf, tsftu
3513 , bs.bs_intval
3514 , bs.bs_nexttbtt
3515 , bs.bs_dtimperiod
3516 , bs.bs_nextdtim
3517 , bs.bs_bmissthreshold
3518 , bs.bs_sleepduration
3519 , bs.bs_cfpperiod
3520 , bs.bs_cfpmaxduration
3521 , bs.bs_cfpnext
3522 , bs.bs_timoffset
3523 );
3524 ath_hal_intrset(ah, 0);
3525 ath_hal_beacontimers(ah, &bs);
3526 sc->sc_imask |= HAL_INT_BMISS;
3527 ath_hal_intrset(ah, sc->sc_imask);
3528 } else {
3529 ath_hal_intrset(ah, 0);
3530 if (nexttbtt == intval)
3531 intval |= HAL_BEACON_RESET_TSF;
3532 if (ic->ic_opmode == IEEE80211_M_IBSS) {
3533 /*
3534 * In IBSS mode enable the beacon timers but only
3535 * enable SWBA interrupts if we need to manually
3536 * prepare beacon frames. Otherwise we use a
3537 * self-linked tx descriptor and let the hardware
3538 * deal with things.
3539 */
3540 intval |= HAL_BEACON_ENA;
3541 if (!sc->sc_hasveol)
3542 sc->sc_imask |= HAL_INT_SWBA;
3543 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3544 /*
3545 * Pull nexttbtt forward to reflect
3546 * the current TSF.
3547 */
3548 tsf = ath_hal_gettsf64(ah);
3549 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3550 do {
3551 nexttbtt += intval;
3552 } while (nexttbtt < tsftu);
3553 }
3554 ath_beaconq_config(sc);
3555 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
3556 /*
3557 * In AP mode we enable the beacon timers and
3558 * SWBA interrupts to prepare beacon frames.
3559 */
3560 intval |= HAL_BEACON_ENA;
3561 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
3562 ath_beaconq_config(sc);
3563 }
3564 ath_hal_beaconinit(ah, nexttbtt, intval);
3565 sc->sc_bmisscount = 0;
3566 ath_hal_intrset(ah, sc->sc_imask);
3567 /*
3568 * When using a self-linked beacon descriptor in
3569 * ibss mode load it once here.
3570 */
3571 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
3572 ath_beacon_start_adhoc(sc, vap);
3573 }
3574 sc->sc_syncbeacon = 0;
3575#undef FUDGE
3576#undef TSF_TO_TU
3577}
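/*
 * (Editorial note.)  TSF_TO_TU above is a 64-bit divide by 1024
 * (1 TU = 1024 usec) done with shifts: the low word contributes
 * l >> 10 and the high word, worth 2^32 usec, contributes
 * h << (32 - 10) = h << 22.  E.g. for tsf = 0x100000800 usec,
 * TSF_TO_TU(0x1, 0x800) = (1 << 22) | 2 = 0x400002 TU, matching
 * 0x100000800 / 1024.  The result is truncated to 32 bits of TU.
 */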
3578
3579static void
3580ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3581{
3582 bus_addr_t *paddr = (bus_addr_t*) arg;
3583 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3584 *paddr = segs->ds_addr;
3585}
3586
3587static int
3588ath_descdma_setup(struct ath_softc *sc,
3589 struct ath_descdma *dd, ath_bufhead *head,
3590 const char *name, int nbuf, int ndesc)
3591{
3592#define DS2PHYS(_dd, _ds) \
3593 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3594 struct ifnet *ifp = sc->sc_ifp;
3595 struct ath_desc *ds;
3596 struct ath_buf *bf;
3597 int i, bsize, error;
3598
3599 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3600 __func__, name, nbuf, ndesc);
3601
3602 dd->dd_name = name;
3603 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
3604
3605 /*
3606 * Setup DMA descriptor area.
3607 */
3608 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
3609 PAGE_SIZE, 0, /* alignment, bounds */
3610 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3611 BUS_SPACE_MAXADDR, /* highaddr */
3612 NULL, NULL, /* filter, filterarg */
3613 dd->dd_desc_len, /* maxsize */
3614 1, /* nsegments */
3615 dd->dd_desc_len, /* maxsegsize */
3616 BUS_DMA_ALLOCNOW, /* flags */
3617 NULL, /* lockfunc */
3618 NULL, /* lockarg */
3619 &dd->dd_dmat);
3620 if (error != 0) {
3621 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3622 return error;
3623 }
3624
3625 /* allocate descriptors */
3626 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3627 if (error != 0) {
3628 if_printf(ifp, "unable to create dmamap for %s descriptors, "
3629 "error %u\n", dd->dd_name, error);
3630 goto fail0;
3631 }
3632
3633 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3634 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3635 &dd->dd_dmamap);
3636 if (error != 0) {
3637 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3638 "error %u\n", nbuf * ndesc, dd->dd_name, error);
3639 goto fail1;
3640 }
3641
3642 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3643 dd->dd_desc, dd->dd_desc_len,
3644 ath_load_cb, &dd->dd_desc_paddr,
3645 BUS_DMA_NOWAIT);
3646 if (error != 0) {
3647 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3648 dd->dd_name, error);
3649 goto fail2;
3650 }
3651
3652 ds = dd->dd_desc;
3653 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3654 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3655 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3656
3657 /* allocate rx buffers */
3658 bsize = sizeof(struct ath_buf) * nbuf;
3659 bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3660 if (bf == NULL) {
3661 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3662 dd->dd_name, bsize);
3663 goto fail3;
3664 }
3665 dd->dd_bufptr = bf;
3666
3667 STAILQ_INIT(head);
3668 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3669 bf->bf_desc = ds;
3670 bf->bf_daddr = DS2PHYS(dd, ds);
3671 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3672 &bf->bf_dmamap);
3673 if (error != 0) {
3674 if_printf(ifp, "unable to create dmamap for %s "
3675 "buffer %u, error %u\n", dd->dd_name, i, error);
3676 ath_descdma_cleanup(sc, dd, head);
3677 return error;
3678 }
3679 STAILQ_INSERT_TAIL(head, bf, bf_list);
3680 }
3681 return 0;
3682fail3:
3683 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3684fail2:
3685 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3686fail1:
3687 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3688fail0:
3689 bus_dma_tag_destroy(dd->dd_dmat);
3690 memset(dd, 0, sizeof(*dd));
3691 return error;
3692#undef DS2PHYS
3693}
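/*
 * (Editorial note.)  DS2PHYS above converts a descriptor's kernel
 * virtual address to its bus address by plain offset arithmetic; this
 * is valid only because the whole descriptor block is loaded as a
 * single contiguous segment (nsegments = 1 in the tag).  E.g. with an
 * illustrative dd_desc_paddr of 0x1f00000, the descriptor 0x80 bytes
 * into dd_desc maps to 0x1f00080.
 */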
3694
3695static void
3696ath_descdma_cleanup(struct ath_softc *sc,
3697 struct ath_descdma *dd, ath_bufhead *head)
3698{
3699 struct ath_buf *bf;
3700 struct ieee80211_node *ni;
3701
3702 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3703 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3704 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3705 bus_dma_tag_destroy(dd->dd_dmat);
3706
3707 STAILQ_FOREACH(bf, head, bf_list) {
3708 if (bf->bf_m) {
3709 m_freem(bf->bf_m);
3710 bf->bf_m = NULL;
3711 }
3712 if (bf->bf_dmamap != NULL) {
3713 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3714 bf->bf_dmamap = NULL;
3715 }
3716 ni = bf->bf_node;
3717 bf->bf_node = NULL;
3718 if (ni != NULL) {
3719 /*
3720 * Reclaim node reference.
3721 */
3722 ieee80211_free_node(ni);
3723 }
3724 }
3725
3726 STAILQ_INIT(head);
3727 free(dd->dd_bufptr, M_ATHDEV);
3728 memset(dd, 0, sizeof(*dd));
3729}
3730
3731static int
3732ath_desc_alloc(struct ath_softc *sc)
3733{
3734 int error;
3735
3736 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3737 "rx", ath_rxbuf, 1);
3738 if (error != 0)
3739 return error;
3740
3741 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3742 "tx", ath_txbuf, ATH_TXDESC);
3743 if (error != 0) {
3744 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3745 return error;
3746 }
3747
3748 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3749 "beacon", ATH_BCBUF, 1);
3750 if (error != 0) {
3751 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3752 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3753 return error;
3754 }
3755 return 0;
3756}
3757
3758static void
3759ath_desc_free(struct ath_softc *sc)
3760{
3761
3762 if (sc->sc_bdma.dd_desc_len != 0)
3763 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3764 if (sc->sc_txdma.dd_desc_len != 0)
3765 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3766 if (sc->sc_rxdma.dd_desc_len != 0)
3767 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3768}
3769
3770static struct ieee80211_node *
3771ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3772{
3773 struct ieee80211com *ic = vap->iv_ic;
3774 struct ath_softc *sc = ic->ic_ifp->if_softc;
3775 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3776 struct ath_node *an;
3777
3778 an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3779 if (an == NULL) {
3780 /* XXX stat+msg */
3781 return NULL;
3782 }
3783 ath_rate_node_init(sc, an);
3784
3785 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3786 return &an->an_node;
3787}
3788
3789static void
3790ath_node_free(struct ieee80211_node *ni)
3791{
3792 struct ieee80211com *ic = ni->ni_ic;
3793 struct ath_softc *sc = ic->ic_ifp->if_softc;
3794
3795 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3796
3797 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3798 sc->sc_node_free(ni);
3799}
3800
3801static void
3802ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3803{
3804 struct ieee80211com *ic = ni->ni_ic;
3805 struct ath_softc *sc = ic->ic_ifp->if_softc;
3806 struct ath_hal *ah = sc->sc_ah;
3807
3808 *rssi = ic->ic_node_getrssi(ni);
3809 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3810 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3811 else
3812 *noise = -95; /* nominally correct */
3813}
3814
3815static int
3816ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
3817{
3818 struct ath_hal *ah = sc->sc_ah;
3819 int error;
3820 struct mbuf *m;
3821 struct ath_desc *ds;
3822
3823 m = bf->bf_m;
3824 if (m == NULL) {
3825 /*
3826 * NB: by assigning a page to the rx dma buffer we
3827 * implicitly satisfy the Atheros requirement that
3828 * this buffer be cache-line-aligned and sized to be
3829		 * a multiple of the cache line size. Not doing this
3830 * causes weird stuff to happen (for the 5210 at least).
3831 */
3832 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3833 if (m == NULL) {
3834 DPRINTF(sc, ATH_DEBUG_ANY,
3835 "%s: no mbuf/cluster\n", __func__);
3836 sc->sc_stats.ast_rx_nombuf++;
3837 return ENOMEM;
3838 }
3839 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
3840
3841 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
3842 bf->bf_dmamap, m,
3843 bf->bf_segs, &bf->bf_nseg,
3844 BUS_DMA_NOWAIT);
3845 if (error != 0) {
3846 DPRINTF(sc, ATH_DEBUG_ANY,
3847 "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
3848 __func__, error);
3849 sc->sc_stats.ast_rx_busdma++;
3850 m_freem(m);
3851 return error;
3852 }
3853 KASSERT(bf->bf_nseg == 1,
3854 ("multi-segment packet; nseg %u", bf->bf_nseg));
3855 bf->bf_m = m;
3856 }
3857 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
3858
3859 /*
3860 * Setup descriptors. For receive we always terminate
3861 * the descriptor list with a self-linked entry so we'll
3862 * not get overrun under high load (as can happen with a
3863 * 5212 when ANI processing enables PHY error frames).
3864 *
3865	 * To ensure the last descriptor is self-linked we create
3866 * each descriptor as self-linked and add it to the end. As
3867 * each additional descriptor is added the previous self-linked
3868 * entry is ``fixed'' naturally. This should be safe even
3869 * if DMA is happening. When processing RX interrupts we
3870 * never remove/process the last, self-linked, entry on the
3871	 * descriptor list. This ensures the hardware always has
3872 * someplace to write a new frame.
3873 */
3874 ds = bf->bf_desc;
3875 ds->ds_link = bf->bf_daddr; /* link to self */
3876 ds->ds_data = bf->bf_segs[0].ds_addr;
3877 ath_hal_setuprxdesc(ah, ds
3878 , m->m_len /* buffer size */
3879 , 0
3880 );
3881
3882 if (sc->sc_rxlink != NULL)
3883 *sc->sc_rxlink = bf->bf_daddr;
3884 sc->sc_rxlink = &ds->ds_link;
3885 return 0;
3886}
3887
3888/*
3889 * Extend 15-bit time stamp from rx descriptor to
3890 * a full 64-bit TSF using the specified TSF.
3891 */
3892static __inline u_int64_t
3893ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
3894{
3895 if ((tsf & 0x7fff) < rstamp)
3896 tsf -= 0x8000;
3897 return ((tsf &~ 0x7fff) | rstamp);
3898}
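/*
 * (Editorial worked example, with illustrative values.)  Suppose
 * tsf = 0x12345678 (low 15 bits 0x5678) and the descriptor carries
 * rstamp = 0x7000.  Since 0x5678 < 0x7000 the 15-bit counter wrapped
 * between frame receipt and the TSF read, so back up one 0x8000-usec
 * window:
 *
 *	0x12345678 - 0x8000   = 0x1233d678
 *	(...) & ~0x7fff       = 0x12338000
 *	(...) | 0x7000        = 0x1233f000
 *
 * giving a reconstructed receive time just before the current TSF.
 */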
3899
3900/*
3901 * Intercept management frames to collect beacon rssi data
3902 * and to do ibss merges.
3903 */
3904static void
3905ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
3906 int subtype, int rssi, int noise, u_int32_t rstamp)
3907{
3908 struct ieee80211vap *vap = ni->ni_vap;
3909 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
3910
3911 /*
3912 * Call up first so subsequent work can use information
3913 * potentially stored in the node (e.g. for ibss merge).
3914 */
3915 ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, noise, rstamp);
3916 switch (subtype) {
3917 case IEEE80211_FC0_SUBTYPE_BEACON:
3918 /* update rssi statistics for use by the hal */
3919 ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
3920 if (sc->sc_syncbeacon &&
3921 ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
3922 /*
3923 * Resync beacon timers using the tsf of the beacon
3924 * frame we just received.
3925 */
3926 ath_beacon_config(sc, vap);
3927 }
3928 /* fall thru... */
3929 case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3930 if (vap->iv_opmode == IEEE80211_M_IBSS &&
3931 vap->iv_state == IEEE80211_S_RUN) {
3932 u_int64_t tsf = ath_extend_tsf(rstamp,
3933 ath_hal_gettsf64(sc->sc_ah));
3934 /*
3935 * Handle ibss merge as needed; check the tsf on the
3936 * frame before attempting the merge. The 802.11 spec
3937			 * says the station should change its bssid to match
3938 * the oldest station with the same ssid, where oldest
3939 * is determined by the tsf. Note that hardware
3940 * reconfiguration happens through callback to
3941 * ath_newstate as the state machine will go from
3942 * RUN -> RUN when this happens.
3943 */
3944 if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
3945 DPRINTF(sc, ATH_DEBUG_STATE,
3946 "ibss merge, rstamp %u tsf %ju "
3947 "tstamp %ju\n", rstamp, (uintmax_t)tsf,
3948 (uintmax_t)ni->ni_tstamp.tsf);
3949 (void) ieee80211_ibss_merge(ni);
3950 }
3951 }
3952 break;
3953 }
3954}
3955
3956/*
3957 * Set the default antenna.
3958 */
3959static void
3960ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3961{
3962 struct ath_hal *ah = sc->sc_ah;
3963
3964 /* XXX block beacon interrupts */
3965 ath_hal_setdefantenna(ah, antenna);
3966 if (sc->sc_defant != antenna)
3967 sc->sc_stats.ast_ant_defswitch++;
3968 sc->sc_defant = antenna;
3969 sc->sc_rxotherant = 0;
3970}
3971
3972static int
3973ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
3974 const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
3975{
3976#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20)
3977#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U)
3978#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D)
3979#define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
3980 struct ath_softc *sc = ifp->if_softc;
3981 const HAL_RATE_TABLE *rt;
3982 uint8_t rix;
3983
3984 /*
3985 * Discard anything shorter than an ack or cts.
3986 */
3987 if (m->m_pkthdr.len < IEEE80211_ACK_LEN) {
3988 DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n",
3989 __func__, m->m_pkthdr.len);
3990 sc->sc_stats.ast_rx_tooshort++;
3991 return 0;
3992 }
3993 rt = sc->sc_currates;
3994 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
3995 rix = rt->rateCodeToIndex[rs->rs_rate];
3996 sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
3997 sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
3998#ifdef AH_SUPPORT_AR5416
3999 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
4000 if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */
4001 struct ieee80211com *ic = ifp->if_l2com;
4002
4003 if ((rs->rs_flags & HAL_RX_2040) == 0)
4004 sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
4005 else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
4006 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
4007 else
4008 sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
4009 if ((rs->rs_flags & HAL_RX_GI) == 0)
4010 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
4011 }
4012#endif
4013 sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
4014 if (rs->rs_status & HAL_RXERR_CRC)
4015 sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
4016 /* XXX propagate other error flags from descriptor */
4017 sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf;
4018 sc->sc_rx_th.wr_antnoise = nf;
4019 sc->sc_rx_th.wr_antenna = rs->rs_antenna;
4020
4021 bpf_mtap2(ifp->if_bpf, &sc->sc_rx_th, sc->sc_rx_th_len, m);
4022
4023 return 1;
4024#undef CHAN_HT
4025#undef CHAN_HT20
4026#undef CHAN_HT40U
4027#undef CHAN_HT40D
4028}
4029
4030static void
4031ath_handle_micerror(struct ieee80211com *ic,
4032 struct ieee80211_frame *wh, int keyix)
4033{
4034 struct ieee80211_node *ni;
4035
4036 /* XXX recheck MIC to deal w/ chips that lie */
4037 /* XXX discard MIC errors on !data frames */
4038 ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
4039 if (ni != NULL) {
4040 ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
4041 ieee80211_free_node(ni);
4042 }
4043}
4044
4045static void
4046ath_rx_proc(void *arg, int npending)
4047{
4048#define PA2DESC(_sc, _pa) \
4049 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
4050 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
4051 struct ath_softc *sc = arg;
4052 struct ath_buf *bf;
4053 struct ifnet *ifp = sc->sc_ifp;
4054 struct ieee80211com *ic = ifp->if_l2com;
4055 struct ath_hal *ah = sc->sc_ah;
4056 struct ath_desc *ds;
4057 struct ath_rx_status *rs;
4058 struct mbuf *m;
4059 struct ieee80211_node *ni;
4060 int len, type, ngood;
4061 u_int phyerr;
4062 HAL_STATUS status;
4063 int16_t nf;
4064 u_int64_t tsf;
4065
4066 DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
4067 ngood = 0;
4068 nf = ath_hal_getchannoise(ah, sc->sc_curchan);
4069 sc->sc_stats.ast_rx_noise = nf;
4070 tsf = ath_hal_gettsf64(ah);
4071 do {
4072 bf = STAILQ_FIRST(&sc->sc_rxbuf);
4073 if (bf == NULL) { /* NB: shouldn't happen */
4074 if_printf(ifp, "%s: no buffer!\n", __func__);
4075 break;
4076 }
4077 m = bf->bf_m;
4078 if (m == NULL) { /* NB: shouldn't happen */
4079 /*
4080 * If mbuf allocation failed previously there
4081 * will be no mbuf; try again to re-populate it.
4082 */
4083 /* XXX make debug msg */
4084 if_printf(ifp, "%s: no mbuf!\n", __func__);
4085 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
4086 goto rx_next;
4087 }
4088 ds = bf->bf_desc;
4089 if (ds->ds_link == bf->bf_daddr) {
4090 /* NB: never process the self-linked entry at the end */
4091 break;
4092 }
4093 /* XXX sync descriptor memory */
4094 /*
4095 * Must provide the virtual address of the current
4096 * descriptor, the physical address, and the virtual
4097 * address of the next descriptor in the h/w chain.
4098 * This allows the HAL to look ahead to see if the
4099 * hardware is done with a descriptor by checking the
4100 * done bit in the following descriptor and the address
4101 * of the current descriptor the DMA engine is working
4102 * on. All this is necessary because of our use of
4103 * a self-linked list to avoid rx overruns.
4104 */
4105 rs = &bf->bf_status.ds_rxstat;
4106 status = ath_hal_rxprocdesc(ah, ds,
4107 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
4108#ifdef ATH_DEBUG
4109 if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
4110 ath_printrxbuf(sc, bf, 0, status == HAL_OK);
4111#endif
4112 if (status == HAL_EINPROGRESS)
4113 break;
4114 STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
4115 if (rs->rs_status != 0) {
4116 if (rs->rs_status & HAL_RXERR_CRC)
4117 sc->sc_stats.ast_rx_crcerr++;
4118 if (rs->rs_status & HAL_RXERR_FIFO)
4119 sc->sc_stats.ast_rx_fifoerr++;
4120 if (rs->rs_status & HAL_RXERR_PHY) {
4121 sc->sc_stats.ast_rx_phyerr++;
4122 phyerr = rs->rs_phyerr & 0x1f;
4123 sc->sc_stats.ast_rx_phy[phyerr]++;
4124 goto rx_error; /* NB: don't count in ierrors */
4125 }
4126 if (rs->rs_status & HAL_RXERR_DECRYPT) {
4127 /*
4128 * Decrypt error. If the error occurred
4129 * because there was no hardware key, then
4130 * let the frame through so the upper layers
4131 * can process it. This is necessary for 5210
4132 * parts which have no way to setup a ``clear''
4133 * key cache entry.
4134 *
4135 * XXX do key cache faulting
4136 */
4137 if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
4138 goto rx_accept;
4139 sc->sc_stats.ast_rx_badcrypt++;
4140 }
4141 if (rs->rs_status & HAL_RXERR_MIC) {
4142 sc->sc_stats.ast_rx_badmic++;
4143 /*
4144 * Do minimal work required to hand off
4145				 * the 802.11 header for notification.
4146				 */
4147				/* XXX frags and QoS frames */
4148 len = rs->rs_datalen;
4149 if (len >= sizeof (struct ieee80211_frame)) {
4150 bus_dmamap_sync(sc->sc_dmat,
4151 bf->bf_dmamap,
4152 BUS_DMASYNC_POSTREAD);
4153 ath_handle_micerror(ic,
4154 mtod(m, struct ieee80211_frame *),
4155 sc->sc_splitmic ?
4156 rs->rs_keyix-32 : rs->rs_keyix);
4157 }
4158 }
4159 ifp->if_ierrors++;
4160rx_error:
4161 /*
4162 * Cleanup any pending partial frame.
4163 */
4164 if (sc->sc_rxpending != NULL) {
4165 m_freem(sc->sc_rxpending);
4166 sc->sc_rxpending = NULL;
4167 }
4168 /*
4169 * When a tap is present pass error frames
4170 * that have been requested. By default we
4171 * pass decrypt+mic errors but others may be
4172 * interesting (e.g. crc).
4173 */
4174 if (bpf_peers_present(ifp->if_bpf) &&
4175 (rs->rs_status & sc->sc_monpass)) {
4176 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4177 BUS_DMASYNC_POSTREAD);
4178 /* NB: bpf needs the mbuf length setup */
4179 len = rs->rs_datalen;
4180 m->m_pkthdr.len = m->m_len = len;
4181 (void) ath_rx_tap(ifp, m, rs, tsf, nf);
4182 }
4183			/* XXX pass MIC errors up for s/w recalculation */
4184 goto rx_next;
4185 }
4186rx_accept:
4187 /*
4188 * Sync and unmap the frame. At this point we're
4189 * committed to passing the mbuf somewhere so clear
4190 * bf_m; this means a new mbuf must be allocated
4191 * when the rx descriptor is setup again to receive
4192 * another frame.
4193 */
4194 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4195 BUS_DMASYNC_POSTREAD);
4196 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4197 bf->bf_m = NULL;
4198
4199 len = rs->rs_datalen;
4200 m->m_len = len;
4201
4202 if (rs->rs_more) {
4203 /*
4204 * Frame spans multiple descriptors; save
4205 * it for the next completed descriptor, it
4206 * will be used to construct a jumbogram.
4207 */
4208 if (sc->sc_rxpending != NULL) {
4209 /* NB: max frame size is currently 2 clusters */
4210 sc->sc_stats.ast_rx_toobig++;
4211 m_freem(sc->sc_rxpending);
4212 }
4213 m->m_pkthdr.rcvif = ifp;
4214 m->m_pkthdr.len = len;
4215 sc->sc_rxpending = m;
4216 goto rx_next;
4217 } else if (sc->sc_rxpending != NULL) {
4218 /*
4219 * This is the second part of a jumbogram,
4220 * chain it to the first mbuf, adjust the
4221 * frame length, and clear the rxpending state.
4222 */
4223 sc->sc_rxpending->m_next = m;
4224 sc->sc_rxpending->m_pkthdr.len += len;
4225 m = sc->sc_rxpending;
4226 sc->sc_rxpending = NULL;
4227 } else {
4228 /*
4229 * Normal single-descriptor receive; setup
4230 * the rcvif and packet length.
4231 */
4232 m->m_pkthdr.rcvif = ifp;
4233 m->m_pkthdr.len = len;
4234 }
4235
4236 ifp->if_ipackets++;
4237 sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
4238
4239 if (bpf_peers_present(ifp->if_bpf) &&
4240 !ath_rx_tap(ifp, m, rs, tsf, nf)) {
4241 m_freem(m); /* XXX reclaim */
4242 goto rx_next;
4243 }
4244
4245 /*
4246 * From this point on we assume the frame is at least
4247 * as large as ieee80211_frame_min; verify that.
4248 */
4249 if (len < IEEE80211_MIN_LEN) {
4250 DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n",
4251 __func__, len);
4252 sc->sc_stats.ast_rx_tooshort++;
4253 m_freem(m);
4254 goto rx_next;
4255 }
4256
4257 if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
4258 const HAL_RATE_TABLE *rt = sc->sc_currates;
4259 uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
4260
4261 ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
4262 sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
4263 }
4264
4265 m_adj(m, -IEEE80211_CRC_LEN);
4266
4267 /*
4268 * Locate the node for sender, track state, and then
4269 * pass the (referenced) node up to the 802.11 layer
4270 * for its use.
4271 */
4272 ni = ieee80211_find_rxnode_withkey(ic,
4273 mtod(m, const struct ieee80211_frame_min *),
4274 rs->rs_keyix == HAL_RXKEYIX_INVALID ?
4275 IEEE80211_KEYIX_NONE : rs->rs_keyix);
4276 if (ni != NULL) {
4277 /*
4278 * Sending station is known, dispatch directly.
4279 */
4279#ifdef ATH_SUPPORT_TDMA
4280#ifdef IEEE80211_SUPPORT_TDMA
4280 sc->sc_tdmars = rs;
4281#endif
4282 type = ieee80211_input(ni, m,
4283 rs->rs_rssi, nf, rs->rs_tstamp);
4284 ieee80211_free_node(ni);
4285 /*
4286 * Arrange to update the last rx timestamp only for
4287 * frames from our ap when operating in station mode.
4288 * This assumes the rx key is always setup when
4289 * associated.
4290 */
4291 if (ic->ic_opmode == IEEE80211_M_STA &&
4292 rs->rs_keyix != HAL_RXKEYIX_INVALID)
4293 ngood++;
4294 } else {
4295 type = ieee80211_input_all(ic, m,
4296 rs->rs_rssi, nf, rs->rs_tstamp);
4297 }
4298 /*
4299 * Track rx rssi and do any rx antenna management.
4300 */
4301 ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
4302 if (sc->sc_diversity) {
4303 /*
4304 * When using fast diversity, change the default rx
4305 * antenna if diversity chooses the other antenna 3
4306 * times in a row.
4307 */
4308 if (sc->sc_defant != rs->rs_antenna) {
4309 if (++sc->sc_rxotherant >= 3)
4310 ath_setdefantenna(sc, rs->rs_antenna);
4311 } else
4312 sc->sc_rxotherant = 0;
4313 }
4314 if (sc->sc_softled) {
4315 /*
4316 * Blink for any data frame. Otherwise do a
4317 * heartbeat-style blink when idle. The latter
4318 * is mainly for station mode where we depend on
4319 * periodic beacon frames to trigger the poll event.
4320 */
4321 if (type == IEEE80211_FC0_TYPE_DATA) {
4322 const HAL_RATE_TABLE *rt = sc->sc_currates;
4323 ath_led_event(sc,
4324 rt->rateCodeToIndex[rs->rs_rate]);
4325 } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
4326 ath_led_event(sc, 0);
4327 }
4328rx_next:
4329 STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
4330 } while (ath_rxbuf_init(sc, bf) == 0);
4331
4332 /* rx signal state monitoring */
4333 ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
4334 if (ngood)
4335 sc->sc_lastrx = tsf;
4336
4337 if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
4338 !IFQ_IS_EMPTY(&ifp->if_snd))
4339 ath_start(ifp);
4340
4341#undef PA2DESC
4342}
4343
4344static void
4345ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4346{
4347 txq->axq_qnum = qnum;
4348 txq->axq_depth = 0;
4349 txq->axq_intrcnt = 0;
4350 txq->axq_link = NULL;
4351 STAILQ_INIT(&txq->axq_q);
4352 ATH_TXQ_LOCK_INIT(sc, txq);
4353 TAILQ_INIT(&txq->axq_stageq);
4354 txq->axq_curage = 0;
4355}
4356
4357/*
4358 * Setup a h/w transmit queue.
4359 */
4360static struct ath_txq *
4361ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4362{
4363#define N(a) (sizeof(a)/sizeof(a[0]))
4364 struct ath_hal *ah = sc->sc_ah;
4365 HAL_TXQ_INFO qi;
4366 int qnum;
4367
4368 memset(&qi, 0, sizeof(qi));
4369 qi.tqi_subtype = subtype;
4370 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4371 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4372 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4373 /*
4374 * Enable interrupts only for EOL and DESC conditions.
4375 * We mark tx descriptors to receive a DESC interrupt
4376	 * when a tx queue gets deep; otherwise we wait for the
4377	 * EOL to reap descriptors. Note that this is done to
4378	 * reduce interrupt load and this only defers reaping
4379	 * descriptors, never transmitting frames. Aside from
4380	 * reducing interrupts this also permits more concurrency.
4381	 * The only potential downside is if the tx queue backs
4382	 * up, in which case the top half of the kernel may back up
4383 * due to a lack of tx descriptors.
4384 */
4385 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
4386 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4387 if (qnum == -1) {
4388 /*
4389 * NB: don't print a message, this happens
4390 * normally on parts with too few tx queues
4391 */
4392 return NULL;
4393 }
4394 if (qnum >= N(sc->sc_txq)) {
4395 device_printf(sc->sc_dev,
4396 "hal qnum %u out of range, max %zu!\n",
4397 qnum, N(sc->sc_txq));
4398 ath_hal_releasetxqueue(ah, qnum);
4399 return NULL;
4400 }
4401 if (!ATH_TXQ_SETUP(sc, qnum)) {
4402 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4403 sc->sc_txqsetup |= 1<<qnum;
4404 }
4405 return &sc->sc_txq[qnum];
4406#undef N
4407}
4408
4409/*
4410 * Setup a hardware data transmit queue for the specified
4411 * access category. The hal may not support all requested
4412 * queues in which case it will return a reference to a
4413 * previously setup queue. We record the mapping from ac's
4414 * to h/w queues for use by ath_tx_start and also track
4415 * the set of h/w queues being used to optimize work in the
4416 * transmit interrupt handler and related routines.
4417 */
4418static int
4419ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4420{
4421#define N(a) (sizeof(a)/sizeof(a[0]))
4422 struct ath_txq *txq;
4423
4424 if (ac >= N(sc->sc_ac2q)) {
4425 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4426 ac, N(sc->sc_ac2q));
4427 return 0;
4428 }
4429 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4430 if (txq != NULL) {
4431 sc->sc_ac2q[ac] = txq;
4432 return 1;
4433 } else
4434 return 0;
4435#undef N
4436}
4437
4438/*
4439 * Update WME parameters for a transmit queue.
4440 */
4441static int
4442ath_txq_update(struct ath_softc *sc, int ac)
4443{
4444#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
4445#define ATH_TXOP_TO_US(v) (v<<5)
4446 struct ifnet *ifp = sc->sc_ifp;
4447 struct ieee80211com *ic = ifp->if_l2com;
4448 struct ath_txq *txq = sc->sc_ac2q[ac];
4449 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4450 struct ath_hal *ah = sc->sc_ah;
4451 HAL_TXQ_INFO qi;
4452
4453 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
4454#ifdef ATH_SUPPORT_TDMA
4455#ifdef IEEE80211_SUPPORT_TDMA
4455 if (sc->sc_tdma) {
4456 /*
4457 * AIFS is zero so there's no pre-transmit wait. The
4458 * burst time defines the slot duration and is configured
4459 * via sysctl. The QCU is setup to not do post-xmit
4460 * back off, lockout all lower-priority QCU's, and fire
4461 * off the DMA beacon alert timer which is setup based
4462 * on the slot configuration.
4463 */
4464 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4465 | HAL_TXQ_TXERRINT_ENABLE
4466 | HAL_TXQ_TXURNINT_ENABLE
4467 | HAL_TXQ_TXEOLINT_ENABLE
4468 | HAL_TXQ_DBA_GATED
4469 | HAL_TXQ_BACKOFF_DISABLE
4470 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
4471 ;
4472 qi.tqi_aifs = 0;
4473 /* XXX +dbaprep? */
4474 qi.tqi_readyTime = sc->sc_tdmaslotlen;
4475 qi.tqi_burstTime = qi.tqi_readyTime;
4476 } else {
4477#endif
4478 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4479 | HAL_TXQ_TXERRINT_ENABLE
4480 | HAL_TXQ_TXDESCINT_ENABLE
4481 | HAL_TXQ_TXURNINT_ENABLE
4482 ;
4483 qi.tqi_aifs = wmep->wmep_aifsn;
4484 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
4485 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
4486 qi.tqi_readyTime = 0;
4487 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
4488#ifdef ATH_SUPPORT_TDMA			/* deleted */
4489#ifdef IEEE80211_SUPPORT_TDMA		/* added */
4489 }
4490#endif
4491
4492 DPRINTF(sc, ATH_DEBUG_RESET,
4493 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
4494 __func__, txq->axq_qnum, qi.tqi_qflags,
4495 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
4496
4497 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
4498 if_printf(ifp, "unable to update hardware queue "
4499 "parameters for %s traffic!\n",
4500 ieee80211_wme_acnames[ac]);
4501 return 0;
4502 } else {
4503 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
4504 return 1;
4505 }
4506#undef ATH_TXOP_TO_US
4507#undef ATH_EXPONENT_TO_VALUE
4508}
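/*
 * NB: worked example for the conversion macros above. WME carries
 * cwmin/cwmax as log2 exponents and the TXOP limit in 32us units,
 * so with typical 11a/g AC_VI parameters (logcwmin 3, logcwmax 4,
 * txopLimit 94):
 *
 *	ATH_EXPONENT_TO_VALUE(3) = (1<<3)-1 = 7      (tqi_cwmin)
 *	ATH_EXPONENT_TO_VALUE(4) = (1<<4)-1 = 15     (tqi_cwmax)
 *	ATH_TXOP_TO_US(94)       = 94<<5    = 3008us (tqi_burstTime)
 */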
4509
4510/*
4511 * Callback from the 802.11 layer to update WME parameters.
4512 */
4513static int
4514ath_wme_update(struct ieee80211com *ic)
4515{
4516 struct ath_softc *sc = ic->ic_ifp->if_softc;
4517
4518 return !ath_txq_update(sc, WME_AC_BE) ||
4519 !ath_txq_update(sc, WME_AC_BK) ||
4520 !ath_txq_update(sc, WME_AC_VI) ||
4521 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4522}
4523
4524/*
4525 * Reclaim resources for a setup queue.
4526 */
4527static void
4528ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4529{
4530
4531 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4532 ATH_TXQ_LOCK_DESTROY(txq);
4533 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4534}
4535
4536/*
4537 * Reclaim all tx queue resources.
4538 */
4539static void
4540ath_tx_cleanup(struct ath_softc *sc)
4541{
4542 int i;
4543
4544 ATH_TXBUF_LOCK_DESTROY(sc);
4545 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4546 if (ATH_TXQ_SETUP(sc, i))
4547 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4548}
4549
4550/*
4551 * Return h/w rate index for an IEEE rate (w/o basic rate bit).
4552 */
4553static int
4554ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate)
4555{
4556 int i;
4557
4558 for (i = 0; i < rt->rateCount; i++)
4559 if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate)
4560 return i;
4561 return 0; /* NB: lowest rate */
4562}
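/*
 * NB: rates are in 500Kb/s units as carried in the 802.11 Supported
 * Rates element, e.g. 24Mb/s OFDM is the value 48, and
 * IEEE80211_RATE_VAL strips the 0x80 "basic rate" bit before the
 * compare; so ath_tx_findrix(rt, 48) yields the 24Mb/s rate index.
 */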
4563
4564/*
4565 * Reclaim mbuf resources. For fragmented frames we
4566 * need to reclaim each frag chained with m_nextpkt.
4567 */
4568static void
4569ath_freetx(struct mbuf *m)
4570{
4571 struct mbuf *next;
4572
4573 do {
4574 next = m->m_nextpkt;
4575 m->m_nextpkt = NULL;
4576 m_freem(m);
4577 } while ((m = next) != NULL);
4578}
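/*
 * NB: 802.11 fragments travel as a chain of packets linked through
 * m_nextpkt (m_next, by contrast, links the buffers within a single
 * packet and is already walked by m_freem), hence the explicit
 * m_nextpkt loop above.
 */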
4579
4580static int
4581ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
4582{
4583 struct mbuf *m;
4584 int error;
4585
4586 /*
4587 * Load the DMA map so any coalescing is done. This
4588 * also calculates the number of descriptors we need.
4589 */
4590 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
4591 bf->bf_segs, &bf->bf_nseg,
4592 BUS_DMA_NOWAIT);
4593 if (error == EFBIG) {
4594 /* XXX packet requires too many descriptors */
4595 bf->bf_nseg = ATH_TXDESC+1;
4596 } else if (error != 0) {
4597 sc->sc_stats.ast_tx_busdma++;
4598 ath_freetx(m0);
4599 return error;
4600 }
4601 /*
4602 * Discard null packets and check for packets that
4603 * require too many TX descriptors. We try to convert
4604 * the latter to a cluster.
4605 */
4606 if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */
4607 sc->sc_stats.ast_tx_linear++;
4608 m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
4609 if (m == NULL) {
4610 ath_freetx(m0);
4611 sc->sc_stats.ast_tx_nombuf++;
4612 return ENOMEM;
4613 }
4614 m0 = m;
4615 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
4616 bf->bf_segs, &bf->bf_nseg,
4617 BUS_DMA_NOWAIT);
4618 if (error != 0) {
4619 sc->sc_stats.ast_tx_busdma++;
4620 ath_freetx(m0);
4621 return error;
4622 }
4623 KASSERT(bf->bf_nseg <= ATH_TXDESC,
4624 ("too many segments after defrag; nseg %u", bf->bf_nseg));
4625 } else if (bf->bf_nseg == 0) { /* null packet, discard */
4626 sc->sc_stats.ast_tx_nodata++;
4627 ath_freetx(m0);
4628 return EIO;
4629 }
4630 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
4631 __func__, m0, m0->m_pkthdr.len);
4632 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
4633 bf->bf_m = m0;
4634
4635 return 0;
4636}
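/*
 * NB: condensed, the busdma pattern above is
 *
 *	load the mbuf chain as-is;
 *	if it maps to more than ATH_TXDESC segments (EFBIG),
 *		m_collapse() into fewer clusters and load again;
 *	reject the frame if it still does not fit or is empty.
 *
 * The EFBIG case is folded into the segment-count test by setting
 * bf_nseg = ATH_TXDESC+1 so a single check covers both paths.
 */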
4637
4638static void
4639ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
4640{
4641 struct ath_hal *ah = sc->sc_ah;
4642 struct ath_desc *ds, *ds0;
4643 int i;
4644
4645 /*
4646 * Fill in the remainder of the descriptor info.
4647 */
4648 ds0 = ds = bf->bf_desc;
4649 for (i = 0; i < bf->bf_nseg; i++, ds++) {
4650 ds->ds_data = bf->bf_segs[i].ds_addr;
4651 if (i == bf->bf_nseg - 1)
4652 ds->ds_link = 0;
4653 else
4654 ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
4655 ath_hal_filltxdesc(ah, ds
4656 , bf->bf_segs[i].ds_len /* segment length */
4657 , i == 0 /* first segment */
4658 , i == bf->bf_nseg - 1 /* last segment */
4659 , ds0 /* first descriptor */
4660 );
4661 DPRINTF(sc, ATH_DEBUG_XMIT,
4662 "%s: %d: %08x %08x %08x %08x %08x %08x\n",
4663 __func__, i, ds->ds_link, ds->ds_data,
4664 ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
4665 }
4666 /*
4667 * Insert the frame on the outbound list and pass it on
4668 * to the hardware. Multicast frames buffered for power
4669 * save stations and transmitted from the CAB queue are stored
4670 * on a s/w only queue and loaded onto the CAB queue in the
4671 * SWBA handler, since frames only go out on DTIM and this
4672 * avoids possible races.
4673 */
4674 ATH_TXQ_LOCK(txq);
4675 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
4676 ("busy status 0x%x", bf->bf_flags));
4677 if (txq->axq_qnum != ATH_TXQ_SWQ) {
4678#ifdef ATH_SUPPORT_TDMA			/* deleted */
4679#ifdef IEEE80211_SUPPORT_TDMA		/* added */
4679 int qbusy;
4680
4681 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
4682 qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);
4683 if (txq->axq_link == NULL) {
4684 /*
4685 * Be careful writing the address to TXDP. If
4686 * the tx q is enabled then this write will be
4687 * ignored. Normally this is not an issue but
4688 * when tdma is in use and the q is beacon gated
4689 * this race can occur. If the q is busy then
4690 * defer the work to later--either when another
4691 * packet comes along or when we prepare a beacon
4692 * frame at SWBA.
4693 */
4694 if (!qbusy) {
4695 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
4696 txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
4697 DPRINTF(sc, ATH_DEBUG_XMIT,
4698 "%s: TXDP[%u] = %p (%p) depth %d\n",
4699 __func__, txq->axq_qnum,
4700 (caddr_t)bf->bf_daddr, bf->bf_desc,
4701 txq->axq_depth);
4702 } else {
4703 txq->axq_flags |= ATH_TXQ_PUTPENDING;
4704 DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
4705 "%s: Q%u busy, defer enable\n", __func__,
4706 txq->axq_qnum);
4707 }
4708 } else {
4709 *txq->axq_link = bf->bf_daddr;
4710 DPRINTF(sc, ATH_DEBUG_XMIT,
4711 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
4712 txq->axq_qnum, txq->axq_link,
4713 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
4714 if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) {
4715 /*
4716 * The q was busy when we previously tried
4717 * to write the address of the first buffer
4718 * in the chain. Since it's not busy now
4719 * handle this chore. We are certain the
4720 * buffer at the front is the right one since
4721 * axq_link is NULL only when the buffer list
4722 * is/was empty.
4723 */
4724 ath_hal_puttxbuf(ah, txq->axq_qnum,
4725 STAILQ_FIRST(&txq->axq_q)->bf_daddr);
4726 txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
4727 DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
4728 "%s: Q%u restarted\n", __func__,
4729 txq->axq_qnum);
4730 }
4731 }
4732#else
4733 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
4734 if (txq->axq_link == NULL) {
4735 ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
4736 DPRINTF(sc, ATH_DEBUG_XMIT,
4737 "%s: TXDP[%u] = %p (%p) depth %d\n",
4738 __func__, txq->axq_qnum,
4739 (caddr_t)bf->bf_daddr, bf->bf_desc,
4740 txq->axq_depth);
4741 } else {
4742 *txq->axq_link = bf->bf_daddr;
4743 DPRINTF(sc, ATH_DEBUG_XMIT,
4744 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
4745 txq->axq_qnum, txq->axq_link,
4746 (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
4747 }
4748#endif /* ATH_SUPPORT_TDMA */		/* deleted */
4749#endif /* IEEE80211_SUPPORT_TDMA */		/* added */
4749 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
4750 ath_hal_txstart(ah, txq->axq_qnum);
4751 } else {
4752 if (txq->axq_link != NULL) {
4753 struct ath_buf *last = ATH_TXQ_LAST(txq);
4754 struct ieee80211_frame *wh;
4755
4756 /* mark previous frame */
4757 wh = mtod(last->bf_m, struct ieee80211_frame *);
4758 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
4759 bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
4760 BUS_DMASYNC_PREWRITE);
4761
4762 /* link descriptor */
4763 *txq->axq_link = bf->bf_daddr;
4764 }
4765 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
4766 txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
4767 }
4768 ATH_TXQ_UNLOCK(txq);
4769}
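/*
 * NB: stripped of the TDMA deferred-TXDP handling, the h/w handoff
 * above reduces to this sketch:
 *
 *	if (txq->axq_link == NULL)
 *		ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
 *	else
 *		*txq->axq_link = bf->bf_daddr;
 *	txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
 *	ath_hal_txstart(ah, txq->axq_qnum);
 *
 * axq_link always points at the ds_link field of the last queued
 * descriptor, so a NULL axq_link means the h/w list is (or was)
 * empty and TXDP must be written directly.
 */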
4770
4771static int
4772ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
4773 struct mbuf *m0)
4774{
4775 struct ieee80211vap *vap = ni->ni_vap;
4776 struct ath_vap *avp = ATH_VAP(vap);
4777 struct ath_hal *ah = sc->sc_ah;
4778 struct ifnet *ifp = sc->sc_ifp;
4779 struct ieee80211com *ic = ifp->if_l2com;
4780 const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
4781 int error, iswep, ismcast, isfrag, ismrr;
4782 int keyix, hdrlen, pktlen, try0;
4783 u_int8_t rix, txrate, ctsrate;
4784 u_int8_t cix = 0xff; /* NB: silence compiler */
4785 struct ath_desc *ds;
4786 struct ath_txq *txq;
4787 struct ieee80211_frame *wh;
4788 u_int subtype, flags, ctsduration;
4789 HAL_PKT_TYPE atype;
4790 const HAL_RATE_TABLE *rt;
4791 HAL_BOOL shortPreamble;
4792 struct ath_node *an;
4793 u_int pri;
4794
4795 wh = mtod(m0, struct ieee80211_frame *);
4796 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
4797 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
4798 isfrag = m0->m_flags & M_FRAG;
4799 hdrlen = ieee80211_anyhdrsize(wh);
4800 /*
4801 * Packet length must not include any
4802 * pad bytes; deduct them here.
4803 */
4804 pktlen = m0->m_pkthdr.len - (hdrlen & 3);
4805
4806 if (iswep) {
4807 const struct ieee80211_cipher *cip;
4808 struct ieee80211_key *k;
4809
4810 /*
4811 * Construct the 802.11 header+trailer for an encrypted
4813 * frame. The only reason this can fail is an
4813 * unknown or unsupported cipher/key type.
4814 */
4815 k = ieee80211_crypto_encap(ni, m0);
4816 if (k == NULL) {
4817 /*
4818 * This can happen when the key is yanked after the
4819 * frame was queued. Just discard the frame; the
4820 * 802.11 layer counts failures and provides
4821 * debugging/diagnostics.
4822 */
4823 ath_freetx(m0);
4824 return EIO;
4825 }
4826 /*
4827 * Adjust the packet + header lengths for the crypto
4828 * additions and calculate the h/w key index. When
4829 * a s/w mic is done the frame will have had any mic
4830 * added to it prior to entry so m0->m_pkthdr.len will
4831 * account for it. Otherwise we need to add it to the
4832 * packet length.
4833 */
4834 cip = k->wk_cipher;
4835 hdrlen += cip->ic_header;
4836 pktlen += cip->ic_header + cip->ic_trailer;
4837 /* NB: frags always have any TKIP MIC done in s/w */
4838 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
4839 pktlen += cip->ic_miclen;
4840 keyix = k->wk_keyix;
4841
4842 /* packet header may have moved, reset our local pointer */
4843 wh = mtod(m0, struct ieee80211_frame *);
4844 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
4845 /*
4846 * Use station key cache slot, if assigned.
4847 */
4848 keyix = ni->ni_ucastkey.wk_keyix;
4849 if (keyix == IEEE80211_KEYIX_NONE)
4850 keyix = HAL_TXKEYIX_INVALID;
4851 } else
4852 keyix = HAL_TXKEYIX_INVALID;
4853
4854 pktlen += IEEE80211_CRC_LEN;
4855
4856 /*
4857 * Load the DMA map so any coalescing is done. This
4858 * also calculates the number of descriptors we need.
4859 */
4860 error = ath_tx_dmasetup(sc, bf, m0);
4861 if (error != 0)
4862 return error;
4863 bf->bf_node = ni; /* NB: held reference */
4864 m0 = bf->bf_m; /* NB: may have changed */
4865 wh = mtod(m0, struct ieee80211_frame *);
4866
4867 /* setup descriptors */
4868 ds = bf->bf_desc;
4869 rt = sc->sc_currates;
4870 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
4871
4872 /*
4873 * NB: the 802.11 layer marks whether or not we should
4874 * use short preamble based on the current mode and
4875 * negotiated parameters.
4876 */
4877 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
4878 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
4879 shortPreamble = AH_TRUE;
4880 sc->sc_stats.ast_tx_shortpre++;
4881 } else {
4882 shortPreamble = AH_FALSE;
4883 }
4884
4885 an = ATH_NODE(ni);
4886 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
4887 ismrr = 0; /* default no multi-rate retry */
4888 pri = M_WME_GETAC(m0); /* honor classification */
4889 /* XXX use txparams instead of fixed values */
4890 /*
4891 * Calculate Atheros packet type from IEEE80211 packet header,
4892 * setup for rate calculations, and select h/w transmit queue.
4893 */
4894 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
4895 case IEEE80211_FC0_TYPE_MGT:
4896 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4897 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
4898 atype = HAL_PKT_TYPE_BEACON;
4899 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4900 atype = HAL_PKT_TYPE_PROBE_RESP;
4901 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
4902 atype = HAL_PKT_TYPE_ATIM;
4903 else
4904 atype = HAL_PKT_TYPE_NORMAL; /* XXX */
4905 rix = an->an_mgmtrix;
4906 txrate = rt->info[rix].rateCode;
4907 if (shortPreamble)
4908 txrate |= rt->info[rix].shortPreamble;
4909 try0 = ATH_TXMGTTRY;
4910 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
4911 break;
4912 case IEEE80211_FC0_TYPE_CTL:
4913 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */
4914 rix = an->an_mgmtrix;
4915 txrate = rt->info[rix].rateCode;
4916 if (shortPreamble)
4917 txrate |= rt->info[rix].shortPreamble;
4918 try0 = ATH_TXMGTTRY;
4919 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
4920 break;
4921 case IEEE80211_FC0_TYPE_DATA:
4922 atype = HAL_PKT_TYPE_NORMAL; /* default */
4923 /*
4924 * Data frames: multicast frames go out at a fixed rate,
4925 * EAPOL frames use the mgmt frame rate; otherwise consult
4926 * the rate control module for the rate to use.
4927 */
4928 if (ismcast) {
4929 rix = an->an_mcastrix;
4930 txrate = rt->info[rix].rateCode;
4931 if (shortPreamble)
4932 txrate |= rt->info[rix].shortPreamble;
4933 try0 = 1;
4934 } else if (m0->m_flags & M_EAPOL) {
4935 /* XXX? maybe always use long preamble? */
4936 rix = an->an_mgmtrix;
4937 txrate = rt->info[rix].rateCode;
4938 if (shortPreamble)
4939 txrate |= rt->info[rix].shortPreamble;
4940 try0 = ATH_TXMAXTRY; /* XXX? too many? */
4941 } else {
4942 ath_rate_findrate(sc, an, shortPreamble, pktlen,
4943 &rix, &try0, &txrate);
4944 sc->sc_txrix = rix; /* for LED blinking */
4945 sc->sc_lastdatarix = rix; /* for fast frames */
4946 if (try0 != ATH_TXMAXTRY)
4947 ismrr = 1;
4948 }
4949 if (cap->cap_wmeParams[pri].wmep_noackPolicy)
4950 flags |= HAL_TXDESC_NOACK;
4951 break;
4952 default:
4953 if_printf(ifp, "bogus frame type 0x%x (%s)\n",
4954 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
4955 /* XXX statistic */
4956 ath_freetx(m0);
4957 return EIO;
4958 }
4959 txq = sc->sc_ac2q[pri];
4960
4961 /*
4962 * When servicing one or more stations in power-save mode,
4963 * or if there is some mcast data waiting on the mcast
4964 * queue (to prevent out of order delivery), multicast
4965 * frames must be buffered until after the beacon.
4966 */
4967 if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
4968 txq = &avp->av_mcastq;
4969
4970 /*
4971 * Calculate miscellaneous flags.
4972 */
4973 if (ismcast) {
4974 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
4975 } else if (pktlen > vap->iv_rtsthreshold &&
4976 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
4977 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
4978 cix = rt->info[rix].controlRate;
4979 sc->sc_stats.ast_tx_rts++;
4980 }
4981 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
4982 sc->sc_stats.ast_tx_noack++;
4983#ifdef ATH_SUPPORT_TDMA			/* deleted */
4984#ifdef IEEE80211_SUPPORT_TDMA		/* added */
4984 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
4985 DPRINTF(sc, ATH_DEBUG_TDMA,
4986 "%s: discard frame, ACK required w/ TDMA\n", __func__);
4987 sc->sc_stats.ast_tdma_ack++;
4988 ath_freetx(m0);
4989 return EIO;
4990 }
4991#endif
4992
4993 /*
4994 * If 802.11g protection is enabled, determine whether
4995 * to use RTS/CTS or just CTS. Note that this is only
4996 * done for OFDM unicast frames.
4997 */
4998 if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
4999 rt->info[rix].phy == IEEE80211_T_OFDM &&
5000 (flags & HAL_TXDESC_NOACK) == 0) {
5001 /* XXX fragments must use CCK rates w/ protection */
5002 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
5003 flags |= HAL_TXDESC_RTSENA;
5004 else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
5005 flags |= HAL_TXDESC_CTSENA;
5006 if (isfrag) {
5007 /*
5008 * For frags it would be desirable to use the
5009 * highest CCK rate for RTS/CTS. But stations
5010 * farther away may detect it at a lower CCK rate
5011 * so use the configured protection rate instead
5012 * (for now).
5013 */
5014 cix = rt->info[sc->sc_protrix].controlRate;
5015 } else
5016 cix = rt->info[sc->sc_protrix].controlRate;
5017 sc->sc_stats.ast_tx_protect++;
5018 }
5019
5020 /*
5021 * Calculate duration. This logically belongs in the 802.11
5022 * layer but it lacks sufficient information to calculate it.
5023 */
5024 if ((flags & HAL_TXDESC_NOACK) == 0 &&
5025 (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
5026 u_int16_t dur;
5027 if (shortPreamble)
5028 dur = rt->info[rix].spAckDuration;
5029 else
5030 dur = rt->info[rix].lpAckDuration;
5031 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
5032 dur += dur; /* additional SIFS+ACK */
5033 KASSERT(m0->m_nextpkt != NULL, ("no fragment"));
5034 /*
5035 * Include the size of the next fragment so the NAV is
5036 * updated properly. The last fragment uses only the
5037 * ACK duration.
5038 */
5039 dur += ath_hal_computetxtime(ah, rt,
5040 m0->m_nextpkt->m_pkthdr.len,
5041 rix, shortPreamble);
5042 }
5043 if (isfrag) {
5044 /*
5045 * Force hardware to use computed duration for next
5046 * fragment by disabling multi-rate retry which updates
5047 * duration based on the multi-rate duration table.
5048 */
5049 ismrr = 0;
5050 try0 = ATH_TXMGTTRY; /* XXX? */
5051 }
5052 *(u_int16_t *)wh->i_dur = htole16(dur);
5053 }
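/*
 * NB: for a non-final fragment the duration computed above works
 * out to
 *
 *	i_dur = t(SIFS+ACK) + t(SIFS+ACK) + t(next fragment airtime)
 *
 * so third parties defer across the entire fragment burst; the
 * final fragment carries just the single SIFS+ACK term.
 */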
5054
5055 /*
5056 * Calculate RTS/CTS rate and duration if needed.
5057 */
5058 ctsduration = 0;
5059 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
5060 /*
5061 * CTS transmit rate is derived from the transmit rate
5062 * by looking in the h/w rate table. We must also factor
5063 * in whether or not a short preamble is to be used.
5064 */
5065 /* NB: cix is set above where RTS/CTS is enabled */
5066 KASSERT(cix != 0xff, ("cix not setup"));
5067 ctsrate = rt->info[cix].rateCode;
5068 /*
5069 * Compute the transmit duration based on the frame
5070 * size and the size of an ACK frame. We call into the
5071 * HAL to do the computation since it depends on the
5072 * characteristics of the actual PHY being used.
5073 *
5074 * NB: CTS is assumed the same size as an ACK so we can
5075 * use the precalculated ACK durations.
5076 */
5077 if (shortPreamble) {
5078 ctsrate |= rt->info[cix].shortPreamble;
5079 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
5080 ctsduration += rt->info[cix].spAckDuration;
5081 ctsduration += ath_hal_computetxtime(ah,
5082 rt, pktlen, rix, AH_TRUE);
5083 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
5084 ctsduration += rt->info[rix].spAckDuration;
5085 } else {
5086 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
5087 ctsduration += rt->info[cix].lpAckDuration;
5088 ctsduration += ath_hal_computetxtime(ah,
5089 rt, pktlen, rix, AH_FALSE);
5090 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
5091 ctsduration += rt->info[rix].lpAckDuration;
5092 }
5093 /*
5094 * Must disable multi-rate retry when using RTS/CTS.
5095 */
5096 ismrr = 0;
5097 try0 = ATH_TXMGTTRY; /* XXX */
5098 } else
5099 ctsrate = 0;
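/*
 * NB: in formula form the accumulation above gives, for RTS
 * protection with an ACK expected,
 *
 *	ctsduration = t(SIFS+CTS) + t(data frame) + t(SIFS+ACK)
 *
 * i.e. everything the RTS must reserve the medium for once it is
 * on the air; CTS-to-self drops the leading CTS term and NOACK
 * frames drop the trailing ACK term.
 */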
5100
5101 /*
5102 * At this point we are committed to sending the frame
5103 * and we don't need to look at m_nextpkt; clear it in
5104 * case this frame is part of a frag chain.
5105 */
5106 m0->m_nextpkt = NULL;
5107
5108 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
5109 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
5110 sc->sc_hwmap[rix].ieeerate, -1);
5111
5112 if (bpf_peers_present(ifp->if_bpf)) {
5113 u_int64_t tsf = ath_hal_gettsf64(ah);
5114
5115 sc->sc_tx_th.wt_tsf = htole64(tsf);
5116 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
5117 if (iswep)
5118 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
5119 if (isfrag)
5120 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
5121 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
5122 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
5123 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
5124
5125 bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0);
5126 }
5127
5128 /*
5129 * Determine if a tx interrupt should be generated for
5130 * this descriptor. We take a tx interrupt to reap
5131 * descriptors when the h/w hits an EOL condition or
5132 * when the descriptor is specifically marked to generate
5133 * an interrupt. We periodically mark descriptors in this
5134 * way to insure timely replenishing of the supply needed
5135 * for sending frames. Defering interrupts reduces system
5136 * load and potentially allows more concurrent work to be
5137 * done but if done to aggressively can cause senders to
5138 * backup.
5139 *
5140 * NB: use >= to deal with sc_txintrperiod changing
5141 * dynamically through sysctl.
5142 */
5143 if (flags & HAL_TXDESC_INTREQ) {
5144 txq->axq_intrcnt = 0;
5145 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
5146 flags |= HAL_TXDESC_INTREQ;
5147 txq->axq_intrcnt = 0;
5148 }
5149
5150 /*
5151 * Formulate first tx descriptor with tx controls.
5152 */
5153 /* XXX check return value? */
5154 ath_hal_setuptxdesc(ah, ds
5155 , pktlen /* packet length */
5156 , hdrlen /* header length */
5157 , atype /* Atheros packet type */
5158 , ni->ni_txpower /* txpower */
5159 , txrate, try0 /* series 0 rate/tries */
5160 , keyix /* key cache index */
5161 , sc->sc_txantenna /* antenna mode */
5162 , flags /* flags */
5163 , ctsrate /* rts/cts rate */
5164 , ctsduration /* rts/cts duration */
5165 );
5166 bf->bf_txflags = flags;
5167 /*
5168 * Setup the multi-rate retry state only when we're
5169 * going to use it. This assumes ath_hal_setuptxdesc
5170 * initializes the descriptors (so we don't have to)
5171 * when the hardware supports multi-rate retry and
5172 * we don't use it.
5173 */
5174 if (ismrr)
5175 ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
5176
5177 ath_tx_handoff(sc, txq, bf);
5178 return 0;
5179}
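/*
 * NB: on the error returns above the mbuf has already been
 * reclaimed via ath_freetx() but the ath_buf and node reference
 * have not; the caller (ath_start() elsewhere in this file) is
 * expected to return bf to sc_txbuf and free the node reference.
 */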
5180
5181/*
5182 * Process completed xmit descriptors from the specified queue.
5183 */
5184static int
5185ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
5186{
5187 struct ath_hal *ah = sc->sc_ah;
5188 struct ifnet *ifp = sc->sc_ifp;
5189 struct ieee80211com *ic = ifp->if_l2com;
5190 struct ath_buf *bf, *last;
5191 struct ath_desc *ds, *ds0;
5192 struct ath_tx_status *ts;
5193 struct ieee80211_node *ni;
5194 struct ath_node *an;
5195 int sr, lr, pri, nacked;
5196 HAL_STATUS status;
5197
5198 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
5199 __func__, txq->axq_qnum,
5200 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
5201 txq->axq_link);
5202 nacked = 0;
5203 for (;;) {
5204 ATH_TXQ_LOCK(txq);
5205 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
5206 bf = STAILQ_FIRST(&txq->axq_q);
5207 if (bf == NULL) {
5208 ATH_TXQ_UNLOCK(txq);
5209 break;
5210 }
5211 ds0 = &bf->bf_desc[0];
5212 ds = &bf->bf_desc[bf->bf_nseg - 1];
5213 ts = &bf->bf_status.ds_txstat;
5214 status = ath_hal_txprocdesc(ah, ds, ts);
5215#ifdef ATH_DEBUG
5216 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
5217 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
5218 status == HAL_OK);
5219#endif
5220 if (status == HAL_EINPROGRESS) {
5221 ATH_TXQ_UNLOCK(txq);
5222 break;
5223 }
5224 ATH_TXQ_REMOVE_HEAD(txq, bf_list);
5225#ifdef ATH_SUPPORT_TDMA			/* deleted */
5226#ifdef IEEE80211_SUPPORT_TDMA		/* added */
5226 if (txq->axq_depth > 0) {
5227 /*
5228 * More frames follow. Mark the buffer busy
5229 * so it's not re-used while the hardware may
5230 * still re-read the link field in the descriptor.
5231 */
5232 bf->bf_flags |= ATH_BUF_BUSY;
5233 } else
5234#else
5235 if (txq->axq_depth == 0)
5236#endif
5237 txq->axq_link = NULL;
5238 ATH_TXQ_UNLOCK(txq);
5239
5240 ni = bf->bf_node;
5241 if (ni != NULL) {
5242 an = ATH_NODE(ni);
5243 if (ts->ts_status == 0) {
5244 u_int8_t txant = ts->ts_antenna;
5245 sc->sc_stats.ast_ant_tx[txant]++;
5246 sc->sc_ant_tx[txant]++;
5247 if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
5248 sc->sc_stats.ast_tx_altrate++;
5249 pri = M_WME_GETAC(bf->bf_m);
5250 if (pri >= WME_AC_VO)
5251 ic->ic_wme.wme_hipri_traffic++;
5252 if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
5253 ni->ni_inact = ni->ni_inact_reload;
5254 } else {
5255 if (ts->ts_status & HAL_TXERR_XRETRY)
5256 sc->sc_stats.ast_tx_xretries++;
5257 if (ts->ts_status & HAL_TXERR_FIFO)
5258 sc->sc_stats.ast_tx_fifoerr++;
5259 if (ts->ts_status & HAL_TXERR_FILT)
5260 sc->sc_stats.ast_tx_filtered++;
5261 if (bf->bf_m->m_flags & M_FF)
5262 sc->sc_stats.ast_ff_txerr++;
5263 }
5264 sr = ts->ts_shortretry;
5265 lr = ts->ts_longretry;
5266 sc->sc_stats.ast_tx_shortretry += sr;
5267 sc->sc_stats.ast_tx_longretry += lr;
5268 /*
5269 * Hand the descriptor to the rate control algorithm.
5270 */
5271 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
5272 (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
5273 /*
5274 * If frame was ack'd update statistics,
5275 * including the last rx time used to
5276 * workaround phantom bmiss interrupts.
5277 */
5278 if (ts->ts_status == 0) {
5279 nacked++;
5280 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
5281 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
5282 ts->ts_rssi);
5283 }
5284 ath_rate_tx_complete(sc, an, bf);
5285 }
5286 /*
5287 * Do any tx complete callback. Note this must
5288 * be done before releasing the node reference.
5289 */
5290 if (bf->bf_m->m_flags & M_TXCB)
5291 ieee80211_process_callback(ni, bf->bf_m,
5292 (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
5293 ts->ts_status : HAL_TXERR_XRETRY);
5294 /*
5295 * Reclaim reference to node.
5296 *
5297 * NB: the node may be reclaimed here if, for example,
5298 * this is a DEAUTH message that was sent and the
5299 * node was timed out due to inactivity.
5300 */
5301 ieee80211_free_node(ni);
5302 }
5303 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
5304 BUS_DMASYNC_POSTWRITE);
5305 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5306
5307 m_freem(bf->bf_m);
5308 bf->bf_m = NULL;
5309 bf->bf_node = NULL;
5310
5311 ATH_TXBUF_LOCK(sc);
5312 last = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
5313 if (last != NULL)
5314 last->bf_flags &= ~ATH_BUF_BUSY;
5315 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5316 ATH_TXBUF_UNLOCK(sc);
5317 }
5318 /*
5319 * Flush fast-frame staging queue when traffic slows.
5320 */
5321 if (txq->axq_depth <= 1)
5322 ath_ff_stageq_flush(sc, txq, ath_ff_always);
5323 return nacked;
5324}
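/*
 * NB: the ATH_BUF_BUSY handling above guards against the h/w
 * re-reading ds_link in a just-completed descriptor: a buffer
 * reaped while more frames remain queued stays marked busy, and
 * the flag is cleared on the previous tail of sc_txbuf only when
 * a later completion shows the h/w has moved past it.
 */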
5325
5326static __inline int
5327txqactive(struct ath_hal *ah, int qnum)
5328{
5329 u_int32_t txqs = 1<<qnum;
5330 ath_hal_gettxintrtxqs(ah, &txqs);
5331 return (txqs & (1<<qnum));
5332}
5333
5334/*
5335 * Deferred processing of transmit interrupt; special-cased
5336 * for a single hardware transmit queue (e.g. 5210 and 5211).
5337 */
5338static void
5339ath_tx_proc_q0(void *arg, int npending)
5340{
5341 struct ath_softc *sc = arg;
5342 struct ifnet *ifp = sc->sc_ifp;
5343
5344 if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
5345 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5346 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5347 ath_tx_processq(sc, sc->sc_cabq);
5348 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5349 sc->sc_wd_timer = 0;
5350
5351 if (sc->sc_softled)
5352 ath_led_event(sc, sc->sc_txrix);
5353
5354 ath_start(ifp);
5355}
5356
5357/*
5358 * Deferred processing of transmit interrupt; special-cased
5359 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
5360 */
5361static void
5362ath_tx_proc_q0123(void *arg, int npending)
5363{
5364 struct ath_softc *sc = arg;
5227 if (txq->axq_depth > 0) {
5228 /*
5229 * More frames follow. Mark the buffer busy
5230 * so it's not re-used while the hardware may
5231 * still re-read the link field in the descriptor.
5232 */
5233 bf->bf_flags |= ATH_BUF_BUSY;
5234 } else
5235#else
5236 if (txq->axq_depth == 0)
5237#endif
5238 txq->axq_link = NULL;
5239 ATH_TXQ_UNLOCK(txq);
5240
5241 ni = bf->bf_node;
5242 if (ni != NULL) {
5243 an = ATH_NODE(ni);
5244 if (ts->ts_status == 0) {
5245 u_int8_t txant = ts->ts_antenna;
5246 sc->sc_stats.ast_ant_tx[txant]++;
5247 sc->sc_ant_tx[txant]++;
5248 if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
5249 sc->sc_stats.ast_tx_altrate++;
5250 pri = M_WME_GETAC(bf->bf_m);
5251 if (pri >= WME_AC_VO)
5252 ic->ic_wme.wme_hipri_traffic++;
5253 if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
5254 ni->ni_inact = ni->ni_inact_reload;
5255 } else {
5256 if (ts->ts_status & HAL_TXERR_XRETRY)
5257 sc->sc_stats.ast_tx_xretries++;
5258 if (ts->ts_status & HAL_TXERR_FIFO)
5259 sc->sc_stats.ast_tx_fifoerr++;
5260 if (ts->ts_status & HAL_TXERR_FILT)
5261 sc->sc_stats.ast_tx_filtered++;
5262 if (bf->bf_m->m_flags & M_FF)
5263 sc->sc_stats.ast_ff_txerr++;
5264 }
5265 sr = ts->ts_shortretry;
5266 lr = ts->ts_longretry;
5267 sc->sc_stats.ast_tx_shortretry += sr;
5268 sc->sc_stats.ast_tx_longretry += lr;
5269 /*
5270 * Hand the descriptor to the rate control algorithm.
5271 */
5272 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
5273 (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
5274 /*
5275 * If frame was ack'd update statistics,
5276 * including the last rx time used to
5277 * workaround phantom bmiss interrupts.
5278 */
5279 if (ts->ts_status == 0) {
5280 nacked++;
5281 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
5282 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
5283 ts->ts_rssi);
5284 }
5285 ath_rate_tx_complete(sc, an, bf);
5286 }
5287 /*
5288 * Do any tx complete callback. Note this must
5289 * be done before releasing the node reference.
5290 */
5291 if (bf->bf_m->m_flags & M_TXCB)
5292 ieee80211_process_callback(ni, bf->bf_m,
5293 (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
5294 ts->ts_status : HAL_TXERR_XRETRY);
5295 /*
5296 * Reclaim reference to node.
5297 *
5298		 * NB: the node may be reclaimed here if, for example,
5299 * this is a DEAUTH message that was sent and the
5300 * node was timed out due to inactivity.
5301 */
5302 ieee80211_free_node(ni);
5303 }
5304 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
5305 BUS_DMASYNC_POSTWRITE);
5306 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5307
5308 m_freem(bf->bf_m);
5309 bf->bf_m = NULL;
5310 bf->bf_node = NULL;
5311
5312 ATH_TXBUF_LOCK(sc);
5313 last = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
5314 if (last != NULL)
5315 last->bf_flags &= ~ATH_BUF_BUSY;
5316 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5317 ATH_TXBUF_UNLOCK(sc);
5318 }
5319 /*
5320 * Flush fast-frame staging queue when traffic slows.
5321 */
5322 if (txq->axq_depth <= 1)
5323 ath_ff_stageq_flush(sc, txq, ath_ff_always);
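	/*
	 * NB: the return value is the number of frames for which an
	 * ack was received; callers use a nonzero count to refresh
	 * sc_lastrx and head off phantom beacon-miss processing.
	 */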
5324 return nacked;
5325}
5326
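/*
 * Check whether a tx queue contributed to the most recent tx
 * interrupt.  The HAL records the set of interrupting queues when
 * the ISR runs; ath_hal_gettxintrtxqs hands that snapshot back for
 * the deferred tasks to test, e.g.:
 *
 *	if (txqactive(sc->sc_ah, qnum))
 *		ath_tx_processq(sc, &sc->sc_txq[qnum]);
 */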
5327static __inline int
5328txqactive(struct ath_hal *ah, int qnum)
5329{
5330 u_int32_t txqs = 1<<qnum;
5331 ath_hal_gettxintrtxqs(ah, &txqs);
5332 return (txqs & (1<<qnum));
5333}
5334
5335/*
5336 * Deferred processing of transmit interrupt; special-cased
5337 * for a single hardware transmit queue (e.g. 5210 and 5211).
5338 */
5339static void
5340ath_tx_proc_q0(void *arg, int npending)
5341{
5342 struct ath_softc *sc = arg;
5343 struct ifnet *ifp = sc->sc_ifp;
5344
5345 if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
5346 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5347 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5348 ath_tx_processq(sc, sc->sc_cabq);
5349 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5350 sc->sc_wd_timer = 0;
5351
5352 if (sc->sc_softled)
5353 ath_led_event(sc, sc->sc_txrix);
5354
5355 ath_start(ifp);
5356}
5357
5358/*
5359 * Deferred processing of transmit interrupt; special-cased
5360 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
5361 */
5362static void
5363ath_tx_proc_q0123(void *arg, int npending)
5364{
5365 struct ath_softc *sc = arg;
5366 struct ifnet *ifp = sc->sc_ifp;
5367 int nacked;
5368
5369 /*
5370 * Process each active queue.
5371 */
5372 nacked = 0;
5373 if (txqactive(sc->sc_ah, 0))
5374 nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
5375 if (txqactive(sc->sc_ah, 1))
5376 nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
5377 if (txqactive(sc->sc_ah, 2))
5378 nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
5379 if (txqactive(sc->sc_ah, 3))
5380 nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
5381 if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5382 ath_tx_processq(sc, sc->sc_cabq);
5383 if (nacked)
5384 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5385
5386 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5387 sc->sc_wd_timer = 0;
5388
5389 if (sc->sc_softled)
5390 ath_led_event(sc, sc->sc_txrix);
5391
5392 ath_start(ifp);
5393}
5394
5395/*
5396 * Deferred processing of transmit interrupt.
5397 */
5398static void
5399ath_tx_proc(void *arg, int npending)
5400{
5401 struct ath_softc *sc = arg;
5402 struct ifnet *ifp = sc->sc_ifp;
5403 int i, nacked;
5404
5405 /*
5406 * Process each active queue.
5407 */
5408 nacked = 0;
5409 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5410 if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
5411 nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
5412 if (nacked)
5413 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5414
5415 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5416 sc->sc_wd_timer = 0;
5417
5418 if (sc->sc_softled)
5419 ath_led_event(sc, sc->sc_txrix);
5420
5421 ath_start(ifp);
5422}
5423
5424static void
5425ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
5426{
5427#ifdef ATH_DEBUG
5428 struct ath_hal *ah = sc->sc_ah;
5429#endif
5430 struct ieee80211_node *ni;
5431 struct ath_buf *bf;
5432 u_int ix;
5433
5434 /*
5435 * NB: this assumes output has been stopped and
5436 * we do not need to block ath_tx_proc
5437 */
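	/*
	 * The last buffer on the free list may still be marked
	 * ATH_BUF_BUSY by ath_tx_processq; with output stopped the
	 * hardware can no longer re-read its link field, so the
	 * marker can be cleared before buffers are recycled.
	 */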
5438 ATH_TXBUF_LOCK(sc);
5439 bf = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
5440 if (bf != NULL)
5441 bf->bf_flags &= ~ATH_BUF_BUSY;
5442 ATH_TXBUF_UNLOCK(sc);
5443 for (ix = 0;; ix++) {
5444 ATH_TXQ_LOCK(txq);
5445 bf = STAILQ_FIRST(&txq->axq_q);
5446 if (bf == NULL) {
5447 txq->axq_link = NULL;
5448 ATH_TXQ_UNLOCK(txq);
5449 break;
5450 }
5451 ATH_TXQ_REMOVE_HEAD(txq, bf_list);
5452 ATH_TXQ_UNLOCK(txq);
5453#ifdef ATH_DEBUG
5454 if (sc->sc_debug & ATH_DEBUG_RESET) {
5455 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5456
5457 ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
5458 ath_hal_txprocdesc(ah, bf->bf_desc,
5459 &bf->bf_status.ds_txstat) == HAL_OK);
5460 ieee80211_dump_pkt(ic, mtod(bf->bf_m, caddr_t),
5461 bf->bf_m->m_len, 0, -1);
5462 }
5463#endif /* ATH_DEBUG */
5464 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5465 ni = bf->bf_node;
5466 bf->bf_node = NULL;
5467 if (ni != NULL) {
5468 /*
5469 * Do any callback and reclaim the node reference.
5470 */
5471 if (bf->bf_m->m_flags & M_TXCB)
5472 ieee80211_process_callback(ni, bf->bf_m, -1);
5473 ieee80211_free_node(ni);
5474 }
5475 m_freem(bf->bf_m);
5476 bf->bf_m = NULL;
5477 bf->bf_flags &= ~ATH_BUF_BUSY;
5478
5479 ATH_TXBUF_LOCK(sc);
5480 STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5481 ATH_TXBUF_UNLOCK(sc);
5482 }
5483}
5484
5485static void
5486ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5487{
5488 struct ath_hal *ah = sc->sc_ah;
5489
5490 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5491 __func__, txq->axq_qnum,
5492 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5493 txq->axq_link);
5494 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5495}
5496
5497/*
5498 * Drain the transmit queues and reclaim resources.
5499 */
5500static void
5501ath_draintxq(struct ath_softc *sc)
5502{
5503 struct ath_hal *ah = sc->sc_ah;
5504 struct ifnet *ifp = sc->sc_ifp;
5505 int i;
5506
5507 /* XXX return value */
5508 if (!sc->sc_invalid) {
5509 /* don't touch the hardware if marked invalid */
5510 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5511 __func__, sc->sc_bhalq,
5512 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5513 NULL);
5514 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5515 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5516 if (ATH_TXQ_SETUP(sc, i))
5517 ath_tx_stopdma(sc, &sc->sc_txq[i]);
5518 }
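	/*
	 * NB: DMA is stopped on all queues above before any buffers
	 * are reclaimed below so the hardware cannot be fetching a
	 * descriptor while it is being recycled.
	 */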
5519 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5520 if (ATH_TXQ_SETUP(sc, i))
5521 ath_tx_draintxq(sc, &sc->sc_txq[i]);
5522#ifdef ATH_DEBUG
5523 if (sc->sc_debug & ATH_DEBUG_RESET) {
5524 struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
5525 if (bf != NULL && bf->bf_m != NULL) {
5526 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5527 ath_hal_txprocdesc(ah, bf->bf_desc,
5528 &bf->bf_status.ds_txstat) == HAL_OK);
5529 ieee80211_dump_pkt(ifp->if_l2com, mtod(bf->bf_m, caddr_t),
5530 bf->bf_m->m_len, 0, -1);
5531 }
5532 }
5533#endif /* ATH_DEBUG */
5534 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5535 sc->sc_wd_timer = 0;
5536}
5537
5538/*
5539 * Disable the receive h/w in preparation for a reset.
5540 */
5541static void
5542ath_stoprecv(struct ath_softc *sc)
5543{
5544#define PA2DESC(_sc, _pa) \
5545 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
5546 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
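	/*
	 * PA2DESC maps a descriptor's physical (DMA) address back to
	 * the driver's virtual address for it; the rx descriptors
	 * live in one contiguous block (sc_rxdma) so simple offset
	 * arithmetic suffices.  It is used below to chase the
	 * hardware's link pointers, e.g.
	 *
	 *	struct ath_desc *next = PA2DESC(sc, ds->ds_link);
	 */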
5547 struct ath_hal *ah = sc->sc_ah;
5548
5549 ath_hal_stoppcurecv(ah); /* disable PCU */
5550 ath_hal_setrxfilter(ah, 0); /* clear recv filter */
5551 ath_hal_stopdmarecv(ah); /* disable DMA engine */
5552 DELAY(3000); /* 3ms is long enough for 1 frame */
5553#ifdef ATH_DEBUG
5554 if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
5555 struct ath_buf *bf;
5556 u_int ix;
5557
5558 printf("%s: rx queue %p, link %p\n", __func__,
5559 (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
5560 ix = 0;
5561 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5562 struct ath_desc *ds = bf->bf_desc;
5563 struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
5564 HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
5565 bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
5566 if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
5567 ath_printrxbuf(sc, bf, ix, status == HAL_OK);
5568 ix++;
5569 }
5570 }
5571#endif
5572 if (sc->sc_rxpending != NULL) {
5573 m_freem(sc->sc_rxpending);
5574 sc->sc_rxpending = NULL;
5575 }
5576 sc->sc_rxlink = NULL; /* just in case */
5577#undef PA2DESC
5578}
5579
5580/*
5581 * Enable the receive h/w following a reset.
5582 */
5583static int
5584ath_startrecv(struct ath_softc *sc)
5585{
5586 struct ath_hal *ah = sc->sc_ah;
5587 struct ath_buf *bf;
5588
5589 sc->sc_rxlink = NULL;
5590 sc->sc_rxpending = NULL;
5591 STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5592 int error = ath_rxbuf_init(sc, bf);
5593 if (error != 0) {
5594 DPRINTF(sc, ATH_DEBUG_RECV,
5595 "%s: ath_rxbuf_init failed %d\n",
5596 __func__, error);
5597 return error;
5598 }
5599 }
5600
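	/*
	 * Hand the head of the reinitialized descriptor list to the
	 * hardware and bring the receive engine back up; the rx
	 * filter itself is reinstalled by ath_mode_init.
	 */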
5601 bf = STAILQ_FIRST(&sc->sc_rxbuf);
5602 ath_hal_putrxbuf(ah, bf->bf_daddr);
5603 ath_hal_rxena(ah); /* enable recv descriptors */
5604 ath_mode_init(sc); /* set filters, etc. */
5605 ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
5606 return 0;
5607}
5608
5609/*
5610 * Update internal state after a channel change.
5611 */
5612static void
5613ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5614{
5615 enum ieee80211_phymode mode;
5616
5617 /*
5618 * Change channels and update the h/w rate map
5619 * if we're switching; e.g. 11a to 11b/g.
5620 */
5621 mode = ieee80211_chan2mode(chan);
5622 if (mode != sc->sc_curmode)
5623 ath_setcurmode(sc, mode);
5624 sc->sc_curchan = chan;
5625
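	/*
	 * Keep the tx and rx radiotap headers in sync with the new
	 * channel so captured frames are tagged with the correct
	 * frequency and flags.
	 */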
5626 sc->sc_rx_th.wr_chan_flags = htole32(chan->ic_flags);
5627 sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags;
5628 sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
5629 sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq;
5630 sc->sc_rx_th.wr_chan_ieee = chan->ic_ieee;
5631 sc->sc_tx_th.wt_chan_ieee = sc->sc_rx_th.wr_chan_ieee;
5632 sc->sc_rx_th.wr_chan_maxpow = chan->ic_maxregpower;
5633 sc->sc_tx_th.wt_chan_maxpow = sc->sc_rx_th.wr_chan_maxpow;
5634}
5635
5636/*
5637 * Set/change channels. If the channel is really being changed,
5638	 * it's done by resetting the chip. To accomplish this we must
5639	 * first clean up any pending DMA, then restart things, a la
5640 * ath_init.
5641 */
5642static int
5643ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5644{
5645 struct ifnet *ifp = sc->sc_ifp;
5646 struct ieee80211com *ic = ifp->if_l2com;
5647 struct ath_hal *ah = sc->sc_ah;
5648
5649 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5650 __func__, ieee80211_chan2ieee(ic, chan),
5651 chan->ic_freq, chan->ic_flags);
5652 if (chan != sc->sc_curchan) {
5653 HAL_STATUS status;
5654 /*
5655 * To switch channels clear any pending DMA operations;
5656 * wait long enough for the RX fifo to drain, reset the
5657 * hardware at the new frequency, and then re-enable
5658 * the relevant bits of the h/w.
5659 */
5660 ath_hal_intrset(ah, 0); /* disable interrupts */
5661 ath_draintxq(sc); /* clear pending tx frames */
5662 ath_stoprecv(sc); /* turn off frame recv */
5663 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
5664 if_printf(ifp, "%s: unable to reset "
5665		    "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5666 __func__, ieee80211_chan2ieee(ic, chan),
5667 chan->ic_freq, chan->ic_flags, status);
5668 return EIO;
5669 }
5670 sc->sc_diversity = ath_hal_getdiversity(ah);
5671
5672 /*
5673 * Re-enable rx framework.
5674 */
5675 if (ath_startrecv(sc) != 0) {
5676 if_printf(ifp, "%s: unable to restart recv logic\n",
5677 __func__);
5678 return EIO;
5679 }
5680
5681 /*
5682 * Change channels and update the h/w rate map
5683 * if we're switching; e.g. 11a to 11b/g.
5684 */
5685 ath_chan_change(sc, chan);
5686
5687 /*
5688 * Re-enable interrupts.
5689 */
5690 ath_hal_intrset(ah, sc->sc_imask);
5691 }
5692 return 0;
5693}
5694
5695/*
5696 * Periodically recalibrate the PHY to account
5697 * for temperature/environment changes.
5698 */
5699static void
5700ath_calibrate(void *arg)
5701{
5702 struct ath_softc *sc = arg;
5703 struct ath_hal *ah = sc->sc_ah;
5704 struct ifnet *ifp = sc->sc_ifp;
5705 struct ieee80211com *ic = ifp->if_l2com;
5706 HAL_BOOL longCal, isCalDone;
5707 int nextcal;
5708
5709 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
5710 goto restart;
5711 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5712 if (longCal) {
5713 sc->sc_stats.ast_per_cal++;
5714 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5715 /*
5716 * Rfgain is out of bounds, reset the chip
5717 * to load new gain values.
5718 */
5719 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5720 "%s: rfgain change\n", __func__);
5721 sc->sc_stats.ast_per_rfgain++;
5722 ath_reset(ifp);
5723 }
5724 /*
5725 * If this long cal is after an idle period, then
5726 * reset the data collection state so we start fresh.
5727 */
5728 if (sc->sc_resetcal) {
5729 (void) ath_hal_calreset(ah, sc->sc_curchan);
5730 sc->sc_lastcalreset = ticks;
5731 sc->sc_resetcal = 0;
5732 }
5733 }
5734 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5735 if (longCal) {
5736 /*
5737 * Calibrate noise floor data again in case of change.
5738 */
5739 ath_hal_process_noisefloor(ah);
5740 }
5741 } else {
5742 DPRINTF(sc, ATH_DEBUG_ANY,
5743 "%s: calibration of channel %u failed\n",
5744 __func__, sc->sc_curchan->ic_freq);
5745 sc->sc_stats.ast_per_calfail++;
5746 }
5747 if (!isCalDone) {
5748restart:
5749 /*
5750 * Use a shorter interval to potentially collect multiple
5751 * data samples required to complete calibration. Once
5752 * we're told the work is done we drop back to a longer
5753 * interval between requests. We're more aggressive doing
5754 * work when operating as an AP to improve operation right
5755 * after startup.
5756 */
5757 nextcal = (1000*ath_shortcalinterval)/hz;
5758 if (sc->sc_opmode != HAL_M_HOSTAP)
5759 nextcal *= 10;
5760 } else {
5761 nextcal = ath_longcalinterval*hz;
5762 sc->sc_lastlongcal = ticks;
5763 if (sc->sc_lastcalreset == 0)
5764 sc->sc_lastcalreset = sc->sc_lastlongcal;
5765 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5766 sc->sc_resetcal = 1; /* setup reset next trip */
5767 }
5768
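	/*
	 * NB: nextcal is in ticks; it can be 0 (e.g. when the long
	 * cal interval is configured to 0) in which case the callout
	 * is left unarmed and calibration is effectively disabled.
	 */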
5769 if (nextcal != 0) {
5770 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5771 __func__, nextcal, isCalDone ? "" : "!");
5772 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
5773 } else {
5774 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
5775 __func__);
5776 /* NB: don't rearm timer */
5777 }
5778}
5779
5780static void
5781ath_scan_start(struct ieee80211com *ic)
5782{
5783 struct ifnet *ifp = ic->ic_ifp;
5784 struct ath_softc *sc = ifp->if_softc;
5785 struct ath_hal *ah = sc->sc_ah;
5786 u_int32_t rfilt;
5787
5788 /* XXX calibration timer? */
5789
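	/*
	 * While scanning, use the broadcast address as our bssid
	 * (with aid 0) and recompute the rx filter so beacons and
	 * probe responses from any bss are accepted.
	 */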
5790 sc->sc_scanning = 1;
5791 sc->sc_syncbeacon = 0;
5792 rfilt = ath_calcrxfilter(sc);
5793 ath_hal_setrxfilter(ah, rfilt);
5794 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5795
5796 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5797 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
5798}
5799
5800static void
5801ath_scan_end(struct ieee80211com *ic)
5802{
5803 struct ifnet *ifp = ic->ic_ifp;
5804 struct ath_softc *sc = ifp->if_softc;
5805 struct ath_hal *ah = sc->sc_ah;
5806 u_int32_t rfilt;
5807
5808 sc->sc_scanning = 0;
5809 rfilt = ath_calcrxfilter(sc);
5810 ath_hal_setrxfilter(ah, rfilt);
5811 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5812
5813 ath_hal_process_noisefloor(ah);
5814
5815 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5816 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5817 sc->sc_curaid);
5818}
5819
5820static void
5821ath_set_channel(struct ieee80211com *ic)
5822{
5823 struct ifnet *ifp = ic->ic_ifp;
5824 struct ath_softc *sc = ifp->if_softc;
5825
5826 (void) ath_chan_set(sc, ic->ic_curchan);
5827 /*
5828 * If we are returning to our bss channel then mark state
5829 * so the next recv'd beacon's tsf will be used to sync the
5830 * beacon timers. Note that since we only hear beacons in
5831 * sta/ibss mode this has no effect in other operating modes.
5832 */
5833 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5834 sc->sc_syncbeacon = 1;
5835}
5836
5837/*
5838 * Walk the vap list and check if there are any vaps in RUN state.
5839 */
5840static int
5841ath_isanyrunningvaps(struct ieee80211vap *this)
5842{
5843 struct ieee80211com *ic = this->iv_ic;
5844 struct ieee80211vap *vap;
5845
5846 IEEE80211_LOCK_ASSERT(ic);
5847
5848 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5849 if (vap != this && vap->iv_state == IEEE80211_S_RUN)
5850 return 1;
5851 }
5852 return 0;
5853}
5854
5855static int
5856ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
5857{
5858 struct ieee80211com *ic = vap->iv_ic;
5859 struct ath_softc *sc = ic->ic_ifp->if_softc;
5860 struct ath_vap *avp = ATH_VAP(vap);
5861 struct ath_hal *ah = sc->sc_ah;
5862 struct ieee80211_node *ni = NULL;
5863 int i, error, stamode;
5864 u_int32_t rfilt;
5865 static const HAL_LED_STATE leds[] = {
5866 HAL_LED_INIT, /* IEEE80211_S_INIT */
5867 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
5868 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
5869 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
5870 HAL_LED_RUN, /* IEEE80211_S_CAC */
5871 HAL_LED_RUN, /* IEEE80211_S_RUN */
5872 HAL_LED_RUN, /* IEEE80211_S_CSA */
5873 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
5874 };
5875
5876 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
5877 ieee80211_state_name[vap->iv_state],
5878 ieee80211_state_name[nstate]);
5879
5880 callout_drain(&sc->sc_cal_ch);
5881 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
5882
5883 if (nstate == IEEE80211_S_SCAN) {
5884 /*
5885 * Scanning: turn off beacon miss and don't beacon.
5886 * Mark beacon state so when we reach RUN state we'll
5887 * [re]setup beacons. Unblock the task q thread so
5888 * deferred interrupt processing is done.
5889 */
5890 ath_hal_intrset(ah,
5891 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
5892 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5893 sc->sc_beacons = 0;
5894 taskqueue_unblock(sc->sc_tq);
5895 }
5896
5897 ni = vap->iv_bss;
5898 rfilt = ath_calcrxfilter(sc);
5899 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
5900 vap->iv_opmode == IEEE80211_M_AHDEMO ||
5901 vap->iv_opmode == IEEE80211_M_IBSS);
5902 if (stamode && nstate == IEEE80211_S_RUN) {
5903 sc->sc_curaid = ni->ni_associd;
5904 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
5905 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5906 }
5907 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5908 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
5909 ath_hal_setrxfilter(ah, rfilt);
5910
5911 /* XXX is this to restore keycache on resume? */
5912 if (vap->iv_opmode != IEEE80211_M_STA &&
5913 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
5914 for (i = 0; i < IEEE80211_WEP_NKID; i++)
5915 if (ath_hal_keyisvalid(ah, i))
5916 ath_hal_keysetmac(ah, i, ni->ni_bssid);
5917 }
5918
5919 /*
5920 * Invoke the parent method to do net80211 work.
5921 */
5922 error = avp->av_newstate(vap, nstate, arg);
5923 if (error != 0)
5924 goto bad;
5925
5926 if (nstate == IEEE80211_S_RUN) {
5927 /* NB: collect bss node again, it may have changed */
5928 ni = vap->iv_bss;
5929
5930 DPRINTF(sc, ATH_DEBUG_STATE,
5931 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
5932 "capinfo 0x%04x chan %d\n", __func__,
5933 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
5934 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
5935
5936 switch (vap->iv_opmode) {
5936#ifdef ATH_SUPPORT_TDMA
5937#ifdef IEEE80211_SUPPORT_TDMA
5938 case IEEE80211_M_AHDEMO:
5939 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
5940 break;
5941 /* fall thru... */
5942#endif
5943 case IEEE80211_M_HOSTAP:
5944 case IEEE80211_M_IBSS:
5945 /*
5946 * Allocate and setup the beacon frame.
5947 *
5948 * Stop any previous beacon DMA. This may be
5949 * necessary, for example, when an ibss merge
5950 * causes reconfiguration; there will be a state
5951 * transition from RUN->RUN that means we may
5952 * be called with beacon transmission active.
5953 */
5954 ath_hal_stoptxdma(ah, sc->sc_bhalq);
5955
5956 error = ath_beacon_alloc(sc, ni);
5957 if (error != 0)
5958 goto bad;
5959 /*
5960 * If joining an adhoc network defer beacon timer
5961 * configuration to the next beacon frame so we
5962 * have a current TSF to use. Otherwise we're
5963 * starting an ibss/bss so there's no need to delay;
5964 * if this is the first vap moving to RUN state, then
5965 * beacon state needs to be [re]configured.
5966 */
5967 if (vap->iv_opmode == IEEE80211_M_IBSS &&
5968 ni->ni_tstamp.tsf != 0) {
5969 sc->sc_syncbeacon = 1;
5970 } else if (!sc->sc_beacons) {
5970#ifdef ATH_SUPPORT_TDMA
5971#ifdef IEEE80211_SUPPORT_TDMA
5972 if (vap->iv_caps & IEEE80211_C_TDMA)
5973 ath_tdma_config(sc, vap);
5974 else
5975#endif
5976 ath_beacon_config(sc, vap);
5977 sc->sc_beacons = 1;
5978 }
5979 break;
5980 case IEEE80211_M_STA:
5981 /*
5982 * Defer beacon timer configuration to the next
5983 * beacon frame so we have a current TSF to use
5984 * (any TSF collected when scanning is likely old).
5985 */
5986 sc->sc_syncbeacon = 1;
5987 break;
5988 case IEEE80211_M_MONITOR:
5989 /*
5990 * Monitor mode vaps have only INIT->RUN and RUN->RUN
5991 * transitions so we must re-enable interrupts here to
5992 * handle the case of a single monitor mode vap.
5993 */
5994 ath_hal_intrset(ah, sc->sc_imask);
5995 break;
5996 case IEEE80211_M_WDS:
5997 break;
5998 default:
5999 break;
6000 }
6001 /*
6002 * Let the hal process statistics collected during a
6003 * scan so it can provide calibrated noise floor data.
6004 */
6005 ath_hal_process_noisefloor(ah);
6006 /*
6007 * Reset rssi stats; maybe not the best place...
6008 */
6009 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
6010 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
6011 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
6012 /*
6013 * Finally, start any timers and the task q thread
6014 * (in case we didn't go through SCAN state).
6015 */
6016 if (ath_longcalinterval != 0) {
6017 /* start periodic recalibration timer */
6018 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
6019 } else {
6020 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
6021 "%s: calibration disabled\n", __func__);
6022 }
6023 taskqueue_unblock(sc->sc_tq);
6024 } else if (nstate == IEEE80211_S_INIT) {
6025 /*
6026 * If there are no vaps left in RUN state then
6027 * shutdown host/driver operation:
6028 * o disable interrupts
6029 * o disable the task queue thread
6030 * o mark beacon processing as stopped
6031 */
6032 if (!ath_isanyrunningvaps(vap)) {
6033 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
6034 /* disable interrupts */
6035 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
6036 taskqueue_block(sc->sc_tq);
6037 sc->sc_beacons = 0;
6038 }
6038#ifdef ATH_SUPPORT_TDMA
6039#ifdef IEEE80211_SUPPORT_TDMA
6039 ath_hal_setcca(ah, AH_TRUE);
6040#endif
6041 }
6042bad:
6043 return error;
6044}
6045
6046/*
6047 * Allocate a key cache slot to the station so we can
6048 * setup a mapping from key index to node. The key cache
6049 * slot is needed for managing antenna state and for
6050 * compression when stations do not use crypto. We do
6051 * it unilaterally here; if crypto is employed this slot
6052 * will be reassigned.
6053 */
6054static void
6055ath_setup_stationkey(struct ieee80211_node *ni)
6056{
6057 struct ieee80211vap *vap = ni->ni_vap;
6058 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6059 ieee80211_keyix keyix, rxkeyix;
6060
6061 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
6062 /*
6063 * Key cache is full; we'll fall back to doing
6064 * the more expensive lookup in software. Note
6065 * this also means no h/w compression.
6066 */
6067 /* XXX msg+statistic */
6068 } else {
6069 /* XXX locking? */
6070 ni->ni_ucastkey.wk_keyix = keyix;
6071 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
6072 /* NB: must mark device key to get called back on delete */
6073 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
6074 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
6075 /* NB: this will create a pass-thru key entry */
6076 ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
6077 }
6078}
6079
6080/*
6081 * Setup driver-specific state for a newly associated node.
6082 * Note that we're called also on a re-associate, the isnew
6083 * param tells us if this is the first time or not.
6084 */
6085static void
6086ath_newassoc(struct ieee80211_node *ni, int isnew)
6087{
6088 struct ath_node *an = ATH_NODE(ni);
6089 struct ieee80211vap *vap = ni->ni_vap;
6090 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6091 const struct ieee80211_txparam *tp = ni->ni_txparms;
6092
6093 an->an_mcastrix = ath_tx_findrix(sc->sc_currates, tp->mcastrate);
6094 an->an_mgmtrix = ath_tx_findrix(sc->sc_currates, tp->mgmtrate);
6095
6096 ath_rate_newassoc(sc, an, isnew);
6097 if (isnew &&
6098 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
6099 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
6100 ath_setup_stationkey(ni);
6101}
6102
6103static int
6104ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
6105 int nchans, struct ieee80211_channel chans[])
6106{
6107 struct ath_softc *sc = ic->ic_ifp->if_softc;
6108 struct ath_hal *ah = sc->sc_ah;
6109 HAL_STATUS status;
6110
6111 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6112 "%s: rd %u cc %u location %c%s\n",
6113 __func__, reg->regdomain, reg->country, reg->location,
6114 reg->ecm ? " ecm" : "");
6115
6116 status = ath_hal_set_channels(ah, chans, nchans,
6117 reg->country, reg->regdomain);
6118 if (status != HAL_OK) {
6119 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
6120 __func__, status);
6121 return EINVAL; /* XXX */
6122 }
6123 return 0;
6124}
6125
6126static void
6127ath_getradiocaps(struct ieee80211com *ic,
6128 int maxchans, int *nchans, struct ieee80211_channel chans[])
6129{
6130 struct ath_softc *sc = ic->ic_ifp->if_softc;
6131 struct ath_hal *ah = sc->sc_ah;
6132
6133 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
6134 __func__, SKU_DEBUG, CTRY_DEFAULT);
6135
6136 /* XXX check return */
6137 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
6138 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
6139
6140}
6141
6142static int
6143ath_getchannels(struct ath_softc *sc)
6144{
6145 struct ifnet *ifp = sc->sc_ifp;
6146 struct ieee80211com *ic = ifp->if_l2com;
6147 struct ath_hal *ah = sc->sc_ah;
6148 HAL_STATUS status;
6149
6150 /*
6151 * Collect channel set based on EEPROM contents.
6152 */
6153 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
6154 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
6155 if (status != HAL_OK) {
6156 if_printf(ifp, "%s: unable to collect channel list from hal, "
6157 "status %d\n", __func__, status);
6158 return EINVAL;
6159 }
6160 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
6161 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
6162 /* XXX map Atheros sku's to net80211 SKU's */
6163 /* XXX net80211 types too small */
6164 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
6165 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
6166 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
6167 ic->ic_regdomain.isocc[1] = ' ';
6168
6169 ic->ic_regdomain.ecm = 1;
6170 ic->ic_regdomain.location = 'I';
6171
6172 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6173 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
6174 __func__, sc->sc_eerd, sc->sc_eecc,
6175 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
6176 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
6177 return 0;
6178}
6179
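/*
 * Soft LED support: ath_led_event starts a blink keyed to the current
 * rate, ath_led_blink drives the gpio pin and arms a callout to turn
 * it off, and ath_led_done marks the cycle complete so the next event
 * can start another blink.
 */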
6180static void
6181ath_led_done(void *arg)
6182{
6183 struct ath_softc *sc = arg;
6184
6185 sc->sc_blinking = 0;
6186}
6187
6188/*
6189 * Turn the LED off: flip the pin and then set a timer so no
6190 * update will happen for the specified duration.
6191 */
6192static void
6193ath_led_off(void *arg)
6194{
6195 struct ath_softc *sc = arg;
6196
6197 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
6198 callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
6199}
6200
6201/*
6202 * Blink the LED according to the specified on/off times.
6203 */
6204static void
6205ath_led_blink(struct ath_softc *sc, int on, int off)
6206{
6207 DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
6208 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
6209 sc->sc_blinking = 1;
6210 sc->sc_ledoff = off;
6211 callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
6212}
6213
6214static void
6215ath_led_event(struct ath_softc *sc, int rix)
6216{
6217 sc->sc_ledevent = ticks; /* time of last event */
6218 if (sc->sc_blinking) /* don't interrupt active blink */
6219 return;
6220 ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff);
6221}
6222
6223static int
6224ath_rate_setup(struct ath_softc *sc, u_int mode)
6225{
6226 struct ath_hal *ah = sc->sc_ah;
6227 const HAL_RATE_TABLE *rt;
6228
6229 switch (mode) {
6230 case IEEE80211_MODE_11A:
6231 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
6232 break;
6233 case IEEE80211_MODE_HALF:
6234 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
6235 break;
6236 case IEEE80211_MODE_QUARTER:
6237 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
6238 break;
6239 case IEEE80211_MODE_11B:
6240 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
6241 break;
6242 case IEEE80211_MODE_11G:
6243 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
6244 break;
6245 case IEEE80211_MODE_TURBO_A:
6246 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
6247 break;
6248 case IEEE80211_MODE_TURBO_G:
6249 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
6250 break;
6251 case IEEE80211_MODE_STURBO_A:
6252 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
6253 break;
6254 case IEEE80211_MODE_11NA:
6255 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
6256 break;
6257 case IEEE80211_MODE_11NG:
6258 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
6259 break;
6260 default:
6261 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
6262 __func__, mode);
6263 return 0;
6264 }
6265 sc->sc_rates[mode] = rt;
6266 return (rt != NULL);
6267}
6268
6269static void
6270ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
6271{
6272#define N(a) (sizeof(a)/sizeof(a[0]))
6273 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
6274 static const struct {
6275 u_int rate; /* tx/rx 802.11 rate */
6276 u_int16_t timeOn; /* LED on time (ms) */
6277 u_int16_t timeOff; /* LED off time (ms) */
6278 } blinkrates[] = {
6279 { 108, 40, 10 },
6280 { 96, 44, 11 },
6281 { 72, 50, 13 },
6282 { 48, 57, 14 },
6283 { 36, 67, 16 },
6284 { 24, 80, 20 },
6285 { 22, 100, 25 },
6286 { 18, 133, 34 },
6287 { 12, 160, 40 },
6288 { 10, 200, 50 },
6289 { 6, 240, 58 },
6290 { 4, 267, 66 },
6291 { 2, 400, 100 },
6292 { 0, 500, 130 },
6293 /* XXX half/quarter rates */
6294 };
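	/*
	 * NB: blinkrates[].rate is an IEEE rate code in 500kb/s units
	 * (e.g. 108 is 54Mb/s); the table is searched linearly below
	 * with the final entry serving as the catch-all.
	 */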
6295 const HAL_RATE_TABLE *rt;
6296 int i, j;
6297
6298 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
6299 rt = sc->sc_rates[mode];
6300 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
6301 for (i = 0; i < rt->rateCount; i++) {
6302 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6303 if (rt->info[i].phy != IEEE80211_T_HT)
6304 sc->sc_rixmap[ieeerate] = i;
6305 else
6306 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
6307 }
6308 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
6309 for (i = 0; i < N(sc->sc_hwmap); i++) {
6310 if (i >= rt->rateCount) {
6311 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
6312 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
6313 continue;
6314 }
6315 sc->sc_hwmap[i].ieeerate =
6316 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6317 if (rt->info[i].phy == IEEE80211_T_HT)
6318 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
6319 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
6320 if (rt->info[i].shortPreamble ||
6321 rt->info[i].phy == IEEE80211_T_OFDM)
6322 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
6323 /* NB: receive frames include FCS */
6324 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags |
6325 IEEE80211_RADIOTAP_F_FCS;
6326 /* setup blink rate table to avoid per-packet lookup */
6327 for (j = 0; j < N(blinkrates)-1; j++)
6328 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
6329 break;
6330 /* NB: this uses the last entry if the rate isn't found */
6331		/* XXX beware of overflow */
6332 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
6333 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
6334 }
6335 sc->sc_currates = rt;
6336 sc->sc_curmode = mode;
6337 /*
6338	 * All protection frames are transmitted at 2Mb/s for
6339 * 11g, otherwise at 1Mb/s.
6340 */
6341 if (mode == IEEE80211_MODE_11G)
6342 sc->sc_protrix = ath_tx_findrix(rt, 2*2);
6343 else
6344 sc->sc_protrix = ath_tx_findrix(rt, 2*1);
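	/* NB: ath_tx_findrix takes the rate in 500kb/s units */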
6345	/* NB: caller is responsible for resetting rate control state */
6346#undef N
6347}
6348
6349#ifdef ATH_DEBUG
6350static void
6351ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,
6352 u_int ix, int done)
6353{
6354 const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
6355 struct ath_hal *ah = sc->sc_ah;
6356 const struct ath_desc *ds;
6357 int i;
6358
6359 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
6360 printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n"
6361 " %08x %08x %08x %08x\n",
6362 ix, ds, (const struct ath_desc *)bf->bf_daddr + i,
6363 ds->ds_link, ds->ds_data,
6364 !done ? "" : (rs->rs_status == 0) ? " *" : " !",
6365 ds->ds_ctl0, ds->ds_ctl1,
6366 ds->ds_hw[0], ds->ds_hw[1]);
6367 if (ah->ah_magic == 0x20065416) {
6368 printf(" %08x %08x %08x %08x %08x %08x %08x\n",
6369 ds->ds_hw[2], ds->ds_hw[3], ds->ds_hw[4],
6370 ds->ds_hw[5], ds->ds_hw[6], ds->ds_hw[7],
6371 ds->ds_hw[8]);
6372 }
6373 }
6374}
6375
6376static void
6377ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *bf,
6378 u_int qnum, u_int ix, int done)
6379{
6380 const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
6381 struct ath_hal *ah = sc->sc_ah;
6382 const struct ath_desc *ds;
6383 int i;
6384
6385 printf("Q%u[%3u]", qnum, ix);
6386 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
6387		printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:%04x%s\n"
6388 " %08x %08x %08x %08x %08x %08x\n",
6389 ds, (const struct ath_desc *)bf->bf_daddr + i,
6390 ds->ds_link, ds->ds_data, bf->bf_txflags,
6391 !done ? "" : (ts->ts_status == 0) ? " *" : " !",
6392 ds->ds_ctl0, ds->ds_ctl1,
6393 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
6394 if (ah->ah_magic == 0x20065416) {
6395 printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
6396 ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
6397 ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
6398 ds->ds_hw[10],ds->ds_hw[11]);
6399 printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
6400 ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
6401 ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
6402 ds->ds_hw[18], ds->ds_hw[19]);
6403 }
6404 }
6405}
6406#endif /* ATH_DEBUG */
6407
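/*
 * Watchdog timer: sc_wd_timer is armed as frames are queued and
 * cleared as transmits complete; if it counts down to zero here the
 * device is presumed hung, any hang diagnostic the HAL can supply is
 * reported, and the chip is reset.
 */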
6408static void
6409ath_watchdog(void *arg)
6410{
6411 struct ath_softc *sc = arg;
6412
6413 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
6414 struct ifnet *ifp = sc->sc_ifp;
6415 uint32_t hangs;
6416
6417 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6418 hangs != 0) {
6419 if_printf(ifp, "%s hang detected (0x%x)\n",
6420 hangs & 0xff ? "bb" : "mac", hangs);
6421 } else
6422 if_printf(ifp, "device timeout\n");
6423 ath_reset(ifp);
6424 ifp->if_oerrors++;
6425 sc->sc_stats.ast_watchdog++;
6426 }
6427 callout_schedule(&sc->sc_wd_ch, hz);
6428}
6429
6430#ifdef ATH_DIAGAPI
6431/*
6432 * Diagnostic interface to the HAL. This is used by various
6433 * tools to do things like retrieve register contents for
6434 * debugging. The mechanism is intentionally opaque so that
6435 * it can change frequently w/o concern for compatibility.
6436 */
6437static int
6438ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6439{
6440 struct ath_hal *ah = sc->sc_ah;
6441 u_int id = ad->ad_id & ATH_DIAG_ID;
6442 void *indata = NULL;
6443 void *outdata = NULL;
6444 u_int32_t insize = ad->ad_in_size;
6445 u_int32_t outsize = ad->ad_out_size;
6446 int error = 0;
6447
6448 if (ad->ad_id & ATH_DIAG_IN) {
6449 /*
6450 * Copy in data.
6451 */
6452 indata = malloc(insize, M_TEMP, M_NOWAIT);
6453 if (indata == NULL) {
6454 error = ENOMEM;
6455 goto bad;
6456 }
6457 error = copyin(ad->ad_in_data, indata, insize);
6458 if (error)
6459 goto bad;
6460 }
6461 if (ad->ad_id & ATH_DIAG_DYN) {
6462 /*
6463 * Allocate a buffer for the results (otherwise the HAL
6464 * returns a pointer to a buffer where we can read the
6465 * results). Note that we depend on the HAL leaving this
6466 * pointer for us to use below in reclaiming the buffer;
6467 * may want to be more defensive.
6468 */
6469 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
6470 if (outdata == NULL) {
6471 error = ENOMEM;
6472 goto bad;
6473 }
6474 }
6475 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
6476 if (outsize < ad->ad_out_size)
6477 ad->ad_out_size = outsize;
6478 if (outdata != NULL)
6479 error = copyout(outdata, ad->ad_out_data,
6480 ad->ad_out_size);
6481 } else {
6482 error = EINVAL;
6483 }
6484bad:
6485 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
6486 free(indata, M_TEMP);
6487 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
6488 free(outdata, M_TEMP);
6489 return error;
6490}
6491#endif /* ATH_DIAGAPI */
6492
6493static int
6494ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
6495{
6496#define IS_RUNNING(ifp) \
6497 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
6498 struct ath_softc *sc = ifp->if_softc;
6499 struct ieee80211com *ic = ifp->if_l2com;
6500 struct ifreq *ifr = (struct ifreq *)data;
6501 const HAL_RATE_TABLE *rt;
6502 int error = 0;
6503
6504 switch (cmd) {
6505 case SIOCSIFFLAGS:
6506 ATH_LOCK(sc);
6507 if (IS_RUNNING(ifp)) {
6508 /*
6509 * To avoid rescanning another access point,
6510 * do not call ath_init() here. Instead,
6511 * only reflect promisc mode settings.
6512 */
6513 ath_mode_init(sc);
6514 } else if (ifp->if_flags & IFF_UP) {
6515 /*
6516 * Beware of being called during attach/detach
6517 * to reset promiscuous mode. In that case we
6518 * will still be marked UP but not RUNNING.
6519 * However trying to re-init the interface
6520 * is the wrong thing to do as we've already
6521 * torn down much of our state. There's
6522 * probably a better way to deal with this.
6523 */
6524 if (!sc->sc_invalid)
6525 ath_init(sc); /* XXX lose error */
6526 } else {
6527 ath_stop_locked(ifp);
6528#ifdef notyet
6529 /* XXX must wakeup in places like ath_vap_delete */
6530 if (!sc->sc_invalid)
6531 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
6532#endif
6533 }
6534 ATH_UNLOCK(sc);
6535 break;
6536 case SIOCGIFMEDIA:
6537 case SIOCSIFMEDIA:
6538 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
6539 break;
6540 case SIOCGATHSTATS:
6541 /* NB: embed these numbers to get a consistent view */
6542 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
6543 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
6544 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
6545 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
6046
6047/*
6048 * Allocate a key cache slot to the station so we can
6049 * setup a mapping from key index to node. The key cache
6050 * slot is needed for managing antenna state and for
6051 * compression when stations do not use crypto. We do
6052 * it unilaterally here; if crypto is employed this slot
6053 * will be reassigned.
6054 */
6055static void
6056ath_setup_stationkey(struct ieee80211_node *ni)
6057{
6058 struct ieee80211vap *vap = ni->ni_vap;
6059 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6060 ieee80211_keyix keyix, rxkeyix;
6061
6062 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
6063 /*
6064 * Key cache is full; we'll fall back to doing
6065 * the more expensive lookup in software. Note
6066 * this also means no h/w compression.
6067 */
6068 /* XXX msg+statistic */
6069 } else {
6070 /* XXX locking? */
6071 ni->ni_ucastkey.wk_keyix = keyix;
6072 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
6073 /* NB: must mark device key to get called back on delete */
6074 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
6075 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
6076 /* NB: this will create a pass-thru key entry */
6077 ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
6078 }
6079}
6080
6081/*
6082 * Setup driver-specific state for a newly associated node.
6083 * Note that we're called also on a re-associate, the isnew
6084 * param tells us if this is the first time or not.
6085 */
6086static void
6087ath_newassoc(struct ieee80211_node *ni, int isnew)
6088{
6089 struct ath_node *an = ATH_NODE(ni);
6090 struct ieee80211vap *vap = ni->ni_vap;
6091 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6092 const struct ieee80211_txparam *tp = ni->ni_txparms;
6093
6094 an->an_mcastrix = ath_tx_findrix(sc->sc_currates, tp->mcastrate);
6095 an->an_mgmtrix = ath_tx_findrix(sc->sc_currates, tp->mgmtrate);
6096
6097 ath_rate_newassoc(sc, an, isnew);
6098 if (isnew &&
6099 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
6100 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
6101 ath_setup_stationkey(ni);
6102}
6103
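/*
 * Push a new channel list and regulatory state down to the HAL.
 * NB: presumably wired up as the net80211 ic_setregdomain method
 * (the hookup happens at attach time, elsewhere in this file), so
 * returning EINVAL here rejects the proposed regulatory change.
 */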
6104static int
6105ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
6106 int nchans, struct ieee80211_channel chans[])
6107{
6108 struct ath_softc *sc = ic->ic_ifp->if_softc;
6109 struct ath_hal *ah = sc->sc_ah;
6110 HAL_STATUS status;
6111
6112 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6113 "%s: rd %u cc %u location %c%s\n",
6114 __func__, reg->regdomain, reg->country, reg->location,
6115 reg->ecm ? " ecm" : "");
6116
6117 status = ath_hal_set_channels(ah, chans, nchans,
6118 reg->country, reg->regdomain);
6119 if (status != HAL_OK) {
6120 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
6121 __func__, status);
6122 return EINVAL; /* XXX */
6123 }
6124 return 0;
6125}
6126
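/*
 * Report the full calibrated channel list (debug SKU, default
 * country) -- presumably the ic_getradiocaps method net80211 uses
 * to learn what the radio can do before applying a regdomain.
 */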
6127static void
6128ath_getradiocaps(struct ieee80211com *ic,
6129 int maxchans, int *nchans, struct ieee80211_channel chans[])
6130{
6131 struct ath_softc *sc = ic->ic_ifp->if_softc;
6132 struct ath_hal *ah = sc->sc_ah;
6133
6134 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
6135 __func__, SKU_DEBUG, CTRY_DEFAULT);
6136
6137 /* XXX check return */
6138 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
6139 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
6141}
6142
6143static int
6144ath_getchannels(struct ath_softc *sc)
6145{
6146 struct ifnet *ifp = sc->sc_ifp;
6147 struct ieee80211com *ic = ifp->if_l2com;
6148 struct ath_hal *ah = sc->sc_ah;
6149 HAL_STATUS status;
6150
6151 /*
6152 * Collect channel set based on EEPROM contents.
6153 */
6154 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
6155 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
6156 if (status != HAL_OK) {
6157 if_printf(ifp, "%s: unable to collect channel list from hal, "
6158 "status %d\n", __func__, status);
6159 return EINVAL;
6160 }
6161 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
6162 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
6163 /* XXX map Atheros sku's to net80211 SKU's */
6164 /* XXX net80211 types too small */
6165 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
6166 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
6167 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
6168 ic->ic_regdomain.isocc[1] = ' ';
6169
6170 ic->ic_regdomain.ecm = 1;
6171 ic->ic_regdomain.location = 'I';
6172
6173 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6174 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
6175 __func__, sc->sc_eerd, sc->sc_eecc,
6176 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
6177 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
6178 return 0;
6179}
6180
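/*
 * Software LED blink state machine: ath_led_event() starts a cycle
 * via ath_led_blink(), which turns the LED on and schedules
 * ath_led_off() after "on" ticks; that flips the pin back and
 * schedules ath_led_done() after "off" ticks to clear sc_blinking
 * so the next event can blink again.
 */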
6181static void
6182ath_led_done(void *arg)
6183{
6184 struct ath_softc *sc = arg;
6185
6186 sc->sc_blinking = 0;
6187}
6188
6189/*
6190 * Turn the LED off: flip the pin and then set a timer so no
6191 * update will happen for the specified duration.
6192 */
6193static void
6194ath_led_off(void *arg)
6195{
6196 struct ath_softc *sc = arg;
6197
6198 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
6199 callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
6200}
6201
6202/*
6203 * Blink the LED according to the specified on/off times.
6204 */
6205static void
6206ath_led_blink(struct ath_softc *sc, int on, int off)
6207{
6208 DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
6209 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
6210 sc->sc_blinking = 1;
6211 sc->sc_ledoff = off;
6212 callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
6213}
6214
6215static void
6216ath_led_event(struct ath_softc *sc, int rix)
6217{
6218 sc->sc_ledevent = ticks; /* time of last event */
6219 if (sc->sc_blinking) /* don't interrupt active blink */
6220 return;
6221 ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff);
6222}
6223
6224static int
6225ath_rate_setup(struct ath_softc *sc, u_int mode)
6226{
6227 struct ath_hal *ah = sc->sc_ah;
6228 const HAL_RATE_TABLE *rt;
6229
6230 switch (mode) {
6231 case IEEE80211_MODE_11A:
6232 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
6233 break;
6234 case IEEE80211_MODE_HALF:
6235 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
6236 break;
6237 case IEEE80211_MODE_QUARTER:
6238 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
6239 break;
6240 case IEEE80211_MODE_11B:
6241 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
6242 break;
6243 case IEEE80211_MODE_11G:
6244 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
6245 break;
6246 case IEEE80211_MODE_TURBO_A:
6247 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
6248 break;
6249 case IEEE80211_MODE_TURBO_G:
6250 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
6251 break;
6252 case IEEE80211_MODE_STURBO_A:
6253 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
6254 break;
6255 case IEEE80211_MODE_11NA:
6256 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
6257 break;
6258 case IEEE80211_MODE_11NG:
6259 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
6260 break;
6261 default:
6262 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
6263 __func__, mode);
6264 return 0;
6265 }
6266 sc->sc_rates[mode] = rt;
6267 return (rt != NULL);
6268}
6269
6270static void
6271ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
6272{
6273#define N(a) (sizeof(a)/sizeof(a[0]))
6274 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
6275 static const struct {
6276 u_int rate; /* tx/rx 802.11 rate */
6277 u_int16_t timeOn; /* LED on time (ms) */
6278 u_int16_t timeOff; /* LED off time (ms) */
6279 } blinkrates[] = {
6280 { 108, 40, 10 },
6281 { 96, 44, 11 },
6282 { 72, 50, 13 },
6283 { 48, 57, 14 },
6284 { 36, 67, 16 },
6285 { 24, 80, 20 },
6286 { 22, 100, 25 },
6287 { 18, 133, 34 },
6288 { 12, 160, 40 },
6289 { 10, 200, 50 },
6290 { 6, 240, 58 },
6291 { 4, 267, 66 },
6292 { 2, 400, 100 },
6293 { 0, 500, 130 },
6294 /* XXX half/quarter rates */
6295 };
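	/*
	 * NB: table times are in ms; the loop below converts them to
	 * ticks via (time * hz) / 1000, so with hz=1000 the 54Mb/s
	 * entry (rate 108) yields 40 ticks on / 10 ticks off.
	 */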
6296 const HAL_RATE_TABLE *rt;
6297 int i, j;
6298
6299 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
6300 rt = sc->sc_rates[mode];
6301 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
6302 for (i = 0; i < rt->rateCount; i++) {
6303 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6304 if (rt->info[i].phy != IEEE80211_T_HT)
6305 sc->sc_rixmap[ieeerate] = i;
6306 else
6307 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
6308 }
6309 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
6310 for (i = 0; i < N(sc->sc_hwmap); i++) {
6311 if (i >= rt->rateCount) {
6312 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
6313 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
6314 continue;
6315 }
6316 sc->sc_hwmap[i].ieeerate =
6317 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6318 if (rt->info[i].phy == IEEE80211_T_HT)
6319 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
6320 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
6321 if (rt->info[i].shortPreamble ||
6322 rt->info[i].phy == IEEE80211_T_OFDM)
6323 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
6324 /* NB: receive frames include FCS */
6325 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags |
6326 IEEE80211_RADIOTAP_F_FCS;
6327 /* setup blink rate table to avoid per-packet lookup */
6328 for (j = 0; j < N(blinkrates)-1; j++)
6329 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
6330 break;
6331 /* NB: this uses the last entry if the rate isn't found */
6332 /* XXX beware of overflow */
6333 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
6334 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
6335 }
6336 sc->sc_currates = rt;
6337 sc->sc_curmode = mode;
6338 /*
6339 * All protection frames are transmitted at 2Mb/s for
6340 * 11g, otherwise at 1Mb/s (rate arguments are in 500kb/s units).
6341 */
6342 if (mode == IEEE80211_MODE_11G)
6343 sc->sc_protrix = ath_tx_findrix(rt, 2*2);
6344 else
6345 sc->sc_protrix = ath_tx_findrix(rt, 2*1);
6346 /* NB: caller is responsible for resetting rate control state */
6347#undef N
6348}
6349
6350#ifdef ATH_DEBUG
6351static void
6352ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,
6353 u_int ix, int done)
6354{
6355 const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
6356 struct ath_hal *ah = sc->sc_ah;
6357 const struct ath_desc *ds;
6358 int i;
6359
6360 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
6361 printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n"
6362 " %08x %08x %08x %08x\n",
6363 ix, ds, (const struct ath_desc *)bf->bf_daddr + i,
6364 ds->ds_link, ds->ds_data,
6365 !done ? "" : (rs->rs_status == 0) ? " *" : " !",
6366 ds->ds_ctl0, ds->ds_ctl1,
6367 ds->ds_hw[0], ds->ds_hw[1]);
6368 if (ah->ah_magic == 0x20065416) {
6369 printf(" %08x %08x %08x %08x %08x %08x %08x\n",
6370 ds->ds_hw[2], ds->ds_hw[3], ds->ds_hw[4],
6371 ds->ds_hw[5], ds->ds_hw[6], ds->ds_hw[7],
6372 ds->ds_hw[8]);
6373 }
6374 }
6375}
6376
6377static void
6378ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *bf,
6379 u_int qnum, u_int ix, int done)
6380{
6381 const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
6382 struct ath_hal *ah = sc->sc_ah;
6383 const struct ath_desc *ds;
6384 int i;
6385
6386 printf("Q%u[%3u]", qnum, ix);
6387 for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
6388 printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:%04x%s\n"
6389 " %08x %08x %08x %08x %08x %08x\n",
6390 ds, (const struct ath_desc *)bf->bf_daddr + i,
6391 ds->ds_link, ds->ds_data, bf->bf_txflags,
6392 !done ? "" : (ts->ts_status == 0) ? " *" : " !",
6393 ds->ds_ctl0, ds->ds_ctl1,
6394 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
6395 if (ah->ah_magic == 0x20065416) {
6396 printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
6397 ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
6398 ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
6399 ds->ds_hw[10],ds->ds_hw[11]);
6400 printf(" %08x %08x %08x %08x %08x %08x %08x %08x\n",
6401 ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
6402 ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
6403 ds->ds_hw[18], ds->ds_hw[19]);
6404 }
6405 }
6406}
6407#endif /* ATH_DEBUG */
6408
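/*
 * Per-second watchdog: transmit paths arm sc_wd_timer (e.g.
 * ath_raw_xmit() below sets it to 5) and completion paths clear it;
 * the callout fires every hz ticks, and a count that reaches zero
 * means the hardware wedged, so check the HAL for a bb/mac hang
 * and reset the chip.
 */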
6409static void
6410ath_watchdog(void *arg)
6411{
6412 struct ath_softc *sc = arg;
6413
6414 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
6415 struct ifnet *ifp = sc->sc_ifp;
6416 uint32_t hangs;
6417
6418 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6419 hangs != 0) {
6420 if_printf(ifp, "%s hang detected (0x%x)\n",
6421 hangs & 0xff ? "bb" : "mac", hangs);
6422 } else
6423 if_printf(ifp, "device timeout\n");
6424 ath_reset(ifp);
6425 ifp->if_oerrors++;
6426 sc->sc_stats.ast_watchdog++;
6427 }
6428 callout_schedule(&sc->sc_wd_ch, hz);
6429}
6430
6431#ifdef ATH_DIAGAPI
6432/*
6433 * Diagnostic interface to the HAL. This is used by various
6434 * tools to do things like retrieve register contents for
6435 * debugging. The mechanism is intentionally opaque so that
6436 * it can change frequently w/o concern for compatibility.
6437 */
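/*
 * Hedged usage sketch (assuming the usual if_athioctl.h layout,
 * ad_name first so the request overlays an ifreq):
 *
 *	struct ath_diag ad;
 *	memset(&ad, 0, sizeof(ad));
 *	strlcpy(ad.ad_name, "ath0", sizeof(ad.ad_name));
 *	ad.ad_id = id | ATH_DIAG_DYN;	   <- HAL allocates the result
 *	ad.ad_out_data = (caddr_t) buf;
 *	ad.ad_out_size = sizeof(buf);
 *	ioctl(s, SIOCGATHDIAG, &ad);
 */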
6438static int
6439ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6440{
6441 struct ath_hal *ah = sc->sc_ah;
6442 u_int id = ad->ad_id & ATH_DIAG_ID;
6443 void *indata = NULL;
6444 void *outdata = NULL;
6445 u_int32_t insize = ad->ad_in_size;
6446 u_int32_t outsize = ad->ad_out_size;
6447 int error = 0;
6448
6449 if (ad->ad_id & ATH_DIAG_IN) {
6450 /*
6451 * Copy in data.
6452 */
6453 indata = malloc(insize, M_TEMP, M_NOWAIT);
6454 if (indata == NULL) {
6455 error = ENOMEM;
6456 goto bad;
6457 }
6458 error = copyin(ad->ad_in_data, indata, insize);
6459 if (error)
6460 goto bad;
6461 }
6462 if (ad->ad_id & ATH_DIAG_DYN) {
6463 /*
6464 * Allocate a buffer for the results (otherwise the HAL
6465 * returns a pointer to a buffer where we can read the
6466 * results). Note that we depend on the HAL leaving this
6467 * pointer for us to use below in reclaiming the buffer;
6468 * may want to be more defensive.
6469 */
6470 outdata = malloc(outsize, M_TEMP, M_NOWAIT);
6471 if (outdata == NULL) {
6472 error = ENOMEM;
6473 goto bad;
6474 }
6475 }
6476 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
6477 if (outsize < ad->ad_out_size)
6478 ad->ad_out_size = outsize;
6479 if (outdata != NULL)
6480 error = copyout(outdata, ad->ad_out_data,
6481 ad->ad_out_size);
6482 } else {
6483 error = EINVAL;
6484 }
6485bad:
6486 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
6487 free(indata, M_TEMP);
6488 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
6489 free(outdata, M_TEMP);
6490 return error;
6491}
6492#endif /* ATH_DIAGAPI */
6493
6494static int
6495ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
6496{
6497#define IS_RUNNING(ifp) \
6498 ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
6499 struct ath_softc *sc = ifp->if_softc;
6500 struct ieee80211com *ic = ifp->if_l2com;
6501 struct ifreq *ifr = (struct ifreq *)data;
6502 const HAL_RATE_TABLE *rt;
6503 int error = 0;
6504
6505 switch (cmd) {
6506 case SIOCSIFFLAGS:
6507 ATH_LOCK(sc);
6508 if (IS_RUNNING(ifp)) {
6509 /*
6510 * To avoid rescanning another access point,
6511 * do not call ath_init() here. Instead,
6512 * only reflect promisc mode settings.
6513 */
6514 ath_mode_init(sc);
6515 } else if (ifp->if_flags & IFF_UP) {
6516 /*
6517 * Beware of being called during attach/detach
6518 * to reset promiscuous mode. In that case we
6519 * will still be marked UP but not RUNNING.
6520 * However trying to re-init the interface
6521 * is the wrong thing to do as we've already
6522 * torn down much of our state. There's
6523 * probably a better way to deal with this.
6524 */
6525 if (!sc->sc_invalid)
6526 ath_init(sc); /* XXX lose error */
6527 } else {
6528 ath_stop_locked(ifp);
6529#ifdef notyet
6530 /* XXX must wakeup in places like ath_vap_delete */
6531 if (!sc->sc_invalid)
6532 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
6533#endif
6534 }
6535 ATH_UNLOCK(sc);
6536 break;
6537 case SIOCGIFMEDIA:
6538 case SIOCSIFMEDIA:
6539 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
6540 break;
6541 case SIOCGATHSTATS:
6542 /* NB: embed these numbers to get a consistent view */
6543 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
6544 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
6545 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
6546 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
6547#ifdef IEEE80211_SUPPORT_TDMA
6547 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
6548 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
6549#endif
6550 rt = sc->sc_currates;
6551 /* XXX HT rates */
6552 sc->sc_stats.ast_tx_rate =
6553 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
6554 return copyout(&sc->sc_stats,
6555 ifr->ifr_data, sizeof (sc->sc_stats));
6556 case SIOCZATHSTATS:
6557 error = priv_check(curthread, PRIV_DRIVER);
6558 if (error == 0)
6559 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
6560 break;
6561#ifdef ATH_DIAGAPI
6562 case SIOCGATHDIAG:
6563 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
6564 break;
6565#endif
6566 case SIOCGIFADDR:
6567 error = ether_ioctl(ifp, cmd, data);
6568 break;
6569 default:
6570 error = EINVAL;
6571 break;
6572 }
6573 return error;
6574#undef IS_RUNNING
6575}
6576
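/*
 * The sysctl handlers below share one pattern: read the current
 * value from the HAL, let sysctl_handle_int() copy it out (and in,
 * if the request carries a new value, i.e. req->newptr != NULL),
 * then push the change back to the HAL, mapping failure to EINVAL.
 * Nodes are attached under the per-device tree in ath_sysctlattach()
 * below, so e.g. (hypothetical unit number):
 *
 *	sysctl dev.ath.0.slottime=9
 */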
6577static int
6578ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
6579{
6580 struct ath_softc *sc = arg1;
6581 u_int slottime = ath_hal_getslottime(sc->sc_ah);
6582 int error;
6583
6584 error = sysctl_handle_int(oidp, &slottime, 0, req);
6585 if (error || !req->newptr)
6586 return error;
6587 return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
6588}
6589
6590static int
6591ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS)
6592{
6593 struct ath_softc *sc = arg1;
6594 u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah);
6595 int error;
6596
6597 error = sysctl_handle_int(oidp, &acktimeout, 0, req);
6598 if (error || !req->newptr)
6599 return error;
6600 return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0;
6601}
6602
6603static int
6604ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS)
6605{
6606 struct ath_softc *sc = arg1;
6607 u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah);
6608 int error;
6609
6610 error = sysctl_handle_int(oidp, &ctstimeout, 0, req);
6611 if (error || !req->newptr)
6612 return error;
6613 return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0;
6614}
6615
6616static int
6617ath_sysctl_softled(SYSCTL_HANDLER_ARGS)
6618{
6619 struct ath_softc *sc = arg1;
6620 int softled = sc->sc_softled;
6621 int error;
6622
6623 error = sysctl_handle_int(oidp, &softled, 0, req);
6624 if (error || !req->newptr)
6625 return error;
6626 softled = (softled != 0);
6627 if (softled != sc->sc_softled) {
6628 if (softled) {
6629 /* NB: handle any sc_ledpin change */
6630 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin,
6631 HAL_GPIO_MUX_MAC_NETWORK_LED);
6632 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
6633 !sc->sc_ledon);
6634 }
6635 sc->sc_softled = softled;
6636 }
6637 return 0;
6638}
6639
6640static int
6641ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS)
6642{
6643 struct ath_softc *sc = arg1;
6644 int ledpin = sc->sc_ledpin;
6645 int error;
6646
6647 error = sysctl_handle_int(oidp, &ledpin, 0, req);
6648 if (error || !req->newptr)
6649 return error;
6650 if (ledpin != sc->sc_ledpin) {
6651 sc->sc_ledpin = ledpin;
6652 if (sc->sc_softled) {
6653 ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin,
6654 HAL_GPIO_MUX_MAC_NETWORK_LED);
6655 ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
6656 !sc->sc_ledon);
6657 }
6658 }
6659 return 0;
6660}
6661
6662static int
6663ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS)
6664{
6665 struct ath_softc *sc = arg1;
6666 u_int txantenna = ath_hal_getantennaswitch(sc->sc_ah);
6667 int error;
6668
6669 error = sysctl_handle_int(oidp, &txantenna, 0, req);
6670 if (!error && req->newptr) {
6671 /* XXX assumes 2 antenna ports */
6672 if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B)
6673 return EINVAL;
6674 ath_hal_setantennaswitch(sc->sc_ah, txantenna);
6675 /*
6676 * NB: with the switch locked this isn't meaningful,
6677 * but set it anyway so things like radiotap get
6678 * consistent info in their data.
6679 */
6680 sc->sc_txantenna = txantenna;
6681 }
6682 return error;
6683}
6684
6685static int
6686ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS)
6687{
6688 struct ath_softc *sc = arg1;
6689 u_int defantenna = ath_hal_getdefantenna(sc->sc_ah);
6690 int error;
6691
6692 error = sysctl_handle_int(oidp, &defantenna, 0, req);
6693 if (!error && req->newptr)
6694 ath_hal_setdefantenna(sc->sc_ah, defantenna);
6695 return error;
6696}
6697
6698static int
6699ath_sysctl_diversity(SYSCTL_HANDLER_ARGS)
6700{
6701 struct ath_softc *sc = arg1;
6702 u_int diversity = ath_hal_getdiversity(sc->sc_ah);
6703 int error;
6704
6705 error = sysctl_handle_int(oidp, &diversity, 0, req);
6706 if (error || !req->newptr)
6707 return error;
6708 if (!ath_hal_setdiversity(sc->sc_ah, diversity))
6709 return EINVAL;
6710 sc->sc_diversity = diversity;
6711 return 0;
6712}
6713
6714static int
6715ath_sysctl_diag(SYSCTL_HANDLER_ARGS)
6716{
6717 struct ath_softc *sc = arg1;
6718 u_int32_t diag;
6719 int error;
6720
6721 if (!ath_hal_getdiag(sc->sc_ah, &diag))
6722 return EINVAL;
6723 error = sysctl_handle_int(oidp, &diag, 0, req);
6724 if (error || !req->newptr)
6725 return error;
6726 return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0;
6727}
6728
6729static int
6730ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
6731{
6732 struct ath_softc *sc = arg1;
6733 struct ifnet *ifp = sc->sc_ifp;
6734 u_int32_t scale;
6735 int error;
6736
6737 (void) ath_hal_gettpscale(sc->sc_ah, &scale);
6738 error = sysctl_handle_int(oidp, &scale, 0, req);
6739 if (error || !req->newptr)
6740 return error;
6741 return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
6742 (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
6743}
6744
6745static int
6746ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
6747{
6748 struct ath_softc *sc = arg1;
6749 u_int tpc = ath_hal_gettpc(sc->sc_ah);
6750 int error;
6751
6752 error = sysctl_handle_int(oidp, &tpc, 0, req);
6753 if (error || !req->newptr)
6754 return error;
6755 return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
6756}
6757
6758static int
6759ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
6760{
6761 struct ath_softc *sc = arg1;
6762 struct ifnet *ifp = sc->sc_ifp;
6763 struct ath_hal *ah = sc->sc_ah;
6764 u_int rfkill = ath_hal_getrfkill(ah);
6765 int error;
6766
6767 error = sysctl_handle_int(oidp, &rfkill, 0, req);
6768 if (error || !req->newptr)
6769 return error;
6770 if (rfkill == ath_hal_getrfkill(ah)) /* unchanged */
6771 return 0;
6772 if (!ath_hal_setrfkill(ah, rfkill))
6773 return EINVAL;
6774 return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
6775}
6776
6777static int
6778ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
6779{
6780 struct ath_softc *sc = arg1;
6781 u_int rfsilent;
6782 int error;
6783
6784 (void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
6785 error = sysctl_handle_int(oidp, &rfsilent, 0, req);
6786 if (error || !req->newptr)
6787 return error;
6788 if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent))
6789 return EINVAL;
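	/*
	 * NB: hedged decode of the packed value, per the masks below:
	 * bit 1 carries the polarity, bits 2..4 select the GPIO pin.
	 */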
6790 sc->sc_rfsilentpin = rfsilent & 0x1c;
6791 sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
6792 return 0;
6793}
6794
6795static int
6796ath_sysctl_tpack(SYSCTL_HANDLER_ARGS)
6797{
6798 struct ath_softc *sc = arg1;
6799 u_int32_t tpack;
6800 int error;
6801
6802 (void) ath_hal_gettpack(sc->sc_ah, &tpack);
6803 error = sysctl_handle_int(oidp, &tpack, 0, req);
6804 if (error || !req->newptr)
6805 return error;
6806 return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0;
6807}
6808
6809static int
6810ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS)
6811{
6812 struct ath_softc *sc = arg1;
6813 u_int32_t tpcts;
6814 int error;
6815
6816 (void) ath_hal_gettpcts(sc->sc_ah, &tpcts);
6817 error = sysctl_handle_int(oidp, &tpcts, 0, req);
6818 if (error || !req->newptr)
6819 return error;
6820 return !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0;
6821}
6822
6823static int
6824ath_sysctl_intmit(SYSCTL_HANDLER_ARGS)
6825{
6826 struct ath_softc *sc = arg1;
6827 int intmit, error;
6828
6829 intmit = ath_hal_getintmit(sc->sc_ah);
6830 error = sysctl_handle_int(oidp, &intmit, 0, req);
6831 if (error || !req->newptr)
6832 return error;
6833 return !ath_hal_setintmit(sc->sc_ah, intmit) ? EINVAL : 0;
6834}
6835
6836
6837#ifdef IEEE80211_SUPPORT_TDMA
6837static int
6838ath_sysctl_setcca(SYSCTL_HANDLER_ARGS)
6839{
6840 struct ath_softc *sc = arg1;
6841 int setcca, error;
6842
6843 setcca = sc->sc_setcca;
6844 error = sysctl_handle_int(oidp, &setcca, 0, req);
6845 if (error || !req->newptr)
6846 return error;
6847 sc->sc_setcca = (setcca != 0);
6848 return 0;
6849}
6851#endif /* IEEE80211_SUPPORT_TDMA */
6851
6852static void
6853ath_sysctlattach(struct ath_softc *sc)
6854{
6855 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
6856 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
6857 struct ath_hal *ah = sc->sc_ah;
6858
6859 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6860 "countrycode", CTLFLAG_RD, &sc->sc_eecc, 0,
6861 "EEPROM country code");
6862 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6863 "regdomain", CTLFLAG_RD, &sc->sc_eerd, 0,
6864 "EEPROM regdomain code");
6865#ifdef ATH_DEBUG
6866 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6867 "debug", CTLFLAG_RW, &sc->sc_debug, 0,
6868 "control debugging printfs");
6869#endif
6870 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6871 "slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6872 ath_sysctl_slottime, "I", "802.11 slot time (us)");
6873 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6874 "acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6875 ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
6876 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6877 "ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6878 ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
6879 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6880 "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6881 ath_sysctl_softled, "I", "enable/disable software LED support");
6882 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6883 "ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6884 ath_sysctl_ledpin, "I", "GPIO pin connected to LED");
6885 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6886 "ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
6887 "setting to turn LED on");
6888 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6889 "ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
6890 "idle time for inactivity LED (ticks)");
6891 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6892 "txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6893 ath_sysctl_txantenna, "I", "antenna switch");
6894 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6895 "rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6896 ath_sysctl_rxantenna, "I", "default/rx antenna");
6897 if (ath_hal_hasdiversity(ah))
6898 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6899 "diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6900 ath_sysctl_diversity, "I", "antenna diversity");
6901 sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
6902 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6903 "txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
6904 "tx descriptor batching");
6905 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6906 "diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6907 ath_sysctl_diag, "I", "h/w diagnostic control");
6908 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6909 "tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6910 ath_sysctl_tpscale, "I", "tx power scaling");
6911 if (ath_hal_hastpc(ah)) {
6912 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6913 "tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6914 ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
6915 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6916 "tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6917 ath_sysctl_tpack, "I", "tx power for ack frames");
6918 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6919 "tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6920 ath_sysctl_tpcts, "I", "tx power for cts frames");
6921 }
6922 if (ath_hal_hasfastframes(sc->sc_ah)) {
6923 sc->sc_fftxqmin = ATH_FF_TXQMIN;
6924 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6925 "fftxqmin", CTLFLAG_RW, &sc->sc_fftxqmin, 0,
6926 "min frames before fast-frame staging");
6927 sc->sc_fftxqmax = ATH_FF_TXQMAX;
6928 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6929 "fftxqmax", CTLFLAG_RW, &sc->sc_fftxqmax, 0,
6930 "max queued frames before tail drop");
6931 }
6932 if (ath_hal_hasrfsilent(ah)) {
6933 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6934 "rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6935 ath_sysctl_rfsilent, "I", "h/w RF silent config");
6936 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6937 "rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6938 ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
6939 }
6940 if (ath_hal_hasintmit(ah)) {
6941 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6942 "intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6943 ath_sysctl_intmit, "I", "interference mitigation");
6944 }
6945 sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC;
6946 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6947 "monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
6948 "mask of error frames to pass when monitoring");
6950#ifdef IEEE80211_SUPPORT_TDMA
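	/* NB: TDMA knobs are only exposed on newer MACs (version > 0x78) */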
6950 if (ath_hal_macversion(ah) > 0x78) {
6951 sc->sc_tdmadbaprep = 2;
6952 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6953 "dbaprep", CTLFLAG_RW, &sc->sc_tdmadbaprep, 0,
6954 "TDMA DBA preparation time");
6955 sc->sc_tdmaswbaprep = 10;
6956 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6957 "swbaprep", CTLFLAG_RW, &sc->sc_tdmaswbaprep, 0,
6958 "TDMA SWBA preparation time");
6959 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6960 "guardtime", CTLFLAG_RW, &sc->sc_tdmaguard, 0,
6961 "TDMA slot guard time");
6962 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6963 "superframe", CTLFLAG_RD, &sc->sc_tdmabintval, 0,
6964 "TDMA calculated super frame");
6965 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6966 "setcca", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6967 ath_sysctl_setcca, "I", "enable CCA control");
6968 }
6969#endif
6970}
6971
6972static void
6973ath_bpfattach(struct ath_softc *sc)
6974{
6975 struct ifnet *ifp = sc->sc_ifp;
6976
6977 bpfattach(ifp, DLT_IEEE802_11_RADIO,
6978 sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th));
6979 /*
6980 * Initialize constant fields.
6981 * XXX make header lengths a multiple of 32-bits so subsequent
6982 * headers are properly aligned; this is a kludge to keep
6983 * certain applications happy.
6984 *
6985 * NB: the channel is setup each time we transition to the
6986 * RUN state to avoid filling it in for each frame.
6987 */
6988 sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
6989 sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
6990 sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);
6991
6992 sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
6993 sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
6994 sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
6995}
6996
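/*
 * Transmit a frame using caller-supplied parameters: rate/tries for
 * up to four series, RTS/CTS protection, crypto handling, etc. all
 * come from the ieee80211_bpf_params rather than the driver's own
 * rate control; used by ath_raw_xmit() below for injected frames.
 */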
6997static int
6998ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
6999 struct ath_buf *bf, struct mbuf *m0,
7000 const struct ieee80211_bpf_params *params)
7001{
7002 struct ifnet *ifp = sc->sc_ifp;
7003 struct ieee80211com *ic = ifp->if_l2com;
7004 struct ath_hal *ah = sc->sc_ah;
7005 int error, ismcast, ismrr;
7006 int keyix, hdrlen, pktlen, try0, txantenna;
7007 u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
7008 struct ieee80211_frame *wh;
7009 u_int flags, ctsduration;
7010 HAL_PKT_TYPE atype;
7011 const HAL_RATE_TABLE *rt;
7012 struct ath_desc *ds;
7013 u_int pri;
7014
7015 wh = mtod(m0, struct ieee80211_frame *);
7016 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
7017 hdrlen = ieee80211_anyhdrsize(wh);
7018 /*
7019 * Packet length must not include any
7020 * pad bytes; deduct them here.
7021 */
7022 /* XXX honor IEEE80211_BPF_DATAPAD */
7023 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
7024
7025 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
7026 const struct ieee80211_cipher *cip;
7027 struct ieee80211_key *k;
7028
7029 /*
7030 * Construct the 802.11 header+trailer for an encrypted
7031 * frame. The only reason this can fail is because of an
7032 * unknown or unsupported cipher/key type.
7033 */
7034 k = ieee80211_crypto_encap(ni, m0);
7035 if (k == NULL) {
7036 /*
7037 * This can happen when the key is yanked after the
7038 * frame was queued. Just discard the frame; the
7039 * 802.11 layer counts failures and provides
7040 * debugging/diagnostics.
7041 */
7042 ath_freetx(m0);
7043 return EIO;
7044 }
7045 /*
7046 * Adjust the packet + header lengths for the crypto
7047 * additions and calculate the h/w key index. When
7048 * a s/w mic is done the frame will have had any mic
7049 * added to it prior to entry so m0->m_pkthdr.len will
7050 * account for it. Otherwise we need to add it to the
7051 * packet length.
7052 */
7053 cip = k->wk_cipher;
7054 hdrlen += cip->ic_header;
7055 pktlen += cip->ic_header + cip->ic_trailer;
7056 /* NB: frags always have any TKIP MIC done in s/w */
7057 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
7058 pktlen += cip->ic_miclen;
7059 keyix = k->wk_keyix;
7060
7061 /* packet header may have moved, reset our local pointer */
7062 wh = mtod(m0, struct ieee80211_frame *);
7063 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
7064 /*
7065 * Use station key cache slot, if assigned.
7066 */
7067 keyix = ni->ni_ucastkey.wk_keyix;
7068 if (keyix == IEEE80211_KEYIX_NONE)
7069 keyix = HAL_TXKEYIX_INVALID;
7070 } else
7071 keyix = HAL_TXKEYIX_INVALID;
7072
7073 error = ath_tx_dmasetup(sc, bf, m0);
7074 if (error != 0)
7075 return error;
7076 m0 = bf->bf_m; /* NB: may have changed */
7077 wh = mtod(m0, struct ieee80211_frame *);
7078 bf->bf_node = ni; /* NB: held reference */
7079
7080 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
7081 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
7082 if (params->ibp_flags & IEEE80211_BPF_RTS)
7083 flags |= HAL_TXDESC_RTSENA;
7084 else if (params->ibp_flags & IEEE80211_BPF_CTS)
7085 flags |= HAL_TXDESC_CTSENA;
7086 /* XXX leave ismcast to injector? */
7087 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
7088 flags |= HAL_TXDESC_NOACK;
7089
7090 rt = sc->sc_currates;
7091 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
7092 rix = ath_tx_findrix(rt, params->ibp_rate0);
7093 txrate = rt->info[rix].rateCode;
7094 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
7095 txrate |= rt->info[rix].shortPreamble;
7096 sc->sc_txrix = rix;
7097 try0 = params->ibp_try0;
7098 ismrr = (params->ibp_try1 != 0);
7099 txantenna = params->ibp_pri >> 2;
7100 if (txantenna == 0) /* XXX? */
7101 txantenna = sc->sc_txantenna;
7102 ctsduration = 0;
7103 if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
7104 cix = ath_tx_findrix(rt, params->ibp_ctsrate);
7105 ctsrate = rt->info[cix].rateCode;
7106 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
7107 ctsrate |= rt->info[cix].shortPreamble;
7108 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
7109 ctsduration += rt->info[cix].spAckDuration;
7110 ctsduration += ath_hal_computetxtime(ah,
7111 rt, pktlen, rix, AH_TRUE);
7112 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
7113 ctsduration += rt->info[rix].spAckDuration;
7114 } else {
7115 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
7116 ctsduration += rt->info[cix].lpAckDuration;
7117 ctsduration += ath_hal_computetxtime(ah,
7118 rt, pktlen, rix, AH_FALSE);
7119 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
7120 ctsduration += rt->info[rix].lpAckDuration;
7121 }
7122 ismrr = 0; /* XXX */
7123 } else
7124 ctsrate = 0;
7125 pri = params->ibp_pri & 3;
7126 /*
7127 * NB: we mark all packets as type PSPOLL so the h/w won't
7128 * set the sequence number, duration, etc.
7129 */
7130 atype = HAL_PKT_TYPE_PSPOLL;
7131
7132 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
7133 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
7134 sc->sc_hwmap[rix].ieeerate, -1);
7135
7136 if (bpf_peers_present(ifp->if_bpf)) {
7137 u_int64_t tsf = ath_hal_gettsf64(ah);
7138
7139 sc->sc_tx_th.wt_tsf = htole64(tsf);
7140 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
7141 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
7142 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
7143 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
7144 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
7145 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
7146
7147 bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0);
7148 }
7149
7150 /*
7151 * Formulate first tx descriptor with tx controls.
7152 */
7153 ds = bf->bf_desc;
7154 /* XXX check return value? */
7155 ath_hal_setuptxdesc(ah, ds
7156 , pktlen /* packet length */
7157 , hdrlen /* header length */
7158 , atype /* Atheros packet type */
7159 , params->ibp_power /* txpower */
7160 , txrate, try0 /* series 0 rate/tries */
7161 , keyix /* key cache index */
7162 , txantenna /* antenna mode */
7163 , flags /* flags */
7164 , ctsrate /* rts/cts rate */
7165 , ctsduration /* rts/cts duration */
7166 );
7167 bf->bf_txflags = flags;
7168
7169 if (ismrr) {
7170 rix = ath_tx_findrix(rt, params->ibp_rate1);
7171 rate1 = rt->info[rix].rateCode;
7172 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
7173 rate1 |= rt->info[rix].shortPreamble;
7174 if (params->ibp_try2) {
7175 rix = ath_tx_findrix(rt, params->ibp_rate2);
7176 rate2 = rt->info[rix].rateCode;
7177 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
7178 rate2 |= rt->info[rix].shortPreamble;
7179 } else
7180 rate2 = 0;
7181 if (params->ibp_try3) {
7182 rix = ath_tx_findrix(rt, params->ibp_rate3);
7183 rate3 = rt->info[rix].rateCode;
7184 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
7185 rate3 |= rt->info[rix].shortPreamble;
7186 } else
7187 rate3 = 0;
7188 ath_hal_setupxtxdesc(ah, ds
7189 , rate1, params->ibp_try1 /* series 1 */
7190 , rate2, params->ibp_try2 /* series 2 */
7191 , rate3, params->ibp_try3 /* series 3 */
7192 );
7193 }
7194
7195 /* NB: no buffered multicast in power save support */
7196 ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
7197 return 0;
7198}
7199
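/*
 * Raw transmit entry point (the net80211 raw_xmit hook): grab a tx
 * buffer and dispatch through the normal ath_tx_start() path or,
 * when explicit bpf parameters were supplied, through
 * ath_tx_raw_start() above.
 */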
7200static int
7201ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
7202 const struct ieee80211_bpf_params *params)
7203{
7204 struct ieee80211com *ic = ni->ni_ic;
7205 struct ifnet *ifp = ic->ic_ifp;
7206 struct ath_softc *sc = ifp->if_softc;
7207 struct ath_buf *bf;
7208
7209 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
7210 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
7211 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
7212 "!running" : "invalid");
7213 sc->sc_stats.ast_tx_raw_fail++;
7214 ieee80211_free_node(ni);
7215 m_freem(m);
7216 return ENETDOWN;
7217 }
7218 /*
7219 * Grab a TX buffer and associated resources.
7220 */
7221 bf = ath_getbuf(sc);
7222 if (bf == NULL) {
7223 /* NB: ath_getbuf handles stat+msg */
7224 ieee80211_free_node(ni);
7225 m_freem(m);
7226 return ENOBUFS;
7227 }
7228
7229 ifp->if_opackets++;
7230 sc->sc_stats.ast_tx_raw++;
7231
7232 if (params == NULL) {
7233 /*
7234 * Legacy path; interpret frame contents to decide
7235 * precisely how to send the frame.
7236 */
7237 if (ath_tx_start(sc, ni, bf, m))
7238 goto bad;
7239 } else {
7240 /*
7241 * Caller supplied explicit parameters to use in
7242 * sending the frame.
7243 */
7244 if (ath_tx_raw_start(sc, ni, bf, m, params))
7245 goto bad;
7246 }
7247 sc->sc_wd_timer = 5;
7248
7249 return 0;
7250bad:
7251 ifp->if_oerrors++;
7252 ATH_TXBUF_LOCK(sc);
7253 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
7254 ATH_TXBUF_UNLOCK(sc);
7255 ieee80211_free_node(ni);
7256 return EIO; /* XXX */
7257}
7258
7259/*
7260 * Announce various information on device/driver attach.
7261 */
7262static void
7263ath_announce(struct ath_softc *sc)
7264{
7265 struct ifnet *ifp = sc->sc_ifp;
7266 struct ath_hal *ah = sc->sc_ah;
7267
7268 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
7269 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
7270 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
7271 if (bootverbose) {
7272 int i;
7273 for (i = 0; i <= WME_AC_VO; i++) {
7274 struct ath_txq *txq = sc->sc_ac2q[i];
7275 if_printf(ifp, "Use hw queue %u for %s traffic\n",
7276 txq->axq_qnum, ieee80211_wme_acnames[i]);
7277 }
7278 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
7279 sc->sc_cabq->axq_qnum);
7280 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
7281 }
7282 if (ath_rxbuf != ATH_RXBUF)
7283 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
7284 if (ath_txbuf != ATH_TXBUF)
7285 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
7286}
7287
6951 if (ath_hal_macversion(ah) > 0x78) {
6952 sc->sc_tdmadbaprep = 2;
6953 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6954 "dbaprep", CTLFLAG_RW, &sc->sc_tdmadbaprep, 0,
6955 "TDMA DBA preparation time");
6956 sc->sc_tdmaswbaprep = 10;
6957 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6958 "swbaprep", CTLFLAG_RW, &sc->sc_tdmaswbaprep, 0,
6959 "TDMA SWBA preparation time");
6960 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6961 "guardtime", CTLFLAG_RW, &sc->sc_tdmaguard, 0,
6962 "TDMA slot guard time");
6963 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6964 "superframe", CTLFLAG_RD, &sc->sc_tdmabintval, 0,
6965 "TDMA calculated super frame");
6966 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6967 "setcca", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
6968 ath_sysctl_setcca, "I", "enable CCA control");
6969 }
6970#endif
6971}
6972
6973static void
6974ath_bpfattach(struct ath_softc *sc)
6975{
6976 struct ifnet *ifp = sc->sc_ifp;
6977
6978 bpfattach(ifp, DLT_IEEE802_11_RADIO,
6979 sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th));
6980 /*
6981 * Initialize constant fields.
6982 * XXX make header lengths a multiple of 32-bits so subsequent
6983 * headers are properly aligned; this is a kludge to keep
6984 * certain applications happy.
6985 *
6986 * NB: the channel is setup each time we transition to the
6987 * RUN state to avoid filling it in for each frame.
6988 */
6989 sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
6990 sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
6991 sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);
6992
6993 sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
6994 sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
6995 sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
6996}
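/*
 * Illustration (editor's sketch, not driver code): roundup() pads the
 * radiotap header length to a 4-byte multiple so that the 802.11 frame
 * bpf appends after it stays 32-bit aligned. With a hypothetical
 * 38-byte header:
 *
 *	roundup(38, sizeof(u_int32_t)) == 40
 *
 * it_len is stored little-endian per the radiotap spec, hence the
 * htole16() above.
 */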
6997
6998static int
6999ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
7000 struct ath_buf *bf, struct mbuf *m0,
7001 const struct ieee80211_bpf_params *params)
7002{
7003 struct ifnet *ifp = sc->sc_ifp;
7004 struct ieee80211com *ic = ifp->if_l2com;
7005 struct ath_hal *ah = sc->sc_ah;
7006 int error, ismcast, ismrr;
7007 int keyix, hdrlen, pktlen, try0, txantenna;
7008 u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
7009 struct ieee80211_frame *wh;
7010 u_int flags, ctsduration;
7011 HAL_PKT_TYPE atype;
7012 const HAL_RATE_TABLE *rt;
7013 struct ath_desc *ds;
7014 u_int pri;
7015
7016 wh = mtod(m0, struct ieee80211_frame *);
7017 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
7018 hdrlen = ieee80211_anyhdrsize(wh);
7019 /*
7020 * Packet length must not include any
7021 * pad bytes; deduct them here.
7022 */
7023 /* XXX honor IEEE80211_BPF_DATAPAD */
7024 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
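	/*
	 * Illustration (editor's note): 802.11 header lengths are always
	 * even, so hdrlen & 3 is 0 or 2 and equals the pad net80211
	 * inserted to 32-bit align the payload; e.g. a 26-byte QoS header
	 * is carried padded to 28 bytes, and 26 & 3 == 2 deducts the pad.
	 */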
7025
7026 if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
7027 const struct ieee80211_cipher *cip;
7028 struct ieee80211_key *k;
7029
7030 /*
7031 * Construct the 802.11 header+trailer for an encrypted
7032 * frame. This can fail only because of an unknown or
7033 * unsupported cipher/key type.
7034 */
7035 k = ieee80211_crypto_encap(ni, m0);
7036 if (k == NULL) {
7037 /*
7038 * This can happen when the key is yanked after the
7039 * frame was queued. Just discard the frame; the
7040 * 802.11 layer counts failures and provides
7041 * debugging/diagnostics.
7042 */
7043 ath_freetx(m0);
7044 return EIO;
7045 }
7046 /*
7047 * Adjust the packet + header lengths for the crypto
7048 * additions and calculate the h/w key index. When
7049 * a s/w mic is done, the frame will have had any mic
7050 * added to it prior to entry so m0->m_pkthdr.len will
7051 * account for it. Otherwise we need to add it to the
7052 * packet length.
7053 */
7054 cip = k->wk_cipher;
7055 hdrlen += cip->ic_header;
7056 pktlen += cip->ic_header + cip->ic_trailer;
7057 /* NB: frags always have any TKIP MIC done in s/w */
7058 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
7059 pktlen += cip->ic_miclen;
7060 keyix = k->wk_keyix;
7061
7062 /* packet header may have moved, reset our local pointer */
7063 wh = mtod(m0, struct ieee80211_frame *);
7064 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
7065 /*
7066 * Use station key cache slot, if assigned.
7067 */
7068 keyix = ni->ni_ucastkey.wk_keyix;
7069 if (keyix == IEEE80211_KEYIX_NONE)
7070 keyix = HAL_TXKEYIX_INVALID;
7071 } else
7072 keyix = HAL_TXKEYIX_INVALID;
7073
7074 error = ath_tx_dmasetup(sc, bf, m0);
7075 if (error != 0)
7076 return error;
7077 m0 = bf->bf_m; /* NB: may have changed */
7078 wh = mtod(m0, struct ieee80211_frame *);
7079 bf->bf_node = ni; /* NB: held reference */
7080
7081 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
7082 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
7083 if (params->ibp_flags & IEEE80211_BPF_RTS)
7084 flags |= HAL_TXDESC_RTSENA;
7085 else if (params->ibp_flags & IEEE80211_BPF_CTS)
7086 flags |= HAL_TXDESC_CTSENA;
7087 /* XXX leave ismcast to injector? */
7088 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
7089 flags |= HAL_TXDESC_NOACK;
7090
7091 rt = sc->sc_currates;
7092 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
7093 rix = ath_tx_findrix(rt, params->ibp_rate0);
7094 txrate = rt->info[rix].rateCode;
7095 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
7096 txrate |= rt->info[rix].shortPreamble;
7097 sc->sc_txrix = rix;
7098 try0 = params->ibp_try0;
7099 ismrr = (params->ibp_try1 != 0);
7100 txantenna = params->ibp_pri >> 2;
7101 if (txantenna == 0) /* XXX? */
7102 txantenna = sc->sc_txantenna;
7103 ctsduration = 0;
7104 if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
7105 cix = ath_tx_findrix(rt, params->ibp_ctsrate);
7106 ctsrate = rt->info[cix].rateCode;
7107 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
7108 ctsrate |= rt->info[cix].shortPreamble;
7109 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
7110 ctsduration += rt->info[cix].spAckDuration;
7111 ctsduration += ath_hal_computetxtime(ah,
7112 rt, pktlen, rix, AH_TRUE);
7113 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
7114 ctsduration += rt->info[rix].spAckDuration;
7115 } else {
7116 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
7117 ctsduration += rt->info[cix].lpAckDuration;
7118 ctsduration += ath_hal_computetxtime(ah,
7119 rt, pktlen, rix, AH_FALSE);
7120 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
7121 ctsduration += rt->info[rix].lpAckDuration;
7122 }
7123 ismrr = 0; /* XXX */
7124 } else
7125 ctsrate = 0;
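	/*
	 * Illustration (editor's note): for an RTS-protected frame with
	 * short preamble the duration computed above is
	 *	spAckDuration(cix)	SIFS + CTS at the ctsrate
	 *	+ computetxtime(pktlen)	the data frame itself
	 *	+ spAckDuration(rix)	SIFS + ACK (when ACK'd)
	 * i.e. the NAV the RTS/CTS exchange must reserve.
	 */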
7126 pri = params->ibp_pri & 3;
7127 /*
7128 * NB: we mark all packets as type PSPOLL so the h/w won't
7129 * set the sequence number, duration, etc.
7130 */
7131 atype = HAL_PKT_TYPE_PSPOLL;
7132
7133 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
7134 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
7135 sc->sc_hwmap[rix].ieeerate, -1);
7136
7137 if (bpf_peers_present(ifp->if_bpf)) {
7138 u_int64_t tsf = ath_hal_gettsf64(ah);
7139
7140 sc->sc_tx_th.wt_tsf = htole64(tsf);
7141 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
7142 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
7143 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
7144 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
7145 sc->sc_tx_th.wt_txpower = ni->ni_txpower;
7146 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
7147
7148 bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0);
7149 }
7150
7151 /*
7152 * Formulate first tx descriptor with tx controls.
7153 */
7154 ds = bf->bf_desc;
7155 /* XXX check return value? */
7156 ath_hal_setuptxdesc(ah, ds
7157 , pktlen /* packet length */
7158 , hdrlen /* header length */
7159 , atype /* Atheros packet type */
7160 , params->ibp_power /* txpower */
7161 , txrate, try0 /* series 0 rate/tries */
7162 , keyix /* key cache index */
7163 , txantenna /* antenna mode */
7164 , flags /* flags */
7165 , ctsrate /* rts/cts rate */
7166 , ctsduration /* rts/cts duration */
7167 );
7168 bf->bf_txflags = flags;
7169
7170 if (ismrr) {
7171 rix = ath_tx_findrix(rt, params->ibp_rate1);
7172 rate1 = rt->info[rix].rateCode;
7173 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
7174 rate1 |= rt->info[rix].shortPreamble;
7175 if (params->ibp_try2) {
7176 rix = ath_tx_findrix(rt, params->ibp_rate2);
7177 rate2 = rt->info[rix].rateCode;
7178 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
7179 rate2 |= rt->info[rix].shortPreamble;
7180 } else
7181 rate2 = 0;
7182 if (params->ibp_try3) {
7183 rix = ath_tx_findrix(rt, params->ibp_rate3);
7184 rate3 = rt->info[rix].rateCode;
7185 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
7186 rate3 |= rt->info[rix].shortPreamble;
7187 } else
7188 rate3 = 0;
7189 ath_hal_setupxtxdesc(ah, ds
7190 , rate1, params->ibp_try1 /* series 1 */
7191 , rate2, params->ibp_try2 /* series 2 */
7192 , rate3, params->ibp_try3 /* series 3 */
7193 );
7194 }
7195
7196 /* NB: no support for buffered multicast frames in power save */
7197 ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
7198 return 0;
7199}
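/*
 * Sketch (editor's example, not driver code): how a caller of the raw
 * transmit path might populate the ieee80211_bpf_params consumed by
 * ath_tx_raw_start() above. Only fields the function reads are shown;
 * the rate values are assumed to be net80211 rates in 500 kb/s units,
 * and example_fill_params() is a hypothetical helper.
 */
static void
example_fill_params(struct ieee80211_bpf_params *p)
{
	memset(p, 0, sizeof(*p));
	p->ibp_flags = IEEE80211_BPF_SHORTPRE | IEEE80211_BPF_NOACK;
	p->ibp_pri = WME_AC_VO;	/* bits 0-1: WME AC; bits 2+: tx antenna */
	p->ibp_rate0 = 108;	/* series 0 rate (54 Mb/s, assumed units) */
	p->ibp_try0 = 4;	/* 4 tries at series 0 */
	p->ibp_try1 = 0;	/* no series 1, so ismrr is false above */
	p->ibp_power = 60;	/* tx power, HAL units (assumption) */
	p->ibp_ctsrate = 0;	/* unused: neither BPF_RTS nor BPF_CTS set */
}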
7200
7201static int
7202ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
7203 const struct ieee80211_bpf_params *params)
7204{
7205 struct ieee80211com *ic = ni->ni_ic;
7206 struct ifnet *ifp = ic->ic_ifp;
7207 struct ath_softc *sc = ifp->if_softc;
7208 struct ath_buf *bf;
7209
7210 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
7211 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
7212 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
7213 "!running" : "invalid");
7214 sc->sc_stats.ast_tx_raw_fail++;
7215 ieee80211_free_node(ni);
7216 m_freem(m);
7217 return ENETDOWN;
7218 }
7219 /*
7220 * Grab a TX buffer and associated resources.
7221 */
7222 bf = ath_getbuf(sc);
7223 if (bf == NULL) {
7224 /* NB: ath_getbuf handles stat+msg */
7225 ieee80211_free_node(ni);
7226 m_freem(m);
7227 return ENOBUFS;
7228 }
7229
7230 ifp->if_opackets++;
7231 sc->sc_stats.ast_tx_raw++;
7232
7233 if (params == NULL) {
7234 /*
7235 * Legacy path; interpret frame contents to decide
7236 * precisely how to send the frame.
7237 */
7238 if (ath_tx_start(sc, ni, bf, m))
7239 goto bad;
7240 } else {
7241 /*
7242 * Caller supplied explicit parameters to use in
7243 * sending the frame.
7244 */
7245 if (ath_tx_raw_start(sc, ni, bf, m, params))
7246 goto bad;
7247 }
7248 sc->sc_wd_timer = 5;
7249
7250 return 0;
7251bad:
7252 ifp->if_oerrors++;
7253 ATH_TXBUF_LOCK(sc);
7254 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
7255 ATH_TXBUF_UNLOCK(sc);
7256 ieee80211_free_node(ni);
7257 return EIO; /* XXX */
7258}
7259
7260/*
7261 * Announce various information on device/driver attach.
7262 */
7263static void
7264ath_announce(struct ath_softc *sc)
7265{
7266 struct ifnet *ifp = sc->sc_ifp;
7267 struct ath_hal *ah = sc->sc_ah;
7268
7269 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
7270 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
7271 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
7272 if (bootverbose) {
7273 int i;
7274 for (i = 0; i <= WME_AC_VO; i++) {
7275 struct ath_txq *txq = sc->sc_ac2q[i];
7276 if_printf(ifp, "Use hw queue %u for %s traffic\n",
7277 txq->axq_qnum, ieee80211_wme_acnames[i]);
7278 }
7279 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
7280 sc->sc_cabq->axq_qnum);
7281 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
7282 }
7283 if (ath_rxbuf != ATH_RXBUF)
7284 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
7285 if (ath_txbuf != ATH_TXBUF)
7286 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
7287}
7288
7288#ifdef ATH_SUPPORT_TDMA
7289#ifdef IEEE80211_SUPPORT_TDMA
7290static __inline uint32_t
7291ath_hal_getnexttbtt(struct ath_hal *ah)
7292{
7293#define AR_TIMER0 0x8028
7294 return OS_REG_READ(ah, AR_TIMER0);
7295}
7296
7297static __inline void
7298ath_hal_adjusttsf(struct ath_hal *ah, int32_t tsfdelta)
7299{
7300 /* XXX handle wrap/overflow */
7301 OS_REG_WRITE(ah, AR_TSF_L32, OS_REG_READ(ah, AR_TSF_L32) + tsfdelta);
7302}
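/*
 * NB (editor's note): the XXX above -- the write to AR_TSF_L32 adds
 * tsfdelta to the low 32 TSF bits only; a carry out of bit 31 is not
 * propagated into the high TSF word, so a wrap of the low word steps
 * the 64-bit TSF back by 2^32 us (~71.6 minutes). The caller keeps
 * tsfdelta in [0, 1024) so the window for this is tiny.
 */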
7303
7304static void
7305ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
7306{
7307 struct ath_hal *ah = sc->sc_ah;
7308 HAL_BEACON_TIMERS bt;
7309
7310 bt.bt_intval = bintval | HAL_BEACON_ENA;
7311 bt.bt_nexttbtt = nexttbtt;
7312 bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
7313 bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
7314 bt.bt_nextatim = nexttbtt+1;
7315 ath_hal_beaconsettimers(ah, &bt);
7316}
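/*
 * Worked example (editor's note, illustrative numbers): the DBA/SWBA
 * timers apparently run in 1/8 TU units, hence the <<3. For
 * nexttbtt = 100 TU with the sysctl defaults above (dbaprep 2,
 * swbaprep 10):
 *
 *	bt_nexttbtt = 100		TBTT at 100 TU
 *	bt_nextdba  = 800 - 2  = 798	DMA beacon alert 256 us earlier
 *	bt_nextswba = 800 - 10 = 790	SWBA interrupt 1280 us earlier
 *	bt_nextatim = 101
 */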
7317
7318/*
7319 * Calculate the beacon interval. This is periodic in the
7320 * superframe for the bss. We assume each station is configured
7321 * identically wrt transmit rate so the guard time we calculate
7322 * above will be the same on all stations. Note we need to
7323 * factor in the xmit time because the hardware will schedule
7324 * a frame for transmit if the start of the frame is within
7325 * the burst time. When we get hardware that properly kills
7326 * frames in the PCU we can reduce/eliminate the guard time.
7327 *
7328 * Roundup to 1024 is so we have 1 TU buffer in the guard time
7329 * to deal with the granularity of the nexttbtt timer. 11n MAC's
7330 * with 1us timer granularity should allow us to reduce/eliminate
7331 * this.
7332 */
7333static void
7334ath_tdma_bintvalsetup(struct ath_softc *sc,
7335 const struct ieee80211_tdma_state *tdma)
7336{
7337 /* copy from vap state (XXX check all vaps have same value?) */
7338 sc->sc_tdmaslotlen = tdma->tdma_slotlen;
7339 sc->sc_tdmabintcnt = tdma->tdma_bintval;
7340
7341 sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
7342 tdma->tdma_slotcnt, 1024);
7343 sc->sc_tdmabintval >>= 10; /* TSF -> TU */
7344 if (sc->sc_tdmabintval & 1)
7345 sc->sc_tdmabintval++;
7346
7347 if (tdma->tdma_slot == 0) {
7348 /*
7349 * Only slot 0 beacons; other slots respond.
7350 */
7351 sc->sc_imask |= HAL_INT_SWBA;
7352 sc->sc_tdmaswba = 0; /* beacon immediately */
7353 } else {
7354 /* XXX all vaps must be slot 0 or slot !0 */
7355 sc->sc_imask &= ~HAL_INT_SWBA;
7356 }
7357}
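/*
 * Worked example (editor's note, illustrative numbers): with
 * tdma_slotlen = 10000 us, sc_tdmaguard = 1000 us, tdma_slotcnt = 2:
 *
 *	roundup((10000 + 1000) * 2, 1024) == 22528	TSF units (us)
 *	22528 >> 10 == 22 TU, already even, so sc_tdmabintval = 22
 */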
7358
7359/*
7360 * Max 802.11 overhead. This assumes no 4-address frames and
7361 * the encapsulation done by ieee80211_encap (llc). We also
7362 * include potential crypto overhead.
7363 */
7364#define IEEE80211_MAXOVERHEAD \
7365 (sizeof(struct ieee80211_qosframe) \
7366 + sizeof(struct llc) \
7367 + IEEE80211_ADDR_LEN \
7368 + IEEE80211_WEP_IVLEN \
7369 + IEEE80211_WEP_KIDLEN \
7370 + IEEE80211_WEP_CRCLEN \
7371 + IEEE80211_WEP_MICLEN \
7372 + IEEE80211_CRC_LEN)
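/*
 * NB (editor's note): with the stock net80211 sizes -- 26-byte QoS
 * header, 8-byte LLC/SNAP, one spare 6-byte address, 3+1 bytes of
 * WEP IV/key id, 4-byte ICV, 8-byte MIC and a 4-byte FCS -- this
 * works out to 60 bytes (values assumed from the net80211 headers).
 */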
7373
7374/*
7375 * Set up initially for tdma operation. Start the beacon
7376 * timers and enable SWBA if we are slot 0. Otherwise
7377 * we wait for slot 0 to arrive so we can sync up before
7378 * starting to transmit.
7379 */
7380static void
7381ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
7382{
7383 struct ath_hal *ah = sc->sc_ah;
7384 struct ifnet *ifp = sc->sc_ifp;
7385 struct ieee80211com *ic = ifp->if_l2com;
7386 const struct ieee80211_txparam *tp;
7387 const struct ieee80211_tdma_state *tdma = NULL;
7388 int rix;
7389
7390 if (vap == NULL) {
7391 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
7392 if (vap == NULL) {
7393 if_printf(ifp, "%s: no vaps?\n", __func__);
7394 return;
7395 }
7396 }
7397 tp = vap->iv_bss->ni_txparms;
7398 /*
7399 * Calculate the guard time for each slot. This is the
7400 * time to send a maximal-size frame according to the
7401 * fixed/lowest transmit rate. Note that the interface
7402 * mtu does not include the 802.11 overhead so we must
7403 * tack that on (ath_hal_computetxtime includes the
7404 * preamble and plcp in its calculation).
7405 */
7406 tdma = vap->iv_tdma;
7407 if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
7408 rix = ath_tx_findrix(sc->sc_currates, tp->ucastrate);
7409 else
7410 rix = ath_tx_findrix(sc->sc_currates, tp->mcastrate);
7411 /* XXX short preamble assumed */
7412 sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
7413 ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);
7414
7415 ath_hal_intrset(ah, 0);
7416
7417 ath_beaconq_config(sc); /* setup h/w beacon q */
7418 if (sc->sc_setcca)
7419 ath_hal_setcca(ah, AH_FALSE); /* disable CCA */
7420 ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */
7421 ath_tdma_settimers(sc, sc->sc_tdmabintval,
7422 sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
7423 sc->sc_syncbeacon = 0;
7424
7425 sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
7426 sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;
7427
7428 ath_hal_intrset(ah, sc->sc_imask);
7429
7430 DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
7431 "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
7432 tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
7433 tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
7434 sc->sc_tdmadbaprep);
7435}
7436
7437/*
7438 * Update tdma operation. Called from the 802.11 layer
7439 * when a beacon is received from the TDMA station operating
7440 * in the slot immediately preceding us in the bss. Use
7441 * the rx timestamp for the beacon frame to update our
7442 * beacon timers so we follow their schedule. Note that
7443 * by using the rx timestamp we implicitly include the
7444 * propagation delay in our schedule.
7445 */
7446static void
7447ath_tdma_update(struct ieee80211_node *ni,
7448 const struct ieee80211_tdma_param *tdma, int changed)
7449{
7450#define TSF_TO_TU(_h,_l) \
7451 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
7452#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10)
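	/*
	 * (editor's note) 1 TU = 1024 us = 1<<10, so TU_TO_TSF(100) is
	 * 102400 us; TSF_TO_TU() stitches the two 32-bit TSF halves into
	 * a TU count: (_l >> 10) supplies the low 22 bits, (_h << 22)
	 * the rest.
	 */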
7453 struct ieee80211vap *vap = ni->ni_vap;
7454 struct ieee80211com *ic = ni->ni_ic;
7455 struct ath_softc *sc = ic->ic_ifp->if_softc;
7456 struct ath_hal *ah = sc->sc_ah;
7457 const HAL_RATE_TABLE *rt = sc->sc_currates;
7458 u_int64_t tsf, rstamp, nextslot;
7459 u_int32_t txtime, nextslottu, timer0;
7460 int32_t tudelta, tsfdelta;
7461 const struct ath_rx_status *rs;
7462 int rix;
7463
7464 sc->sc_stats.ast_tdma_update++;
7465
7466 /*
7467 * Check for and adopt configuration changes.
7468 */
7469 if (changed != 0) {
7470 const struct ieee80211_tdma_state *ts = vap->iv_tdma;
7471
7472 ath_tdma_bintvalsetup(sc, ts);
7473
7474 DPRINTF(sc, ATH_DEBUG_TDMA,
7475 "%s: adopt slot %u slotcnt %u slotlen %u us "
7476 "bintval %u TU\n", __func__,
7477 ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
7478 sc->sc_tdmabintval);
7479
7480 ath_beaconq_config(sc);
7481 /* XXX right? */
7482 ath_hal_intrset(ah, sc->sc_imask);
7483 /* NB: beacon timers programmed below */
7484 }
7485
7486 /* extend rx timestamp to 64 bits */
7487 tsf = ath_hal_gettsf64(ah);
7488 rstamp = ath_extend_tsf(ni->ni_rstamp, tsf);
7489 /*
7490 * The rx timestamp is set by the hardware on completing
7491 * reception (at the point where the rx descriptor is DMA'd
7492 * to the host). To find the start of our next slot we
7493 * must adjust this time by the time required to send
7494 * the packet just received.
7495 */
7496 rs = sc->sc_tdmars;
7497 rix = rt->rateCodeToIndex[rs->rs_rate];
7498 txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
7499 rt->info[rix].shortPreamble);
7500 /* NB: <<9 = TU-to-TSF conversion (<<10) combined with /2: the next slot is half a superframe away */
7501 nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
7502 nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;
7503
7504 /*
7505 * TIMER0 is the h/w's idea of NextTBTT (in TUs). Convert
7506 * to usecs and calculate the difference between what the
7507 * other station thinks and what we have programmed. This
7508 * lets us figure how to adjust our timers to match. The
7509 * adjustments are done by pulling the TSF forward and possibly
7510 * rewriting the beacon timers.
7511 */
7512 timer0 = ath_hal_getnexttbtt(ah);
7513 tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD+1)) - TU_TO_TSF(timer0));
7514
7515 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
7516 "tsfdelta %d avg +%d/-%d\n", tsfdelta,
7517 TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));
7518
7519 if (tsfdelta < 0) {
7520 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
7521 TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
7522 tsfdelta = -tsfdelta % 1024;
7523 nextslottu++;
7524 } else if (tsfdelta > 0) {
7525 TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
7526 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
7527 tsfdelta = 1024 - (tsfdelta % 1024);
7528 nextslottu++;
7529 } else {
7530 TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
7531 TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
7532 }
7533 tudelta = nextslottu - timer0;
7534
7535 /*
7536 * Copy sender's timestamp into tdma ie so they can
7537 * calculate roundtrip time. We submit a beacon frame
7538 * below after any timer adjustment. The frame goes out
7539 * at the next TBTT so the sender can calculate the
7540 * roundtrip by inspecting the tdma ie in our beacon frame.
7541 *
7542 * NB: This tstamp is subtly preserved when
7543 * IEEE80211_BEACON_TDMA is marked (e.g. when the
7544 * slot position changes) because ieee80211_add_tdma
7545 * skips over the data.
7546 */
7547 memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
7548 __offsetof(struct ieee80211_tdma_param, tdma_tstamp),
7549 &ni->ni_tstamp.data, 8);
7550#if 0
7551 DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
7552 "tsf %llu nextslot %llu (%d, %d) nextslottu %u timer0 %u (%d)\n",
7553 (unsigned long long) tsf, (unsigned long long) nextslot,
7554 (int)(nextslot - tsf), tsfdelta,
7555 nextslottu, timer0, tudelta);
7556#endif
7557 /*
7558 * Adjust the beacon timers only when pulling them forward
7559 * or when going back by less than the beacon interval.
7560 * Negative jumps larger than the beacon interval seem to
7561 * cause the timers to stop and generally cause instability.
7562 * This basically filters out jumps due to missed beacons.
7563 */
7564 if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
7565 ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
7566 sc->sc_stats.ast_tdma_timers++;
7567 }
7568 if (tsfdelta > 0) {
7569 ath_hal_adjusttsf(ah, tsfdelta);
7570 sc->sc_stats.ast_tdma_tsf++;
7571 }
7572 ath_tdma_beacon_send(sc, vap); /* prepare response */
7573#undef TU_TO_TSF
7574#undef TSF_TO_TU
7575}
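/*
 * Worked example (editor's note, illustrative numbers): say
 * timer0 = 100 TU and the neighbour's beacon puts our next slot at
 * nextslot = 102912 us (100.5 TU). Then tsfdelta = 512 > 0, so the
 * TSF is pulled forward by 1024 - 512 = 512 us, nextslottu becomes
 * 100 + 1 = 101 and tudelta = 1, i.e. the beacon timers are rewritten
 * one TU later, now aligned on a TU boundary.
 */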
7576
7577/*
7578 * Transmit a beacon frame at SWBA. Dynamic updates
7579 * to the frame contents are done as needed.
7580 */
7581static void
7582ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
7583{
7584 struct ath_hal *ah = sc->sc_ah;
7585 struct ath_buf *bf;
7586 int otherant;
7587
7588 /*
7589 * Check if the previous beacon has gone out. If
7590 * not, don't try to post another; skip this period
7591 * and wait for the next. Missed beacons indicate
7592 * a problem and should not occur. If we miss too
7593 * many consecutive beacons, reset the device.
7594 */
7595 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
7596 sc->sc_bmisscount++;
7597 DPRINTF(sc, ATH_DEBUG_BEACON,
7598 "%s: missed %u consecutive beacons\n",
7599 __func__, sc->sc_bmisscount);
7600 if (sc->sc_bmisscount >= ath_bstuck_threshold)
7601 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
7602 return;
7603 }
7604 if (sc->sc_bmisscount != 0) {
7605 DPRINTF(sc, ATH_DEBUG_BEACON,
7606 "%s: resume beacon xmit after %u misses\n",
7607 __func__, sc->sc_bmisscount);
7608 sc->sc_bmisscount = 0;
7609 }
7610
7611 /*
7612 * Check recent per-antenna transmit statistics and flip
7613 * the default antenna if noticeably more frames went out
7614 * on the non-default antenna.
7615 * XXX assumes 2 antennae
7616 */
7617 if (!sc->sc_diversity) {
7618 otherant = sc->sc_defant & 1 ? 2 : 1;
7619 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
7620 ath_setdefantenna(sc, otherant);
7621 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
7622 }
7623
7624 bf = ath_beacon_generate(sc, vap);
7625 if (bf != NULL) {
7626 /*
7627 * Stop any current dma and put the new frame on the queue.
7628 * This should never fail since we check above that no frames
7629 * are still pending on the queue.
7630 */
7631 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
7632 DPRINTF(sc, ATH_DEBUG_ANY,
7633 "%s: beacon queue %u did not stop?\n",
7634 __func__, sc->sc_bhalq);
7635 /* NB: the HAL still stops DMA, so proceed */
7636 }
7637 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
7638 ath_hal_txstart(ah, sc->sc_bhalq);
7639
7640 sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */
7641
7642 /*
7643 * Record local TSF for our last send for use
7644 * in arbitrating slot collisions.
7645 */
7646 vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
7647 }
7648}
7648#endif /* ATH_SUPPORT_TDMA */
7649#endif /* IEEE80211_SUPPORT_TDMA */