if_iwn.c (220662) → if_iwn.c (220667); deleted lines are prefixed with "-", added lines with "+"
1/*-
2 * Copyright (c) 2007-2009
3 * Damien Bergamini <damien.bergamini@free.fr>
4 * Copyright (c) 2008
5 * Benjamin Close <benjsc@FreeBSD.org>
6 * Copyright (c) 2008 Sam Leffler, Errno Consulting
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/*
22 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
23 * adapters.
24 */
25
26#include <sys/cdefs.h>
- 27	__FBSDID("$FreeBSD: head/sys/dev/iwn/if_iwn.c 220662 2011-04-15 17:10:52Z bschmidt $");
+ 27	__FBSDID("$FreeBSD: head/sys/dev/iwn/if_iwn.c 220667 2011-04-15 20:17:52Z bschmidt $");
28
29#include <sys/param.h>
30#include <sys/sockio.h>
31#include <sys/sysctl.h>
32#include <sys/mbuf.h>
33#include <sys/kernel.h>
34#include <sys/socket.h>
35#include <sys/systm.h>
36#include <sys/malloc.h>
37#include <sys/bus.h>
38#include <sys/rman.h>
39#include <sys/endian.h>
40#include <sys/firmware.h>
41#include <sys/limits.h>
42#include <sys/module.h>
43#include <sys/queue.h>
44#include <sys/taskqueue.h>
45
46#include <machine/bus.h>
47#include <machine/resource.h>
48#include <machine/clock.h>
49
50#include <dev/pci/pcireg.h>
51#include <dev/pci/pcivar.h>
52
53#include <net/bpf.h>
54#include <net/if.h>
55#include <net/if_arp.h>
56#include <net/ethernet.h>
57#include <net/if_dl.h>
58#include <net/if_media.h>
59#include <net/if_types.h>
60
61#include <netinet/in.h>
62#include <netinet/in_systm.h>
63#include <netinet/in_var.h>
64#include <netinet/if_ether.h>
65#include <netinet/ip.h>
66
67#include <net80211/ieee80211_var.h>
68#include <net80211/ieee80211_radiotap.h>
69#include <net80211/ieee80211_regdomain.h>
70#include <net80211/ieee80211_ratectl.h>
71
72#include <dev/iwn/if_iwnreg.h>
73#include <dev/iwn/if_iwnvar.h>
74
75static int iwn_probe(device_t);
76static int iwn_attach(device_t);
77static const struct iwn_hal *iwn_hal_attach(struct iwn_softc *);
78static void iwn_radiotap_attach(struct iwn_softc *);
79static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
80 const char name[IFNAMSIZ], int unit, int opmode,
81 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
82 const uint8_t mac[IEEE80211_ADDR_LEN]);
83static void iwn_vap_delete(struct ieee80211vap *);
84static int iwn_detach(device_t);
85static int iwn_nic_lock(struct iwn_softc *);
86static int iwn_eeprom_lock(struct iwn_softc *);
87static int iwn_init_otprom(struct iwn_softc *);
88static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
89static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
90static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
91 void **, bus_size_t, bus_size_t, int);
92static void iwn_dma_contig_free(struct iwn_dma_info *);
93static int iwn_alloc_sched(struct iwn_softc *);
94static void iwn_free_sched(struct iwn_softc *);
95static int iwn_alloc_kw(struct iwn_softc *);
96static void iwn_free_kw(struct iwn_softc *);
97static int iwn_alloc_ict(struct iwn_softc *);
98static void iwn_free_ict(struct iwn_softc *);
99static int iwn_alloc_fwmem(struct iwn_softc *);
100static void iwn_free_fwmem(struct iwn_softc *);
101static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
102static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
103static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
104static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
105 int);
106static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
107static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
108static void iwn5000_ict_reset(struct iwn_softc *);
109static int iwn_read_eeprom(struct iwn_softc *,
110 uint8_t macaddr[IEEE80211_ADDR_LEN]);
111static void iwn4965_read_eeprom(struct iwn_softc *);
112static void iwn4965_print_power_group(struct iwn_softc *, int);
113static void iwn5000_read_eeprom(struct iwn_softc *);
114static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
115static void iwn_read_eeprom_band(struct iwn_softc *, int);
116#if 0 /* HT */
117static void iwn_read_eeprom_ht40(struct iwn_softc *, int);
118#endif
119static void iwn_read_eeprom_channels(struct iwn_softc *, int,
120 uint32_t);
121static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
122static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
123 const uint8_t mac[IEEE80211_ADDR_LEN]);
124static int iwn_media_change(struct ifnet *);
125static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+126	static void iwn_calib_timeout(void *);
126static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
127 struct iwn_rx_data *);
-128	static void iwn_timer_timeout(void *);
-129	static void iwn_calib_reset(struct iwn_softc *);
130static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
131 struct iwn_rx_data *);
132#if 0 /* HT */
133static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
134 struct iwn_rx_data *);
135#endif
136static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
137 struct iwn_rx_data *);
138static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
139 struct iwn_rx_data *);
140static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
141 struct iwn_rx_data *);
142static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
143 uint8_t);
144static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
145static void iwn_notif_intr(struct iwn_softc *);
146static void iwn_wakeup_intr(struct iwn_softc *);
147static void iwn_rftoggle_intr(struct iwn_softc *);
148static void iwn_fatal_intr(struct iwn_softc *);
149static void iwn_intr(void *);
150static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
151 uint16_t);
152static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
153 uint16_t);
154#ifdef notyet
155static void iwn5000_reset_sched(struct iwn_softc *, int, int);
156#endif
157static uint8_t iwn_plcp_signal(int);
158static int iwn_tx_data(struct iwn_softc *, struct mbuf *,
159 struct ieee80211_node *, struct iwn_tx_ring *);
160static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
161 const struct ieee80211_bpf_params *);
162static void iwn_start(struct ifnet *);
163static void iwn_start_locked(struct ifnet *);
-164	static void iwn_watchdog(struct iwn_softc *sc);
+163	static void iwn_watchdog(void *);
165static int iwn_ioctl(struct ifnet *, u_long, caddr_t);
166static int iwn_cmd(struct iwn_softc *, int, const void *, int, int);
167static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
168 int);
169static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
170 int);
171static int iwn_set_link_quality(struct iwn_softc *, uint8_t, int);
172static int iwn_add_broadcast_node(struct iwn_softc *, int);
173static int iwn_wme_update(struct ieee80211com *);
174static void iwn_update_mcast(struct ifnet *);
175static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
176static int iwn_set_critical_temp(struct iwn_softc *);
177static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
178static void iwn4965_power_calibration(struct iwn_softc *, int);
179static int iwn4965_set_txpower(struct iwn_softc *,
180 struct ieee80211_channel *, int);
181static int iwn5000_set_txpower(struct iwn_softc *,
182 struct ieee80211_channel *, int);
183static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
184static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
185static int iwn_get_noise(const struct iwn_rx_general_stats *);
186static int iwn4965_get_temperature(struct iwn_softc *);
187static int iwn5000_get_temperature(struct iwn_softc *);
188static int iwn_init_sensitivity(struct iwn_softc *);
189static void iwn_collect_noise(struct iwn_softc *,
190 const struct iwn_rx_general_stats *);
191static int iwn4965_init_gains(struct iwn_softc *);
192static int iwn5000_init_gains(struct iwn_softc *);
193static int iwn4965_set_gains(struct iwn_softc *);
194static int iwn5000_set_gains(struct iwn_softc *);
195static void iwn_tune_sensitivity(struct iwn_softc *,
196 const struct iwn_rx_stats *);
197static int iwn_send_sensitivity(struct iwn_softc *);
198static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
199static int iwn_send_btcoex(struct iwn_softc *);
200static int iwn_config(struct iwn_softc *);
201static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
202static int iwn_scan(struct iwn_softc *);
203static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
204static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
205#if 0 /* HT */
206static int iwn_ampdu_rx_start(struct ieee80211com *,
207 struct ieee80211_node *, uint8_t);
208static void iwn_ampdu_rx_stop(struct ieee80211com *,
209 struct ieee80211_node *, uint8_t);
210static int iwn_ampdu_tx_start(struct ieee80211com *,
211 struct ieee80211_node *, uint8_t);
212static void iwn_ampdu_tx_stop(struct ieee80211com *,
213 struct ieee80211_node *, uint8_t);
214static void iwn4965_ampdu_tx_start(struct iwn_softc *,
215 struct ieee80211_node *, uint8_t, uint16_t);
216static void iwn4965_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
217static void iwn5000_ampdu_tx_start(struct iwn_softc *,
218 struct ieee80211_node *, uint8_t, uint16_t);
219static void iwn5000_ampdu_tx_stop(struct iwn_softc *, uint8_t, uint16_t);
220#endif
221static int iwn5000_send_calib_results(struct iwn_softc *);
222static int iwn5000_save_calib_result(struct iwn_softc *,
223 struct iwn_phy_calib *, int, int);
224static void iwn5000_free_calib_results(struct iwn_softc *);
225static int iwn5000_chrystal_calib(struct iwn_softc *);
226static int iwn5000_send_calib_query(struct iwn_softc *, uint32_t);
227static int iwn5000_rx_calib_result(struct iwn_softc *,
228 struct iwn_rx_desc *, struct iwn_rx_data *);
229static int iwn5000_send_wimax_coex(struct iwn_softc *);
230static int iwn4965_post_alive(struct iwn_softc *);
231static int iwn5000_post_alive(struct iwn_softc *);
232static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
233 int);
234static int iwn4965_load_firmware(struct iwn_softc *);
235static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
236 const uint8_t *, int);
237static int iwn5000_load_firmware(struct iwn_softc *);
238static int iwn_read_firmware_leg(struct iwn_softc *,
239 struct iwn_fw_info *);
240static int iwn_read_firmware_tlv(struct iwn_softc *,
241 struct iwn_fw_info *, uint16_t);
242static int iwn_read_firmware(struct iwn_softc *);
243static int iwn_clock_wait(struct iwn_softc *);
244static int iwn_apm_init(struct iwn_softc *);
245static void iwn_apm_stop_master(struct iwn_softc *);
246static void iwn_apm_stop(struct iwn_softc *);
247static int iwn4965_nic_config(struct iwn_softc *);
248static int iwn5000_nic_config(struct iwn_softc *);
249static int iwn_hw_prepare(struct iwn_softc *);
250static int iwn_hw_init(struct iwn_softc *);
251static void iwn_hw_stop(struct iwn_softc *);
252static void iwn_init_locked(struct iwn_softc *);
253static void iwn_init(void *);
254static void iwn_stop_locked(struct iwn_softc *);
255static void iwn_stop(struct iwn_softc *);
256static void iwn_scan_start(struct ieee80211com *);
257static void iwn_scan_end(struct ieee80211com *);
258static void iwn_set_channel(struct ieee80211com *);
259static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
260static void iwn_scan_mindwell(struct ieee80211_scan_state *);
261static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
262 struct ieee80211_channel *);
263static int iwn_setregdomain(struct ieee80211com *,
264 struct ieee80211_regdomain *, int,
265 struct ieee80211_channel []);
266static void iwn_hw_reset(void *, int);
267static void iwn_radio_on(void *, int);
268static void iwn_radio_off(void *, int);
269static void iwn_sysctlattach(struct iwn_softc *);
270static int iwn_shutdown(device_t);
271static int iwn_suspend(device_t);
272static int iwn_resume(device_t);
273
274#define IWN_DEBUG
275#ifdef IWN_DEBUG
276enum {
277 IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
278 IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */
279 IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */
280 IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */
281 IWN_DEBUG_RESET = 0x00000010, /* reset processing */
282 IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */
283 IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */
284 IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */
285 IWN_DEBUG_INTR = 0x00000100, /* ISR */
286 IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */
287 IWN_DEBUG_NODE = 0x00000400, /* node management */
288 IWN_DEBUG_LED = 0x00000800, /* led management */
289 IWN_DEBUG_CMD = 0x00001000, /* cmd submission */
290 IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */
291 IWN_DEBUG_ANY = 0xffffffff
292};
293
294#define DPRINTF(sc, m, fmt, ...) do { \
295 if (sc->sc_debug & (m)) \
296 printf(fmt, __VA_ARGS__); \
297} while (0)
298
299static const char *iwn_intr_str(uint8_t);
300#else
301#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
302#endif
303
304struct iwn_ident {
305 uint16_t vendor;
306 uint16_t device;
307 const char *name;
308};
309
310static const struct iwn_ident iwn_ident_table [] = {
311 { 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" },
312 { 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" },
313 { 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" },
314 { 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" },
315 { 0x8086, 0x4232, "Intel(R) PRO/Wireless 5100" },
316 { 0x8086, 0x4237, "Intel(R) PRO/Wireless 5100" },
317 { 0x8086, 0x423C, "Intel(R) PRO/Wireless 5150" },
318 { 0x8086, 0x423D, "Intel(R) PRO/Wireless 5150" },
319 { 0x8086, 0x4235, "Intel(R) PRO/Wireless 5300" },
320 { 0x8086, 0x4236, "Intel(R) PRO/Wireless 5300" },
321 { 0x8086, 0x423A, "Intel(R) PRO/Wireless 5350" },
322 { 0x8086, 0x423B, "Intel(R) PRO/Wireless 5350" },
323 { 0x8086, 0x0083, "Intel(R) PRO/Wireless 1000" },
324 { 0x8086, 0x0084, "Intel(R) PRO/Wireless 1000" },
325 { 0x8086, 0x008D, "Intel(R) PRO/Wireless 6000" },
326 { 0x8086, 0x008E, "Intel(R) PRO/Wireless 6000" },
327 { 0x8086, 0x4238, "Intel(R) PRO/Wireless 6000" },
328 { 0x8086, 0x4239, "Intel(R) PRO/Wireless 6000" },
329 { 0x8086, 0x422B, "Intel(R) PRO/Wireless 6000" },
330 { 0x8086, 0x422C, "Intel(R) PRO/Wireless 6000" },
331 { 0x8086, 0x0087, "Intel(R) PRO/Wireless 6250" },
332 { 0x8086, 0x0089, "Intel(R) PRO/Wireless 6250" },
333 { 0x8086, 0x0082, "Intel(R) PRO/Wireless 6205a" },
334 { 0x8086, 0x0085, "Intel(R) PRO/Wireless 6205a" },
335#ifdef notyet
336 { 0x8086, 0x008a, "Intel(R) PRO/Wireless 6205b" },
337 { 0x8086, 0x008b, "Intel(R) PRO/Wireless 6205b" },
338 { 0x8086, 0x008f, "Intel(R) PRO/Wireless 6205b" },
339 { 0x8086, 0x0090, "Intel(R) PRO/Wireless 6205b" },
340 { 0x8086, 0x0091, "Intel(R) PRO/Wireless 6205b" },
341#endif
342 { 0, 0, NULL }
343};
344
345static const struct iwn_hal iwn4965_hal = {
346 iwn4965_load_firmware,
347 iwn4965_read_eeprom,
348 iwn4965_post_alive,
349 iwn4965_nic_config,
350 iwn4965_update_sched,
351 iwn4965_get_temperature,
352 iwn4965_get_rssi,
353 iwn4965_set_txpower,
354 iwn4965_init_gains,
355 iwn4965_set_gains,
356 iwn4965_add_node,
357 iwn4965_tx_done,
358#if 0 /* HT */
359 iwn4965_ampdu_tx_start,
360 iwn4965_ampdu_tx_stop,
361#endif
362 IWN4965_NTXQUEUES,
363 IWN4965_NDMACHNLS,
364 IWN4965_ID_BROADCAST,
365 IWN4965_RXONSZ,
366 IWN4965_SCHEDSZ,
367 IWN4965_FW_TEXT_MAXSZ,
368 IWN4965_FW_DATA_MAXSZ,
369 IWN4965_FWSZ,
370 IWN4965_SCHED_TXFACT
371};
372
373static const struct iwn_hal iwn5000_hal = {
374 iwn5000_load_firmware,
375 iwn5000_read_eeprom,
376 iwn5000_post_alive,
377 iwn5000_nic_config,
378 iwn5000_update_sched,
379 iwn5000_get_temperature,
380 iwn5000_get_rssi,
381 iwn5000_set_txpower,
382 iwn5000_init_gains,
383 iwn5000_set_gains,
384 iwn5000_add_node,
385 iwn5000_tx_done,
386#if 0 /* HT */
387 iwn5000_ampdu_tx_start,
388 iwn5000_ampdu_tx_stop,
389#endif
390 IWN5000_NTXQUEUES,
391 IWN5000_NDMACHNLS,
392 IWN5000_ID_BROADCAST,
393 IWN5000_RXONSZ,
394 IWN5000_SCHEDSZ,
395 IWN5000_FW_TEXT_MAXSZ,
396 IWN5000_FW_DATA_MAXSZ,
397 IWN5000_FWSZ,
398 IWN5000_SCHED_TXFACT
399};
400
401static int
402iwn_probe(device_t dev)
403{
404 const struct iwn_ident *ident;
405
406 for (ident = iwn_ident_table; ident->name != NULL; ident++) {
407 if (pci_get_vendor(dev) == ident->vendor &&
408 pci_get_device(dev) == ident->device) {
409 device_set_desc(dev, ident->name);
410 return 0;
411 }
412 }
413 return ENXIO;
414}
415
416static int
417iwn_attach(device_t dev)
418{
419 struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
420 struct ieee80211com *ic;
421 struct ifnet *ifp;
422 const struct iwn_hal *hal;
423 uint32_t tmp;
424 int i, error, result;
425 uint8_t macaddr[IEEE80211_ADDR_LEN];
426
427 sc->sc_dev = dev;
428
429 /*
430 * Get the offset of the PCI Express Capability Structure in PCI
431 * Configuration Space.
432 */
433 error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
434 if (error != 0) {
435 device_printf(dev, "PCIe capability structure not found!\n");
436 return error;
437 }
438
439 /* Clear device-specific "PCI retry timeout" register (41h). */
440 pci_write_config(dev, 0x41, 0, 1);
441
442 /* Hardware bug workaround. */
443 tmp = pci_read_config(dev, PCIR_COMMAND, 1);
444 if (tmp & PCIM_CMD_INTxDIS) {
445 DPRINTF(sc, IWN_DEBUG_RESET, "%s: PCIe INTx Disable set\n",
446 __func__);
447 tmp &= ~PCIM_CMD_INTxDIS;
448 pci_write_config(dev, PCIR_COMMAND, tmp, 1);
449 }
450
451 /* Enable bus-mastering. */
452 pci_enable_busmaster(dev);
453
454 sc->mem_rid = PCIR_BAR(0);
455 sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
456 RF_ACTIVE);
457 if (sc->mem == NULL ) {
458 device_printf(dev, "could not allocate memory resources\n");
459 error = ENOMEM;
460 return error;
461 }
462
463 sc->sc_st = rman_get_bustag(sc->mem);
464 sc->sc_sh = rman_get_bushandle(sc->mem);
465 sc->irq_rid = 0;
466 if ((result = pci_msi_count(dev)) == 1 &&
467 pci_alloc_msi(dev, &result) == 0)
468 sc->irq_rid = 1;
469 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
470 RF_ACTIVE | RF_SHAREABLE);
471 if (sc->irq == NULL) {
472 device_printf(dev, "could not allocate interrupt resource\n");
473 error = ENOMEM;
474 goto fail;
475 }
476
477 IWN_LOCK_INIT(sc);
-478	callout_init_mtx(&sc->sc_timer_to, &sc->sc_mtx, 0);
479 TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc );
480 TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc );
481 TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc );
482
483 /* Attach Hardware Abstraction Layer. */
484 hal = iwn_hal_attach(sc);
485 if (hal == NULL) {
486 error = ENXIO; /* XXX: Wrong error code? */
487 goto fail;
488 }
489
490 error = iwn_hw_prepare(sc);
491 if (error != 0) {
492 device_printf(dev, "hardware not ready, error %d\n", error);
493 goto fail;
494 }
495
496 /* Allocate DMA memory for firmware transfers. */
497 error = iwn_alloc_fwmem(sc);
498 if (error != 0) {
499 device_printf(dev,
500 "could not allocate memory for firmware, error %d\n",
501 error);
502 goto fail;
503 }
504
505 /* Allocate "Keep Warm" page. */
506 error = iwn_alloc_kw(sc);
507 if (error != 0) {
508 device_printf(dev,
509 "could not allocate \"Keep Warm\" page, error %d\n", error);
510 goto fail;
511 }
512
513 /* Allocate ICT table for 5000 Series. */
514 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
515 (error = iwn_alloc_ict(sc)) != 0) {
516 device_printf(dev,
517 "%s: could not allocate ICT table, error %d\n",
518 __func__, error);
519 goto fail;
520 }
521
522 /* Allocate TX scheduler "rings". */
523 error = iwn_alloc_sched(sc);
524 if (error != 0) {
525 device_printf(dev,
526 "could not allocate TX scheduler rings, error %d\n",
527 error);
528 goto fail;
529 }
530
531 /* Allocate TX rings (16 on 4965AGN, 20 on 5000). */
532 for (i = 0; i < hal->ntxqs; i++) {
533 error = iwn_alloc_tx_ring(sc, &sc->txq[i], i);
534 if (error != 0) {
535 device_printf(dev,
536 "could not allocate Tx ring %d, error %d\n",
537 i, error);
538 goto fail;
539 }
540 }
541
542 /* Allocate RX ring. */
543 error = iwn_alloc_rx_ring(sc, &sc->rxq);
544 if (error != 0 ){
545 device_printf(dev,
546 "could not allocate Rx ring, error %d\n", error);
547 goto fail;
548 }
549
550 /* Clear pending interrupts. */
551 IWN_WRITE(sc, IWN_INT, 0xffffffff);
552
553 /* Count the number of available chains. */
554 sc->ntxchains =
555 ((sc->txchainmask >> 2) & 1) +
556 ((sc->txchainmask >> 1) & 1) +
557 ((sc->txchainmask >> 0) & 1);
558 sc->nrxchains =
559 ((sc->rxchainmask >> 2) & 1) +
560 ((sc->rxchainmask >> 1) & 1) +
561 ((sc->rxchainmask >> 0) & 1);
562
563 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
564 if (ifp == NULL) {
565 device_printf(dev, "can not allocate ifnet structure\n");
566 goto fail;
567 }
568 ic = ifp->if_l2com;
569
570 ic->ic_ifp = ifp;
571 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
572 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
573
574 /* Set device capabilities. */
575 ic->ic_caps =
576 IEEE80211_C_STA /* station mode supported */
577 | IEEE80211_C_MONITOR /* monitor mode supported */
578 | IEEE80211_C_TXPMGT /* tx power management */
579 | IEEE80211_C_SHSLOT /* short slot time supported */
580 | IEEE80211_C_WPA
581 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
582 | IEEE80211_C_BGSCAN /* background scanning */
583#if 0
584 | IEEE80211_C_IBSS /* ibss/adhoc mode */
585#endif
586 | IEEE80211_C_WME /* WME */
587 ;
588#if 0 /* HT */
589 /* XXX disable until HT channel setup works */
590 ic->ic_htcaps =
591 IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */
592 | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */
593 | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
594 | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
595 | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
596 | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
597 /* s/w capabilities */
598 | IEEE80211_HTC_HT /* HT operation */
599 | IEEE80211_HTC_AMPDU /* tx A-MPDU */
600 | IEEE80211_HTC_AMSDU /* tx A-MSDU */
601 ;
602
603 /* Set HT capabilities. */
604 ic->ic_htcaps =
605#if IWN_RBUF_SIZE == 8192
606 IEEE80211_HTCAP_AMSDU7935 |
607#endif
608 IEEE80211_HTCAP_CBW20_40 |
609 IEEE80211_HTCAP_SGI20 |
610 IEEE80211_HTCAP_SGI40;
611 if (sc->hw_type != IWN_HW_REV_TYPE_4965)
612 ic->ic_htcaps |= IEEE80211_HTCAP_GF;
613 if (sc->hw_type == IWN_HW_REV_TYPE_6050)
614 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
615 else
616 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
617#endif
618
619 /* Read MAC address, channels, etc from EEPROM. */
620 error = iwn_read_eeprom(sc, macaddr);
621 if (error != 0) {
622 device_printf(dev, "could not read EEPROM, error %d\n",
623 error);
624 goto fail;
625 }
626
627 device_printf(sc->sc_dev, "MIMO %dT%dR, %.4s, address %6D\n",
628 sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
629 macaddr, ":");
630
631#if 0 /* HT */
632 /* Set supported HT rates. */
633 ic->ic_sup_mcs[0] = 0xff;
634 if (sc->nrxchains > 1)
635 ic->ic_sup_mcs[1] = 0xff;
636 if (sc->nrxchains > 2)
637 ic->ic_sup_mcs[2] = 0xff;
638#endif
639
640 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
641 ifp->if_softc = sc;
642 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
643 ifp->if_init = iwn_init;
644 ifp->if_ioctl = iwn_ioctl;
645 ifp->if_start = iwn_start;
646 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
647 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
648 IFQ_SET_READY(&ifp->if_snd);
649
650 ieee80211_ifattach(ic, macaddr);
651 ic->ic_vap_create = iwn_vap_create;
652 ic->ic_vap_delete = iwn_vap_delete;
653 ic->ic_raw_xmit = iwn_raw_xmit;
654 ic->ic_node_alloc = iwn_node_alloc;
655 ic->ic_wme.wme_update = iwn_wme_update;
656 ic->ic_update_mcast = iwn_update_mcast;
657 ic->ic_scan_start = iwn_scan_start;
658 ic->ic_scan_end = iwn_scan_end;
659 ic->ic_set_channel = iwn_set_channel;
660 ic->ic_scan_curchan = iwn_scan_curchan;
661 ic->ic_scan_mindwell = iwn_scan_mindwell;
662 ic->ic_setregdomain = iwn_setregdomain;
663#if 0 /* HT */
664 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
665 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
666 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
667 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
668#endif
669
670 iwn_radiotap_attach(sc);
+669
+670	callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
+671	callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
+672
671 iwn_sysctlattach(sc);
672
673 /*
674 * Hook our interrupt after all initialization is complete.
675 */
676 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
677 NULL, iwn_intr, sc, &sc->sc_ih);
678 if (error != 0) {
679 device_printf(dev, "could not set up interrupt, error %d\n",
680 error);
681 goto fail;
682 }
683
684 ieee80211_announce(ic);
685 return 0;
686fail:
687 iwn_detach(dev);
688 return error;
689}
690
691static const struct iwn_hal *
692iwn_hal_attach(struct iwn_softc *sc)
693{
694 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
695
696 switch (sc->hw_type) {
697 case IWN_HW_REV_TYPE_4965:
698 sc->sc_hal = &iwn4965_hal;
699 sc->limits = &iwn4965_sensitivity_limits;
700 sc->fwname = "iwn4965fw";
701 sc->txchainmask = IWN_ANT_AB;
702 sc->rxchainmask = IWN_ANT_ABC;
703 break;
704 case IWN_HW_REV_TYPE_5100:
705 sc->sc_hal = &iwn5000_hal;
706 sc->limits = &iwn5000_sensitivity_limits;
707 sc->fwname = "iwn5000fw";
708 sc->txchainmask = IWN_ANT_B;
709 sc->rxchainmask = IWN_ANT_AB;
710 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
711 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
712 IWN_CALIB_BASE_BAND;
713 break;
714 case IWN_HW_REV_TYPE_5150:
715 sc->sc_hal = &iwn5000_hal;
716 sc->limits = &iwn5150_sensitivity_limits;
717 sc->fwname = "iwn5150fw";
718 sc->txchainmask = IWN_ANT_A;
719 sc->rxchainmask = IWN_ANT_AB;
720 sc->calib_init = IWN_CALIB_DC | IWN_CALIB_LO |
721 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
722 break;
723 case IWN_HW_REV_TYPE_5300:
724 case IWN_HW_REV_TYPE_5350:
725 sc->sc_hal = &iwn5000_hal;
726 sc->limits = &iwn5000_sensitivity_limits;
727 sc->fwname = "iwn5000fw";
728 sc->txchainmask = IWN_ANT_ABC;
729 sc->rxchainmask = IWN_ANT_ABC;
730 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
731 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
732 IWN_CALIB_BASE_BAND;
733 break;
734 case IWN_HW_REV_TYPE_1000:
735 sc->sc_hal = &iwn5000_hal;
736 sc->limits = &iwn1000_sensitivity_limits;
737 sc->fwname = "iwn1000fw";
738 sc->txchainmask = IWN_ANT_A;
739 sc->rxchainmask = IWN_ANT_AB;
740 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
741 IWN_CALIB_TX_IQ | IWN_CALIB_TX_IQ_PERIODIC |
742 IWN_CALIB_BASE_BAND;
743 break;
744 case IWN_HW_REV_TYPE_6000:
745 sc->sc_hal = &iwn5000_hal;
746 sc->limits = &iwn6000_sensitivity_limits;
747 sc->fwname = "iwn6000fw";
748 switch (pci_get_device(sc->sc_dev)) {
749 case 0x422C:
750 case 0x4239:
751 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
752 sc->txchainmask = IWN_ANT_BC;
753 sc->rxchainmask = IWN_ANT_BC;
754 break;
755 default:
756 sc->txchainmask = IWN_ANT_ABC;
757 sc->rxchainmask = IWN_ANT_ABC;
758 sc->calib_runtime = IWN_CALIB_DC;
759 break;
760 }
761 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
762 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
763 break;
764 case IWN_HW_REV_TYPE_6050:
765 sc->sc_hal = &iwn5000_hal;
766 sc->limits = &iwn6000_sensitivity_limits;
767 sc->fwname = "iwn6050fw";
768 sc->txchainmask = IWN_ANT_AB;
769 sc->rxchainmask = IWN_ANT_AB;
770 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
771 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
772 sc->calib_runtime = IWN_CALIB_DC;
773 break;
774 case IWN_HW_REV_TYPE_6005:
775 sc->sc_hal = &iwn5000_hal;
776 sc->limits = &iwn6000_sensitivity_limits;
777 sc->fwname = "iwn6005fw";
778 sc->txchainmask = IWN_ANT_AB;
779 sc->rxchainmask = IWN_ANT_AB;
780 sc->calib_init = IWN_CALIB_XTAL | IWN_CALIB_LO |
781 IWN_CALIB_TX_IQ | IWN_CALIB_BASE_BAND;
782 sc->calib_runtime = IWN_CALIB_DC;
783 break;
784 default:
785 device_printf(sc->sc_dev, "adapter type %d not supported\n",
786 sc->hw_type);
787 return NULL;
788 }
789 return sc->sc_hal;
790}
791
792/*
793 * Attach the interface to 802.11 radiotap.
794 */
795static void
796iwn_radiotap_attach(struct iwn_softc *sc)
797{
798 struct ifnet *ifp = sc->sc_ifp;
799 struct ieee80211com *ic = ifp->if_l2com;
800
801 ieee80211_radiotap_attach(ic,
802 &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
803 IWN_TX_RADIOTAP_PRESENT,
804 &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
805 IWN_RX_RADIOTAP_PRESENT);
806}
807
808static struct ieee80211vap *
809iwn_vap_create(struct ieee80211com *ic,
810 const char name[IFNAMSIZ], int unit, int opmode, int flags,
811 const uint8_t bssid[IEEE80211_ADDR_LEN],
812 const uint8_t mac[IEEE80211_ADDR_LEN])
813{
814 struct iwn_vap *ivp;
815 struct ieee80211vap *vap;
816
817 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
818 return NULL;
819 ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
820 M_80211_VAP, M_NOWAIT | M_ZERO);
821 if (ivp == NULL)
822 return NULL;
823 vap = &ivp->iv_vap;
824 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
825 vap->iv_bmissthreshold = 10; /* override default */
826 /* Override with driver methods. */
827 ivp->iv_newstate = vap->iv_newstate;
828 vap->iv_newstate = iwn_newstate;
829
830 ieee80211_ratectl_init(vap);
831 /* Complete setup. */
832 ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
833 ic->ic_opmode = opmode;
834 return vap;
835}
836
837static void
838iwn_vap_delete(struct ieee80211vap *vap)
839{
840 struct iwn_vap *ivp = IWN_VAP(vap);
841
842 ieee80211_ratectl_deinit(vap);
843 ieee80211_vap_detach(vap);
844 free(ivp, M_80211_VAP);
845}
846
847static int
848iwn_detach(device_t dev)
849{
850 struct iwn_softc *sc = device_get_softc(dev);
851 struct ifnet *ifp = sc->sc_ifp;
852 struct ieee80211com *ic;
853 int i;
854
855 if (ifp != NULL) {
856 ic = ifp->if_l2com;
857
858 ieee80211_draintask(ic, &sc->sc_reinit_task);
859 ieee80211_draintask(ic, &sc->sc_radioon_task);
860 ieee80211_draintask(ic, &sc->sc_radiooff_task);
861
862 iwn_stop(sc);
846 free(ivp, M_80211_VAP);
847}
848
849static int
850iwn_detach(device_t dev)
851{
852 struct iwn_softc *sc = device_get_softc(dev);
853 struct ifnet *ifp = sc->sc_ifp;
854 struct ieee80211com *ic;
855 int i;
856
857 if (ifp != NULL) {
858 ic = ifp->if_l2com;
859
860 ieee80211_draintask(ic, &sc->sc_reinit_task);
861 ieee80211_draintask(ic, &sc->sc_radioon_task);
862 ieee80211_draintask(ic, &sc->sc_radiooff_task);
863
864 iwn_stop(sc);
863 callout_drain(&sc->sc_timer_to);
865 callout_drain(&sc->watchdog_to);
866 callout_drain(&sc->calib_to);
864 ieee80211_ifdetach(ic);
865 }
866
867 iwn5000_free_calib_results(sc);
868
869 /* Free DMA resources. */
870 iwn_free_rx_ring(sc, &sc->rxq);
871 if (sc->sc_hal != NULL)
872 for (i = 0; i < sc->sc_hal->ntxqs; i++)
873 iwn_free_tx_ring(sc, &sc->txq[i]);
874 iwn_free_sched(sc);
875 iwn_free_kw(sc);
876 if (sc->ict != NULL)
877 iwn_free_ict(sc);
878 iwn_free_fwmem(sc);
879
880 if (sc->irq != NULL) {
881 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
882 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
883 if (sc->irq_rid == 1)
884 pci_release_msi(dev);
885 }
886
887 if (sc->mem != NULL)
888 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
889
890 if (ifp != NULL)
891 if_free(ifp);
892
893 IWN_LOCK_DESTROY(sc);
894 return 0;
895}
896
897static int
898iwn_nic_lock(struct iwn_softc *sc)
899{
900 int ntries;
901
902 /* Request exclusive access to NIC. */
903 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
904
905 /* Spin until we actually get the lock. */
906 for (ntries = 0; ntries < 1000; ntries++) {
907 if ((IWN_READ(sc, IWN_GP_CNTRL) &
908 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
909 IWN_GP_CNTRL_MAC_ACCESS_ENA)
910 return 0;
911 DELAY(10);
912 }
913 return ETIMEDOUT;
914}
915
916static __inline void
917iwn_nic_unlock(struct iwn_softc *sc)
918{
919 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
920}
921
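/*
 * Indirect access to the NIC's peripheral (PRPH) registers through the
 * PRPH address/data window registers.
 */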
922static __inline uint32_t
923iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
924{
925 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
926 IWN_BARRIER_READ_WRITE(sc);
927 return IWN_READ(sc, IWN_PRPH_RDATA);
928}
929
930static __inline void
931iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
932{
933 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
934 IWN_BARRIER_WRITE(sc);
935 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
936}
937
938static __inline void
939iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
940{
941 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
942}
943
944static __inline void
945iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
946{
947 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
948}
949
950static __inline void
951iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
952 const uint32_t *data, int count)
953{
954 for (; count > 0; count--, data++, addr += 4)
955 iwn_prph_write(sc, addr, *data);
956}
957
958static __inline uint32_t
959iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
960{
961 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
962 IWN_BARRIER_READ_WRITE(sc);
963 return IWN_READ(sc, IWN_MEM_RDATA);
964}
965
966static __inline void
967iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
968{
969 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
970 IWN_BARRIER_WRITE(sc);
971 IWN_WRITE(sc, IWN_MEM_WDATA, data);
972}
973
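/*
 * Write a 16-bit value to NIC memory: read the enclosing 32-bit word,
 * replace its upper or lower half depending on the halfword offset and
 * write the word back.
 */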
974static __inline void
975iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
976{
977 uint32_t tmp;
978
979 tmp = iwn_mem_read(sc, addr & ~3);
980 if (addr & 3)
981 tmp = (tmp & 0x0000ffff) | data << 16;
982 else
983 tmp = (tmp & 0xffff0000) | data;
984 iwn_mem_write(sc, addr & ~3, tmp);
985}
986
987static __inline void
988iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
989 int count)
990{
991 for (; count > 0; count--, addr += 4)
992 *data++ = iwn_mem_read(sc, addr);
993}
994
995static __inline void
996iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
997 int count)
998{
999 for (; count > 0; count--, addr += 4)
1000 iwn_mem_write(sc, addr, val);
1001}
1002
1003static int
1004iwn_eeprom_lock(struct iwn_softc *sc)
1005{
1006 int i, ntries;
1007
1008 for (i = 0; i < 100; i++) {
1009 /* Request exclusive access to EEPROM. */
1010 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1011 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1012
1013 /* Spin until we actually get the lock. */
1014 for (ntries = 0; ntries < 100; ntries++) {
1015 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1016 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1017 return 0;
1018 DELAY(10);
1019 }
1020 }
1021 return ETIMEDOUT;
1022}
1023
1024static __inline void
1025iwn_eeprom_unlock(struct iwn_softc *sc)
1026{
1027 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1028}
1029
1030/*
1031 * Initialize access by host to One Time Programmable ROM.
1032 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1033 */
1034static int
1035iwn_init_otprom(struct iwn_softc *sc)
1036{
1037 uint16_t prev, base, next;
1038 int count, error;
1039
1040 /* Wait for clock stabilization before accessing prph. */
1041 error = iwn_clock_wait(sc);
1042 if (error != 0)
1043 return error;
1044
1045 error = iwn_nic_lock(sc);
1046 if (error != 0)
1047 return error;
1048 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1049 DELAY(5);
1050 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1051 iwn_nic_unlock(sc);
1052
1053 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1054 if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1055 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1056 IWN_RESET_LINK_PWR_MGMT_DIS);
1057 }
1058 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1059 /* Clear ECC status. */
1060 IWN_SETBITS(sc, IWN_OTP_GP,
1061 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1062
1063 /*
1064 * Find the block before the last block (it contains the EEPROM image)
1065 * for HW without OTP shadow RAM.
1066 */
1067 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1068 /* Switch to absolute addressing mode. */
1069 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1070 base = prev = 0;
1071 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1072 error = iwn_read_prom_data(sc, base, &next, 2);
1073 if (error != 0)
1074 return error;
1075 if (next == 0) /* End of linked-list. */
1076 break;
1077 prev = base;
1078 base = le16toh(next);
1079 }
1080 if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1081 return EIO;
1082 /* Skip "next" word. */
1083 sc->prom_base = prev + 1;
1084 }
1085 return 0;
1086}
1087
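/*
 * Read `count' bytes of EEPROM/OTPROM data starting at `addr'; each
 * register access returns one 16-bit ROM word in the upper half of the
 * 32-bit EEPROM register.
 */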
1088static int
1089iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1090{
1091 uint32_t val, tmp;
1092 int ntries;
1093 uint8_t *out = data;
1094
1095 addr += sc->prom_base;
1096 for (; count > 0; count -= 2, addr++) {
1097 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1098 for (ntries = 0; ntries < 10; ntries++) {
1099 val = IWN_READ(sc, IWN_EEPROM);
1100 if (val & IWN_EEPROM_READ_VALID)
1101 break;
1102 DELAY(5);
1103 }
1104 if (ntries == 10) {
1105 device_printf(sc->sc_dev,
1106 "timeout reading ROM at 0x%x\n", addr);
1107 return ETIMEDOUT;
1108 }
1109 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1110 /* OTPROM, check for ECC errors. */
1111 tmp = IWN_READ(sc, IWN_OTP_GP);
1112 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1113 device_printf(sc->sc_dev,
1114 "OTPROM ECC error at 0x%x\n", addr);
1115 return EIO;
1116 }
1117 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1118 /* Correctable ECC error, clear bit. */
1119 IWN_SETBITS(sc, IWN_OTP_GP,
1120 IWN_OTP_GP_ECC_CORR_STTS);
1121 }
1122 }
1123 *out++ = val >> 16;
1124 if (count > 1)
1125 *out++ = val >> 24;
1126 }
1127 return 0;
1128}
1129
1130static void
1131iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1132{
1133 if (error != 0)
1134 return;
1135 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1136 *(bus_addr_t *)arg = segs[0].ds_addr;
1137}
1138
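/*
 * Allocate and map a physically contiguous DMA area of `size' bytes with
 * the requested alignment; the kernel virtual address is returned through
 * `kvap' when it is non-NULL.
 */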
1139static int
1140iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1141 void **kvap, bus_size_t size, bus_size_t alignment, int flags)
1142{
1143 int error;
1144
1145 dma->size = size;
1146 dma->tag = NULL;
1147
1148 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1149 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1150 1, size, flags, NULL, NULL, &dma->tag);
1151 if (error != 0) {
1152 device_printf(sc->sc_dev,
1153 "%s: bus_dma_tag_create failed, error %d\n",
1154 __func__, error);
1155 goto fail;
1156 }
1157 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1158 flags | BUS_DMA_ZERO, &dma->map);
1159 if (error != 0) {
1160 device_printf(sc->sc_dev,
1161 "%s: bus_dmamem_alloc failed, error %d\n", __func__, error);
1162 goto fail;
1163 }
1164 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
1165 size, iwn_dma_map_addr, &dma->paddr, flags);
1166 if (error != 0) {
1167 device_printf(sc->sc_dev,
1168 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
1169 goto fail;
1170 }
1171
1172 if (kvap != NULL)
1173 *kvap = dma->vaddr;
1174 return 0;
1175fail:
1176 iwn_dma_contig_free(dma);
1177 return error;
1178}
1179
1180static void
1181iwn_dma_contig_free(struct iwn_dma_info *dma)
1182{
1183 if (dma->tag != NULL) {
1184 if (dma->map != NULL) {
1185 if (dma->paddr == 0) {
1186 bus_dmamap_sync(dma->tag, dma->map,
1187 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1188 bus_dmamap_unload(dma->tag, dma->map);
1189 }
1190 bus_dmamem_free(dma->tag, &dma->vaddr, dma->map);
1191 }
1192 bus_dma_tag_destroy(dma->tag);
1193 }
1194}
1195
1196static int
1197iwn_alloc_sched(struct iwn_softc *sc)
1198{
1199 /* TX scheduler rings must be aligned on a 1KB boundary. */
1200 return iwn_dma_contig_alloc(sc, &sc->sched_dma,
1201 (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT);
1202}
1203
1204static void
1205iwn_free_sched(struct iwn_softc *sc)
1206{
1207 iwn_dma_contig_free(&sc->sched_dma);
1208}
1209
1210static int
1211iwn_alloc_kw(struct iwn_softc *sc)
1212{
1213 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1214 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096,
1215 BUS_DMA_NOWAIT);
1216}
1217
1218static void
1219iwn_free_kw(struct iwn_softc *sc)
1220{
1221 iwn_dma_contig_free(&sc->kw_dma);
1222}
1223
1224static int
1225iwn_alloc_ict(struct iwn_softc *sc)
1226{
1227 /* ICT table must be aligned on a 4KB boundary. */
1228 return iwn_dma_contig_alloc(sc, &sc->ict_dma,
1229 (void **)&sc->ict, IWN_ICT_SIZE, 4096, BUS_DMA_NOWAIT);
1230}
1231
1232static void
1233iwn_free_ict(struct iwn_softc *sc)
1234{
1235 iwn_dma_contig_free(&sc->ict_dma);
1236}
1237
1238static int
1239iwn_alloc_fwmem(struct iwn_softc *sc)
1240{
1241 /* Must be aligned on a 16-byte boundary. */
1242 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL,
1243 sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT);
1244}
1245
1246static void
1247iwn_free_fwmem(struct iwn_softc *sc)
1248{
1249 iwn_dma_contig_free(&sc->fw_dma);
1250}
1251
1252static int
1253iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1254{
1255 bus_size_t size;
1256 int i, error;
1257
1258 ring->cur = 0;
1259
1260 /* Allocate RX descriptors (256-byte aligned). */
1261 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1262 error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1263 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1264 if (error != 0) {
1265 device_printf(sc->sc_dev,
1266 "%s: could not allocate Rx ring DMA memory, error %d\n",
1267 __func__, error);
1268 goto fail;
1269 }
1270
1271 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1272 BUS_SPACE_MAXADDR_32BIT,
1273 BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1,
1274 MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
1275 if (error != 0) {
1276 device_printf(sc->sc_dev,
1277 "%s: bus_dma_tag_create failed, error %d\n",
1278 __func__, error);
1279 goto fail;
1280 }
1281
1282 /* Allocate RX status area (16-byte aligned). */
1283 error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
1284 (void **)&ring->stat, sizeof (struct iwn_rx_status),
1285 16, BUS_DMA_NOWAIT);
1286 if (error != 0) {
1287 device_printf(sc->sc_dev,
1288 "%s: could not allocate Rx status DMA memory, error %d\n",
1289 __func__, error);
1290 goto fail;
1291 }
1292
1293 /*
1294 * Allocate and map RX buffers.
1295 */
1296 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1297 struct iwn_rx_data *data = &ring->data[i];
1298 bus_addr_t paddr;
1299
1300 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1301 if (error != 0) {
1302 device_printf(sc->sc_dev,
1303 "%s: bus_dmamap_create failed, error %d\n",
1304 __func__, error);
1305 goto fail;
1306 }
1307
1308 data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1309 if (data->m == NULL) {
1310 device_printf(sc->sc_dev,
1311 "%s: could not allocate rx mbuf\n", __func__);
1312 error = ENOMEM;
1313 goto fail;
1314 }
1315
1316 /* Map page. */
1317 error = bus_dmamap_load(ring->data_dmat, data->map,
1318 mtod(data->m, caddr_t), MJUMPAGESIZE,
1319 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
1320 if (error != 0 && error != EFBIG) {
1321 device_printf(sc->sc_dev,
1322 "%s: bus_dmamap_load failed, error %d\n",
1323 __func__, error);
1324 m_freem(data->m);
1325 error = ENOMEM; /* XXX unique code */
1326 goto fail;
1327 }
1328 bus_dmamap_sync(ring->data_dmat, data->map,
1329 BUS_DMASYNC_PREWRITE);
1330
1331 /* Set physical address of RX buffer (256-byte aligned). */
1332 ring->desc[i] = htole32(paddr >> 8);
1333 }
1334 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1335 BUS_DMASYNC_PREWRITE);
1336 return 0;
1337fail:
1338 iwn_free_rx_ring(sc, ring);
1339 return error;
1340}
1341
1342static void
1343iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1344{
1345 int ntries;
1346
1347 if (iwn_nic_lock(sc) == 0) {
1348 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1349 for (ntries = 0; ntries < 1000; ntries++) {
1350 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1351 IWN_FH_RX_STATUS_IDLE)
1352 break;
1353 DELAY(10);
1354 }
1355 iwn_nic_unlock(sc);
1356#ifdef IWN_DEBUG
1357 if (ntries == 1000)
1358 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
1359 "timeout resetting Rx ring");
1360#endif
1361 }
1362 ring->cur = 0;
1363 sc->last_rx_valid = 0;
1364}
1365
1366static void
1367iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1368{
1369 int i;
1370
1371 iwn_dma_contig_free(&ring->desc_dma);
1372 iwn_dma_contig_free(&ring->stat_dma);
1373
1374 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1375 struct iwn_rx_data *data = &ring->data[i];
1376
1377 if (data->m != NULL) {
1378 bus_dmamap_sync(ring->data_dmat, data->map,
1379 BUS_DMASYNC_POSTREAD);
1380 bus_dmamap_unload(ring->data_dmat, data->map);
1381 m_freem(data->m);
1382 }
1383 if (data->map != NULL)
1384 bus_dmamap_destroy(ring->data_dmat, data->map);
1385 }
1386}
1387
1388static int
1389iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1390{
1391 bus_size_t size;
1392 bus_addr_t paddr;
1393 int i, error;
1394
1395 ring->qid = qid;
1396 ring->queued = 0;
1397 ring->cur = 0;
1398
1399 /* Allocate TX descriptors (256-byte aligned.) */
1400 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc);
1401 error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1402 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1403 if (error != 0) {
1404 device_printf(sc->sc_dev,
1405 "%s: could not allocate TX ring DMA memory, error %d\n",
1406 __func__, error);
1407 goto fail;
1408 }
1409
1410 /*
1411 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1412 * to allocate command space for other rings.
1413 */
1414 if (qid > 4)
1415 return 0;
1416
1417 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd);
1418 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma,
1419 (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT);
1420 if (error != 0) {
1421 device_printf(sc->sc_dev,
1422 "%s: could not allocate TX cmd DMA memory, error %d\n",
1423 __func__, error);
1424 goto fail;
1425 }
1426
1427 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1428 BUS_SPACE_MAXADDR_32BIT,
1429 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1,
1430 MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
1431 if (error != 0) {
1432 device_printf(sc->sc_dev,
1433 "%s: bus_dma_tag_create failed, error %d\n",
1434 __func__, error);
1435 goto fail;
1436 }
1437
1438 paddr = ring->cmd_dma.paddr;
1439 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1440 struct iwn_tx_data *data = &ring->data[i];
1441
1442 data->cmd_paddr = paddr;
1443 data->scratch_paddr = paddr + 12;
1444 paddr += sizeof (struct iwn_tx_cmd);
1445
1446 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1447 if (error != 0) {
1448 device_printf(sc->sc_dev,
1449 "%s: bus_dmamap_create failed, error %d\n",
1450 __func__, error);
1451 goto fail;
1452 }
1453 bus_dmamap_sync(ring->data_dmat, data->map,
1454 BUS_DMASYNC_PREWRITE);
1455 }
1456 return 0;
1457fail:
1458 iwn_free_tx_ring(sc, ring);
1459 return error;
1460}
1461
1462static void
1463iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1464{
1465 int i;
1466
1467 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1468 struct iwn_tx_data *data = &ring->data[i];
1469
1470 if (data->m != NULL) {
1471 bus_dmamap_unload(ring->data_dmat, data->map);
1472 m_freem(data->m);
1473 data->m = NULL;
1474 }
1475 }
1476 /* Clear TX descriptors. */
1477 memset(ring->desc, 0, ring->desc_dma.size);
1478 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1479 BUS_DMASYNC_PREWRITE);
1480 sc->qfullmsk &= ~(1 << ring->qid);
1481 ring->queued = 0;
1482 ring->cur = 0;
1483}
1484
1485static void
1486iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1487{
1488 int i;
1489
1490 iwn_dma_contig_free(&ring->desc_dma);
1491 iwn_dma_contig_free(&ring->cmd_dma);
1492
1493 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1494 struct iwn_tx_data *data = &ring->data[i];
1495
1496 if (data->m != NULL) {
1497 bus_dmamap_sync(ring->data_dmat, data->map,
1498 BUS_DMASYNC_POSTWRITE);
1499 bus_dmamap_unload(ring->data_dmat, data->map);
1500 m_freem(data->m);
1501 }
1502 if (data->map != NULL)
1503 bus_dmamap_destroy(ring->data_dmat, data->map);
1504 }
1505}
1506
1507static void
1508iwn5000_ict_reset(struct iwn_softc *sc)
1509{
1510 /* Disable interrupts. */
1511 IWN_WRITE(sc, IWN_INT_MASK, 0);
1512
1513 /* Reset ICT table. */
1514 memset(sc->ict, 0, IWN_ICT_SIZE);
1515 sc->ict_cur = 0;
1516
1517 /* Set physical address of ICT table (4KB aligned.) */
1518 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1519 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1520 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1521
1522 /* Enable periodic RX interrupt. */
1523 sc->int_mask |= IWN_INT_RX_PERIODIC;
1524 /* Switch to ICT interrupt mode in driver. */
1525 sc->sc_flags |= IWN_FLAG_USE_ICT;
1526
1527 /* Re-enable interrupts. */
1528 IWN_WRITE(sc, IWN_INT, 0xffffffff);
1529 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1530}
1531
1532static int
1533iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1534{
1535 const struct iwn_hal *hal = sc->sc_hal;
1536 int error;
1537 uint16_t val;
1538
1539 /* Check whether adapter has an EEPROM or an OTPROM. */
1540 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1541 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1542 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1543 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1544 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1545
1546 /* Adapter has to be powered on for EEPROM access to work. */
1547 error = iwn_apm_init(sc);
1548 if (error != 0) {
1549 device_printf(sc->sc_dev,
1550 "%s: could not power ON adapter, error %d\n",
1551 __func__, error);
1552 return error;
1553 }
1554
1555 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1556 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1557 return EIO;
1558 }
1559 error = iwn_eeprom_lock(sc);
1560 if (error != 0) {
1561 device_printf(sc->sc_dev,
1562 "%s: could not lock ROM, error %d\n",
1563 __func__, error);
1564 return error;
1565 }
1566
1567 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1568 error = iwn_init_otprom(sc);
1569 if (error != 0) {
1570 device_printf(sc->sc_dev,
1571 "%s: could not initialize OTPROM, error %d\n",
1572 __func__, error);
1573 return error;
1574 }
1575 }
1576
1577 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1578 sc->rfcfg = le16toh(val);
1579 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1580
1581 /* Read MAC address. */
1582 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1583
1584 /* Read adapter-specific information from EEPROM. */
1585 hal->read_eeprom(sc);
1586
1587 iwn_apm_stop(sc); /* Power OFF adapter. */
1588
1589 iwn_eeprom_unlock(sc);
1590 return 0;
1591}
1592
1593static void
1594iwn4965_read_eeprom(struct iwn_softc *sc)
1595{
1596 uint32_t addr;
1597 int i;
1598 uint16_t val;
1599
1600 /* Read regulatory domain (4 ASCII characters.) */
1601 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1602
1603 /* Read the list of authorized channels (20MHz ones only.) */
1604 for (i = 0; i < 5; i++) {
1605 addr = iwn4965_regulatory_bands[i];
1606 iwn_read_eeprom_channels(sc, i, addr);
1607 }
1608
1609 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1610 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1611 sc->maxpwr2GHz = val & 0xff;
1612 sc->maxpwr5GHz = val >> 8;
1613 /* Check that EEPROM values are within valid range. */
1614 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1615 sc->maxpwr5GHz = 38;
1616 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1617 sc->maxpwr2GHz = 38;
1618 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1619 sc->maxpwr2GHz, sc->maxpwr5GHz);
1620
1621 /* Read samples for each TX power group. */
1622 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1623 sizeof sc->bands);
1624
1625 /* Read voltage at which samples were taken. */
1626 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1627 sc->eeprom_voltage = (int16_t)le16toh(val);
1628 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1629 sc->eeprom_voltage);
1630
1631#ifdef IWN_DEBUG
1632 /* Print samples. */
1633 if (sc->sc_debug & IWN_DEBUG_ANY) {
1634 for (i = 0; i < IWN_NBANDS; i++)
1635 iwn4965_print_power_group(sc, i);
1636 }
1637#endif
1638}
1639
1640#ifdef IWN_DEBUG
1641static void
1642iwn4965_print_power_group(struct iwn_softc *sc, int i)
1643{
1644 struct iwn4965_eeprom_band *band = &sc->bands[i];
1645 struct iwn4965_eeprom_chan_samples *chans = band->chans;
1646 int j, c;
1647
1648 printf("===band %d===\n", i);
1649 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1650 printf("chan1 num=%d\n", chans[0].num);
1651 for (c = 0; c < 2; c++) {
1652 for (j = 0; j < IWN_NSAMPLES; j++) {
1653 printf("chain %d, sample %d: temp=%d gain=%d "
1654 "power=%d pa_det=%d\n", c, j,
1655 chans[0].samples[c][j].temp,
1656 chans[0].samples[c][j].gain,
1657 chans[0].samples[c][j].power,
1658 chans[0].samples[c][j].pa_det);
1659 }
1660 }
1661 printf("chan2 num=%d\n", chans[1].num);
1662 for (c = 0; c < 2; c++) {
1663 for (j = 0; j < IWN_NSAMPLES; j++) {
1664 printf("chain %d, sample %d: temp=%d gain=%d "
1665 "power=%d pa_det=%d\n", c, j,
1666 chans[1].samples[c][j].temp,
1667 chans[1].samples[c][j].gain,
1668 chans[1].samples[c][j].power,
1669 chans[1].samples[c][j].pa_det);
1670 }
1671 }
1672}
1673#endif
1674
1675static void
1676iwn5000_read_eeprom(struct iwn_softc *sc)
1677{
1678 struct iwn5000_eeprom_calib_hdr hdr;
1679 int32_t temp, volt;
1680 uint32_t addr, base;
1681 int i;
1682 uint16_t val;
1683
1684 /* Read regulatory domain (4 ASCII characters.) */
1685 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1686 base = le16toh(val);
1687 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1688 sc->eeprom_domain, 4);
1689
1690 /* Read the list of authorized channels (20MHz ones only.) */
1691 for (i = 0; i < 5; i++) {
1692 addr = base + iwn5000_regulatory_bands[i];
1693 iwn_read_eeprom_channels(sc, i, addr);
1694 }
1695
1696 /* Read enhanced TX power information for 6000 Series. */
1697 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1698 iwn_read_eeprom_enhinfo(sc);
1699
1700 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1701 base = le16toh(val);
1702 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1703 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1704 "%s: calib version=%u pa type=%u voltage=%u\n",
1705 __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt));
1706 sc->calib_ver = hdr.version;
1707
1708 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1709 /* Compute temperature offset. */
1710 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1711 temp = le16toh(val);
1712 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1713 volt = le16toh(val);
1714 sc->temp_off = temp - (volt / -5);
1715 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1716 temp, volt, sc->temp_off);
1717 }
1718}
1719
1720/*
1721 * Translate EEPROM flags to net80211.
1722 */
1723static uint32_t
1724iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1725{
1726 uint32_t nflags;
1727
1728 nflags = 0;
1729 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1730 nflags |= IEEE80211_CHAN_PASSIVE;
1731 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1732 nflags |= IEEE80211_CHAN_NOADHOC;
1733 if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1734 nflags |= IEEE80211_CHAN_DFS;
1735 /* XXX apparently IBSS may still be marked */
1736 nflags |= IEEE80211_CHAN_NOADHOC;
1737 }
1738
1739 return nflags;
1740}
1741
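/*
 * Add the channels of EEPROM band `n' to the net80211 channel list:
 * band 0 yields 2GHz B and G entries, the other bands 5GHz A entries.
 * The per-channel regulatory TX power limit is saved in sc->maxpwr.
 */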
1742static void
1743iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1744{
1745 struct ifnet *ifp = sc->sc_ifp;
1746 struct ieee80211com *ic = ifp->if_l2com;
1747 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1748 const struct iwn_chan_band *band = &iwn_bands[n];
1749 struct ieee80211_channel *c;
1750 int i, chan, nflags;
1751
1752 for (i = 0; i < band->nchan; i++) {
1753 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1754 DPRINTF(sc, IWN_DEBUG_RESET,
1755 "skip chan %d flags 0x%x maxpwr %d\n",
1756 band->chan[i], channels[i].flags,
1757 channels[i].maxpwr);
1758 continue;
1759 }
1760 chan = band->chan[i];
1761 nflags = iwn_eeprom_channel_flags(&channels[i]);
1762
1763 DPRINTF(sc, IWN_DEBUG_RESET,
1764 "add chan %d flags 0x%x maxpwr %d\n",
1765 chan, channels[i].flags, channels[i].maxpwr);
1766
1767 c = &ic->ic_channels[ic->ic_nchans++];
1768 c->ic_ieee = chan;
1769 c->ic_maxregpower = channels[i].maxpwr;
1770 c->ic_maxpower = 2*c->ic_maxregpower;
1771
1772 /* Save maximum allowed TX power for this channel. */
1773 sc->maxpwr[chan] = channels[i].maxpwr;
1774
1775 if (n == 0) { /* 2GHz band */
1776 c->ic_freq = ieee80211_ieee2mhz(chan,
1777 IEEE80211_CHAN_G);
1778
1779 /* G =>'s B is supported */
1780 c->ic_flags = IEEE80211_CHAN_B | nflags;
1781
1782 c = &ic->ic_channels[ic->ic_nchans++];
1783 c[0] = c[-1];
1784 c->ic_flags = IEEE80211_CHAN_G | nflags;
1785 } else { /* 5GHz band */
1786 c->ic_freq = ieee80211_ieee2mhz(chan,
1787 IEEE80211_CHAN_A);
1788 c->ic_flags = IEEE80211_CHAN_A | nflags;
1789 }
1790#if 0 /* HT */
1791 /* XXX no constraints on using HT20 */
1792 /* add HT20, HT40 added separately */
1793 c = &ic->ic_channels[ic->ic_nchans++];
1794 c[0] = c[-1];
1795 c->ic_flags |= IEEE80211_CHAN_HT20;
1796 /* XXX NARROW =>'s 1/2 and 1/4 width? */
1797#endif
1798 }
1799}
1800
1801#if 0 /* HT */
1802static void
1803iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1804{
1805 struct ifnet *ifp = sc->sc_ifp;
1806 struct ieee80211com *ic = ifp->if_l2com;
1807 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1808 const struct iwn_chan_band *band = &iwn_bands[n];
1809 struct ieee80211_channel *c, *cent, *extc;
1810 int i;
1811
1812 for (i = 0; i < band->nchan; i++) {
1813 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) ||
1814 !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) {
1815 DPRINTF(sc, IWN_DEBUG_RESET,
1816 "skip chan %d flags 0x%x maxpwr %d\n",
1817 band->chan[i], channels[i].flags,
1818 channels[i].maxpwr);
1819 continue;
1820 }
1821 /*
1822 * Each entry defines an HT40 channel pair; find the
1823 * center channel, then the extension channel above.
1824 */
1825 cent = ieee80211_find_channel_byieee(ic, band->chan[i],
1826 band->flags & ~IEEE80211_CHAN_HT);
1827 if (cent == NULL) { /* XXX shouldn't happen */
1828 device_printf(sc->sc_dev,
1829 "%s: no entry for channel %d\n",
1830 __func__, band->chan[i]);
1831 continue;
1832 }
1833 extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1834 band->flags & ~IEEE80211_CHAN_HT);
1835 if (extc == NULL) {
1836 DPRINTF(sc, IWN_DEBUG_RESET,
1837 "skip chan %d, extension channel not found\n",
1838 band->chan[i]);
1839 continue;
1840 }
1841
1842 DPRINTF(sc, IWN_DEBUG_RESET,
1843 "add ht40 chan %d flags 0x%x maxpwr %d\n",
1844 band->chan[i], channels[i].flags, channels[i].maxpwr);
1845
1846 c = &ic->ic_channels[ic->ic_nchans++];
1847 c[0] = cent[0];
1848 c->ic_extieee = extc->ic_ieee;
1849 c->ic_flags &= ~IEEE80211_CHAN_HT;
1850 c->ic_flags |= IEEE80211_CHAN_HT40U;
1851 c = &ic->ic_channels[ic->ic_nchans++];
1852 c[0] = extc[0];
1853 c->ic_extieee = cent->ic_ieee;
1854 c->ic_flags &= ~IEEE80211_CHAN_HT;
1855 c->ic_flags |= IEEE80211_CHAN_HT40D;
1856 }
1857}
1858#endif
1859
1860static void
1861iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1862{
1863 struct ifnet *ifp = sc->sc_ifp;
1864 struct ieee80211com *ic = ifp->if_l2com;
1865
1866 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1867 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1868
1869 if (n < 5)
1870 iwn_read_eeprom_band(sc, n);
1871#if 0 /* HT */
1872 else
1873 iwn_read_eeprom_ht40(sc, n);
1874#endif
1875 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1876}
1877
1878#define nitems(_a) (sizeof((_a)) / sizeof((_a)[0]))
1879
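/*
 * Read the enhanced TX power table (6000 Series) and record, for each
 * valid entry, the highest limit among the active TX chains, converted
 * from half-dBm to dBm.
 */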
1880static void
1881iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1882{
1883 struct iwn_eeprom_enhinfo enhinfo[35];
1884 uint16_t val, base;
1885 int8_t maxpwr;
1886 int i;
1887
1888 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1889 base = le16toh(val);
1890 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1891 enhinfo, sizeof enhinfo);
1892
1893 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1894 for (i = 0; i < nitems(enhinfo); i++) {
1895 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1896 continue; /* Skip invalid entries. */
1897
1898 maxpwr = 0;
1899 if (sc->txchainmask & IWN_ANT_A)
1900 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1901 if (sc->txchainmask & IWN_ANT_B)
1902 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1903 if (sc->txchainmask & IWN_ANT_C)
1904 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1905 if (sc->ntxchains == 2)
1906 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1907 else if (sc->ntxchains == 3)
1908 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1909 maxpwr /= 2; /* Convert half-dBm to dBm. */
1910
1911 DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i,
1912 maxpwr);
1913 sc->enh_maxpwr[i] = maxpwr;
1914 }
1915}
1916
1917static struct ieee80211_node *
1918iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1919{
1920 return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
1921}
1922
1923static int
1924iwn_media_change(struct ifnet *ifp)
1925{
1926 int error = ieee80211_media_change(ifp);
1927 /* NB: only the fixed rate can change and that doesn't need a reset */
1928 return (error == ENETRESET ? 0 : error);
1929}
1930
1931static int
1932iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1933{
1934 struct iwn_vap *ivp = IWN_VAP(vap);
1935 struct ieee80211com *ic = vap->iv_ic;
1936 struct iwn_softc *sc = ic->ic_ifp->if_softc;
1937 int error;
1938
1939 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
1940 ieee80211_state_name[vap->iv_state],
1941 ieee80211_state_name[nstate]);
1942
1943 IEEE80211_UNLOCK(ic);
1944 IWN_LOCK(sc);
867 ieee80211_ifdetach(ic);
868 }
869
870 iwn5000_free_calib_results(sc);
871
872 /* Free DMA resources. */
873 iwn_free_rx_ring(sc, &sc->rxq);
874 if (sc->sc_hal != NULL)
875 for (i = 0; i < sc->sc_hal->ntxqs; i++)
876 iwn_free_tx_ring(sc, &sc->txq[i]);
877 iwn_free_sched(sc);
878 iwn_free_kw(sc);
879 if (sc->ict != NULL)
880 iwn_free_ict(sc);
881 iwn_free_fwmem(sc);
882
883 if (sc->irq != NULL) {
884 bus_teardown_intr(dev, sc->irq, sc->sc_ih);
885 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
886 if (sc->irq_rid == 1)
887 pci_release_msi(dev);
888 }
889
890 if (sc->mem != NULL)
891 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
892
893 if (ifp != NULL)
894 if_free(ifp);
895
896 IWN_LOCK_DESTROY(sc);
897 return 0;
898}
899
900static int
901iwn_nic_lock(struct iwn_softc *sc)
902{
903 int ntries;
904
905 /* Request exclusive access to NIC. */
906 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
907
908 /* Spin until we actually get the lock. */
909 for (ntries = 0; ntries < 1000; ntries++) {
910 if ((IWN_READ(sc, IWN_GP_CNTRL) &
911 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
912 IWN_GP_CNTRL_MAC_ACCESS_ENA)
913 return 0;
914 DELAY(10);
915 }
916 return ETIMEDOUT;
917}
918
919static __inline void
920iwn_nic_unlock(struct iwn_softc *sc)
921{
922 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
923}
924
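/*
 * Indirect access to the NIC's peripheral (PRPH) registers through the
 * PRPH address/data window registers.
 */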
925static __inline uint32_t
926iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
927{
928 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
929 IWN_BARRIER_READ_WRITE(sc);
930 return IWN_READ(sc, IWN_PRPH_RDATA);
931}
932
933static __inline void
934iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
935{
936 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
937 IWN_BARRIER_WRITE(sc);
938 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
939}
940
941static __inline void
942iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
943{
944 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
945}
946
947static __inline void
948iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
949{
950 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
951}
952
953static __inline void
954iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
955 const uint32_t *data, int count)
956{
957 for (; count > 0; count--, data++, addr += 4)
958 iwn_prph_write(sc, addr, *data);
959}
960
961static __inline uint32_t
962iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
963{
964 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
965 IWN_BARRIER_READ_WRITE(sc);
966 return IWN_READ(sc, IWN_MEM_RDATA);
967}
968
969static __inline void
970iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
971{
972 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
973 IWN_BARRIER_WRITE(sc);
974 IWN_WRITE(sc, IWN_MEM_WDATA, data);
975}
976
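/*
 * Write a 16-bit value to NIC memory: read the enclosing 32-bit word,
 * replace its upper or lower half depending on the halfword offset and
 * write the word back.
 */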
977static __inline void
978iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
979{
980 uint32_t tmp;
981
982 tmp = iwn_mem_read(sc, addr & ~3);
983 if (addr & 3)
984 tmp = (tmp & 0x0000ffff) | data << 16;
985 else
986 tmp = (tmp & 0xffff0000) | data;
987 iwn_mem_write(sc, addr & ~3, tmp);
988}
989
990static __inline void
991iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
992 int count)
993{
994 for (; count > 0; count--, addr += 4)
995 *data++ = iwn_mem_read(sc, addr);
996}
997
998static __inline void
999iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
1000 int count)
1001{
1002 for (; count > 0; count--, addr += 4)
1003 iwn_mem_write(sc, addr, val);
1004}
1005
1006static int
1007iwn_eeprom_lock(struct iwn_softc *sc)
1008{
1009 int i, ntries;
1010
1011 for (i = 0; i < 100; i++) {
1012 /* Request exclusive access to EEPROM. */
1013 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
1014 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1015
1016 /* Spin until we actually get the lock. */
1017 for (ntries = 0; ntries < 100; ntries++) {
1018 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
1019 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
1020 return 0;
1021 DELAY(10);
1022 }
1023 }
1024 return ETIMEDOUT;
1025}
1026
1027static __inline void
1028iwn_eeprom_unlock(struct iwn_softc *sc)
1029{
1030 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
1031}
1032
1033/*
1034 * Initialize access by host to One Time Programmable ROM.
1035 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
1036 */
1037static int
1038iwn_init_otprom(struct iwn_softc *sc)
1039{
1040 uint16_t prev, base, next;
1041 int count, error;
1042
1043 /* Wait for clock stabilization before accessing prph. */
1044 error = iwn_clock_wait(sc);
1045 if (error != 0)
1046 return error;
1047
1048 error = iwn_nic_lock(sc);
1049 if (error != 0)
1050 return error;
1051 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1052 DELAY(5);
1053 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
1054 iwn_nic_unlock(sc);
1055
1056 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1057 if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1058 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1059 IWN_RESET_LINK_PWR_MGMT_DIS);
1060 }
1061 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1062 /* Clear ECC status. */
1063 IWN_SETBITS(sc, IWN_OTP_GP,
1064 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1065
1066 /*
1067 * Find the block before the last block (it contains the EEPROM image)
1068 * for HW without OTP shadow RAM.
1069 */
1070 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1071 /* Switch to absolute addressing mode. */
1072 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1073 base = prev = 0;
1074 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1075 error = iwn_read_prom_data(sc, base, &next, 2);
1076 if (error != 0)
1077 return error;
1078 if (next == 0) /* End of linked-list. */
1079 break;
1080 prev = base;
1081 base = le16toh(next);
1082 }
1083 if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1084 return EIO;
1085 /* Skip "next" word. */
1086 sc->prom_base = prev + 1;
1087 }
1088 return 0;
1089}
1090
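/*
 * Read `count' bytes of EEPROM/OTPROM data starting at `addr'; each
 * register access returns one 16-bit ROM word in the upper half of the
 * 32-bit EEPROM register.
 */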
1091static int
1092iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1093{
1094 uint32_t val, tmp;
1095 int ntries;
1096 uint8_t *out = data;
1097
1098 addr += sc->prom_base;
1099 for (; count > 0; count -= 2, addr++) {
1100 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1101 for (ntries = 0; ntries < 10; ntries++) {
1102 val = IWN_READ(sc, IWN_EEPROM);
1103 if (val & IWN_EEPROM_READ_VALID)
1104 break;
1105 DELAY(5);
1106 }
1107 if (ntries == 10) {
1108 device_printf(sc->sc_dev,
1109 "timeout reading ROM at 0x%x\n", addr);
1110 return ETIMEDOUT;
1111 }
1112 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1113 /* OTPROM, check for ECC errors. */
1114 tmp = IWN_READ(sc, IWN_OTP_GP);
1115 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1116 device_printf(sc->sc_dev,
1117 "OTPROM ECC error at 0x%x\n", addr);
1118 return EIO;
1119 }
1120 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1121 /* Correctable ECC error, clear bit. */
1122 IWN_SETBITS(sc, IWN_OTP_GP,
1123 IWN_OTP_GP_ECC_CORR_STTS);
1124 }
1125 }
1126 *out++ = val >> 16;
1127 if (count > 1)
1128 *out++ = val >> 24;
1129 }
1130 return 0;
1131}
1132
1133static void
1134iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1135{
1136 if (error != 0)
1137 return;
1138 KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
1139 *(bus_addr_t *)arg = segs[0].ds_addr;
1140}
1141
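/*
 * Allocate and map a physically contiguous DMA area of `size' bytes with
 * the requested alignment; the kernel virtual address is returned through
 * `kvap' when it is non-NULL.
 */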
1142static int
1143iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
1144 void **kvap, bus_size_t size, bus_size_t alignment, int flags)
1145{
1146 int error;
1147
1148 dma->size = size;
1149 dma->tag = NULL;
1150
1151 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
1152 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1153 1, size, flags, NULL, NULL, &dma->tag);
1154 if (error != 0) {
1155 device_printf(sc->sc_dev,
1156 "%s: bus_dma_tag_create failed, error %d\n",
1157 __func__, error);
1158 goto fail;
1159 }
1160 error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
1161 flags | BUS_DMA_ZERO, &dma->map);
1162 if (error != 0) {
1163 device_printf(sc->sc_dev,
1164 "%s: bus_dmamem_alloc failed, error %d\n", __func__, error);
1165 goto fail;
1166 }
1167 error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
1168 size, iwn_dma_map_addr, &dma->paddr, flags);
1169 if (error != 0) {
1170 device_printf(sc->sc_dev,
1171 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
1172 goto fail;
1173 }
1174
1175 if (kvap != NULL)
1176 *kvap = dma->vaddr;
1177 return 0;
1178fail:
1179 iwn_dma_contig_free(dma);
1180 return error;
1181}
1182
1183static void
1184iwn_dma_contig_free(struct iwn_dma_info *dma)
1185{
1186 if (dma->tag != NULL) {
1187 if (dma->map != NULL) {
1188 if (dma->paddr == 0) {
1189 bus_dmamap_sync(dma->tag, dma->map,
1190 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1191 bus_dmamap_unload(dma->tag, dma->map);
1192 }
1193 bus_dmamem_free(dma->tag, &dma->vaddr, dma->map);
1194 }
1195 bus_dma_tag_destroy(dma->tag);
1196 }
1197}
1198
1199static int
1200iwn_alloc_sched(struct iwn_softc *sc)
1201{
1202 /* TX scheduler rings must be aligned on a 1KB boundary. */
1203 return iwn_dma_contig_alloc(sc, &sc->sched_dma,
1204 (void **)&sc->sched, sc->sc_hal->schedsz, 1024, BUS_DMA_NOWAIT);
1205}
1206
1207static void
1208iwn_free_sched(struct iwn_softc *sc)
1209{
1210 iwn_dma_contig_free(&sc->sched_dma);
1211}
1212
1213static int
1214iwn_alloc_kw(struct iwn_softc *sc)
1215{
1216 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1217 return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096,
1218 BUS_DMA_NOWAIT);
1219}
1220
1221static void
1222iwn_free_kw(struct iwn_softc *sc)
1223{
1224 iwn_dma_contig_free(&sc->kw_dma);
1225}
1226
1227static int
1228iwn_alloc_ict(struct iwn_softc *sc)
1229{
1230 /* ICT table must be aligned on a 4KB boundary. */
1231 return iwn_dma_contig_alloc(sc, &sc->ict_dma,
1232 (void **)&sc->ict, IWN_ICT_SIZE, 4096, BUS_DMA_NOWAIT);
1233}
1234
1235static void
1236iwn_free_ict(struct iwn_softc *sc)
1237{
1238 iwn_dma_contig_free(&sc->ict_dma);
1239}
1240
1241static int
1242iwn_alloc_fwmem(struct iwn_softc *sc)
1243{
1244 /* Must be aligned on a 16-byte boundary. */
1245 return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL,
1246 sc->sc_hal->fwsz, 16, BUS_DMA_NOWAIT);
1247}
1248
1249static void
1250iwn_free_fwmem(struct iwn_softc *sc)
1251{
1252 iwn_dma_contig_free(&sc->fw_dma);
1253}
1254
1255static int
1256iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1257{
1258 bus_size_t size;
1259 int i, error;
1260
1261 ring->cur = 0;
1262
1263 /* Allocate RX descriptors (256-byte aligned). */
1264 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1265 error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1266 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1267 if (error != 0) {
1268 device_printf(sc->sc_dev,
1269 "%s: could not allocate Rx ring DMA memory, error %d\n",
1270 __func__, error);
1271 goto fail;
1272 }
1273
1274 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1275 BUS_SPACE_MAXADDR_32BIT,
1276 BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1,
1277 MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
1278 if (error != 0) {
1279 device_printf(sc->sc_dev,
1280 "%s: bus_dma_tag_create failed, error %d\n",
1281 __func__, error);
1282 goto fail;
1283 }
1284
1285 /* Allocate RX status area (16-byte aligned). */
1286 error = iwn_dma_contig_alloc(sc, &ring->stat_dma,
1287 (void **)&ring->stat, sizeof (struct iwn_rx_status),
1288 16, BUS_DMA_NOWAIT);
1289 if (error != 0) {
1290 device_printf(sc->sc_dev,
1291 "%s: could not allocate Rx status DMA memory, error %d\n",
1292 __func__, error);
1293 goto fail;
1294 }
1295
1296 /*
1297 * Allocate and map RX buffers.
1298 */
1299 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1300 struct iwn_rx_data *data = &ring->data[i];
1301 bus_addr_t paddr;
1302
1303 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1304 if (error != 0) {
1305 device_printf(sc->sc_dev,
1306 "%s: bus_dmamap_create failed, error %d\n",
1307 __func__, error);
1308 goto fail;
1309 }
1310
1311 data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
1312 if (data->m == NULL) {
1313 device_printf(sc->sc_dev,
1314 "%s: could not allocate rx mbuf\n", __func__);
1315 error = ENOMEM;
1316 goto fail;
1317 }
1318
1319 /* Map page. */
1320 error = bus_dmamap_load(ring->data_dmat, data->map,
1321 mtod(data->m, caddr_t), MJUMPAGESIZE,
1322 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
1323 if (error != 0 && error != EFBIG) {
1324 device_printf(sc->sc_dev,
1325 "%s: bus_dmamap_load failed, error %d\n",
1326 __func__, error);
1327 m_freem(data->m);
1328 error = ENOMEM; /* XXX unique code */
1329 goto fail;
1330 }
1331 bus_dmamap_sync(ring->data_dmat, data->map,
1332 BUS_DMASYNC_PREWRITE);
1333
1334 /* Set physical address of RX buffer (256-byte aligned). */
1335 ring->desc[i] = htole32(paddr >> 8);
1336 }
1337 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1338 BUS_DMASYNC_PREWRITE);
1339 return 0;
1340fail:
1341 iwn_free_rx_ring(sc, ring);
1342 return error;
1343}
1344
1345static void
1346iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1347{
1348 int ntries;
1349
1350 if (iwn_nic_lock(sc) == 0) {
1351 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1352 for (ntries = 0; ntries < 1000; ntries++) {
1353 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1354 IWN_FH_RX_STATUS_IDLE)
1355 break;
1356 DELAY(10);
1357 }
1358 iwn_nic_unlock(sc);
1359#ifdef IWN_DEBUG
1360 if (ntries == 1000)
1361 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
1362 "timeout resetting Rx ring");
1363#endif
1364 }
1365 ring->cur = 0;
1366 sc->last_rx_valid = 0;
1367}
1368
1369static void
1370iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1371{
1372 int i;
1373
1374 iwn_dma_contig_free(&ring->desc_dma);
1375 iwn_dma_contig_free(&ring->stat_dma);
1376
1377 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1378 struct iwn_rx_data *data = &ring->data[i];
1379
1380 if (data->m != NULL) {
1381 bus_dmamap_sync(ring->data_dmat, data->map,
1382 BUS_DMASYNC_POSTREAD);
1383 bus_dmamap_unload(ring->data_dmat, data->map);
1384 m_freem(data->m);
1385 }
1386 if (data->map != NULL)
1387 bus_dmamap_destroy(ring->data_dmat, data->map);
1388 }
1389}
1390
1391static int
1392iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1393{
1394 bus_size_t size;
1395 bus_addr_t paddr;
1396 int i, error;
1397
1398 ring->qid = qid;
1399 ring->queued = 0;
1400 ring->cur = 0;
1401
1402 /* Allocate TX descriptors (256-byte aligned.) */
1403 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc);
1404 error = iwn_dma_contig_alloc(sc, &ring->desc_dma,
1405 (void **)&ring->desc, size, 256, BUS_DMA_NOWAIT);
1406 if (error != 0) {
1407 device_printf(sc->sc_dev,
1408 "%s: could not allocate TX ring DMA memory, error %d\n",
1409 __func__, error);
1410 goto fail;
1411 }
1412
1413 /*
1414 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1415 * to allocate command space for other rings.
1416 */
1417 if (qid > 4)
1418 return 0;
1419
1420 size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd);
1421 error = iwn_dma_contig_alloc(sc, &ring->cmd_dma,
1422 (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT);
1423 if (error != 0) {
1424 device_printf(sc->sc_dev,
1425 "%s: could not allocate TX cmd DMA memory, error %d\n",
1426 __func__, error);
1427 goto fail;
1428 }
1429
1430 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
1431 BUS_SPACE_MAXADDR_32BIT,
1432 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1,
1433 MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat);
1434 if (error != 0) {
1435 device_printf(sc->sc_dev,
1436 "%s: bus_dma_tag_create failed, error %d\n",
1437 __func__, error);
1438 goto fail;
1439 }
1440
1441 paddr = ring->cmd_dma.paddr;
1442 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1443 struct iwn_tx_data *data = &ring->data[i];
1444
1445 data->cmd_paddr = paddr;
1446 data->scratch_paddr = paddr + 12;
1447 paddr += sizeof (struct iwn_tx_cmd);
1448
1449 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1450 if (error != 0) {
1451 device_printf(sc->sc_dev,
1452 "%s: bus_dmamap_create failed, error %d\n",
1453 __func__, error);
1454 goto fail;
1455 }
1456 bus_dmamap_sync(ring->data_dmat, data->map,
1457 BUS_DMASYNC_PREWRITE);
1458 }
1459 return 0;
1460fail:
1461 iwn_free_tx_ring(sc, ring);
1462 return error;
1463}
1464
1465static void
1466iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1467{
1468 int i;
1469
1470 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1471 struct iwn_tx_data *data = &ring->data[i];
1472
1473 if (data->m != NULL) {
1474 bus_dmamap_unload(ring->data_dmat, data->map);
1475 m_freem(data->m);
1476 data->m = NULL;
1477 }
1478 }
1479 /* Clear TX descriptors. */
1480 memset(ring->desc, 0, ring->desc_dma.size);
1481 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1482 BUS_DMASYNC_PREWRITE);
1483 sc->qfullmsk &= ~(1 << ring->qid);
1484 ring->queued = 0;
1485 ring->cur = 0;
1486}
1487
1488static void
1489iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1490{
1491 int i;
1492
1493 iwn_dma_contig_free(&ring->desc_dma);
1494 iwn_dma_contig_free(&ring->cmd_dma);
1495
1496 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1497 struct iwn_tx_data *data = &ring->data[i];
1498
1499 if (data->m != NULL) {
1500 bus_dmamap_sync(ring->data_dmat, data->map,
1501 BUS_DMASYNC_POSTWRITE);
1502 bus_dmamap_unload(ring->data_dmat, data->map);
1503 m_freem(data->m);
1504 }
1505 if (data->map != NULL)
1506 bus_dmamap_destroy(ring->data_dmat, data->map);
1507 }
1508}
1509
1510static void
1511iwn5000_ict_reset(struct iwn_softc *sc)
1512{
1513 /* Disable interrupts. */
1514 IWN_WRITE(sc, IWN_INT_MASK, 0);
1515
1516 /* Reset ICT table. */
1517 memset(sc->ict, 0, IWN_ICT_SIZE);
1518 sc->ict_cur = 0;
1519
1520 /* Set physical address of ICT table (4KB aligned.) */
1521 DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
1522 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1523 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1524
1525 /* Enable periodic RX interrupt. */
1526 sc->int_mask |= IWN_INT_RX_PERIODIC;
1527 /* Switch to ICT interrupt mode in driver. */
1528 sc->sc_flags |= IWN_FLAG_USE_ICT;
1529
1530 /* Re-enable interrupts. */
1531 IWN_WRITE(sc, IWN_INT, 0xffffffff);
1532 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1533}
1534
1535static int
1536iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
1537{
1538 const struct iwn_hal *hal = sc->sc_hal;
1539 int error;
1540 uint16_t val;
1541
1542 /* Check whether adapter has an EEPROM or an OTPROM. */
1543 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1544 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1545 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1546 DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
1547 (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
1548
1549 /* Adapter has to be powered on for EEPROM access to work. */
1550 error = iwn_apm_init(sc);
1551 if (error != 0) {
1552 device_printf(sc->sc_dev,
1553 "%s: could not power ON adapter, error %d\n",
1554 __func__, error);
1555 return error;
1556 }
1557
1558 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1559 device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
1560 return EIO;
1561 }
1562 error = iwn_eeprom_lock(sc);
1563 if (error != 0) {
1564 device_printf(sc->sc_dev,
1565 "%s: could not lock ROM, error %d\n",
1566 __func__, error);
1567 return error;
1568 }
1569
1570 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1571 error = iwn_init_otprom(sc);
1572 if (error != 0) {
1573 device_printf(sc->sc_dev,
1574 "%s: could not initialize OTPROM, error %d\n",
1575 __func__, error);
1576 return error;
1577 }
1578 }
1579
1580 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1581 sc->rfcfg = le16toh(val);
1582 DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
1583
1584 /* Read MAC address. */
1585 iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
1586
1587 /* Read adapter-specific information from EEPROM. */
1588 hal->read_eeprom(sc);
1589
1590 iwn_apm_stop(sc); /* Power OFF adapter. */
1591
1592 iwn_eeprom_unlock(sc);
1593 return 0;
1594}
1595
1596static void
1597iwn4965_read_eeprom(struct iwn_softc *sc)
1598{
1599 uint32_t addr;
1600 int i;
1601 uint16_t val;
1602
1603 /* Read regulatory domain (4 ASCII characters.) */
1604 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1605
1606 /* Read the list of authorized channels (20MHz ones only.) */
1607 for (i = 0; i < 5; i++) {
1608 addr = iwn4965_regulatory_bands[i];
1609 iwn_read_eeprom_channels(sc, i, addr);
1610 }
1611
1612 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1613 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1614 sc->maxpwr2GHz = val & 0xff;
1615 sc->maxpwr5GHz = val >> 8;
1616 /* Check that EEPROM values are within valid range. */
1617 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1618 sc->maxpwr5GHz = 38;
1619 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1620 sc->maxpwr2GHz = 38;
1621 DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
1622 sc->maxpwr2GHz, sc->maxpwr5GHz);
1623
1624 /* Read samples for each TX power group. */
1625 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1626 sizeof sc->bands);
1627
1628 /* Read voltage at which samples were taken. */
1629 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1630 sc->eeprom_voltage = (int16_t)le16toh(val);
1631 DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
1632 sc->eeprom_voltage);
1633
1634#ifdef IWN_DEBUG
1635 /* Print samples. */
1636 if (sc->sc_debug & IWN_DEBUG_ANY) {
1637 for (i = 0; i < IWN_NBANDS; i++)
1638 iwn4965_print_power_group(sc, i);
1639 }
1640#endif
1641}
1642
1643#ifdef IWN_DEBUG
1644static void
1645iwn4965_print_power_group(struct iwn_softc *sc, int i)
1646{
1647 struct iwn4965_eeprom_band *band = &sc->bands[i];
1648 struct iwn4965_eeprom_chan_samples *chans = band->chans;
1649 int j, c;
1650
1651 printf("===band %d===\n", i);
1652 printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1653 printf("chan1 num=%d\n", chans[0].num);
1654 for (c = 0; c < 2; c++) {
1655 for (j = 0; j < IWN_NSAMPLES; j++) {
1656 printf("chain %d, sample %d: temp=%d gain=%d "
1657 "power=%d pa_det=%d\n", c, j,
1658 chans[0].samples[c][j].temp,
1659 chans[0].samples[c][j].gain,
1660 chans[0].samples[c][j].power,
1661 chans[0].samples[c][j].pa_det);
1662 }
1663 }
1664 printf("chan2 num=%d\n", chans[1].num);
1665 for (c = 0; c < 2; c++) {
1666 for (j = 0; j < IWN_NSAMPLES; j++) {
1667 printf("chain %d, sample %d: temp=%d gain=%d "
1668 "power=%d pa_det=%d\n", c, j,
1669 chans[1].samples[c][j].temp,
1670 chans[1].samples[c][j].gain,
1671 chans[1].samples[c][j].power,
1672 chans[1].samples[c][j].pa_det);
1673 }
1674 }
1675}
1676#endif
1677
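/*
 * 5000 Series and newer use indirect addressing: the regulatory and
 * calibration sections are located through base pointers stored in
 * the ROM.  The 5150 additionally derives a temperature offset from
 * the ROM temperature/voltage values.
 */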
1678static void
1679iwn5000_read_eeprom(struct iwn_softc *sc)
1680{
1681 struct iwn5000_eeprom_calib_hdr hdr;
1682 int32_t temp, volt;
1683 uint32_t addr, base;
1684 int i;
1685 uint16_t val;
1686
1687 /* Read regulatory domain (4 ASCII characters.) */
1688 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1689 base = le16toh(val);
1690 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1691 sc->eeprom_domain, 4);
1692
1693 /* Read the list of authorized channels (20MHz ones only.) */
1694 for (i = 0; i < 5; i++) {
1695 addr = base + iwn5000_regulatory_bands[i];
1696 iwn_read_eeprom_channels(sc, i, addr);
1697 }
1698
1699 /* Read enhanced TX power information for 6000 Series. */
1700 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1701 iwn_read_eeprom_enhinfo(sc);
1702
1703 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1704 base = le16toh(val);
1705 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1706 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
1707 "%s: calib version=%u pa type=%u voltage=%u\n",
1708 __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt));
1709 sc->calib_ver = hdr.version;
1710
1711 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1712 /* Compute temperature offset. */
1713 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1714 temp = le16toh(val);
1715 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1716 volt = le16toh(val);
1717 sc->temp_off = temp - (volt / -5);
1718 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
1719 temp, volt, sc->temp_off);
1720 }
1721}
1722
1723/*
1724 * Translate EEPROM flags to net80211.
1725 */
1726static uint32_t
1727iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
1728{
1729 uint32_t nflags;
1730
1731 nflags = 0;
1732 if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
1733 nflags |= IEEE80211_CHAN_PASSIVE;
1734 if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
1735 nflags |= IEEE80211_CHAN_NOADHOC;
1736 if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
1737 nflags |= IEEE80211_CHAN_DFS;
1738 /* XXX apparently IBSS may still be marked */
1739 nflags |= IEEE80211_CHAN_NOADHOC;
1740 }
1741
1742 return nflags;
1743}
1744
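/*
 * Add the channels of one EEPROM band to the net80211 channel list.
 * 2GHz channels are added twice (11b and 11g), 5GHz channels once;
 * the per-channel regulatory TX power limit is also recorded.
 */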
1745static void
1746iwn_read_eeprom_band(struct iwn_softc *sc, int n)
1747{
1748 struct ifnet *ifp = sc->sc_ifp;
1749 struct ieee80211com *ic = ifp->if_l2com;
1750 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1751 const struct iwn_chan_band *band = &iwn_bands[n];
1752 struct ieee80211_channel *c;
1753 int i, chan, nflags;
1754
1755 for (i = 0; i < band->nchan; i++) {
1756 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
1757 DPRINTF(sc, IWN_DEBUG_RESET,
1758 "skip chan %d flags 0x%x maxpwr %d\n",
1759 band->chan[i], channels[i].flags,
1760 channels[i].maxpwr);
1761 continue;
1762 }
1763 chan = band->chan[i];
1764 nflags = iwn_eeprom_channel_flags(&channels[i]);
1765
1766 DPRINTF(sc, IWN_DEBUG_RESET,
1767 "add chan %d flags 0x%x maxpwr %d\n",
1768 chan, channels[i].flags, channels[i].maxpwr);
1769
1770 c = &ic->ic_channels[ic->ic_nchans++];
1771 c->ic_ieee = chan;
1772 c->ic_maxregpower = channels[i].maxpwr;
1773 c->ic_maxpower = 2*c->ic_maxregpower;
1774
1775 /* Save maximum allowed TX power for this channel. */
1776 sc->maxpwr[chan] = channels[i].maxpwr;
1777
1778 if (n == 0) { /* 2GHz band */
1779 c->ic_freq = ieee80211_ieee2mhz(chan,
1780 IEEE80211_CHAN_G);
1781
1782 /* G =>'s B is supported */
1783 c->ic_flags = IEEE80211_CHAN_B | nflags;
1784
1785 c = &ic->ic_channels[ic->ic_nchans++];
1786 c[0] = c[-1];
1787 c->ic_flags = IEEE80211_CHAN_G | nflags;
1788 } else { /* 5GHz band */
1789 c->ic_freq = ieee80211_ieee2mhz(chan,
1790 IEEE80211_CHAN_A);
1791 c->ic_flags = IEEE80211_CHAN_A | nflags;
1792 }
1793#if 0 /* HT */
1794 /* XXX no constraints on using HT20 */
1795 /* add HT20, HT40 added separately */
1796 c = &ic->ic_channels[ic->ic_nchans++];
1797 c[0] = c[-1];
1798 c->ic_flags |= IEEE80211_CHAN_HT20;
1799 /* XXX NARROW =>'s 1/2 and 1/4 width? */
1800#endif
1801 }
1802}
1803
1804#if 0 /* HT */
1805static void
1806iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
1807{
1808 struct ifnet *ifp = sc->sc_ifp;
1809 struct ieee80211com *ic = ifp->if_l2com;
1810 struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
1811 const struct iwn_chan_band *band = &iwn_bands[n];
1812 struct ieee80211_channel *c, *cent, *extc;
1813 int i;
1814
1815 for (i = 0; i < band->nchan; i++) {
1816 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) ||
1817 !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) {
1818 DPRINTF(sc, IWN_DEBUG_RESET,
1819 "skip chan %d flags 0x%x maxpwr %d\n",
1820 band->chan[i], channels[i].flags,
1821 channels[i].maxpwr);
1822 continue;
1823 }
1824 /*
1825 * Each entry defines an HT40 channel pair; find the
1826 * center channel, then the extension channel above.
1827 */
1828 cent = ieee80211_find_channel_byieee(ic, band->chan[i],
1829 band->flags & ~IEEE80211_CHAN_HT);
1830 if (cent == NULL) { /* XXX shouldn't happen */
1831 device_printf(sc->sc_dev,
1832 "%s: no entry for channel %d\n",
1833 __func__, band->chan[i]);
1834 continue;
1835 }
1836 extc = ieee80211_find_channel(ic, cent->ic_freq+20,
1837 band->flags & ~IEEE80211_CHAN_HT);
1838 if (extc == NULL) {
1839 DPRINTF(sc, IWN_DEBUG_RESET,
1840 "skip chan %d, extension channel not found\n",
1841 band->chan[i]);
1842 continue;
1843 }
1844
1845 DPRINTF(sc, IWN_DEBUG_RESET,
1846 "add ht40 chan %d flags 0x%x maxpwr %d\n",
1847 band->chan[i], channels[i].flags, channels[i].maxpwr);
1848
1849 c = &ic->ic_channels[ic->ic_nchans++];
1850 c[0] = cent[0];
1851 c->ic_extieee = extc->ic_ieee;
1852 c->ic_flags &= ~IEEE80211_CHAN_HT;
1853 c->ic_flags |= IEEE80211_CHAN_HT40U;
1854 c = &ic->ic_channels[ic->ic_nchans++];
1855 c[0] = extc[0];
1856 c->ic_extieee = cent->ic_ieee;
1857 c->ic_flags &= ~IEEE80211_CHAN_HT;
1858 c->ic_flags |= IEEE80211_CHAN_HT40D;
1859 }
1860}
1861#endif
1862
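/*
 * Read one group of channel entries from the ROM, merge it into the
 * net80211 channel list (HT40 pairs are compiled out for now) and
 * keep the list sorted.
 */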
1863static void
1864iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1865{
1866 struct ifnet *ifp = sc->sc_ifp;
1867 struct ieee80211com *ic = ifp->if_l2com;
1868
1869 iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
1870 iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
1871
1872 if (n < 5)
1873 iwn_read_eeprom_band(sc, n);
1874#if 0 /* HT */
1875 else
1876 iwn_read_eeprom_ht40(sc, n);
1877#endif
1878 ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
1879}
1880
1881#define nitems(_a) (sizeof((_a)) / sizeof((_a)[0]))
1882
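/*
 * Read "enhanced" TX power entries (6000 Series).  Each entry holds
 * per-chain and MIMO limits in half-dBm; keep the largest value that
 * applies to our TX chain configuration.
 */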
1883static void
1884iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1885{
1886 struct iwn_eeprom_enhinfo enhinfo[35];
1887 uint16_t val, base;
1888 int8_t maxpwr;
1889 int i;
1890
1891 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1892 base = le16toh(val);
1893 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1894 enhinfo, sizeof enhinfo);
1895
1896 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1897 for (i = 0; i < nitems(enhinfo); i++) {
1898 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1899 continue; /* Skip invalid entries. */
1900
1901 maxpwr = 0;
1902 if (sc->txchainmask & IWN_ANT_A)
1903 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1904 if (sc->txchainmask & IWN_ANT_B)
1905 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1906 if (sc->txchainmask & IWN_ANT_C)
1907 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1908 if (sc->ntxchains == 2)
1909 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1910 else if (sc->ntxchains == 3)
1911 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1912 maxpwr /= 2; /* Convert half-dBm to dBm. */
1913
1914 DPRINTF(sc, IWN_DEBUG_RESET, "enhinfo %d, maxpwr=%d\n", i,
1915 maxpwr);
1916 sc->enh_maxpwr[i] = maxpwr;
1917 }
1918}
1919
1920static struct ieee80211_node *
1921iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
1922{
1923 return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO);
1924}
1925
1926static int
1927iwn_media_change(struct ifnet *ifp)
1928{
1929 int error = ieee80211_media_change(ifp);
1930 /* NB: only the fixed rate can change and that doesn't need a reset */
1931 return (error == ENETRESET ? 0 : error);
1932}
1933
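/*
 * net80211 state change hook.  Serialize against the interrupt
 * handler with the driver lock, stop the calibration timer and issue
 * the firmware commands needed for AUTH/RUN transitions before
 * handing control back to net80211.
 */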
1934static int
1935iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
1936{
1937 struct iwn_vap *ivp = IWN_VAP(vap);
1938 struct ieee80211com *ic = vap->iv_ic;
1939 struct iwn_softc *sc = ic->ic_ifp->if_softc;
1940 int error;
1941
1942 DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
1943 ieee80211_state_name[vap->iv_state],
1944 ieee80211_state_name[nstate]);
1945
1946 IEEE80211_UNLOCK(ic);
1947 IWN_LOCK(sc);
1948 callout_stop(&sc->calib_to);
1949
1950 switch (nstate) {
1951 case IEEE80211_S_ASSOC:
1952 if (vap->iv_state != IEEE80211_S_RUN)
1953 break;
1954 /* FALLTHROUGH */
1955 case IEEE80211_S_AUTH:
1956 if (vap->iv_state == IEEE80211_S_AUTH)
1957 break;
1958
1959 /*
1960 * !AUTH -> AUTH transition requires state reset to handle
1961 * reassociations correctly.
1962 */
1963 sc->rxon.associd = 0;
1964 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1965 sc->calib.state = IWN_CALIB_STATE_INIT;
1966
1967 error = iwn_auth(sc, vap);
1968 break;
1969
1970 case IEEE80211_S_RUN:
1971 /*
1972 * RUN -> RUN transition; Just restart the timers.
1973 */
1974 if (vap->iv_state == IEEE80211_S_RUN) {
1975 sc->calib_cnt = 0;
1976 break;
1977 }
1978
1979 /*
1980 * !RUN -> RUN requires setting the association id
1981 * which is done with a firmware cmd. We also defer
1982 * starting the timers until that work is done.
1983 */
1984 error = iwn_run(sc, vap);
1985 break;
1986
1987 case IEEE80211_S_INIT:
1988 sc->calib.state = IWN_CALIB_STATE_INIT;
1989 break;
1990
1991 default:
1992 break;
1993 }
1994 IWN_UNLOCK(sc);
1995 IEEE80211_LOCK(ic);
1996 return ivp->iv_newstate(vap, nstate, arg);
1997}
1998
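/*
 * Periodic (500ms) calibration timer.  Roughly once a minute it asks
 * the firmware for a statistics report, which in turn drives TX power
 * calibration.
 */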
1999static void
2000iwn_calib_timeout(void *arg)
2001{
2002 struct iwn_softc *sc = arg;
2003
2004 IWN_LOCK_ASSERT(sc);
2005
2006 /* Force automatic TX power calibration every 60 secs. */
2007 if (++sc->calib_cnt >= 120) {
2008 uint32_t flags = 0;
2009
2010 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
2011 "sending request for statistics");
2012 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
2013 sizeof flags, 1);
2014 sc->calib_cnt = 0;
2015 }
2016 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
2017 sc);
2018}
2019
2020/*
2021 * Process an RX_PHY firmware notification. This is usually immediately
2022 * followed by an MPDU_RX_DONE notification.
2023 */
2024static void
2025iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2026 struct iwn_rx_data *data)
2027{
2028 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
2029
2030 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
2031 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2032
2033 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
2034 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
2035 sc->last_rx_valid = 1;
2036}
2037
2038/*
2039 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
2040 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
2041 */
2042static void
2043iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2044 struct iwn_rx_data *data)
2045{
2046 const struct iwn_hal *hal = sc->sc_hal;
2047 struct ifnet *ifp = sc->sc_ifp;
2048 struct ieee80211com *ic = ifp->if_l2com;
2049 struct iwn_rx_ring *ring = &sc->rxq;
2050 struct ieee80211_frame *wh;
2051 struct ieee80211_node *ni;
2052 struct mbuf *m, *m1;
2053 struct iwn_rx_stat *stat;
2054 caddr_t head;
2055 bus_addr_t paddr;
2056 uint32_t flags;
2057 int error, len, rssi, nf;
2058
2059 if (desc->type == IWN_MPDU_RX_DONE) {
2060 /* Check for prior RX_PHY notification. */
2061 if (!sc->last_rx_valid) {
2062 DPRINTF(sc, IWN_DEBUG_ANY,
2063 "%s: missing RX_PHY\n", __func__);
2064 ifp->if_ierrors++;
2065 return;
2066 }
2067 sc->last_rx_valid = 0;
2068 stat = &sc->last_rx_stat;
2069 } else
2070 stat = (struct iwn_rx_stat *)(desc + 1);
2071
2072 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2073
2074 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
2075 device_printf(sc->sc_dev,
2076 "%s: invalid rx statistic header, len %d\n",
2077 __func__, stat->cfg_phy_len);
2078 ifp->if_ierrors++;
2079 return;
2080 }
2081 if (desc->type == IWN_MPDU_RX_DONE) {
2082 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
2083 head = (caddr_t)(mpdu + 1);
2084 len = le16toh(mpdu->len);
2085 } else {
2086 head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
2087 len = le16toh(stat->len);
2088 }
2089
2090 flags = le32toh(*(uint32_t *)(head + len));
2091
2092 /* Discard frames with a bad FCS early. */
2093 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
2094 DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n",
2095 __func__, flags);
2096 ifp->if_ierrors++;
2097 return;
2098 }
2099 /* Discard frames that are too short. */
2100 if (len < sizeof (*wh)) {
2101 DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
2102 __func__, len);
2103 ifp->if_ierrors++;
2104 return;
2105 }
2106
2107 /* XXX don't need mbuf, just dma buffer */
2108 m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
2109 if (m1 == NULL) {
2110 DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
2111 __func__);
2112 ifp->if_ierrors++;
2113 return;
2114 }
2115 bus_dmamap_unload(ring->data_dmat, data->map);
2116
2117 error = bus_dmamap_load(ring->data_dmat, data->map,
2118 mtod(m1, caddr_t), MJUMPAGESIZE,
2119 iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
2120 if (error != 0 && error != EFBIG) {
2121 device_printf(sc->sc_dev,
2122 "%s: bus_dmamap_load failed, error %d\n", __func__, error);
2123 m_freem(m1);
2124 ifp->if_ierrors++;
2125 return;
2126 }
2127
2128 m = data->m;
2129 data->m = m1;
2130 /* Update RX descriptor. */
2131 ring->desc[ring->cur] = htole32(paddr >> 8);
2132 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
2133 BUS_DMASYNC_PREWRITE);
2134
2135 /* Finalize mbuf. */
2136 m->m_pkthdr.rcvif = ifp;
2137 m->m_data = head;
2138 m->m_pkthdr.len = m->m_len = len;
2139
2140 rssi = hal->get_rssi(sc, stat);
2141
2142 /* Grab a reference to the source node. */
2143 wh = mtod(m, struct ieee80211_frame *);
2144 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2145 nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
2146 (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
2147
2148 if (ieee80211_radiotap_active(ic)) {
2149 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2150
2151 tap->wr_tsft = htole64(stat->tstamp);
2152 tap->wr_flags = 0;
2153 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2154 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2155 switch (stat->rate) {
2156 /* CCK rates. */
2157 case 10: tap->wr_rate = 2; break;
2158 case 20: tap->wr_rate = 4; break;
2159 case 55: tap->wr_rate = 11; break;
2160 case 110: tap->wr_rate = 22; break;
2161 /* OFDM rates. */
2162 case 0xd: tap->wr_rate = 12; break;
2163 case 0xf: tap->wr_rate = 18; break;
2164 case 0x5: tap->wr_rate = 24; break;
2165 case 0x7: tap->wr_rate = 36; break;
2166 case 0x9: tap->wr_rate = 48; break;
2167 case 0xb: tap->wr_rate = 72; break;
2168 case 0x1: tap->wr_rate = 96; break;
2169 case 0x3: tap->wr_rate = 108; break;
2170 /* Unknown rate: should not happen. */
2171 default: tap->wr_rate = 0;
2172 }
2173 tap->wr_dbm_antsignal = rssi;
2174 tap->wr_dbm_antnoise = nf;
2175 }
2176
2177 IWN_UNLOCK(sc);
2178
2179 /* Send the frame to the 802.11 layer. */
2180 if (ni != NULL) {
2181 (void) ieee80211_input(ni, m, rssi - nf, nf);
2182 /* Node is no longer needed. */
2183 ieee80211_free_node(ni);
2184 } else
2185 (void) ieee80211_input_all(ic, m, rssi - nf, nf);
2186
2187 IWN_LOCK(sc);
2188}
2189
2190#if 0 /* HT */
2191/* Process an incoming Compressed BlockAck. */
2192static void
2193iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2194 struct iwn_rx_data *data)
2195{
2196 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2197 struct iwn_tx_ring *txq;
2198
2199 txq = &sc->txq[le16toh(ba->qid)];
2200 /* XXX TBD */
2201}
2202#endif
2203
2204/*
2205 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2206 * The latter is sent by the firmware after each received beacon.
2207 */
2208static void
2209iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2210 struct iwn_rx_data *data)
2211{
2212 const struct iwn_hal *hal = sc->sc_hal;
2213 struct ifnet *ifp = sc->sc_ifp;
2214 struct ieee80211com *ic = ifp->if_l2com;
2215 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2216 struct iwn_calib_state *calib = &sc->calib;
2217 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2218 int temp;
2219
2220 /* Beacon stats are meaningful only when associated and not scanning. */
2221 if (vap->iv_state != IEEE80211_S_RUN ||
2222 (ic->ic_flags & IEEE80211_F_SCAN))
2223 return;
2224
2225 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2226 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type);
2227 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
2226
2227 /* Test if temperature has changed. */
2228 if (stats->general.temp != sc->rawtemp) {
2229 /* Convert "raw" temperature to degC. */
2230 sc->rawtemp = stats->general.temp;
2231 temp = hal->get_temperature(sc);
2232 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2233 __func__, temp);
2234
2235 /* Update TX power if need be (4965AGN only.) */
2236 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2237 iwn4965_power_calibration(sc, temp);
2238 }
2239
2240 if (desc->type != IWN_BEACON_STATISTICS)
2241 return; /* Reply to a statistics request. */
2242
2243 sc->noise = iwn_get_noise(&stats->rx.general);
2244 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2245
2246 /* Test that RSSI and noise are present in stats report. */
2247 if (le32toh(stats->rx.general.flags) != 1) {
2248 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2249 "received statistics without RSSI");
2250 return;
2251 }
2252
2253 if (calib->state == IWN_CALIB_STATE_ASSOC)
2254 iwn_collect_noise(sc, &stats->rx.general);
2255 else if (calib->state == IWN_CALIB_STATE_RUN)
2256 iwn_tune_sensitivity(sc, &stats->rx);
2257}
2258
2259/*
2260 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
2261 * and 5000 adapters have different incompatible TX status formats.
2262 */
2263static void
2264iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2265 struct iwn_rx_data *data)
2266{
2267 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2268 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2269
2270 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2271 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2272 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2273 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2274 le32toh(stat->status));
2275
2276 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2277 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2278}
2279
2280static void
2281iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2282 struct iwn_rx_data *data)
2283{
2284 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2285 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2286
2287 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2288 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2289 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2290 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2291 le32toh(stat->status));
2292
2293#ifdef notyet
2294 /* Reset TX scheduler slot. */
2295 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2296#endif
2297
2298 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2299 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2300}
2301
2302/*
2303 * Adapter-independent backend for TX_DONE firmware notifications.
2304 */
2305static void
2306iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2307 uint8_t status)
2308{
2309 struct ifnet *ifp = sc->sc_ifp;
2310 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2311 struct iwn_tx_data *data = &ring->data[desc->idx];
2312 struct mbuf *m;
2313 struct ieee80211_node *ni;
2314 struct ieee80211vap *vap;
2315
2316 KASSERT(data->ni != NULL, ("no node"));
2317
2318 /* Unmap and free mbuf. */
2319 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2320 bus_dmamap_unload(ring->data_dmat, data->map);
2321 m = data->m, data->m = NULL;
2322 ni = data->ni, data->ni = NULL;
2323 vap = ni->ni_vap;
2324
2325 if (m->m_flags & M_TXCB) {
2326 /*
2327 * Channels marked for "radar" require traffic to be received
2328 * to unlock before we can transmit. Until traffic is seen
2329 * any attempt to transmit is returned immediately with status
2330 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily
2331 * happen on first authenticate after scanning. To workaround
2332 * this we ignore a failure of this sort in AUTH state so the
2333 * 802.11 layer will fall back to using a timeout to wait for
2334 * the AUTH reply. This allows the firmware time to see
2335 * traffic so a subsequent retry of AUTH succeeds. It's
2336 * unclear why the firmware does not maintain state for
2337 * channels recently visited as this would allow immediate
2338 * use of the channel after a scan (where we see traffic).
2339 */
2340 if (status == IWN_TX_FAIL_TX_LOCKED &&
2341 ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2342 ieee80211_process_callback(ni, m, 0);
2343 else
2344 ieee80211_process_callback(ni, m,
2345 (status & IWN_TX_FAIL) != 0);
2346 }
2347
2348 /*
2349 * Update rate control statistics for the node.
2350 */
2351 if (status & 0x80) {
2352 ifp->if_oerrors++;
2353 ieee80211_ratectl_tx_complete(vap, ni,
2354 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2355 } else {
2356 ieee80211_ratectl_tx_complete(vap, ni,
2357 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2358 }
2359 m_freem(m);
2360 ieee80211_free_node(ni);
2361
2362 sc->sc_tx_timer = 0;
2363 if (--ring->queued < IWN_TX_RING_LOMARK) {
2364 sc->qfullmsk &= ~(1 << ring->qid);
2365 if (sc->qfullmsk == 0 &&
2366 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2367 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2368 iwn_start_locked(ifp);
2369 }
2370 }
2371}
2372
2373/*
2374 * Process a "command done" firmware notification. This is where we wakeup
2375 * processes waiting for a synchronous command completion.
2376 */
2377static void
2378iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2379{
2380 struct iwn_tx_ring *ring = &sc->txq[4];
2381 struct iwn_tx_data *data;
2382
2383 if ((desc->qid & 0xf) != 4)
2384 return; /* Not a command ack. */
2385
2386 data = &ring->data[desc->idx];
2387
2388 /* If the command was mapped in an mbuf, free it. */
2389 if (data->m != NULL) {
2390 bus_dmamap_unload(ring->data_dmat, data->map);
2391 m_freem(data->m);
2392 data->m = NULL;
2393 }
2394 wakeup(&ring->desc[desc->idx]);
2395}
2396
2397/*
2398 * Process an INT_FH_RX or INT_SW_RX interrupt.
2399 */
2400static void
2401iwn_notif_intr(struct iwn_softc *sc)
2402{
2403 struct ifnet *ifp = sc->sc_ifp;
2404 struct ieee80211com *ic = ifp->if_l2com;
2405 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2406 uint16_t hw;
2407
2408 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2409 BUS_DMASYNC_POSTREAD);
2410
2411 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2412 while (sc->rxq.cur != hw) {
2413 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2414 struct iwn_rx_desc *desc;
2415
2416 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2417 BUS_DMASYNC_POSTREAD);
2418 desc = mtod(data->m, struct iwn_rx_desc *);
2419
2420 DPRINTF(sc, IWN_DEBUG_RECV,
2421 "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2422 __func__, desc->qid & 0xf, desc->idx, desc->flags,
2423 desc->type, iwn_intr_str(desc->type),
2424 le16toh(desc->len));
2425
2426 if (!(desc->qid & 0x80)) /* Reply to a command. */
2427 iwn_cmd_done(sc, desc);
2428
2429 switch (desc->type) {
2430 case IWN_RX_PHY:
2431 iwn_rx_phy(sc, desc, data);
2432 break;
2433
2434 case IWN_RX_DONE: /* 4965AGN only. */
2435 case IWN_MPDU_RX_DONE:
2436 /* An 802.11 frame has been received. */
2437 iwn_rx_done(sc, desc, data);
2438 break;
2439
2440#if 0 /* HT */
2441 case IWN_RX_COMPRESSED_BA:
2442 /* A Compressed BlockAck has been received. */
2443 iwn_rx_compressed_ba(sc, desc, data);
2444 break;
2445#endif
2446
2447 case IWN_TX_DONE:
2448 /* An 802.11 frame has been transmitted. */
2449 sc->sc_hal->tx_done(sc, desc, data);
2450 break;
2451
2452 case IWN_RX_STATISTICS:
2453 case IWN_BEACON_STATISTICS:
2454 iwn_rx_statistics(sc, desc, data);
2455 break;
2456
2457 case IWN_BEACON_MISSED:
2458 {
2459 struct iwn_beacon_missed *miss =
2460 (struct iwn_beacon_missed *)(desc + 1);
2461 int misses;
2462
2463 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2464 BUS_DMASYNC_POSTREAD);
2465 misses = le32toh(miss->consecutive);
2466
2467 DPRINTF(sc, IWN_DEBUG_STATE,
2468 "%s: beacons missed %d/%d\n", __func__,
2469 misses, le32toh(miss->total));
2470 /*
2471 * If more than 5 consecutive beacons are missed,
2472 * reinitialize the sensitivity state machine.
2473 */
2474 if (vap->iv_state == IEEE80211_S_RUN &&
2475 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
2476 if (misses > 5)
2477 (void)iwn_init_sensitivity(sc);
2478 if (misses >= vap->iv_bmissthreshold) {
2479 IWN_UNLOCK(sc);
2480 ieee80211_beacon_miss(ic);
2481 IWN_LOCK(sc);
2482 }
2483 }
2484 break;
2485 }
2486 case IWN_UC_READY:
2487 {
2488 struct iwn_ucode_info *uc =
2489 (struct iwn_ucode_info *)(desc + 1);
2490
2491 /* The microcontroller is ready. */
2492 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2493 BUS_DMASYNC_POSTREAD);
2494 DPRINTF(sc, IWN_DEBUG_RESET,
2495 "microcode alive notification version=%d.%d "
2496 "subtype=%x alive=%x\n", uc->major, uc->minor,
2497 uc->subtype, le32toh(uc->valid));
2498
2499 if (le32toh(uc->valid) != 1) {
2500 device_printf(sc->sc_dev,
2501 "microcontroller initialization failed");
2502 break;
2503 }
2504 if (uc->subtype == IWN_UCODE_INIT) {
2505 /* Save microcontroller report. */
2506 memcpy(&sc->ucode_info, uc, sizeof (*uc));
2507 }
2508 /* Save the address of the error log in SRAM. */
2509 sc->errptr = le32toh(uc->errptr);
2510 break;
2511 }
2512 case IWN_STATE_CHANGED:
2513 {
2514 uint32_t *status = (uint32_t *)(desc + 1);
2515
2516 /*
2517 * State change allows hardware switch change to be
2518 * noted. However, we handle this in iwn_intr as we
2519 * get both the enable/disable intr.
2520 */
2521 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2522 BUS_DMASYNC_POSTREAD);
2523 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
2524 le32toh(*status));
2525 break;
2526 }
2527 case IWN_START_SCAN:
2528 {
2529 struct iwn_start_scan *scan =
2530 (struct iwn_start_scan *)(desc + 1);
2531
2532 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2533 BUS_DMASYNC_POSTREAD);
2534 DPRINTF(sc, IWN_DEBUG_ANY,
2535 "%s: scanning channel %d status %x\n",
2536 __func__, scan->chan, le32toh(scan->status));
2537 break;
2538 }
2539 case IWN_STOP_SCAN:
2540 {
2541 struct iwn_stop_scan *scan =
2542 (struct iwn_stop_scan *)(desc + 1);
2543
2544 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2545 BUS_DMASYNC_POSTREAD);
2546 DPRINTF(sc, IWN_DEBUG_STATE,
2547 "scan finished nchan=%d status=%d chan=%d\n",
2548 scan->nchan, scan->status, scan->chan);
2549
2550 IWN_UNLOCK(sc);
2551 ieee80211_scan_next(vap);
2552 IWN_LOCK(sc);
2553 break;
2554 }
2555 case IWN5000_CALIBRATION_RESULT:
2556 iwn5000_rx_calib_result(sc, desc, data);
2557 break;
2558
2559 case IWN5000_CALIBRATION_DONE:
2560 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2561 wakeup(sc);
2562 break;
2563 }
2564
2565 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2566 }
2567
2568 /* Tell the firmware what we have processed. */
2569 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
2570 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2571}
2572
2573/*
2574 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2575 * from power-down sleep mode.
2576 */
2577static void
2578iwn_wakeup_intr(struct iwn_softc *sc)
2579{
2580 int qid;
2581
2582 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
2583 __func__);
2584
2585 /* Wakeup RX and TX rings. */
2586 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2587 for (qid = 0; qid < sc->sc_hal->ntxqs; qid++) {
2588 struct iwn_tx_ring *ring = &sc->txq[qid];
2589 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2590 }
2591}
2592
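/*
 * The state of the hardware RF kill switch changed; report it and
 * defer turning the radio on or off to the corresponding task.
 */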
2593static void
2594iwn_rftoggle_intr(struct iwn_softc *sc)
2595{
2596 struct ifnet *ifp = sc->sc_ifp;
2597 struct ieee80211com *ic = ifp->if_l2com;
2598 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
2599
2600 IWN_LOCK_ASSERT(sc);
2601
2602 device_printf(sc->sc_dev, "RF switch: radio %s\n",
2603 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
2604 if (tmp & IWN_GP_CNTRL_RFKILL)
2605 ieee80211_runtask(ic, &sc->sc_radioon_task);
2606 else
2607 ieee80211_runtask(ic, &sc->sc_radiooff_task);
2608}
2609
2610/*
2611 * Dump the error log of the firmware when a firmware panic occurs. Although
2612 * we can't debug the firmware because it is neither open source nor free, it
2613 * can help us to identify certain classes of problems.
2614 */
2615static void
2616iwn_fatal_intr(struct iwn_softc *sc)
2617{
2618 const struct iwn_hal *hal = sc->sc_hal;
2619 struct iwn_fw_dump dump;
2620 int i;
2621
2622 IWN_LOCK_ASSERT(sc);
2623
2624 /* Force a complete recalibration on next init. */
2625 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2626
2627 /* Check that the error log address is valid. */
2628 if (sc->errptr < IWN_FW_DATA_BASE ||
2629 sc->errptr + sizeof (dump) >
2630 IWN_FW_DATA_BASE + hal->fw_data_maxsz) {
2631 printf("%s: bad firmware error log address 0x%08x\n",
2632 __func__, sc->errptr);
2633 return;
2634 }
2635 if (iwn_nic_lock(sc) != 0) {
2636 printf("%s: could not read firmware error log\n",
2637 __func__);
2638 return;
2639 }
2640 /* Read firmware error log from SRAM. */
2641 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2642 sizeof (dump) / sizeof (uint32_t));
2643 iwn_nic_unlock(sc);
2644
2645 if (dump.valid == 0) {
2646 printf("%s: firmware error log is empty\n",
2647 __func__);
2648 return;
2649 }
2650 printf("firmware error log:\n");
2651 printf(" error type = \"%s\" (0x%08X)\n",
2652 (dump.id < nitems(iwn_fw_errmsg)) ?
2653 iwn_fw_errmsg[dump.id] : "UNKNOWN",
2654 dump.id);
2655 printf(" program counter = 0x%08X\n", dump.pc);
2656 printf(" source line = 0x%08X\n", dump.src_line);
2657 printf(" error data = 0x%08X%08X\n",
2658 dump.error_data[0], dump.error_data[1]);
2659 printf(" branch link = 0x%08X%08X\n",
2660 dump.branch_link[0], dump.branch_link[1]);
2661 printf(" interrupt link = 0x%08X%08X\n",
2662 dump.interrupt_link[0], dump.interrupt_link[1]);
2663 printf(" time = %u\n", dump.time[0]);
2664
2665 /* Dump driver status (TX and RX rings) while we're here. */
2666 printf("driver status:\n");
2667 for (i = 0; i < hal->ntxqs; i++) {
2668 struct iwn_tx_ring *ring = &sc->txq[i];
2669 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2670 i, ring->qid, ring->cur, ring->queued);
2671 }
2672 printf(" rx ring: cur=%d\n", sc->rxq.cur);
2673}
2674
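/*
 * Main interrupt handler.  Interrupts are masked while we determine
 * the cause, either from the ICT table or from the interrupt
 * registers, and are re-enabled on the way out as long as the
 * interface is still up.
 */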
2675static void
2676iwn_intr(void *arg)
2677{
2678 struct iwn_softc *sc = arg;
2679 struct ifnet *ifp = sc->sc_ifp;
2680 uint32_t r1, r2, tmp;
2681
2682 IWN_LOCK(sc);
2683
2684 /* Disable interrupts. */
2685 IWN_WRITE(sc, IWN_INT_MASK, 0);
2686
2687 /* Read interrupts from ICT (fast) or from registers (slow). */
2688 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2689 tmp = 0;
2690 while (sc->ict[sc->ict_cur] != 0) {
2691 tmp |= sc->ict[sc->ict_cur];
2692 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
2693 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2694 }
2695 tmp = le32toh(tmp);
2696 if (tmp == 0xffffffff) /* Shouldn't happen. */
2697 tmp = 0;
2698 else if (tmp & 0xc0000) /* Workaround a HW bug. */
2699 tmp |= 0x8000;
2700 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
2701 r2 = 0; /* Unused. */
2702 } else {
2703 r1 = IWN_READ(sc, IWN_INT);
2704 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
2705 return; /* Hardware gone! */
2706 r2 = IWN_READ(sc, IWN_FH_INT);
2707 }
2708
2709 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
2710
2711 if (r1 == 0 && r2 == 0)
2712 goto done; /* Interrupt not for us. */
2713
2714 /* Acknowledge interrupts. */
2715 IWN_WRITE(sc, IWN_INT, r1);
2716 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2717 IWN_WRITE(sc, IWN_FH_INT, r2);
2718
2719 if (r1 & IWN_INT_RF_TOGGLED) {
2720 iwn_rftoggle_intr(sc);
2721 goto done;
2722 }
2723 if (r1 & IWN_INT_CT_REACHED) {
2724 device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
2725 __func__);
2726 }
2727 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2728 iwn_fatal_intr(sc);
2729 ifp->if_flags &= ~IFF_UP;
2730 iwn_stop_locked(sc);
2731 goto done;
2732 }
2733 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2734 (r2 & IWN_FH_INT_RX)) {
2735 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2736 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2737 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2738 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2739 IWN_INT_PERIODIC_DIS);
2740 iwn_notif_intr(sc);
2741 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2742 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2743 IWN_INT_PERIODIC_ENA);
2744 }
2745 } else
2746 iwn_notif_intr(sc);
2747 }
2748
2749 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2750 if (sc->sc_flags & IWN_FLAG_USE_ICT)
2751 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2752 wakeup(sc); /* FH DMA transfer completed. */
2753 }
2754
2755 if (r1 & IWN_INT_ALIVE)
2756 wakeup(sc); /* Firmware is alive. */
2757
2758 if (r1 & IWN_INT_WAKEUP)
2759 iwn_wakeup_intr(sc);
2760
2761done:
2762 /* Re-enable interrupts. */
2763 if (ifp->if_flags & IFF_UP)
2764 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2765
2766 IWN_UNLOCK(sc);
2767}
2768
2769/*
2770 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
2771 * 5000 adapters use a slightly different format.)
2772 */
2773static void
2774iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2775 uint16_t len)
2776{
2777 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
2778
2779 *w = htole16(len + 8);
2780 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2781 BUS_DMASYNC_PREWRITE);
2782 if (idx < IWN_SCHED_WINSZ) {
2783 *(w + IWN_TX_RING_COUNT) = *w;
2784 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2785 BUS_DMASYNC_PREWRITE);
2786 }
2787}
2788
2789static void
2790iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2791 uint16_t len)
2792{
2793 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2794
2795 *w = htole16(id << 12 | (len + 8));
2796
2797 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2798 BUS_DMASYNC_PREWRITE);
2799 if (idx < IWN_SCHED_WINSZ) {
2800 *(w + IWN_TX_RING_COUNT) = *w;
2801 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2802 BUS_DMASYNC_PREWRITE);
2803 }
2804}
2805
2806#ifdef notyet
2807static void
2808iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
2809{
2810 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2811
2812 *w = (*w & htole16(0xf000)) | htole16(1);
2813 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2814 BUS_DMASYNC_PREWRITE);
2815 if (idx < IWN_SCHED_WINSZ) {
2816 *(w + IWN_TX_RING_COUNT) = *w;
2817 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2818 BUS_DMASYNC_PREWRITE);
2819 }
2820}
2821#endif
2822
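/*
 * Map a net80211 rate (in 0.5Mb/s units) to an index into the
 * driver's rate table; unknown rates map to index 0.
 */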
2823static uint8_t
2824iwn_plcp_signal(int rate) {
2825 int i;
2826
2827 for (i = 0; i < IWN_RIDX_MAX + 1; i++) {
2828 if (rate == iwn_rates[i].rate)
2829 return i;
2830 }
2831
2832 return 0;
2833}
2834
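/*
 * Regular transmit path: pick a TX rate, build an IWN_CMD_TX_DATA
 * firmware command around the 802.11 header, DMA-map the payload and
 * hand the frame to the TX ring.
 */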
2835static int
2836iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
2837 struct iwn_tx_ring *ring)
2838{
2839 const struct iwn_hal *hal = sc->sc_hal;
2840 const struct ieee80211_txparam *tp;
2841 const struct iwn_rate *rinfo;
2842 struct ieee80211vap *vap = ni->ni_vap;
2843 struct ieee80211com *ic = ni->ni_ic;
2844 struct iwn_node *wn = (void *)ni;
2845 struct iwn_tx_desc *desc;
2846 struct iwn_tx_data *data;
2847 struct iwn_tx_cmd *cmd;
2848 struct iwn_cmd_data *tx;
2849 struct ieee80211_frame *wh;
2850 struct ieee80211_key *k = NULL;
2851 struct mbuf *mnew;
2852 bus_dma_segment_t segs[IWN_MAX_SCATTER];
2853 uint32_t flags;
2854 u_int hdrlen;
2855 int totlen, error, pad, nsegs = 0, i, rate;
2856 uint8_t ridx, type, txant;
2857
2858 IWN_LOCK_ASSERT(sc);
2859
2860 wh = mtod(m, struct ieee80211_frame *);
2861 hdrlen = ieee80211_anyhdrsize(wh);
2862 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2863
2864 desc = &ring->desc[ring->cur];
2865 data = &ring->data[ring->cur];
2866
2867 /* Choose a TX rate index. */
2868 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
2869 if (type == IEEE80211_FC0_TYPE_MGT)
2870 rate = tp->mgmtrate;
2871 else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
2872 rate = tp->mcastrate;
2873 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
2874 rate = tp->ucastrate;
2875 else {
2876 /* XXX pass pktlen */
2877 (void) ieee80211_ratectl_rate(ni, NULL, 0);
2878 rate = ni->ni_txrate;
2879 }
2880 ridx = iwn_plcp_signal(rate);
2881 rinfo = &iwn_rates[ridx];
2882
2883 /* Encrypt the frame if need be. */
2884 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2885 k = ieee80211_crypto_encap(ni, m);
2886 if (k == NULL) {
2887 m_freem(m);
2888 return ENOBUFS;
2889 }
2890 /* Packet header may have moved, reset our local pointer. */
2891 wh = mtod(m, struct ieee80211_frame *);
2892 }
2893 totlen = m->m_pkthdr.len;
2894
2895 if (ieee80211_radiotap_active_vap(vap)) {
2896 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
2897
2898 tap->wt_flags = 0;
2899 tap->wt_rate = rinfo->rate;
2900 if (k != NULL)
2901 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2902
2903 ieee80211_radiotap_tx(vap, m);
2904 }
2905
2906 /* Prepare TX firmware command. */
2907 cmd = &ring->cmd[ring->cur];
2908 cmd->code = IWN_CMD_TX_DATA;
2909 cmd->flags = 0;
2910 cmd->qid = ring->qid;
2911 cmd->idx = ring->cur;
2912
2913 tx = (struct iwn_cmd_data *)cmd->data;
2914 /* NB: No need to clear tx, all fields are reinitialized here. */
2915 tx->scratch = 0; /* clear "scratch" area */
2916
2917 flags = 0;
2918 if (!IEEE80211_IS_MULTICAST(wh->i_addr1))
2919 flags |= IWN_TX_NEED_ACK;
2920 if ((wh->i_fc[0] &
2921 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
2922 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
2923 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
2924
2925 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2926 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
2927
2928 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2929 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2930 /* NB: Group frames are sent using CCK in 802.11b/g. */
2931 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2932 flags |= IWN_TX_NEED_RTS;
2933 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2934 ridx >= IWN_RIDX_OFDM6) {
2935 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2936 flags |= IWN_TX_NEED_CTS;
2937 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2938 flags |= IWN_TX_NEED_RTS;
2939 }
2940 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
2941 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
2942 /* 5000 autoselects RTS/CTS or CTS-to-self. */
2943 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
2944 flags |= IWN_TX_NEED_PROTECTION;
2945 } else
2946 flags |= IWN_TX_FULL_TXOP;
2947 }
2948 }
2949
2950 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2951 type != IEEE80211_FC0_TYPE_DATA)
2952 tx->id = hal->broadcast_id;
2953 else
2954 tx->id = wn->id;
2955
2956 if (type == IEEE80211_FC0_TYPE_MGT) {
2957 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2958
2959 /* Tell HW to set timestamp in probe responses. */
2960 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2961 flags |= IWN_TX_INSERT_TSTAMP;
2962
2963 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2964 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2965 tx->timeout = htole16(3);
2966 else
2967 tx->timeout = htole16(2);
2968 } else
2969 tx->timeout = htole16(0);
2970
2971 if (hdrlen & 3) {
2972 /* First segment length must be a multiple of 4. */
2973 flags |= IWN_TX_NEED_PADDING;
2974 pad = 4 - (hdrlen & 3);
2975 } else
2976 pad = 0;
2977
2978 tx->len = htole16(totlen);
2979 tx->tid = 0;
2980 tx->rts_ntries = 60;
2981 tx->data_ntries = 15;
2982 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
2983 tx->plcp = rinfo->plcp;
2984 tx->rflags = rinfo->flags;
2985 if (tx->id == hal->broadcast_id) {
2986 /* Group or management frame. */
2987 tx->linkq = 0;
2988 /* XXX Alternate between antenna A and B? */
2989 txant = IWN_LSB(sc->txchainmask);
2990 tx->rflags |= IWN_RFLAG_ANT(txant);
2991 } else {
2992 tx->linkq = IWN_RIDX_OFDM54 - ridx;
2993 flags |= IWN_TX_LINKQ; /* enable MRR */
2994 }
2995
2996 /* Set physical address of "scratch area". */
2997 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
2998 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
2999
3000 /* Copy 802.11 header in TX command. */
3001 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3002
3003 /* Trim 802.11 header. */
3004 m_adj(m, hdrlen);
3005 tx->security = 0;
3006 tx->flags = htole32(flags);
3007
3008 if (m->m_len > 0) {
3009 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
3010 m, segs, &nsegs, BUS_DMA_NOWAIT);
3011 if (error == EFBIG) {
3012 /* too many fragments, linearize */
3013 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3014 if (mnew == NULL) {
3015 device_printf(sc->sc_dev,
3016 "%s: could not defrag mbuf\n", __func__);
3017 m_freem(m);
3018 return ENOBUFS;
3019 }
3020 m = mnew;
3021 error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
3022 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
3023 }
3024 if (error != 0) {
3025 device_printf(sc->sc_dev,
3026 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
3027 __func__, error);
3028 m_freem(m);
3029 return error;
3030 }
3031 }
3032
3033 data->m = m;
3034 data->ni = ni;
3035
3036 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3037 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3038
3039 /* Fill TX descriptor. */
3040 desc->nsegs = 1 + nsegs;
3041 /* First DMA segment is used by the TX command. */
3042 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3043 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3044 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3045 /* Other DMA segments are for data payload. */
3046 for (i = 1; i <= nsegs; i++) {
3047 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3048 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3049 segs[i - 1].ds_len << 4);
3050 }
3051
3052 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3053 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3054 BUS_DMASYNC_PREWRITE);
3055 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3056 BUS_DMASYNC_PREWRITE);
3057
3058#ifdef notyet
3059 /* Update TX scheduler. */
3060 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3061#endif
3062
3063 /* Kick TX ring. */
3064 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3065 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3066
3067 /* Mark TX ring as full if we reach a certain threshold. */
3068 if (++ring->queued > IWN_TX_RING_HIMARK)
3069 sc->qfullmsk |= 1 << ring->qid;
3070
3071 return 0;
3072}
3073
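/*
 * Raw transmit path: like iwn_tx_data() but the rate, retry counts
 * and protection flags are taken from the caller-supplied BPF
 * parameters.
 */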
3074static int
3075iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3076 struct ieee80211_node *ni, struct iwn_tx_ring *ring,
3077 const struct ieee80211_bpf_params *params)
3078{
3079 const struct iwn_hal *hal = sc->sc_hal;
3080 const struct iwn_rate *rinfo;
3081 struct ifnet *ifp = sc->sc_ifp;
3082 struct ieee80211vap *vap = ni->ni_vap;
3083 struct ieee80211com *ic = ifp->if_l2com;
3084 struct iwn_tx_cmd *cmd;
3085 struct iwn_cmd_data *tx;
3086 struct ieee80211_frame *wh;
3087 struct iwn_tx_desc *desc;
3088 struct iwn_tx_data *data;
3089 struct mbuf *mnew;
3090 bus_addr_t paddr;
3091 bus_dma_segment_t segs[IWN_MAX_SCATTER];
3092 uint32_t flags;
3093 u_int hdrlen;
3094 int totlen, error, pad, nsegs = 0, i, rate;
3095 uint8_t ridx, type, txant;
3096
3097 IWN_LOCK_ASSERT(sc);
3098
3099 wh = mtod(m, struct ieee80211_frame *);
3100 hdrlen = ieee80211_anyhdrsize(wh);
3101 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3102
3103 desc = &ring->desc[ring->cur];
3104 data = &ring->data[ring->cur];
3105
3106 /* Choose a TX rate index. */
3107 rate = params->ibp_rate0;
3108 if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3109 /* XXX fall back to mcast/mgmt rate? */
3110 m_freem(m);
3111 return EINVAL;
3112 }
3113 ridx = iwn_plcp_signal(rate);
3114 rinfo = &iwn_rates[ridx];
3115
3116 totlen = m->m_pkthdr.len;
3117
3118 /* Prepare TX firmware command. */
3119 cmd = &ring->cmd[ring->cur];
3120 cmd->code = IWN_CMD_TX_DATA;
3121 cmd->flags = 0;
3122 cmd->qid = ring->qid;
3123 cmd->idx = ring->cur;
3124
3125 tx = (struct iwn_cmd_data *)cmd->data;
3126 /* NB: No need to clear tx, all fields are reinitialized here. */
3127 tx->scratch = 0; /* clear "scratch" area */
3128
3129 flags = 0;
3130 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3131 flags |= IWN_TX_NEED_ACK;
3132 if (params->ibp_flags & IEEE80211_BPF_RTS) {
3133 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3134 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3135 flags &= ~IWN_TX_NEED_RTS;
3136 flags |= IWN_TX_NEED_PROTECTION;
3137 } else
3138 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3139 }
3140 if (params->ibp_flags & IEEE80211_BPF_CTS) {
3141 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3142 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3143 flags &= ~IWN_TX_NEED_CTS;
3144 flags |= IWN_TX_NEED_PROTECTION;
3145 } else
3146 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3147 }
3148 if (type == IEEE80211_FC0_TYPE_MGT) {
3149 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3150
3151 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3152 flags |= IWN_TX_INSERT_TSTAMP;
3153
3154 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3155 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3156 tx->timeout = htole16(3);
3157 else
3158 tx->timeout = htole16(2);
3159 } else
3160 tx->timeout = htole16(0);
3161
3162 if (hdrlen & 3) {
3163 /* First segment length must be a multiple of 4. */
3164 flags |= IWN_TX_NEED_PADDING;
3165 pad = 4 - (hdrlen & 3);
3166 } else
3167 pad = 0;
3168
3169 if (ieee80211_radiotap_active_vap(vap)) {
3170 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3171
3172 tap->wt_flags = 0;
3173 tap->wt_rate = rate;
3174
3175 ieee80211_radiotap_tx(vap, m);
3176 }
3177
3178 tx->len = htole16(totlen);
3179 tx->tid = 0;
3180 tx->id = hal->broadcast_id;
3181 tx->rts_ntries = params->ibp_try1;
3182 tx->data_ntries = params->ibp_try0;
3183 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3184 tx->plcp = rinfo->plcp;
3185 tx->rflags = rinfo->flags;
3186 /* Group or management frame. */
3187 tx->linkq = 0;
3188 txant = IWN_LSB(sc->txchainmask);
3189 tx->rflags |= IWN_RFLAG_ANT(txant);
3190 /* Set physical address of "scratch area". */
3191 paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd);
3192 tx->loaddr = htole32(IWN_LOADDR(paddr));
3193 tx->hiaddr = IWN_HIADDR(paddr);
3194
3195 /* Copy 802.11 header in TX command. */
3196 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3197
3198 /* Trim 802.11 header. */
3199 m_adj(m, hdrlen);
3200 tx->security = 0;
3201 tx->flags = htole32(flags);
3202
3203 if (m->m_len > 0) {
3204 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
3205 m, segs, &nsegs, BUS_DMA_NOWAIT);
3206 if (error == EFBIG) {
3207 /* Too many fragments, linearize. */
3208 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3209 if (mnew == NULL) {
3210 device_printf(sc->sc_dev,
3211 "%s: could not defrag mbuf\n", __func__);
3212 m_freem(m);
3213 return ENOBUFS;
3214 }
3215 m = mnew;
3216 error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
3217 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
3218 }
3219 if (error != 0) {
3220 device_printf(sc->sc_dev,
3221 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
3222 __func__, error);
3223 m_freem(m);
3224 return error;
3225 }
3226 }
3227
3228 data->m = m;
3229 data->ni = ni;
3230
3231 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3232 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3233
3234 /* Fill TX descriptor. */
3235 desc->nsegs = 1 + nsegs;
3236 /* First DMA segment is used by the TX command. */
3237 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3238 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3239 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3240 /* Other DMA segments are for data payload. */
3241 for (i = 1; i <= nsegs; i++) {
3242 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3243 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3244 segs[i - 1].ds_len << 4);
3245 }
3246
3247 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3248 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3249 BUS_DMASYNC_PREWRITE);
3250 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3251 BUS_DMASYNC_PREWRITE);
3252
3253#ifdef notyet
3254 /* Update TX scheduler. */
3255 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3256#endif
3257
3258 /* Kick TX ring. */
3259 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3260 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3261
3262 /* Mark TX ring as full if we reach a certain threshold. */
3263 if (++ring->queued > IWN_TX_RING_HIMARK)
3264 sc->qfullmsk |= 1 << ring->qid;
3265
3266 return 0;
3267}
3268
3269static int
3270iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3271 const struct ieee80211_bpf_params *params)
3272{
3273 struct ieee80211com *ic = ni->ni_ic;
3274 struct ifnet *ifp = ic->ic_ifp;
3275 struct iwn_softc *sc = ifp->if_softc;
3276 struct iwn_tx_ring *txq;
3277 int error = 0;
3278
3279 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3280 ieee80211_free_node(ni);
3281 m_freem(m);
3282 return ENETDOWN;
3283 }
3284
3285 IWN_LOCK(sc);
3286 if (params == NULL)
3287 txq = &sc->txq[M_WME_GETAC(m)];
3288 else
3289 txq = &sc->txq[params->ibp_pri & 3];
3290
3291 if (params == NULL) {
3292 /*
3293 * Legacy path; interpret frame contents to decide
3294 * precisely how to send the frame.
3295 */
3296 error = iwn_tx_data(sc, m, ni, txq);
3297 } else {
3298 /*
3299 * Caller supplied explicit parameters to use in
3300 * sending the frame.
3301 */
3302 error = iwn_tx_data_raw(sc, m, ni, txq, params);
3303 }
3304 if (error != 0) {
3305 /* NB: m is reclaimed on tx failure */
3306 ieee80211_free_node(ni);
3307 ifp->if_oerrors++;
3308 }
2228
2229 /* Test if temperature has changed. */
2230 if (stats->general.temp != sc->rawtemp) {
2231 /* Convert "raw" temperature to degC. */
2232 sc->rawtemp = stats->general.temp;
2233 temp = hal->get_temperature(sc);
2234 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
2235 __func__, temp);
2236
2237 /* Update TX power if need be (4965AGN only.) */
2238 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2239 iwn4965_power_calibration(sc, temp);
2240 }
2241
2242 if (desc->type != IWN_BEACON_STATISTICS)
2243 return; /* Reply to a statistics request. */
2244
2245 sc->noise = iwn_get_noise(&stats->rx.general);
2246 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
2247
2248 /* Test that RSSI and noise are present in stats report. */
2249 if (le32toh(stats->rx.general.flags) != 1) {
2250 DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
2251 "received statistics without RSSI");
2252 return;
2253 }
2254
2255 if (calib->state == IWN_CALIB_STATE_ASSOC)
2256 iwn_collect_noise(sc, &stats->rx.general);
2257 else if (calib->state == IWN_CALIB_STATE_RUN)
2258 iwn_tune_sensitivity(sc, &stats->rx);
2259}
2260
2261/*
2262 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
2263 * and 5000 adapters have different incompatible TX status formats.
2264 */
2265static void
2266iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2267 struct iwn_rx_data *data)
2268{
2269 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2270 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2271
2272 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2273 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2274 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2275 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2276 le32toh(stat->status));
2277
2278 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2279 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2280}
2281
2282static void
2283iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2284 struct iwn_rx_data *data)
2285{
2286 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2287 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2288
2289 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
2290 "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n",
2291 __func__, desc->qid, desc->idx, stat->ackfailcnt,
2292 stat->btkillcnt, stat->rate, le16toh(stat->duration),
2293 le32toh(stat->status));
2294
2295#ifdef notyet
2296 /* Reset TX scheduler slot. */
2297 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2298#endif
2299
2300 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
2301 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2302}
2303
2304/*
2305 * Adapter-independent backend for TX_DONE firmware notifications.
2306 */
2307static void
2308iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2309 uint8_t status)
2310{
2311 struct ifnet *ifp = sc->sc_ifp;
2312 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2313 struct iwn_tx_data *data = &ring->data[desc->idx];
2314 struct mbuf *m;
2315 struct ieee80211_node *ni;
2316 struct ieee80211vap *vap;
2317
2318 KASSERT(data->ni != NULL, ("no node"));
2319
2320 /* Unmap and free mbuf. */
2321 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
2322 bus_dmamap_unload(ring->data_dmat, data->map);
2323 m = data->m, data->m = NULL;
2324 ni = data->ni, data->ni = NULL;
2325 vap = ni->ni_vap;
2326
2327 if (m->m_flags & M_TXCB) {
2328 /*
2329 * Channels marked for "radar" require traffic to be received
2330 * to unlock before we can transmit. Until traffic is seen
2331 * any attempt to transmit is returned immediately with status
2332 * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily
2333 * happen on first authenticate after scanning. To workaround
2334 * this we ignore a failure of this sort in AUTH state so the
2335 * 802.11 layer will fall back to using a timeout to wait for
2336 * the AUTH reply. This allows the firmware time to see
2337 * traffic so a subsequent retry of AUTH succeeds. It's
2338 * unclear why the firmware does not maintain state for
2339 * channels recently visited as this would allow immediate
2340 * use of the channel after a scan (where we see traffic).
2341 */
2342 if (status == IWN_TX_FAIL_TX_LOCKED &&
2343 ni->ni_vap->iv_state == IEEE80211_S_AUTH)
2344 ieee80211_process_callback(ni, m, 0);
2345 else
2346 ieee80211_process_callback(ni, m,
2347 (status & IWN_TX_FAIL) != 0);
2348 }
2349
2350 /*
2351 * Update rate control statistics for the node.
2352 */
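	/* Status values with the 0x80 bit set denote a failed transmission. */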
2353 if (status & 0x80) {
2354 ifp->if_oerrors++;
2355 ieee80211_ratectl_tx_complete(vap, ni,
2356 IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
2357 } else {
2358 ieee80211_ratectl_tx_complete(vap, ni,
2359 IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
2360 }
2361 m_freem(m);
2362 ieee80211_free_node(ni);
2363
2364 sc->sc_tx_timer = 0;
2365 if (--ring->queued < IWN_TX_RING_LOMARK) {
2366 sc->qfullmsk &= ~(1 << ring->qid);
2367 if (sc->qfullmsk == 0 &&
2368 (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
2369 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2370 iwn_start_locked(ifp);
2371 }
2372 }
2373}
2374
2375/*
2376 * Process a "command done" firmware notification. This is where we wakeup
2377 * processes waiting for a synchronous command completion.
2378 */
2379static void
2380iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2381{
2382 struct iwn_tx_ring *ring = &sc->txq[4];
2383 struct iwn_tx_data *data;
2384
2385 if ((desc->qid & 0xf) != 4)
2386 return; /* Not a command ack. */
2387
2388 data = &ring->data[desc->idx];
2389
2390 /* If the command was mapped in an mbuf, free it. */
2391 if (data->m != NULL) {
2392 bus_dmamap_unload(ring->data_dmat, data->map);
2393 m_freem(data->m);
2394 data->m = NULL;
2395 }
2396 wakeup(&ring->desc[desc->idx]);
2397}
2398
2399/*
2400 * Process an INT_FH_RX or INT_SW_RX interrupt.
2401 */
2402static void
2403iwn_notif_intr(struct iwn_softc *sc)
2404{
2405 struct ifnet *ifp = sc->sc_ifp;
2406 struct ieee80211com *ic = ifp->if_l2com;
2407 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
2408 uint16_t hw;
2409
2410 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
2411 BUS_DMASYNC_POSTREAD);
2412
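	/*
	 * closed_count in the shared RX status page is the firmware's
	 * write index; process entries until our read index catches up.
	 */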
2413 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2414 while (sc->rxq.cur != hw) {
2415 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2416 struct iwn_rx_desc *desc;
2417
2418 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2419 BUS_DMASYNC_POSTREAD);
2420 desc = mtod(data->m, struct iwn_rx_desc *);
2421
2422 DPRINTF(sc, IWN_DEBUG_RECV,
2423 "%s: qid %x idx %d flags %x type %d(%s) len %d\n",
2424 __func__, desc->qid & 0xf, desc->idx, desc->flags,
2425 desc->type, iwn_intr_str(desc->type),
2426 le16toh(desc->len));
2427
2428 if (!(desc->qid & 0x80)) /* Reply to a command. */
2429 iwn_cmd_done(sc, desc);
2430
2431 switch (desc->type) {
2432 case IWN_RX_PHY:
2433 iwn_rx_phy(sc, desc, data);
2434 break;
2435
2436 case IWN_RX_DONE: /* 4965AGN only. */
2437 case IWN_MPDU_RX_DONE:
2438 /* An 802.11 frame has been received. */
2439 iwn_rx_done(sc, desc, data);
2440 break;
2441
2442#if 0 /* HT */
2443 case IWN_RX_COMPRESSED_BA:
2444 /* A Compressed BlockAck has been received. */
2445 iwn_rx_compressed_ba(sc, desc, data);
2446 break;
2447#endif
2448
2449 case IWN_TX_DONE:
2450 /* An 802.11 frame has been transmitted. */
2451 sc->sc_hal->tx_done(sc, desc, data);
2452 break;
2453
2454 case IWN_RX_STATISTICS:
2455 case IWN_BEACON_STATISTICS:
2456 iwn_rx_statistics(sc, desc, data);
2457 break;
2458
2459 case IWN_BEACON_MISSED:
2460 {
2461 struct iwn_beacon_missed *miss =
2462 (struct iwn_beacon_missed *)(desc + 1);
2463 int misses;
2464
2465 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2466 BUS_DMASYNC_POSTREAD);
2467 misses = le32toh(miss->consecutive);
2468
2469 DPRINTF(sc, IWN_DEBUG_STATE,
2470 "%s: beacons missed %d/%d\n", __func__,
2471 misses, le32toh(miss->total));
2472 /*
2473 * If more than 5 consecutive beacons are missed,
2474 * reinitialize the sensitivity state machine.
2475 */
2476 if (vap->iv_state == IEEE80211_S_RUN &&
2477			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
2478 if (misses > 5)
2479 (void)iwn_init_sensitivity(sc);
2480 if (misses >= vap->iv_bmissthreshold) {
2481 IWN_UNLOCK(sc);
2482 ieee80211_beacon_miss(ic);
2483 IWN_LOCK(sc);
2484 }
2485 }
2486 break;
2487 }
2488 case IWN_UC_READY:
2489 {
2490 struct iwn_ucode_info *uc =
2491 (struct iwn_ucode_info *)(desc + 1);
2492
2493 /* The microcontroller is ready. */
2494 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2495 BUS_DMASYNC_POSTREAD);
2496 DPRINTF(sc, IWN_DEBUG_RESET,
2497 "microcode alive notification version=%d.%d "
2498 "subtype=%x alive=%x\n", uc->major, uc->minor,
2499 uc->subtype, le32toh(uc->valid));
2500
2501 if (le32toh(uc->valid) != 1) {
2502 device_printf(sc->sc_dev,
2503 "microcontroller initialization failed");
2504 break;
2505 }
2506 if (uc->subtype == IWN_UCODE_INIT) {
2507 /* Save microcontroller report. */
2508 memcpy(&sc->ucode_info, uc, sizeof (*uc));
2509 }
2510 /* Save the address of the error log in SRAM. */
2511 sc->errptr = le32toh(uc->errptr);
2512 break;
2513 }
2514 case IWN_STATE_CHANGED:
2515 {
2516 uint32_t *status = (uint32_t *)(desc + 1);
2517
2518 /*
2519 * State change allows hardware switch change to be
2520 * noted. However, we handle this in iwn_intr as we
2521			 * get both the enable/disable intr.
2522 */
2523 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2524 BUS_DMASYNC_POSTREAD);
2525 DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n",
2526 le32toh(*status));
2527 break;
2528 }
2529 case IWN_START_SCAN:
2530 {
2531 struct iwn_start_scan *scan =
2532 (struct iwn_start_scan *)(desc + 1);
2533
2534 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2535 BUS_DMASYNC_POSTREAD);
2536 DPRINTF(sc, IWN_DEBUG_ANY,
2537 "%s: scanning channel %d status %x\n",
2538 __func__, scan->chan, le32toh(scan->status));
2539 break;
2540 }
2541 case IWN_STOP_SCAN:
2542 {
2543 struct iwn_stop_scan *scan =
2544 (struct iwn_stop_scan *)(desc + 1);
2545
2546 bus_dmamap_sync(sc->rxq.data_dmat, data->map,
2547 BUS_DMASYNC_POSTREAD);
2548 DPRINTF(sc, IWN_DEBUG_STATE,
2549 "scan finished nchan=%d status=%d chan=%d\n",
2550 scan->nchan, scan->status, scan->chan);
2551
2552 IWN_UNLOCK(sc);
2553 ieee80211_scan_next(vap);
2554 IWN_LOCK(sc);
2555 break;
2556 }
2557 case IWN5000_CALIBRATION_RESULT:
2558 iwn5000_rx_calib_result(sc, desc, data);
2559 break;
2560
2561 case IWN5000_CALIBRATION_DONE:
2562 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2563 wakeup(sc);
2564 break;
2565 }
2566
2567 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2568 }
2569
2570 /* Tell the firmware what we have processed. */
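	/* Round the write pointer down to a multiple of 8 as the firmware expects. */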
2571 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
2572 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2573}
2574
2575/*
2576 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2577 * from power-down sleep mode.
2578 */
2579static void
2580iwn_wakeup_intr(struct iwn_softc *sc)
2581{
2582 int qid;
2583
2584 DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
2585 __func__);
2586
2587 /* Wakeup RX and TX rings. */
2588 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2589 for (qid = 0; qid < sc->sc_hal->ntxqs; qid++) {
2590 struct iwn_tx_ring *ring = &sc->txq[qid];
2591 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2592 }
2593}
2594
2595static void
2596iwn_rftoggle_intr(struct iwn_softc *sc)
2597{
2598 struct ifnet *ifp = sc->sc_ifp;
2599 struct ieee80211com *ic = ifp->if_l2com;
2600 uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
2601
2602 IWN_LOCK_ASSERT(sc);
2603
2604 device_printf(sc->sc_dev, "RF switch: radio %s\n",
2605 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
2606 if (tmp & IWN_GP_CNTRL_RFKILL)
2607 ieee80211_runtask(ic, &sc->sc_radioon_task);
2608 else
2609 ieee80211_runtask(ic, &sc->sc_radiooff_task);
2610}
2611
2612/*
2613 * Dump the error log of the firmware when a firmware panic occurs. Although
2614 * we can't debug the firmware because it is neither open source nor free, it
2615 * can help us to identify certain classes of problems.
2616 */
2617static void
2618iwn_fatal_intr(struct iwn_softc *sc)
2619{
2620 const struct iwn_hal *hal = sc->sc_hal;
2621 struct iwn_fw_dump dump;
2622 int i;
2623
2624 IWN_LOCK_ASSERT(sc);
2625
2626 /* Force a complete recalibration on next init. */
2627 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2628
2629 /* Check that the error log address is valid. */
2630 if (sc->errptr < IWN_FW_DATA_BASE ||
2631 sc->errptr + sizeof (dump) >
2632 IWN_FW_DATA_BASE + hal->fw_data_maxsz) {
2633 printf("%s: bad firmware error log address 0x%08x\n",
2634 __func__, sc->errptr);
2635 return;
2636 }
2637 if (iwn_nic_lock(sc) != 0) {
2638 printf("%s: could not read firmware error log\n",
2639 __func__);
2640 return;
2641 }
2642 /* Read firmware error log from SRAM. */
2643 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2644 sizeof (dump) / sizeof (uint32_t));
2645 iwn_nic_unlock(sc);
2646
2647 if (dump.valid == 0) {
2648 printf("%s: firmware error log is empty\n",
2649 __func__);
2650 return;
2651 }
2652 printf("firmware error log:\n");
2653 printf(" error type = \"%s\" (0x%08X)\n",
2654 (dump.id < nitems(iwn_fw_errmsg)) ?
2655 iwn_fw_errmsg[dump.id] : "UNKNOWN",
2656 dump.id);
2657 printf(" program counter = 0x%08X\n", dump.pc);
2658 printf(" source line = 0x%08X\n", dump.src_line);
2659 printf(" error data = 0x%08X%08X\n",
2660 dump.error_data[0], dump.error_data[1]);
2661 printf(" branch link = 0x%08X%08X\n",
2662 dump.branch_link[0], dump.branch_link[1]);
2663 printf(" interrupt link = 0x%08X%08X\n",
2664 dump.interrupt_link[0], dump.interrupt_link[1]);
2665 printf(" time = %u\n", dump.time[0]);
2666
2667 /* Dump driver status (TX and RX rings) while we're here. */
2668 printf("driver status:\n");
2669 for (i = 0; i < hal->ntxqs; i++) {
2670 struct iwn_tx_ring *ring = &sc->txq[i];
2671 printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2672 i, ring->qid, ring->cur, ring->queued);
2673 }
2674 printf(" rx ring: cur=%d\n", sc->rxq.cur);
2675}
2676
2677static void
2678iwn_intr(void *arg)
2679{
2680 struct iwn_softc *sc = arg;
2681 struct ifnet *ifp = sc->sc_ifp;
2682 uint32_t r1, r2, tmp;
2683
2684 IWN_LOCK(sc);
2685
2686 /* Disable interrupts. */
2687 IWN_WRITE(sc, IWN_INT_MASK, 0);
2688
2689 /* Read interrupts from ICT (fast) or from registers (slow). */
2690 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2691 tmp = 0;
2692 while (sc->ict[sc->ict_cur] != 0) {
2693 tmp |= sc->ict[sc->ict_cur];
2694 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
2695 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2696 }
2697 tmp = le32toh(tmp);
2698 if (tmp == 0xffffffff) /* Shouldn't happen. */
2699 tmp = 0;
2700 else if (tmp & 0xc0000) /* Workaround a HW bug. */
2701 tmp |= 0x8000;
2702 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
2703 r2 = 0; /* Unused. */
2704 } else {
2705 r1 = IWN_READ(sc, IWN_INT);
2706		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
			IWN_UNLOCK(sc);
2707			return;	/* Hardware gone! */
		}
2708 r2 = IWN_READ(sc, IWN_FH_INT);
2709 }
2710
2711 DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2);
2712
2713 if (r1 == 0 && r2 == 0)
2714 goto done; /* Interrupt not for us. */
2715
2716 /* Acknowledge interrupts. */
2717 IWN_WRITE(sc, IWN_INT, r1);
2718 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2719 IWN_WRITE(sc, IWN_FH_INT, r2);
2720
2721 if (r1 & IWN_INT_RF_TOGGLED) {
2722 iwn_rftoggle_intr(sc);
2723 goto done;
2724 }
2725 if (r1 & IWN_INT_CT_REACHED) {
2726 device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
2727 __func__);
2728 }
2729 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2730 iwn_fatal_intr(sc);
2731 ifp->if_flags &= ~IFF_UP;
2732 iwn_stop_locked(sc);
2733 goto done;
2734 }
2735 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2736 (r2 & IWN_FH_INT_RX)) {
2737 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2738 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2739 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2740 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2741 IWN_INT_PERIODIC_DIS);
2742 iwn_notif_intr(sc);
2743 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2744 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2745 IWN_INT_PERIODIC_ENA);
2746 }
2747 } else
2748 iwn_notif_intr(sc);
2749 }
2750
2751 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2752 if (sc->sc_flags & IWN_FLAG_USE_ICT)
2753 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2754 wakeup(sc); /* FH DMA transfer completed. */
2755 }
2756
2757 if (r1 & IWN_INT_ALIVE)
2758 wakeup(sc); /* Firmware is alive. */
2759
2760 if (r1 & IWN_INT_WAKEUP)
2761 iwn_wakeup_intr(sc);
2762
2763done:
2764 /* Re-enable interrupts. */
2765 if (ifp->if_flags & IFF_UP)
2766 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2767
2768 IWN_UNLOCK(sc);
2769}
2770
2771/*
2772 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
2773 * 5000 adapters use a slightly different format.)
2774 */
2775static void
2776iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2777 uint16_t len)
2778{
2779 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
2780
2781 *w = htole16(len + 8);
2782 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2783 BUS_DMASYNC_PREWRITE);
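	/*
	 * The first IWN_SCHED_WINSZ entries are mirrored past the end of
	 * the byte-count array so the scheduler can read a full window
	 * without wrapping; keep that copy in sync.
	 */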
2784 if (idx < IWN_SCHED_WINSZ) {
2785 *(w + IWN_TX_RING_COUNT) = *w;
2786 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2787 BUS_DMASYNC_PREWRITE);
2788 }
2789}
2790
2791static void
2792iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2793 uint16_t len)
2794{
2795 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2796
2797 *w = htole16(id << 12 | (len + 8));
2798
2799 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2800 BUS_DMASYNC_PREWRITE);
2801 if (idx < IWN_SCHED_WINSZ) {
2802 *(w + IWN_TX_RING_COUNT) = *w;
2803 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2804 BUS_DMASYNC_PREWRITE);
2805 }
2806}
2807
2808#ifdef notyet
2809static void
2810iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
2811{
2812 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2813
2814 *w = (*w & htole16(0xf000)) | htole16(1);
2815 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2816 BUS_DMASYNC_PREWRITE);
2817 if (idx < IWN_SCHED_WINSZ) {
2818 *(w + IWN_TX_RING_COUNT) = *w;
2819 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
2820 BUS_DMASYNC_PREWRITE);
2821 }
2822}
2823#endif
2824
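/*
 * Map an 802.11 rate to its index in the iwn_rates[] table; unknown
 * rates fall back to the first entry.
 */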
2825static uint8_t
2826iwn_plcp_signal(int rate) {
2827 int i;
2828
2829 for (i = 0; i < IWN_RIDX_MAX + 1; i++) {
2830 if (rate == iwn_rates[i].rate)
2831 return i;
2832 }
2833
2834 return 0;
2835}
2836
2837static int
2838iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
2839 struct iwn_tx_ring *ring)
2840{
2841 const struct iwn_hal *hal = sc->sc_hal;
2842 const struct ieee80211_txparam *tp;
2843 const struct iwn_rate *rinfo;
2844 struct ieee80211vap *vap = ni->ni_vap;
2845 struct ieee80211com *ic = ni->ni_ic;
2846 struct iwn_node *wn = (void *)ni;
2847 struct iwn_tx_desc *desc;
2848 struct iwn_tx_data *data;
2849 struct iwn_tx_cmd *cmd;
2850 struct iwn_cmd_data *tx;
2851 struct ieee80211_frame *wh;
2852 struct ieee80211_key *k = NULL;
2853 struct mbuf *mnew;
2854 bus_dma_segment_t segs[IWN_MAX_SCATTER];
2855 uint32_t flags;
2856 u_int hdrlen;
2857 int totlen, error, pad, nsegs = 0, i, rate;
2858 uint8_t ridx, type, txant;
2859
2860 IWN_LOCK_ASSERT(sc);
2861
2862 wh = mtod(m, struct ieee80211_frame *);
2863 hdrlen = ieee80211_anyhdrsize(wh);
2864 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2865
2866 desc = &ring->desc[ring->cur];
2867 data = &ring->data[ring->cur];
2868
2869 /* Choose a TX rate index. */
2870 tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
2871 if (type == IEEE80211_FC0_TYPE_MGT)
2872 rate = tp->mgmtrate;
2873 else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
2874 rate = tp->mcastrate;
2875 else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
2876 rate = tp->ucastrate;
2877 else {
2878 /* XXX pass pktlen */
2879 (void) ieee80211_ratectl_rate(ni, NULL, 0);
2880 rate = ni->ni_txrate;
2881 }
2882 ridx = iwn_plcp_signal(rate);
2883 rinfo = &iwn_rates[ridx];
2884
2885 /* Encrypt the frame if need be. */
2886 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2887 k = ieee80211_crypto_encap(ni, m);
2888 if (k == NULL) {
2889 m_freem(m);
2890 return ENOBUFS;
2891 }
2892 /* Packet header may have moved, reset our local pointer. */
2893 wh = mtod(m, struct ieee80211_frame *);
2894 }
2895 totlen = m->m_pkthdr.len;
2896
2897 if (ieee80211_radiotap_active_vap(vap)) {
2898 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
2899
2900 tap->wt_flags = 0;
2901 tap->wt_rate = rinfo->rate;
2902 if (k != NULL)
2903 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2904
2905 ieee80211_radiotap_tx(vap, m);
2906 }
2907
2908 /* Prepare TX firmware command. */
2909 cmd = &ring->cmd[ring->cur];
2910 cmd->code = IWN_CMD_TX_DATA;
2911 cmd->flags = 0;
2912 cmd->qid = ring->qid;
2913 cmd->idx = ring->cur;
2914
2915 tx = (struct iwn_cmd_data *)cmd->data;
2916 /* NB: No need to clear tx, all fields are reinitialized here. */
2917 tx->scratch = 0; /* clear "scratch" area */
2918
2919 flags = 0;
2920 if (!IEEE80211_IS_MULTICAST(wh->i_addr1))
2921 flags |= IWN_TX_NEED_ACK;
2922 if ((wh->i_fc[0] &
2923 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
2924 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
2925 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
2926
2927 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2928 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
2929
2930 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2931 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2932 /* NB: Group frames are sent using CCK in 802.11b/g. */
2933 if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
2934 flags |= IWN_TX_NEED_RTS;
2935 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2936 ridx >= IWN_RIDX_OFDM6) {
2937 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2938 flags |= IWN_TX_NEED_CTS;
2939 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2940 flags |= IWN_TX_NEED_RTS;
2941 }
2942 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
2943 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
2944 /* 5000 autoselects RTS/CTS or CTS-to-self. */
2945 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
2946 flags |= IWN_TX_NEED_PROTECTION;
2947 } else
2948 flags |= IWN_TX_FULL_TXOP;
2949 }
2950 }
2951
2952 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2953 type != IEEE80211_FC0_TYPE_DATA)
2954 tx->id = hal->broadcast_id;
2955 else
2956 tx->id = wn->id;
2957
2958 if (type == IEEE80211_FC0_TYPE_MGT) {
2959 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2960
2961 /* Tell HW to set timestamp in probe responses. */
2962 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2963 flags |= IWN_TX_INSERT_TSTAMP;
2964
2965 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2966 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2967 tx->timeout = htole16(3);
2968 else
2969 tx->timeout = htole16(2);
2970 } else
2971 tx->timeout = htole16(0);
2972
2973 if (hdrlen & 3) {
2974 /* First segment length must be a multiple of 4. */
2975 flags |= IWN_TX_NEED_PADDING;
2976 pad = 4 - (hdrlen & 3);
2977 } else
2978 pad = 0;
2979
2980 tx->len = htole16(totlen);
2981 tx->tid = 0;
2982 tx->rts_ntries = 60;
2983 tx->data_ntries = 15;
2984 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
2985 tx->plcp = rinfo->plcp;
2986 tx->rflags = rinfo->flags;
2987 if (tx->id == hal->broadcast_id) {
2988 /* Group or management frame. */
2989 tx->linkq = 0;
2990 /* XXX Alternate between antenna A and B? */
2991 txant = IWN_LSB(sc->txchainmask);
2992 tx->rflags |= IWN_RFLAG_ANT(txant);
2993 } else {
2994 tx->linkq = IWN_RIDX_OFDM54 - ridx;
2995 flags |= IWN_TX_LINKQ; /* enable MRR */
2996 }
2997
2998 /* Set physical address of "scratch area". */
2999 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
3000 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
3001
3002 /* Copy 802.11 header in TX command. */
3003 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3004
3005 /* Trim 802.11 header. */
3006 m_adj(m, hdrlen);
3007 tx->security = 0;
3008 tx->flags = htole32(flags);
3009
3010 if (m->m_len > 0) {
3011 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
3012 m, segs, &nsegs, BUS_DMA_NOWAIT);
3013 if (error == EFBIG) {
3014 /* too many fragments, linearize */
3015 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3016 if (mnew == NULL) {
3017 device_printf(sc->sc_dev,
3018 "%s: could not defrag mbuf\n", __func__);
3019 m_freem(m);
3020 return ENOBUFS;
3021 }
3022 m = mnew;
3023 error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
3024 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
3025 }
3026 if (error != 0) {
3027 device_printf(sc->sc_dev,
3028 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
3029 __func__, error);
3030 m_freem(m);
3031 return error;
3032 }
3033 }
3034
3035 data->m = m;
3036 data->ni = ni;
3037
3038 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3039 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3040
3041 /* Fill TX descriptor. */
3042 desc->nsegs = 1 + nsegs;
3043 /* First DMA segment is used by the TX command. */
3044 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3045 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3046 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3047 /* Other DMA segments are for data payload. */
3048 for (i = 1; i <= nsegs; i++) {
3049 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3050 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3051 segs[i - 1].ds_len << 4);
3052 }
3053
3054 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3055 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3056 BUS_DMASYNC_PREWRITE);
3057 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3058 BUS_DMASYNC_PREWRITE);
3059
3060#ifdef notyet
3061 /* Update TX scheduler. */
3062 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3063#endif
3064
3065 /* Kick TX ring. */
3066 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3067 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3068
3069 /* Mark TX ring as full if we reach a certain threshold. */
3070 if (++ring->queued > IWN_TX_RING_HIMARK)
3071 sc->qfullmsk |= 1 << ring->qid;
3072
3073 return 0;
3074}
3075
3076static int
3077iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
3078 struct ieee80211_node *ni, struct iwn_tx_ring *ring,
3079 const struct ieee80211_bpf_params *params)
3080{
3081 const struct iwn_hal *hal = sc->sc_hal;
3082 const struct iwn_rate *rinfo;
3083 struct ifnet *ifp = sc->sc_ifp;
3084 struct ieee80211vap *vap = ni->ni_vap;
3085 struct ieee80211com *ic = ifp->if_l2com;
3086 struct iwn_tx_cmd *cmd;
3087 struct iwn_cmd_data *tx;
3088 struct ieee80211_frame *wh;
3089 struct iwn_tx_desc *desc;
3090 struct iwn_tx_data *data;
3091 struct mbuf *mnew;
3092 bus_addr_t paddr;
3093 bus_dma_segment_t segs[IWN_MAX_SCATTER];
3094 uint32_t flags;
3095 u_int hdrlen;
3096 int totlen, error, pad, nsegs = 0, i, rate;
3097 uint8_t ridx, type, txant;
3098
3099 IWN_LOCK_ASSERT(sc);
3100
3101 wh = mtod(m, struct ieee80211_frame *);
3102 hdrlen = ieee80211_anyhdrsize(wh);
3103 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3104
3105 desc = &ring->desc[ring->cur];
3106 data = &ring->data[ring->cur];
3107
3108 /* Choose a TX rate index. */
3109 rate = params->ibp_rate0;
3110 if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
3111 /* XXX fall back to mcast/mgmt rate? */
3112 m_freem(m);
3113 return EINVAL;
3114 }
3115 ridx = iwn_plcp_signal(rate);
3116 rinfo = &iwn_rates[ridx];
3117
3118 totlen = m->m_pkthdr.len;
3119
3120 /* Prepare TX firmware command. */
3121 cmd = &ring->cmd[ring->cur];
3122 cmd->code = IWN_CMD_TX_DATA;
3123 cmd->flags = 0;
3124 cmd->qid = ring->qid;
3125 cmd->idx = ring->cur;
3126
3127 tx = (struct iwn_cmd_data *)cmd->data;
3128 /* NB: No need to clear tx, all fields are reinitialized here. */
3129 tx->scratch = 0; /* clear "scratch" area */
3130
3131 flags = 0;
3132 if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
3133 flags |= IWN_TX_NEED_ACK;
3134 if (params->ibp_flags & IEEE80211_BPF_RTS) {
3135 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3136 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3137 flags &= ~IWN_TX_NEED_RTS;
3138 flags |= IWN_TX_NEED_PROTECTION;
3139 } else
3140 flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
3141 }
3142 if (params->ibp_flags & IEEE80211_BPF_CTS) {
3143 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
3144 /* 5000 autoselects RTS/CTS or CTS-to-self. */
3145 flags &= ~IWN_TX_NEED_CTS;
3146 flags |= IWN_TX_NEED_PROTECTION;
3147 } else
3148 flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
3149 }
3150 if (type == IEEE80211_FC0_TYPE_MGT) {
3151 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3152
3153 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
3154 flags |= IWN_TX_INSERT_TSTAMP;
3155
3156 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3157 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
3158 tx->timeout = htole16(3);
3159 else
3160 tx->timeout = htole16(2);
3161 } else
3162 tx->timeout = htole16(0);
3163
3164 if (hdrlen & 3) {
3165 /* First segment length must be a multiple of 4. */
3166 flags |= IWN_TX_NEED_PADDING;
3167 pad = 4 - (hdrlen & 3);
3168 } else
3169 pad = 0;
3170
3171 if (ieee80211_radiotap_active_vap(vap)) {
3172 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
3173
3174 tap->wt_flags = 0;
3175 tap->wt_rate = rate;
3176
3177 ieee80211_radiotap_tx(vap, m);
3178 }
3179
3180 tx->len = htole16(totlen);
3181 tx->tid = 0;
3182 tx->id = hal->broadcast_id;
3183 tx->rts_ntries = params->ibp_try1;
3184 tx->data_ntries = params->ibp_try0;
3185 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
3186 tx->plcp = rinfo->plcp;
3187 tx->rflags = rinfo->flags;
3188 /* Group or management frame. */
3189 tx->linkq = 0;
3190 txant = IWN_LSB(sc->txchainmask);
3191 tx->rflags |= IWN_RFLAG_ANT(txant);
3192 /* Set physical address of "scratch area". */
3193 paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd);
3194 tx->loaddr = htole32(IWN_LOADDR(paddr));
3195 tx->hiaddr = IWN_HIADDR(paddr);
3196
3197 /* Copy 802.11 header in TX command. */
3198 memcpy((uint8_t *)(tx + 1), wh, hdrlen);
3199
3200 /* Trim 802.11 header. */
3201 m_adj(m, hdrlen);
3202 tx->security = 0;
3203 tx->flags = htole32(flags);
3204
3205 if (m->m_len > 0) {
3206 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
3207 m, segs, &nsegs, BUS_DMA_NOWAIT);
3208 if (error == EFBIG) {
3209 /* Too many fragments, linearize. */
3210 mnew = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
3211 if (mnew == NULL) {
3212 device_printf(sc->sc_dev,
3213 "%s: could not defrag mbuf\n", __func__);
3214 m_freem(m);
3215 return ENOBUFS;
3216 }
3217 m = mnew;
3218 error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
3219 data->map, m, segs, &nsegs, BUS_DMA_NOWAIT);
3220 }
3221 if (error != 0) {
3222 device_printf(sc->sc_dev,
3223 "%s: bus_dmamap_load_mbuf_sg failed, error %d\n",
3224 __func__, error);
3225 m_freem(m);
3226 return error;
3227 }
3228 }
3229
3230 data->m = m;
3231 data->ni = ni;
3232
3233 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
3234 __func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
3235
3236 /* Fill TX descriptor. */
3237 desc->nsegs = 1 + nsegs;
3238 /* First DMA segment is used by the TX command. */
3239 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
3240 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
3241 (4 + sizeof (*tx) + hdrlen + pad) << 4);
3242 /* Other DMA segments are for data payload. */
3243 for (i = 1; i <= nsegs; i++) {
3244 desc->segs[i].addr = htole32(IWN_LOADDR(segs[i - 1].ds_addr));
3245 desc->segs[i].len = htole16(IWN_HIADDR(segs[i - 1].ds_addr) |
3246 segs[i - 1].ds_len << 4);
3247 }
3248
3249 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
3250 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3251 BUS_DMASYNC_PREWRITE);
3252 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3253 BUS_DMASYNC_PREWRITE);
3254
3255#ifdef notyet
3256 /* Update TX scheduler. */
3257 hal->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3258#endif
3259
3260 /* Kick TX ring. */
3261 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3262 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3263
3264 /* Mark TX ring as full if we reach a certain threshold. */
3265 if (++ring->queued > IWN_TX_RING_HIMARK)
3266 sc->qfullmsk |= 1 << ring->qid;
3267
3268 return 0;
3269}
3270
3271static int
3272iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3273 const struct ieee80211_bpf_params *params)
3274{
3275 struct ieee80211com *ic = ni->ni_ic;
3276 struct ifnet *ifp = ic->ic_ifp;
3277 struct iwn_softc *sc = ifp->if_softc;
3278 struct iwn_tx_ring *txq;
3279 int error = 0;
3280
3281 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
3282 ieee80211_free_node(ni);
3283 m_freem(m);
3284 return ENETDOWN;
3285 }
3286
3287 IWN_LOCK(sc);
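	/*
	 * Select a TX ring: the frame's WME access category for the normal
	 * path, the caller-supplied priority for raw frames.
	 */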
3288 if (params == NULL)
3289 txq = &sc->txq[M_WME_GETAC(m)];
3290 else
3291 txq = &sc->txq[params->ibp_pri & 3];
3292
3293 if (params == NULL) {
3294 /*
3295 * Legacy path; interpret frame contents to decide
3296 * precisely how to send the frame.
3297 */
3298 error = iwn_tx_data(sc, m, ni, txq);
3299 } else {
3300 /*
3301 * Caller supplied explicit parameters to use in
3302 * sending the frame.
3303 */
3304 error = iwn_tx_data_raw(sc, m, ni, txq, params);
3305 }
3306 if (error != 0) {
3307 /* NB: m is reclaimed on tx failure */
3308 ieee80211_free_node(ni);
3309 ifp->if_oerrors++;
3310 }
3311 sc->sc_tx_timer = 5;
3312
3313 IWN_UNLOCK(sc);
3314 return error;
3315}
3316
3317static void
3318iwn_start(struct ifnet *ifp)
3319{
3320 struct iwn_softc *sc = ifp->if_softc;
3321
3322 IWN_LOCK(sc);
3323 iwn_start_locked(ifp);
3324 IWN_UNLOCK(sc);
3325}
3326
3327static void
3328iwn_start_locked(struct ifnet *ifp)
3329{
3330 struct iwn_softc *sc = ifp->if_softc;
3331 struct ieee80211_node *ni;
3332 struct iwn_tx_ring *txq;
3333 struct mbuf *m;
3334 int pri;
3335
3336 IWN_LOCK_ASSERT(sc);
3337
3338 for (;;) {
3339 if (sc->qfullmsk != 0) {
3340 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3341 break;
3342 }
3343 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
3344 if (m == NULL)
3345 break;
3346 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
3347 pri = M_WME_GETAC(m);
3348 txq = &sc->txq[pri];
3349 if (iwn_tx_data(sc, m, ni, txq) != 0) {
3350 ifp->if_oerrors++;
3351 ieee80211_free_node(ni);
3352 break;
3353 }
3354 sc->sc_tx_timer = 5;
3355 }
3356}
3357
3358static void
3355	iwn_watchdog(struct iwn_softc *sc)
3356	{
3357		if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
3358			struct ifnet *ifp = sc->sc_ifp;
3359			struct ieee80211com *ic = ifp->if_l2com;
3360	
3361			if_printf(ifp, "device timeout\n");
3362			ieee80211_runtask(ic, &sc->sc_reinit_task);
3363		}
3359	iwn_watchdog(void *arg)
3360	{
3361		struct iwn_softc *sc = arg;
3362		struct ifnet *ifp = sc->sc_ifp;
3363		struct ieee80211com *ic = ifp->if_l2com;
3364	
3365		IWN_LOCK_ASSERT(sc);
3366	
3367		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
3368	
3369		if (sc->sc_tx_timer > 0) {
3370			if (--sc->sc_tx_timer == 0) {
3371				if_printf(ifp, "device timeout\n");
3372				ieee80211_runtask(ic, &sc->sc_reinit_task);
3373				return;
3374			}
3375		}
3376		callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
3364}
3365
3366static int
3367iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3368{
3369 struct iwn_softc *sc = ifp->if_softc;
3370 struct ieee80211com *ic = ifp->if_l2com;
3371 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3372 struct ifreq *ifr = (struct ifreq *) data;
3373 int error = 0, startall = 0, stop = 0;
3374
3375 switch (cmd) {
3376 case SIOCSIFFLAGS:
3377 IWN_LOCK(sc);
3378 if (ifp->if_flags & IFF_UP) {
3379 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3380 iwn_init_locked(sc);
3381 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3382 startall = 1;
3383 else
3384 stop = 1;
3385 }
3386 } else {
3387 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3388 iwn_stop_locked(sc);
3389 }
3390 IWN_UNLOCK(sc);
3391 if (startall)
3392 ieee80211_start_all(ic);
3393 else if (vap != NULL && stop)
3394 ieee80211_stop(vap);
3395 break;
3396 case SIOCGIFMEDIA:
3397 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
3398 break;
3399 case SIOCGIFADDR:
3400 error = ether_ioctl(ifp, cmd, data);
3401 break;
3402 default:
3403 error = EINVAL;
3404 break;
3405 }
3406 return error;
3407}
3408
3409/*
3410 * Send a command to the firmware.
3411 */
3412static int
3413iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3414{
3415 struct iwn_tx_ring *ring = &sc->txq[4];
3416 struct iwn_tx_desc *desc;
3417 struct iwn_tx_data *data;
3418 struct iwn_tx_cmd *cmd;
3419 struct mbuf *m;
3420 bus_addr_t paddr;
3421 int totlen, error;
3422
3423 IWN_LOCK_ASSERT(sc);
3424
3425 desc = &ring->desc[ring->cur];
3426 data = &ring->data[ring->cur];
3427 totlen = 4 + size;
3428
3429 if (size > sizeof cmd->data) {
3430 /* Command is too large to fit in a descriptor. */
3431 if (totlen > MCLBYTES)
3432 return EINVAL;
3433 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3434 if (m == NULL)
3435 return ENOMEM;
3436 cmd = mtod(m, struct iwn_tx_cmd *);
3437 error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3438 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3439 if (error != 0) {
3440 m_freem(m);
3441 return error;
3442 }
3443 data->m = m;
3444 } else {
3445 cmd = &ring->cmd[ring->cur];
3446 paddr = data->cmd_paddr;
3447 }
3448
3449 cmd->code = code;
3450 cmd->flags = 0;
3451 cmd->qid = ring->qid;
3452 cmd->idx = ring->cur;
3453 memcpy(cmd->data, buf, size);
3454
3455 desc->nsegs = 1;
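	/*
	 * The low nibble of the length word holds the high DMA address
	 * bits (IWN_HIADDR); the byte count occupies the upper bits.
	 */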
3456 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3457 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
3458
3459 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
3460 __func__, iwn_intr_str(cmd->code), cmd->code,
3461 cmd->flags, cmd->qid, cmd->idx);
3462
3463 if (size > sizeof cmd->data) {
3464 bus_dmamap_sync(ring->data_dmat, data->map,
3465 BUS_DMASYNC_PREWRITE);
3466 } else {
3467 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3468 BUS_DMASYNC_PREWRITE);
3469 }
3470 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3471 BUS_DMASYNC_PREWRITE);
3472
3473#ifdef notyet
3474 /* Update TX scheduler. */
3475 sc->sc_hal->update_sched(sc, ring->qid, ring->cur, 0, 0);
3476#endif
3477
3478 /* Kick command ring. */
3479 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3480 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3481
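	/*
	 * Synchronous commands sleep on the descriptor until iwn_cmd_done()
	 * wakes us up, with a one second timeout.
	 */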
3482 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
3483}
3484
3485static int
3486iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3487{
3488 struct iwn4965_node_info hnode;
3489 caddr_t src, dst;
3490
3491 /*
3492 * We use the node structure for 5000 Series internally (it is
3493 * a superset of the one for 4965AGN). We thus copy the common
3494 * fields before sending the command.
3495 */
3496 src = (caddr_t)node;
3497 dst = (caddr_t)&hnode;
3498 memcpy(dst, src, 48);
3499 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3500 memcpy(dst + 48, src + 72, 20);
3501 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3502}
3503
3504static int
3505iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3506{
3507 /* Direct mapping. */
3508 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3509}
3510
3511#if 0 /* HT */
3512static const uint8_t iwn_ridx_to_plcp[] = {
3513 10, 20, 55, 110, /* CCK */
3514 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */
3515};
3516static const uint8_t iwn_siso_mcs_to_plcp[] = {
3517 0, 0, 0, 0, /* CCK */
3518 0, 0, 1, 2, 3, 4, 5, 6, 7 /* HT */
3519};
3520static const uint8_t iwn_mimo_mcs_to_plcp[] = {
3521 0, 0, 0, 0, /* CCK */
3522 8, 8, 9, 10, 11, 12, 13, 14, 15 /* HT */
3523};
3524#endif
3525static const uint8_t iwn_prev_ridx[] = {
3526 /* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */
3527 0, 0, 1, 5, /* CCK */
3528 2, 4, 3, 6, 7, 8, 9, 10, 10 /* OFDM */
3529};
3530
3531/*
3532 * Configure hardware link parameters for the specified
3533 * node operating on the specified channel.
3534 */
3535static int
3536iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, int async)
3537{
3538 struct ifnet *ifp = sc->sc_ifp;
3539 struct ieee80211com *ic = ifp->if_l2com;
3540 struct iwn_cmd_link_quality linkq;
3541 const struct iwn_rate *rinfo;
3542 int i;
3543 uint8_t txant, ridx;
3544
3545 /* Use the first valid TX antenna. */
3546 txant = IWN_LSB(sc->txchainmask);
3547
3548 memset(&linkq, 0, sizeof linkq);
3549 linkq.id = id;
3550 linkq.antmsk_1stream = txant;
3551 linkq.antmsk_2stream = IWN_ANT_AB;
3552 linkq.ampdu_max = 31;
3553 linkq.ampdu_threshold = 3;
3554 linkq.ampdu_limit = htole16(4000); /* 4ms */
3555
3556#if 0 /* HT */
3557 if (IEEE80211_IS_CHAN_HT(c))
3558 linkq.mimo = 1;
3559#endif
3560
3561 if (id == IWN_ID_BSS)
3562 ridx = IWN_RIDX_OFDM54;
3563 else if (IEEE80211_IS_CHAN_A(ic->ic_curchan))
3564 ridx = IWN_RIDX_OFDM6;
3565 else
3566 ridx = IWN_RIDX_CCK1;
3567
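	/*
	 * Fill the retry table: start at the initial rate and fall back
	 * through iwn_prev_ridx[] for each subsequent retry.
	 */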
3568 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3569 rinfo = &iwn_rates[ridx];
3570#if 0 /* HT */
3571 if (IEEE80211_IS_CHAN_HT40(c)) {
3572 linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx]
3573 | IWN_RIDX_MCS;
3574 linkq.retry[i].rflags = IWN_RFLAG_HT
3575 | IWN_RFLAG_HT40;
3576 /* XXX shortGI */
3577 } else if (IEEE80211_IS_CHAN_HT(c)) {
3578 linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx]
3579 | IWN_RIDX_MCS;
3580 linkq.retry[i].rflags = IWN_RFLAG_HT;
3581 /* XXX shortGI */
3582 } else
3583#endif
3584 {
3585 linkq.retry[i].plcp = rinfo->plcp;
3586 linkq.retry[i].rflags = rinfo->flags;
3587 }
3588 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3589 ridx = iwn_prev_ridx[ridx];
3590 }
3591#ifdef IWN_DEBUG
3592 if (sc->sc_debug & IWN_DEBUG_STATE) {
3593 printf("%s: set link quality for node %d, mimo %d ssmask %d\n",
3594 __func__, id, linkq.mimo, linkq.antmsk_1stream);
3595 printf("%s:", __func__);
3596 for (i = 0; i < IWN_MAX_TX_RETRIES; i++)
3597 printf(" %d:%x", linkq.retry[i].plcp,
3598 linkq.retry[i].rflags);
3599 printf("\n");
3600 }
3601#endif
3602 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3603}
3604
3605/*
3606 * Broadcast node is used to send group-addressed and management frames.
3607 */
3608static int
3609iwn_add_broadcast_node(struct iwn_softc *sc, int async)
3610{
3611 const struct iwn_hal *hal = sc->sc_hal;
3612 struct ifnet *ifp = sc->sc_ifp;
3613 struct iwn_node_info node;
3614 int error;
3615
3616 memset(&node, 0, sizeof node);
3617 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
3618 node.id = hal->broadcast_id;
3619 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
3620 error = hal->add_node(sc, &node, async);
3621 if (error != 0)
3622 return error;
3623
3624 error = iwn_set_link_quality(sc, hal->broadcast_id, async);
3625 return error;
3626}
3627
3628static int
3629iwn_wme_update(struct ieee80211com *ic)
3630{
3631#define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
3632 struct iwn_softc *sc = ic->ic_ifp->if_softc;
3633 struct iwn_edca_params cmd;
3634 int i;
3635
3636 memset(&cmd, 0, sizeof cmd);
3637 cmd.flags = htole32(IWN_EDCA_UPDATE);
3638 for (i = 0; i < WME_NUM_AC; i++) {
3639 const struct wmeParams *wmep =
3640 &ic->ic_wme.wme_chanParams.cap_wmeParams[i];
3641 cmd.ac[i].aifsn = wmep->wmep_aifsn;
3642 cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin));
3643 cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax));
3644 cmd.ac[i].txoplimit =
3645 htole16(IEEE80211_TXOP_TO_US(wmep->wmep_txopLimit));
3646 }
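	/* Drop the com lock while the EDCA update is sent under the driver lock. */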
3647 IEEE80211_UNLOCK(ic);
3648 IWN_LOCK(sc);
3649 (void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/);
3650 IWN_UNLOCK(sc);
3651 IEEE80211_LOCK(ic);
3652 return 0;
3653#undef IWN_EXP2
3654}
3655
3656static void
3657iwn_update_mcast(struct ifnet *ifp)
3658{
3659 /* Ignore */
3660}
3661
3662static void
3663iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3664{
3665 struct iwn_cmd_led led;
3666
3667 /* Clear microcode LED ownership. */
3668 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3669
3670 led.which = which;
3671 led.unit = htole32(10000); /* on/off in unit of 100ms */
3672 led.off = off;
3673 led.on = on;
3674 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3675}
3676
3677/*
3678 * Set the critical temperature at which the firmware will stop the radio
3679 * and notify us.
3680 */
3681static int
3682iwn_set_critical_temp(struct iwn_softc *sc)
3683{
3684 struct iwn_critical_temp crit;
3685 int32_t temp;
3686
3687 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3688
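	/* 110 degC, converted to the unit expected by each chip family. */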
3689 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3690 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3691 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3692 temp = IWN_CTOK(110);
3693 else
3694 temp = 110;
3695 memset(&crit, 0, sizeof crit);
3696 crit.tempR = htole32(temp);
3697 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n",
3698 temp);
3699 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3700}
3701
3702static int
3703iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3704{
3705 struct iwn_cmd_timing cmd;
3706 uint64_t val, mod;
3707
3708 memset(&cmd, 0, sizeof cmd);
3709 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3710 cmd.bintval = htole16(ni->ni_intval);
3711 cmd.lintval = htole16(10);
3712
3713 /* Compute remaining time until next beacon. */
3714 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
3715 mod = le64toh(cmd.tstamp) % val;
3716 cmd.binitval = htole32((uint32_t)(val - mod));
3717
3718 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3719 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3720
3721 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3722}
3723
3724static void
3725iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3726{
3727 struct ifnet *ifp = sc->sc_ifp;
3728 struct ieee80211com *ic = ifp->if_l2com;
3729
3730 /* Adjust TX power if need be (delta >= 3 degC.) */
3731 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
3732 __func__, sc->temp, temp);
3733 if (abs(temp - sc->temp) >= 3) {
3734 /* Record temperature of last calibration. */
3735 sc->temp = temp;
3736 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
3737 }
3738}
3739
3740/*
3741 * Set TX power for current channel (each rate has its own power settings).
3742 * This function takes into account the regulatory information from EEPROM,
3743 * the current temperature and the current voltage.
3744 */
3745static int
3746iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3747 int async)
3748{
3749/* Fixed-point arithmetic division using a n-bit fractional part. */
3750#define fdivround(a, b, n) \
3751 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3752/* Linear interpolation. */
3753#define interpolate(x, x1, y1, x2, y2, n) \
3754 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
3755
3756 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3757 struct ifnet *ifp = sc->sc_ifp;
3758 struct ieee80211com *ic = ifp->if_l2com;
3759 struct iwn_ucode_info *uc = &sc->ucode_info;
3760 struct iwn4965_cmd_txpower cmd;
3761 struct iwn4965_eeprom_chan_samples *chans;
3762 int32_t vdiff, tdiff;
3763 int i, c, grp, maxpwr;
3764 const uint8_t *rf_gain, *dsp_gain;
3765 uint8_t chan;
3766
3767 /* Retrieve channel number. */
3768 chan = ieee80211_chan2ieee(ic, ch);
3769 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
3770 chan);
3771
3772 memset(&cmd, 0, sizeof cmd);
3773 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
3774 cmd.chan = chan;
3775
3776 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
3777 maxpwr = sc->maxpwr5GHz;
3778 rf_gain = iwn4965_rf_gain_5ghz;
3779 dsp_gain = iwn4965_dsp_gain_5ghz;
3780 } else {
3781 maxpwr = sc->maxpwr2GHz;
3782 rf_gain = iwn4965_rf_gain_2ghz;
3783 dsp_gain = iwn4965_dsp_gain_2ghz;
3784 }
3785
3786 /* Compute voltage compensation. */
3787 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
3788 if (vdiff > 0)
3789 vdiff *= 2;
3790 if (abs(vdiff) > 2)
3791 vdiff = 0;
3792 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3793 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
3794 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
3795
3796 /* Get channel attenuation group. */
3797 if (chan <= 20) /* 1-20 */
3798 grp = 4;
3799 else if (chan <= 43) /* 34-43 */
3800 grp = 0;
3801 else if (chan <= 70) /* 44-70 */
3802 grp = 1;
3803 else if (chan <= 124) /* 71-124 */
3804 grp = 2;
3805 else /* 125-200 */
3806 grp = 3;
3807 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3808 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
3809
3810 /* Get channel sub-band. */
3811 for (i = 0; i < IWN_NBANDS; i++)
3812 if (sc->bands[i].lo != 0 &&
3813 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
3814 break;
3815 if (i == IWN_NBANDS) /* Can't happen in real-life. */
3816 return EINVAL;
3817 chans = sc->bands[i].chans;
3818 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3819 "%s: chan %d sub-band=%d\n", __func__, chan, i);
3820
3821 for (c = 0; c < 2; c++) {
3822 uint8_t power, gain, temp;
3823 int maxchpwr, pwr, ridx, idx;
3824
3825 power = interpolate(chan,
3826 chans[0].num, chans[0].samples[c][1].power,
3827 chans[1].num, chans[1].samples[c][1].power, 1);
3828 gain = interpolate(chan,
3829 chans[0].num, chans[0].samples[c][1].gain,
3830 chans[1].num, chans[1].samples[c][1].gain, 1);
3831 temp = interpolate(chan,
3832 chans[0].num, chans[0].samples[c][1].temp,
3833 chans[1].num, chans[1].samples[c][1].temp, 1);
3834 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3835 "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
3836 __func__, c, power, gain, temp);
3837
3838 /* Compute temperature compensation. */
3839 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
3840 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3841 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
3842 __func__, tdiff, sc->temp, temp);
3843
3844 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
3845 /* Convert dBm to half-dBm. */
3846 maxchpwr = sc->maxpwr[chan] * 2;
3847 if ((ridx / 8) & 1)
3848 maxchpwr -= 6; /* MIMO 2T: -3dB */
3849
3850 pwr = maxpwr;
3851
3852 /* Adjust TX power based on rate. */
3853 if ((ridx % 8) == 5)
3854 pwr -= 15; /* OFDM48: -7.5dB */
3855 else if ((ridx % 8) == 6)
3856 pwr -= 17; /* OFDM54: -8.5dB */
3857 else if ((ridx % 8) == 7)
3858 pwr -= 20; /* OFDM60: -10dB */
3859 else
3860 pwr -= 10; /* Others: -5dB */
3861
3862 /* Do not exceed channel max TX power. */
3863 if (pwr > maxchpwr)
3864 pwr = maxchpwr;
3865
3866 idx = gain - (pwr - power) - tdiff - vdiff;
3867 if ((ridx / 8) & 1) /* MIMO */
3868 idx += (int32_t)le32toh(uc->atten[grp][c]);
3869
3870 if (cmd.band == 0)
3871 idx += 9; /* 5GHz */
3872 if (ridx == IWN_RIDX_MAX)
3873 idx += 5; /* CCK */
3874
3875 /* Make sure idx stays in a valid range. */
3876 if (idx < 0)
3877 idx = 0;
3878 else if (idx > IWN4965_MAX_PWR_INDEX)
3879 idx = IWN4965_MAX_PWR_INDEX;
3880
3881 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3882 "%s: Tx chain %d, rate idx %d: power=%d\n",
3883 __func__, c, ridx, idx);
3884 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
3885 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
3886 }
3887 }
3888
3889 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3890 "%s: set tx power for chan %d\n", __func__, chan);
3891 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
3892
3893#undef interpolate
3894#undef fdivround
3895}
3896
3897static int
3898iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3899 int async)
3900{
3901 struct iwn5000_cmd_txpower cmd;
3902
3903 /*
3904 * TX power calibration is handled automatically by the firmware
3905 * for 5000 Series.
3906 */
3907 memset(&cmd, 0, sizeof cmd);
3908 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
3909 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
3910 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
3911 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
3912 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
3913}
3914
3915/*
3916 * Retrieve the maximum RSSI (in dBm) among receivers.
3917 */
3918static int
3919iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
3920{
3921 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
3922 uint8_t mask, agc;
3923 int rssi;
3924
3925 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
3926 agc = (le16toh(phy->agc) >> 7) & 0x7f;
3927
3928 rssi = 0;
3929#if 0
3930 if (mask & IWN_ANT_A) /* Ant A */
3931 rssi = max(rssi, phy->rssi[0]);
3932 if (mask & IWN_ANT_B) /* Ant B */
3933 rssi = max(rssi, phy->rssi[2]);
3934 if (mask & IWN_ANT_C) /* Ant C */
3935 rssi = max(rssi, phy->rssi[4]);
3936#else
3937 rssi = max(rssi, phy->rssi[0]);
3938 rssi = max(rssi, phy->rssi[2]);
3939 rssi = max(rssi, phy->rssi[4]);
3940#endif
3941
3942 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d "
3943 "result %d\n", __func__, agc, mask,
3944 phy->rssi[0], phy->rssi[2], phy->rssi[4],
3945 rssi - agc - IWN_RSSI_TO_DBM);
3946 return rssi - agc - IWN_RSSI_TO_DBM;
3947}
3948
3949static int
3950iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
3951{
3952 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
3953 int rssi;
3954 uint8_t agc;
3955
3956 agc = (le32toh(phy->agc) >> 9) & 0x7f;
3957
3958 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
3959 le16toh(phy->rssi[1]) & 0xff);
3960 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
3961
3962 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d "
3963 "result %d\n", __func__, agc,
3964 phy->rssi[0], phy->rssi[1], phy->rssi[2],
3965 rssi - agc - IWN_RSSI_TO_DBM);
3966 return rssi - agc - IWN_RSSI_TO_DBM;
3967}
3968
3969/*
3970 * Retrieve the average noise (in dBm) among receivers.
3971 */
3972static int
3973iwn_get_noise(const struct iwn_rx_general_stats *stats)
3974{
3975 int i, total, nbant, noise;
3976
3977 total = nbant = 0;
3978 for (i = 0; i < 3; i++) {
3979 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
3980 continue;
3981 total += noise;
3982 nbant++;
3983 }
3984 /* There should be at least one antenna but check anyway. */
3985 return (nbant == 0) ? -127 : (total / nbant) - 107;
3986}
3987
3988/*
3989 * Compute temperature (in degC) from last received statistics.
3990 */
3991static int
3992iwn4965_get_temperature(struct iwn_softc *sc)
3993{
3994 struct iwn_ucode_info *uc = &sc->ucode_info;
3995 int32_t r1, r2, r3, r4, temp;
3996
3997 r1 = le32toh(uc->temp[0].chan20MHz);
3998 r2 = le32toh(uc->temp[1].chan20MHz);
3999 r3 = le32toh(uc->temp[2].chan20MHz);
4000 r4 = le32toh(sc->rawtemp);
4001
4002 if (r1 == r3) /* Prevents division by 0 (should not happen.) */
4003 return 0;
4004
4005 /* Sign-extend 23-bit R4 value to 32-bit. */
4006 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4007 /* Compute temperature in Kelvin. */
4008 temp = (259 * (r4 - r2)) / (r3 - r1);
4009 temp = (temp * 97) / 100 + 8;
4010
4011 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4012 IWN_KTOC(temp));
4013 return IWN_KTOC(temp);
4014}
4015
4016static int
4017iwn5000_get_temperature(struct iwn_softc *sc)
4018{
4019 int32_t temp;
4020
4021 /*
4022 * Temperature is not used by the driver for 5000 Series because
4023 * TX power calibration is handled by firmware. We export it to
4024 * users through the sensor framework though.
4025 */
4026 temp = le32toh(sc->rawtemp);
4027 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4028 temp = (temp / -5) + sc->temp_off;
4029 temp = IWN_KTOC(temp);
4030 }
4031 return temp;
4032}
4033
4034/*
4035 * Initialize sensitivity calibration state machine.
4036 */
4037static int
4038iwn_init_sensitivity(struct iwn_softc *sc)
4039{
4040 const struct iwn_hal *hal = sc->sc_hal;
4041 struct iwn_calib_state *calib = &sc->calib;
4042 uint32_t flags;
4043 int error;
4044
4045 /* Reset calibration state machine. */
4046 memset(calib, 0, sizeof (*calib));
4047 calib->state = IWN_CALIB_STATE_INIT;
4048 calib->cck_state = IWN_CCK_STATE_HIFA;
4049 /* Set initial correlation values. */
4050 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
4051 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4052 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
4053 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4054 calib->cck_x4 = 125;
4055 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
4056 calib->energy_cck = sc->limits->energy_cck;
4057
4058 /* Write initial sensitivity. */
4059 error = iwn_send_sensitivity(sc);
4060 if (error != 0)
4061 return error;
4062
4063 /* Write initial gains. */
4064 error = hal->init_gains(sc);
4065 if (error != 0)
4066 return error;
4067
4068 /* Request statistics at each beacon interval. */
4069 flags = 0;
4070 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__);
4071 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4072}
4073
4074/*
4075 * Collect noise and RSSI statistics for the first 20 beacons received
4076 * after association and use them to determine connected antennas and
4077 * to set differential gains.
4078 */
4079static void
4080iwn_collect_noise(struct iwn_softc *sc,
4081 const struct iwn_rx_general_stats *stats)
4082{
4083 const struct iwn_hal *hal = sc->sc_hal;
4084 struct iwn_calib_state *calib = &sc->calib;
4085 uint32_t val;
4086 int i;
4087
4088 /* Accumulate RSSI and noise for all 3 antennas. */
4089 for (i = 0; i < 3; i++) {
4090 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4091 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4092 }
4093 /* NB: We update differential gains only once after 20 beacons. */
4094 if (++calib->nbeacons < 20)
4095 return;
4096
4097 /* Determine highest average RSSI. */
4098 val = MAX(calib->rssi[0], calib->rssi[1]);
4099 val = MAX(calib->rssi[2], val);
4100
4101 /* Determine which antennas are connected. */
4102 sc->chainmask = sc->rxchainmask;
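 /*
  * NB: calib->rssi[] holds sums over the 20 beacons collected above, so
  * the "15 * 20" test below flags an antenna whose RSSI is, on average,
  * more than 15 units per beacon below the best antenna.
  */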
4103 for (i = 0; i < 3; i++)
4104 if (val - calib->rssi[i] > 15 * 20)
4105 sc->chainmask &= ~(1 << i);
4106 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4107 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4108 __func__, sc->rxchainmask, sc->chainmask);
4109
4110 /* If none of the TX antennas are connected, keep at least one. */
4111 if ((sc->chainmask & sc->txchainmask) == 0)
4112 sc->chainmask |= IWN_LSB(sc->txchainmask);
4113
4114 (void)hal->set_gains(sc);
4115 calib->state = IWN_CALIB_STATE_RUN;
4116
4117#ifdef notyet
4118 /* XXX Disable RX chains with no antennas connected. */
4119 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4120 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4121#endif
4122
4123#if 0
4124 /* XXX: not yet */
4125 /* Enable power-saving mode if requested by user. */
4126 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4127 (void)iwn_set_pslevel(sc, 0, 3, 1);
4128#endif
4129}
4130
4131static int
4132iwn4965_init_gains(struct iwn_softc *sc)
4133{
4134 struct iwn_phy_calib_gain cmd;
4135
4136 memset(&cmd, 0, sizeof cmd);
4137 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4138 /* Differential gains initially set to 0 for all 3 antennas. */
4139 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4140 "%s: setting initial differential gains\n", __func__);
4141 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4142}
4143
4144static int
4145iwn5000_init_gains(struct iwn_softc *sc)
4146{
4147 struct iwn_phy_calib cmd;
4148
4149 memset(&cmd, 0, sizeof cmd);
4150 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
4151 cmd.ngroups = 1;
4152 cmd.isvalid = 1;
4153 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4154 "%s: setting initial differential gains\n", __func__);
4155 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4156}
4157
4158static int
4159iwn4965_set_gains(struct iwn_softc *sc)
4160{
4161 struct iwn_calib_state *calib = &sc->calib;
4162 struct iwn_phy_calib_gain cmd;
4163 int i, delta, noise;
4164
4165 /* Get minimal noise among connected antennas. */
4166 noise = INT_MAX; /* NB: There's at least one antenna. */
4167 for (i = 0; i < 3; i++)
4168 if (sc->chainmask & (1 << i))
4169 noise = MIN(calib->noise[i], noise);
4170
4171 memset(&cmd, 0, sizeof cmd);
4172 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4173 /* Set differential gains for connected antennas. */
4174 for (i = 0; i < 3; i++) {
4175 if (sc->chainmask & (1 << i)) {
4176 /* Compute attenuation (in units of 1.5dB). */
4177 delta = (noise - (int32_t)calib->noise[i]) / 30;
4178 /* NB: delta <= 0 */
4179 /* Limit to [-4.5dB,0]. */
4180 cmd.gain[i] = MIN(abs(delta), 3);
4181 if (delta < 0)
4182 cmd.gain[i] |= 1 << 2; /* sign bit */
4183 }
4184 }
4185 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4186 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4187 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4188 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4189}
4190
4191static int
4192iwn5000_set_gains(struct iwn_softc *sc)
4193{
4194 struct iwn_calib_state *calib = &sc->calib;
4195 struct iwn_phy_calib_gain cmd;
4196 int i, ant, delta, div;
4197
4198 /* We collected 20 beacon samples; chips other than the 6050 need an extra 1.5 factor. */
4199 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4200
4201 memset(&cmd, 0, sizeof cmd);
4202 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN;
4203 cmd.ngroups = 1;
4204 cmd.isvalid = 1;
4205 /* Get the first available RX antenna as the reference. */
4206 ant = IWN_LSB(sc->rxchainmask);
4207 /* Set differential gains for other antennas. */
4208 for (i = ant + 1; i < 3; i++) {
4209 if (sc->chainmask & (1 << i)) {
4210 /* The delta is relative to antenna "ant". */
4211 delta = ((int32_t)calib->noise[ant] -
4212 (int32_t)calib->noise[i]) / div;
4213 /* Limit to [-4.5dB,+4.5dB]. */
4214 cmd.gain[i - 1] = MIN(abs(delta), 3);
4215 if (delta < 0)
4216 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
4217 }
4218 }
4219 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4220 "setting differential gains Ant B/C: %x/%x (%x)\n",
4221 cmd.gain[0], cmd.gain[1], sc->chainmask);
4222 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4223}
4224
4225/*
4226 * Tune RF RX sensitivity based on the number of false alarms detected
4227 * during the last beacon period.
4228 */
4229static void
4230iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4231{
4232#define inc(val, inc, max) \
4233 if ((val) < (max)) { \
4234 if ((val) < (max) - (inc)) \
4235 (val) += (inc); \
4236 else \
4237 (val) = (max); \
4238 needs_update = 1; \
4239 }
4240#define dec(val, dec, min) \
4241 if ((val) > (min)) { \
4242 if ((val) > (min) + (dec)) \
4243 (val) -= (dec); \
4244 else \
4245 (val) = (min); \
4246 needs_update = 1; \
4247 }
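 /*
  * The inc()/dec() helpers above nudge a threshold towards the given
  * bound, saturate at that bound and set needs_update so that the new
  * values are pushed to the firmware via iwn_send_sensitivity() at the
  * end of this function.
  */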
4248
4249 const struct iwn_sensitivity_limits *limits = sc->limits;
4250 struct iwn_calib_state *calib = &sc->calib;
4251 uint32_t val, rxena, fa;
4252 uint32_t energy[3], energy_min;
4253 uint8_t noise[3], noise_ref;
4254 int i, needs_update = 0;
4255
4256 /* Check that we've been enabled long enough. */
4257 rxena = le32toh(stats->general.load);
4258 if (rxena == 0)
4259 return;
4260
4261 /* Compute number of false alarms since last call for OFDM. */
4262 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4263 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
4264 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
4265
4266 /* Save counters values for next call. */
4267 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
4268 calib->fa_ofdm = le32toh(stats->ofdm.fa);
4269
4270 if (fa > 50 * rxena) {
4271 /* High false alarm count, decrease sensitivity. */
4272 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4273 "%s: OFDM high false alarm count: %u\n", __func__, fa);
4274 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
4275 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4276 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
4277 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4278
4279 } else if (fa < 5 * rxena) {
4280 /* Low false alarm count, increase sensitivity. */
4281 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4282 "%s: OFDM low false alarm count: %u\n", __func__, fa);
4283 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
4284 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4285 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
4286 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4287 }
4288
4289 /* Compute maximum noise among 3 receivers. */
4290 for (i = 0; i < 3; i++)
4291 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
4292 val = MAX(noise[0], noise[1]);
4293 val = MAX(noise[2], val);
4294 /* Insert it into our samples table. */
4295 calib->noise_samples[calib->cur_noise_sample] = val;
4296 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4297
4298 /* Compute maximum noise among last 20 samples. */
4299 noise_ref = calib->noise_samples[0];
4300 for (i = 1; i < 20; i++)
4301 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4302
4303 /* Compute maximum energy among 3 receivers. */
4304 for (i = 0; i < 3; i++)
4305 energy[i] = le32toh(stats->general.energy[i]);
4306 val = MIN(energy[0], energy[1]);
4307 val = MIN(energy[2], val);
4308 /* Insert it into our samples table. */
4309 calib->energy_samples[calib->cur_energy_sample] = val;
4310 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4311
4312 /* Compute minimum energy among last 10 samples. */
4313 energy_min = calib->energy_samples[0];
4314 for (i = 1; i < 10; i++)
4315 energy_min = MAX(energy_min, calib->energy_samples[i]);
4316 energy_min += 6;
4317
4318 /* Compute number of false alarms since last call for CCK. */
4319 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4320 fa += le32toh(stats->cck.fa) - calib->fa_cck;
4321 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
4322
4323 /* Save counters values for next call. */
4324 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
4325 calib->fa_cck = le32toh(stats->cck.fa);
4326
4327 if (fa > 50 * rxena) {
4328 /* High false alarm count, decrease sensitivity. */
4329 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4330 "%s: CCK high false alarm count: %u\n", __func__, fa);
4331 calib->cck_state = IWN_CCK_STATE_HIFA;
4332 calib->low_fa = 0;
4333
4334 if (calib->cck_x4 > 160) {
4335 calib->noise_ref = noise_ref;
4336 if (calib->energy_cck > 2)
4337 dec(calib->energy_cck, 2, energy_min);
4338 }
4339 if (calib->cck_x4 < 160) {
4340 calib->cck_x4 = 161;
4341 needs_update = 1;
4342 } else
4343 inc(calib->cck_x4, 3, limits->max_cck_x4);
4344
4345 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4346
4347 } else if (fa < 5 * rxena) {
4348 /* Low false alarm count, increase sensitivity. */
4349 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4350 "%s: CCK low false alarm count: %u\n", __func__, fa);
4351 calib->cck_state = IWN_CCK_STATE_LOFA;
4352 calib->low_fa++;
4353
4354 if (calib->cck_state != IWN_CCK_STATE_INIT &&
4355 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4356 calib->low_fa > 100)) {
4357 inc(calib->energy_cck, 2, limits->min_energy_cck);
4358 dec(calib->cck_x4, 3, limits->min_cck_x4);
4359 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4360 }
4361 } else {
4362 /* Not worth increasing or decreasing sensitivity. */
4363 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4364 "%s: CCK normal false alarm count: %u\n", __func__, fa);
4365 calib->low_fa = 0;
4366 calib->noise_ref = noise_ref;
4367
4368 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4369 /* Previous interval had many false alarms. */
4370 dec(calib->energy_cck, 8, energy_min);
4371 }
4372 calib->cck_state = IWN_CCK_STATE_INIT;
4373 }
4374
4375 if (needs_update)
4376 (void)iwn_send_sensitivity(sc);
4377#undef dec
4378#undef inc
4379}
4380
4381static int
4382iwn_send_sensitivity(struct iwn_softc *sc)
4383{
4384 struct iwn_calib_state *calib = &sc->calib;
4385 struct iwn_sensitivity_cmd cmd;
4386
4387 memset(&cmd, 0, sizeof cmd);
4388 cmd.which = IWN_SENSITIVITY_WORKTBL;
4389 /* OFDM modulation. */
4390 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
4391 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
4392 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
4393 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
4394 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
4395 cmd.energy_ofdm_th = htole16(62);
4396 /* CCK modulation. */
4397 cmd.corr_cck_x4 = htole16(calib->cck_x4);
4398 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
4399 cmd.energy_cck = htole16(calib->energy_cck);
4400 /* Barker modulation: use default values. */
4401 cmd.corr_barker = htole16(190);
4402 cmd.corr_barker_mrc = htole16(390);
4403
4404 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4405 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
4406 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4407 calib->ofdm_mrc_x4, calib->cck_x4,
4408 calib->cck_mrc_x4, calib->energy_cck);
4409 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1);
4410}
4411
4412/*
4413 * Set STA mode power saving level (between 0 and 5).
4414 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4415 */
4416static int
4417iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4418{
4419 const struct iwn_pmgt *pmgt;
4420 struct iwn_pmgt_cmd cmd;
4421 uint32_t max, skip_dtim;
4422 uint32_t tmp;
4423 int i;
4424
4425 /* Select which PS parameters to use. */
4426 if (dtim <= 2)
4427 pmgt = &iwn_pmgt[0][level];
4428 else if (dtim <= 10)
4429 pmgt = &iwn_pmgt[1][level];
4430 else
4431 pmgt = &iwn_pmgt[2][level];
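 /*
  * NB: iwn_pmgt[] is indexed by DTIM bucket (<=2, <=10, >10) and by power
  * saving level; iwn_config() requests level 0 (CAM) while the device is
  * being initialized.
  */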
4432
4433 memset(&cmd, 0, sizeof cmd);
4434 if (level != 0) /* not CAM */
4435 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4436 if (level == 5)
4437 cmd.flags |= htole16(IWN_PS_FAST_PD);
4438 /* Retrieve PCIe Active State Power Management (ASPM). */
4439 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
4440 if (!(tmp & 0x1)) /* L0s Entry disabled. */
4441 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4442 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4443 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4444
4445 if (dtim == 0) {
4446 dtim = 1;
4447 skip_dtim = 0;
4448 } else
4449 skip_dtim = pmgt->skip_dtim;
4450 if (skip_dtim != 0) {
4451 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4452 max = pmgt->intval[4];
4453 if (max == (uint32_t)-1)
4454 max = dtim * (skip_dtim + 1);
4455 else if (max > dtim)
4456 max = (max / dtim) * dtim;
4457 } else
4458 max = dtim;
4459 for (i = 0; i < 5; i++)
4460 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4461
4462 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
4463 level);
4464 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4465}
4466
4467static int
4468iwn_send_btcoex(struct iwn_softc *sc)
4469{
4470 struct iwn_bluetooth cmd;
4471
4472 memset(&cmd, 0, sizeof cmd);
4473 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4474 cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
4475 cmd.max_kill = IWN_BT_MAX_KILL_DEF;
4476 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
4477 __func__);
4478 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
4479}
4480
4481static int
4482iwn_config(struct iwn_softc *sc)
4483{
4484 const struct iwn_hal *hal = sc->sc_hal;
4485 struct ifnet *ifp = sc->sc_ifp;
4486 struct ieee80211com *ic = ifp->if_l2com;
4487 uint32_t txmask;
4488 int error;
4489 uint16_t rxchain;
4490
4491 /* Configure valid TX chains for 5000 Series. */
4492 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4493 txmask = htole32(sc->txchainmask);
4494 DPRINTF(sc, IWN_DEBUG_RESET,
4495 "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
4496 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4497 sizeof txmask, 0);
4498 if (error != 0) {
4499 device_printf(sc->sc_dev,
4500 "%s: could not configure valid TX chains, "
4501 "error %d\n", __func__, error);
4502 return error;
4503 }
4504 }
4505
4506 /* Configure bluetooth coexistence. */
4507 error = iwn_send_btcoex(sc);
4508 if (error != 0) {
4509 device_printf(sc->sc_dev,
4510 "%s: could not configure bluetooth coexistence, error %d\n",
4511 __func__, error);
4512 return error;
4513 }
4514
4515 /* Set mode, channel, RX filter and enable RX. */
4516 memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4517 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
4518 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
4519 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
4520 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4521 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
4522 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4523 switch (ic->ic_opmode) {
4524 case IEEE80211_M_STA:
4525 sc->rxon.mode = IWN_MODE_STA;
4526 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4527 break;
4528 case IEEE80211_M_MONITOR:
4529 sc->rxon.mode = IWN_MODE_MONITOR;
4530 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4531 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4532 break;
4533 default:
4534 /* Should not get here. */
4535 break;
4536 }
4537 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
4538 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
4539 sc->rxon.ht_single_mask = 0xff;
4540 sc->rxon.ht_dual_mask = 0xff;
4541 sc->rxon.ht_triple_mask = 0xff;
4542 rxchain =
4543 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4544 IWN_RXCHAIN_MIMO_COUNT(2) |
4545 IWN_RXCHAIN_IDLE_COUNT(2);
4546 sc->rxon.rxchain = htole16(rxchain);
4547 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
4548 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 0);
4549 if (error != 0) {
4550 device_printf(sc->sc_dev,
4551 "%s: RXON command failed\n", __func__);
4552 return error;
4553 }
4554
4555 error = iwn_add_broadcast_node(sc, 0);
4556 if (error != 0) {
4557 device_printf(sc->sc_dev,
4558 "%s: could not add broadcast node\n", __func__);
4559 return error;
4560 }
4561
4562 /* Configuration has changed, set TX power accordingly. */
4563 error = hal->set_txpower(sc, ic->ic_curchan, 0);
4564 if (error != 0) {
4565 device_printf(sc->sc_dev,
4566 "%s: could not set TX power\n", __func__);
4567 return error;
4568 }
4569
4570 error = iwn_set_critical_temp(sc);
4571 if (error != 0) {
4572 device_printf(sc->sc_dev,
4573 "%s: could not set critical temperature\n", __func__);
4574 return error;
4575 }
4576
4577 /* Set power saving level to CAM during initialization. */
4578 error = iwn_set_pslevel(sc, 0, 0, 0);
4579 if (error != 0) {
4580 device_printf(sc->sc_dev,
4581 "%s: could not set power saving level\n", __func__);
4582 return error;
4583 }
4584 return 0;
4585}
4586
4587/*
4588 * Add an ssid element to a frame.
4589 */
4590static uint8_t *
4591ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
4592{
4593 *frm++ = IEEE80211_ELEMID_SSID;
4594 *frm++ = len;
4595 memcpy(frm, ssid, len);
4596 return frm + len;
4597}
4598
4599static int
4600iwn_scan(struct iwn_softc *sc)
4601{
4602 struct ifnet *ifp = sc->sc_ifp;
4603 struct ieee80211com *ic = ifp->if_l2com;
4604 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/
4605 struct iwn_scan_hdr *hdr;
4606 struct iwn_cmd_data *tx;
4607 struct iwn_scan_essid *essid;
4608 struct iwn_scan_chan *chan;
4609 struct ieee80211_frame *wh;
4610 struct ieee80211_rateset *rs;
4611 struct ieee80211_channel *c;
4612 int buflen, error;
4613 uint16_t rxchain;
4614 uint8_t *buf, *frm, txant;
4615
4616 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4617 if (buf == NULL) {
4618 device_printf(sc->sc_dev,
4619 "%s: could not allocate buffer for scan command\n",
4620 __func__);
4621 return ENOMEM;
4622 }
4623 hdr = (struct iwn_scan_hdr *)buf;
4624
4625 /*
4626 * Move to the next channel if no frames are received within 10ms
4627 * after sending the probe request.
4628 */
4629 hdr->quiet_time = htole16(10); /* timeout in milliseconds */
4630 hdr->quiet_threshold = htole16(1); /* min # of packets */
4631
4632 /* Select antennas for scanning. */
4633 rxchain =
4634 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4635 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
4636 IWN_RXCHAIN_DRIVER_FORCE;
4637 if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
4638 sc->hw_type == IWN_HW_REV_TYPE_4965) {
4639 /* Ant A must be avoided in 5GHz because of an HW bug. */
4640 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
4641 } else /* Use all available RX antennas. */
4642 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4643 hdr->rxchain = htole16(rxchain);
4644 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
4645
4646 tx = (struct iwn_cmd_data *)(hdr + 1);
4647 tx->flags = htole32(IWN_TX_AUTO_SEQ);
4648 tx->id = sc->sc_hal->broadcast_id;
4649 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4650
4651 if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) {
4652 /* Send probe requests at 6Mbps. */
4653 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4654 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4655 } else {
4656 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
4657 /* Send probe requests at 1Mbps. */
4658 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
4659 tx->rflags = IWN_RFLAG_CCK;
4660 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4661 }
4662 /* Use the first valid TX antenna. */
4663 txant = IWN_LSB(sc->txchainmask);
4664 tx->rflags |= IWN_RFLAG_ANT(txant);
4665
4666 essid = (struct iwn_scan_essid *)(tx + 1);
4667 if (ss->ss_ssid[0].len != 0) {
4668 essid[0].id = IEEE80211_ELEMID_SSID;
4669 essid[0].len = ss->ss_ssid[0].len;
4670 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
4671 }
4672
4673 /*
4674 * Build a probe request frame. Most of the following code is a
4675 * copy & paste of what is done in net80211.
4676 */
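 /*
  * NB: the scan command leaves room for 20 ESSID entries, hence the
  * "essid + 20" below; the probe request template follows them.
  */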
4677 wh = (struct ieee80211_frame *)(essid + 20);
4678 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4679 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4680 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4681 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
4682 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
4683 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
4684 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4685 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4686
4687 frm = (uint8_t *)(wh + 1);
4688 frm = ieee80211_add_ssid(frm, NULL, 0);
4689 frm = ieee80211_add_rates(frm, rs);
4690 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4691 frm = ieee80211_add_xrates(frm, rs);
4692#if 0 /* HT */
4693 if (ic->ic_flags & IEEE80211_F_HTON)
4694 frm = ieee80211_add_htcaps(frm, ic);
4695#endif
4696
4697 /* Set length of probe request. */
4698 tx->len = htole16(frm - (uint8_t *)wh);
4699
4700 c = ic->ic_curchan;
4701 chan = (struct iwn_scan_chan *)frm;
4702 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
4703 chan->flags = 0;
4704 if (ss->ss_nssid > 0)
4705 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
4706 chan->dsp_gain = 0x6e;
4707 if (IEEE80211_IS_CHAN_5GHZ(c) &&
4708 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
4709 chan->rf_gain = 0x3b;
4710 chan->active = htole16(24);
4711 chan->passive = htole16(110);
4712 chan->flags |= htole32(IWN_CHAN_ACTIVE);
4713 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4714 chan->rf_gain = 0x3b;
4715 chan->active = htole16(24);
4716 if (sc->rxon.associd)
4717 chan->passive = htole16(78);
4718 else
4719 chan->passive = htole16(110);
4720 hdr->crc_threshold = 0xffff;
4721 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
4722 chan->rf_gain = 0x28;
4723 chan->active = htole16(36);
4724 chan->passive = htole16(120);
4725 chan->flags |= htole32(IWN_CHAN_ACTIVE);
4726 } else {
4727 chan->rf_gain = 0x28;
4728 chan->active = htole16(36);
4729 if (sc->rxon.associd)
4730 chan->passive = htole16(88);
4731 else
4732 chan->passive = htole16(120);
4733 hdr->crc_threshold = 0xffff;
4734 }
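 /*
  * NB: active scanning is requested only on channels that are not flagged
  * passive; on passive channels the dwell time is shortened while we are
  * associated (sc->rxon.associd != 0), presumably to limit the time spent
  * away from the serving channel.
  */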
4735
4736 DPRINTF(sc, IWN_DEBUG_STATE,
4737 "%s: chan %u flags 0x%x rf_gain 0x%x "
4738 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
4739 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
4740 chan->active, chan->passive);
4741
4742 hdr->nchan++;
4743 chan++;
4744 buflen = (uint8_t *)chan - buf;
4745 hdr->len = htole16(buflen);
4746
4747 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
4748 hdr->nchan);
4749 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
4750 free(buf, M_DEVBUF);
4751 return error;
4752}
4753
4754static int
4755iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
4756{
4757 const struct iwn_hal *hal = sc->sc_hal;
4758 struct ifnet *ifp = sc->sc_ifp;
4759 struct ieee80211com *ic = ifp->if_l2com;
4760 struct ieee80211_node *ni = vap->iv_bss;
4761 int error;
4762
3377}
3378
3379static int
3380iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
3381{
3382 struct iwn_softc *sc = ifp->if_softc;
3383 struct ieee80211com *ic = ifp->if_l2com;
3384 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3385 struct ifreq *ifr = (struct ifreq *) data;
3386 int error = 0, startall = 0, stop = 0;
3387
3388 switch (cmd) {
3389 case SIOCSIFFLAGS:
3390 IWN_LOCK(sc);
3391 if (ifp->if_flags & IFF_UP) {
3392 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
3393 iwn_init_locked(sc);
3394 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
3395 startall = 1;
3396 else
3397 stop = 1;
3398 }
3399 } else {
3400 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
3401 iwn_stop_locked(sc);
3402 }
3403 IWN_UNLOCK(sc);
3404 if (startall)
3405 ieee80211_start_all(ic);
3406 else if (vap != NULL && stop)
3407 ieee80211_stop(vap);
3408 break;
3409 case SIOCGIFMEDIA:
3410 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
3411 break;
3412 case SIOCGIFADDR:
3413 error = ether_ioctl(ifp, cmd, data);
3414 break;
3415 default:
3416 error = EINVAL;
3417 break;
3418 }
3419 return error;
3420}
3421
3422/*
3423 * Send a command to the firmware.
3424 */
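/*
 * Commands are queued on TX ring 4.  A payload that does not fit in the
 * ring's preallocated command buffer is copied into a page-sized cluster
 * mbuf and DMA-mapped separately.  Synchronous callers (async == 0)
 * msleep() on the descriptor until the command completion wakes them up
 * or the one-second timeout expires.
 */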
3425static int
3426iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3427{
3428 struct iwn_tx_ring *ring = &sc->txq[4];
3429 struct iwn_tx_desc *desc;
3430 struct iwn_tx_data *data;
3431 struct iwn_tx_cmd *cmd;
3432 struct mbuf *m;
3433 bus_addr_t paddr;
3434 int totlen, error;
3435
3436 IWN_LOCK_ASSERT(sc);
3437
3438 desc = &ring->desc[ring->cur];
3439 data = &ring->data[ring->cur];
3440 totlen = 4 + size;
3441
3442 if (size > sizeof cmd->data) {
3443 /* Command is too large to fit in a descriptor. */
3444 if (totlen > MCLBYTES)
3445 return EINVAL;
3446 m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
3447 if (m == NULL)
3448 return ENOMEM;
3449 cmd = mtod(m, struct iwn_tx_cmd *);
3450 error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
3451 totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
3452 if (error != 0) {
3453 m_freem(m);
3454 return error;
3455 }
3456 data->m = m;
3457 } else {
3458 cmd = &ring->cmd[ring->cur];
3459 paddr = data->cmd_paddr;
3460 }
3461
3462 cmd->code = code;
3463 cmd->flags = 0;
3464 cmd->qid = ring->qid;
3465 cmd->idx = ring->cur;
3466 memcpy(cmd->data, buf, size);
3467
3468 desc->nsegs = 1;
3469 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3470 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
3471
3472 DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
3473 __func__, iwn_intr_str(cmd->code), cmd->code,
3474 cmd->flags, cmd->qid, cmd->idx);
3475
3476 if (size > sizeof cmd->data) {
3477 bus_dmamap_sync(ring->data_dmat, data->map,
3478 BUS_DMASYNC_PREWRITE);
3479 } else {
3480 bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
3481 BUS_DMASYNC_PREWRITE);
3482 }
3483 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3484 BUS_DMASYNC_PREWRITE);
3485
3486#ifdef notyet
3487 /* Update TX scheduler. */
3488 sc->sc_hal->update_sched(sc, ring->qid, ring->cur, 0, 0);
3489#endif
3490
3491 /* Kick command ring. */
3492 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3493 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3494
3495 return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
3496}
3497
3498static int
3499iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3500{
3501 struct iwn4965_node_info hnode;
3502 caddr_t src, dst;
3503
3504 /*
3505 * We use the node structure for 5000 Series internally (it is
3506 * a superset of the one for 4965AGN). We thus copy the common
3507 * fields before sending the command.
3508 */
3509 src = (caddr_t)node;
3510 dst = (caddr_t)&hnode;
3511 memcpy(dst, src, 48);
3512 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3513 memcpy(dst + 48, src + 72, 20);
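 /*
  * I.e. bytes 0-47 of the 5000-style structure are copied verbatim,
  * bytes 48-71 (TSC plus RX/TX MIC) are dropped, and the remaining
  * 20 bytes follow.
  */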
3514 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3515}
3516
3517static int
3518iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3519{
3520 /* Direct mapping. */
3521 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3522}
3523
3524#if 0 /* HT */
3525static const uint8_t iwn_ridx_to_plcp[] = {
3526 10, 20, 55, 110, /* CCK */
3527 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */
3528};
3529static const uint8_t iwn_siso_mcs_to_plcp[] = {
3530 0, 0, 0, 0, /* CCK */
3531 0, 0, 1, 2, 3, 4, 5, 6, 7 /* HT */
3532};
3533static const uint8_t iwn_mimo_mcs_to_plcp[] = {
3534 0, 0, 0, 0, /* CCK */
3535 8, 8, 9, 10, 11, 12, 13, 14, 15 /* HT */
3536};
3537#endif
3538static const uint8_t iwn_prev_ridx[] = {
3539 /* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */
3540 0, 0, 1, 5, /* CCK */
3541 2, 4, 3, 6, 7, 8, 9, 10, 10 /* OFDM */
3542};
3543
3544/*
3545 * Configure hardware link parameters for the specified
3546 * node operating on the specified channel.
3547 */
3548static int
3549iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, int async)
3550{
3551 struct ifnet *ifp = sc->sc_ifp;
3552 struct ieee80211com *ic = ifp->if_l2com;
3553 struct iwn_cmd_link_quality linkq;
3554 const struct iwn_rate *rinfo;
3555 int i;
3556 uint8_t txant, ridx;
3557
3558 /* Use the first valid TX antenna. */
3559 txant = IWN_LSB(sc->txchainmask);
3560
3561 memset(&linkq, 0, sizeof linkq);
3562 linkq.id = id;
3563 linkq.antmsk_1stream = txant;
3564 linkq.antmsk_2stream = IWN_ANT_AB;
3565 linkq.ampdu_max = 31;
3566 linkq.ampdu_threshold = 3;
3567 linkq.ampdu_limit = htole16(4000); /* 4ms */
3568
3569#if 0 /* HT */
3570 if (IEEE80211_IS_CHAN_HT(c))
3571 linkq.mimo = 1;
3572#endif
3573
3574 if (id == IWN_ID_BSS)
3575 ridx = IWN_RIDX_OFDM54;
3576 else if (IEEE80211_IS_CHAN_A(ic->ic_curchan))
3577 ridx = IWN_RIDX_OFDM6;
3578 else
3579 ridx = IWN_RIDX_CCK1;
3580
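 /*
  * Fill retry[] as a fallback ladder: start at the initial rate and step
  * down through iwn_prev_ridx[], which also allows the CCK11->OFDM9 and
  * OFDM6->CCK5 crossovers noted above.  The firmware is expected to walk
  * this table as retries accumulate.
  */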
3581 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3582 rinfo = &iwn_rates[ridx];
3583#if 0 /* HT */
3584 if (IEEE80211_IS_CHAN_HT40(c)) {
3585 linkq.retry[i].plcp = iwn_mimo_mcs_to_plcp[ridx]
3586 | IWN_RIDX_MCS;
3587 linkq.retry[i].rflags = IWN_RFLAG_HT
3588 | IWN_RFLAG_HT40;
3589 /* XXX shortGI */
3590 } else if (IEEE80211_IS_CHAN_HT(c)) {
3591 linkq.retry[i].plcp = iwn_siso_mcs_to_plcp[ridx]
3592 | IWN_RIDX_MCS;
3593 linkq.retry[i].rflags = IWN_RFLAG_HT;
3594 /* XXX shortGI */
3595 } else
3596#endif
3597 {
3598 linkq.retry[i].plcp = rinfo->plcp;
3599 linkq.retry[i].rflags = rinfo->flags;
3600 }
3601 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3602 ridx = iwn_prev_ridx[ridx];
3603 }
3604#ifdef IWN_DEBUG
3605 if (sc->sc_debug & IWN_DEBUG_STATE) {
3606 printf("%s: set link quality for node %d, mimo %d ssmask %d\n",
3607 __func__, id, linkq.mimo, linkq.antmsk_1stream);
3608 printf("%s:", __func__);
3609 for (i = 0; i < IWN_MAX_TX_RETRIES; i++)
3610 printf(" %d:%x", linkq.retry[i].plcp,
3611 linkq.retry[i].rflags);
3612 printf("\n");
3613 }
3614#endif
3615 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3616}
3617
3618/*
3619 * Broadcast node is used to send group-addressed and management frames.
3620 */
3621static int
3622iwn_add_broadcast_node(struct iwn_softc *sc, int async)
3623{
3624 const struct iwn_hal *hal = sc->sc_hal;
3625 struct ifnet *ifp = sc->sc_ifp;
3626 struct iwn_node_info node;
3627 int error;
3628
3629 memset(&node, 0, sizeof node);
3630 IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
3631 node.id = hal->broadcast_id;
3632 DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
3633 error = hal->add_node(sc, &node, async);
3634 if (error != 0)
3635 return error;
3636
3637 error = iwn_set_link_quality(sc, hal->broadcast_id, async);
3638 return error;
3639}
3640
3641static int
3642iwn_wme_update(struct ieee80211com *ic)
3643{
3644#define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
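/* E.g. an ECW value of 4 yields a contention window of 2^4 - 1 = 15 slots. */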
3645 struct iwn_softc *sc = ic->ic_ifp->if_softc;
3646 struct iwn_edca_params cmd;
3647 int i;
3648
3649 memset(&cmd, 0, sizeof cmd);
3650 cmd.flags = htole32(IWN_EDCA_UPDATE);
3651 for (i = 0; i < WME_NUM_AC; i++) {
3652 const struct wmeParams *wmep =
3653 &ic->ic_wme.wme_chanParams.cap_wmeParams[i];
3654 cmd.ac[i].aifsn = wmep->wmep_aifsn;
3655 cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin));
3656 cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax));
3657 cmd.ac[i].txoplimit =
3658 htole16(IEEE80211_TXOP_TO_US(wmep->wmep_txopLimit));
3659 }
3660 IEEE80211_UNLOCK(ic);
3661 IWN_LOCK(sc);
3662 (void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/);
3663 IWN_UNLOCK(sc);
3664 IEEE80211_LOCK(ic);
3665 return 0;
3666#undef IWN_EXP2
3667}
3668
3669static void
3670iwn_update_mcast(struct ifnet *ifp)
3671{
3672 /* Ignore */
3673}
3674
3675static void
3676iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3677{
3678 struct iwn_cmd_led led;
3679
3680 /* Clear microcode LED ownership. */
3681 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3682
3683 led.which = which;
3684 led.unit = htole32(10000); /* on/off in unit of 100ms */
3685 led.off = off;
3686 led.on = on;
3687 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3688}
3689
3690/*
3691 * Set the critical temperature at which the firmware will stop the radio
3692 * and notify us.
3693 */
3694static int
3695iwn_set_critical_temp(struct iwn_softc *sc)
3696{
3697 struct iwn_critical_temp crit;
3698 int32_t temp;
3699
3700 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3701
3702 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3703 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3704 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3705 temp = IWN_CTOK(110);
3706 else
3707 temp = 110;
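 /*
  * The 110 degC threshold is encoded per chip family: the 4965 takes
  * Kelvin (IWN_CTOK), the 5150 takes raw sensor units (the inverse of the
  * conversion done in iwn5000_get_temperature()) and the others take degC
  * directly.
  */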
3708 memset(&crit, 0, sizeof crit);
3709 crit.tempR = htole32(temp);
3710 DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n",
3711 temp);
3712 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3713}
3714
3715static int
3716iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3717{
3718 struct iwn_cmd_timing cmd;
3719 uint64_t val, mod;
3720
3721 memset(&cmd, 0, sizeof cmd);
3722 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3723 cmd.bintval = htole16(ni->ni_intval);
3724 cmd.lintval = htole16(10);
3725
3726 /* Compute remaining time until next beacon. */
3727 val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
3728 mod = le64toh(cmd.tstamp) % val;
3729 cmd.binitval = htole32((uint32_t)(val - mod));
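 /*
  * E.g. with ni_intval = 100 TU (1 TU = 1024us), val = 102400us; if the
  * TSF timestamp falls 30000us into the current interval (mod), then
  * binitval = 72400us until the next expected beacon.
  */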
3730
3731 DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
3732 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
3733
3734 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3735}
3736
3737static void
3738iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3739{
3740 struct ifnet *ifp = sc->sc_ifp;
3741 struct ieee80211com *ic = ifp->if_l2com;
3742
3743 /* Adjust TX power if need be (delta >= 3 degC.) */
3744 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
3745 __func__, sc->temp, temp);
3746 if (abs(temp - sc->temp) >= 3) {
3747 /* Record temperature of last calibration. */
3748 sc->temp = temp;
3749 (void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
3750 }
3751}
3752
3753/*
3754 * Set TX power for current channel (each rate has its own power settings).
3755 * This function takes into account the regulatory information from EEPROM,
3756 * the current temperature and the current voltage.
3757 */
3758static int
3759iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3760 int async)
3761{
3762 /* Fixed-point arithmetic division using an n-bit fractional part. */
3763#define fdivround(a, b, n) \
3764 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3765/* Linear interpolation. */
3766#define interpolate(x, x1, y1, x2, y2, n) \
3767 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
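/*
 * E.g. interpolate(40, 36, 10, 44, 18, 1) yields
 * 10 + fdivround((40 - 36) * (18 - 10), 44 - 36, 1)
 *    = 10 + ((2 * 32) / 8 + 1) / 2 = 10 + 4 = 14,
 * i.e. the value halfway between the two calibration samples.
 */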
3768
3769 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3770 struct ifnet *ifp = sc->sc_ifp;
3771 struct ieee80211com *ic = ifp->if_l2com;
3772 struct iwn_ucode_info *uc = &sc->ucode_info;
3773 struct iwn4965_cmd_txpower cmd;
3774 struct iwn4965_eeprom_chan_samples *chans;
3775 int32_t vdiff, tdiff;
3776 int i, c, grp, maxpwr;
3777 const uint8_t *rf_gain, *dsp_gain;
3778 uint8_t chan;
3779
3780 /* Retrieve channel number. */
3781 chan = ieee80211_chan2ieee(ic, ch);
3782 DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
3783 chan);
3784
3785 memset(&cmd, 0, sizeof cmd);
3786 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
3787 cmd.chan = chan;
3788
3789 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
3790 maxpwr = sc->maxpwr5GHz;
3791 rf_gain = iwn4965_rf_gain_5ghz;
3792 dsp_gain = iwn4965_dsp_gain_5ghz;
3793 } else {
3794 maxpwr = sc->maxpwr2GHz;
3795 rf_gain = iwn4965_rf_gain_2ghz;
3796 dsp_gain = iwn4965_dsp_gain_2ghz;
3797 }
3798
3799 /* Compute voltage compensation. */
3800 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
3801 if (vdiff > 0)
3802 vdiff *= 2;
3803 if (abs(vdiff) > 2)
3804 vdiff = 0;
3805 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3806 "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
3807 __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
3808
3809 /* Get channel attenuation group. */
3810 if (chan <= 20) /* 1-20 */
3811 grp = 4;
3812 else if (chan <= 43) /* 34-43 */
3813 grp = 0;
3814 else if (chan <= 70) /* 44-70 */
3815 grp = 1;
3816 else if (chan <= 124) /* 71-124 */
3817 grp = 2;
3818 else /* 125-200 */
3819 grp = 3;
3820 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3821 "%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
3822
3823 /* Get channel sub-band. */
3824 for (i = 0; i < IWN_NBANDS; i++)
3825 if (sc->bands[i].lo != 0 &&
3826 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
3827 break;
3828 if (i == IWN_NBANDS) /* Cannot happen in real life. */
3829 return EINVAL;
3830 chans = sc->bands[i].chans;
3831 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3832 "%s: chan %d sub-band=%d\n", __func__, chan, i);
3833
3834 for (c = 0; c < 2; c++) {
3835 uint8_t power, gain, temp;
3836 int maxchpwr, pwr, ridx, idx;
3837
3838 power = interpolate(chan,
3839 chans[0].num, chans[0].samples[c][1].power,
3840 chans[1].num, chans[1].samples[c][1].power, 1);
3841 gain = interpolate(chan,
3842 chans[0].num, chans[0].samples[c][1].gain,
3843 chans[1].num, chans[1].samples[c][1].gain, 1);
3844 temp = interpolate(chan,
3845 chans[0].num, chans[0].samples[c][1].temp,
3846 chans[1].num, chans[1].samples[c][1].temp, 1);
3847 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3848 "%s: Tx chain %d: power=%d gain=%d temp=%d\n",
3849 __func__, c, power, gain, temp);
3850
3851 /* Compute temperature compensation. */
3852 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
3853 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3854 "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
3855 __func__, tdiff, sc->temp, temp);
3856
3857 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
3858 /* Convert dBm to half-dBm. */
3859 maxchpwr = sc->maxpwr[chan] * 2;
3860 if ((ridx / 8) & 1)
3861 maxchpwr -= 6; /* MIMO 2T: -3dB */
3862
3863 pwr = maxpwr;
3864
3865 /* Adjust TX power based on rate. */
3866 if ((ridx % 8) == 5)
3867 pwr -= 15; /* OFDM48: -7.5dB */
3868 else if ((ridx % 8) == 6)
3869 pwr -= 17; /* OFDM54: -8.5dB */
3870 else if ((ridx % 8) == 7)
3871 pwr -= 20; /* OFDM60: -10dB */
3872 else
3873 pwr -= 10; /* Others: -5dB */
3874
3875 /* Do not exceed channel max TX power. */
3876 if (pwr > maxchpwr)
3877 pwr = maxchpwr;
3878
3879 idx = gain - (pwr - power) - tdiff - vdiff;
3880 if ((ridx / 8) & 1) /* MIMO */
3881 idx += (int32_t)le32toh(uc->atten[grp][c]);
3882
3883 if (cmd.band == 0)
3884 idx += 9; /* 5GHz */
3885 if (ridx == IWN_RIDX_MAX)
3886 idx += 5; /* CCK */
3887
3888 /* Make sure idx stays in a valid range. */
3889 if (idx < 0)
3890 idx = 0;
3891 else if (idx > IWN4965_MAX_PWR_INDEX)
3892 idx = IWN4965_MAX_PWR_INDEX;
3893
3894 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3895 "%s: Tx chain %d, rate idx %d: power=%d\n",
3896 __func__, c, ridx, idx);
3897 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
3898 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
3899 }
3900 }
3901
3902 DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
3903 "%s: set tx power for chan %d\n", __func__, chan);
3904 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
3905
3906#undef interpolate
3907#undef fdivround
3908}
3909
3910static int
3911iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
3912 int async)
3913{
3914 struct iwn5000_cmd_txpower cmd;
3915
3916 /*
3917 * TX power calibration is handled automatically by the firmware
3918 * for 5000 Series.
3919 */
3920 memset(&cmd, 0, sizeof cmd);
3921 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
3922 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
3923 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
3924 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting TX power\n", __func__);
3925 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
3926}
3927
3928/*
3929 * Retrieve the maximum RSSI (in dBm) among receivers.
3930 */
3931static int
3932iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
3933{
3934 struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
3935 uint8_t mask, agc;
3936 int rssi;
3937
3938 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
3939 agc = (le16toh(phy->agc) >> 7) & 0x7f;
3940
3941 rssi = 0;
3942#if 0
3943 if (mask & IWN_ANT_A) /* Ant A */
3944 rssi = max(rssi, phy->rssi[0]);
3945 if (mask & IWN_ANT_B) /* Ant B */
3946 rssi = max(rssi, phy->rssi[2]);
3947 if (mask & IWN_ANT_C) /* Ant C */
3948 rssi = max(rssi, phy->rssi[4]);
3949#else
3950 rssi = max(rssi, phy->rssi[0]);
3951 rssi = max(rssi, phy->rssi[2]);
3952 rssi = max(rssi, phy->rssi[4]);
3953#endif
3954
3955 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d "
3956 "result %d\n", __func__, agc, mask,
3957 phy->rssi[0], phy->rssi[2], phy->rssi[4],
3958 rssi - agc - IWN_RSSI_TO_DBM);
3959 return rssi - agc - IWN_RSSI_TO_DBM;
3960}
3961
3962static int
3963iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
3964{
3965 struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
3966 int rssi;
3967 uint8_t agc;
3968
3969 agc = (le32toh(phy->agc) >> 9) & 0x7f;
3970
3971 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
3972 le16toh(phy->rssi[1]) & 0xff);
3973 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
3974
3975 DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d "
3976 "result %d\n", __func__, agc,
3977 phy->rssi[0], phy->rssi[1], phy->rssi[2],
3978 rssi - agc - IWN_RSSI_TO_DBM);
3979 return rssi - agc - IWN_RSSI_TO_DBM;
3980}
3981
3982/*
3983 * Retrieve the average noise (in dBm) among receivers.
3984 */
3985static int
3986iwn_get_noise(const struct iwn_rx_general_stats *stats)
3987{
3988 int i, total, nbant, noise;
3989
3990 total = nbant = 0;
3991 for (i = 0; i < 3; i++) {
3992 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
3993 continue;
3994 total += noise;
3995 nbant++;
3996 }
3997 /* There should be at least one antenna but check anyway. */
3998 return (nbant == 0) ? -127 : (total / nbant) - 107;
3999}
4000
4001/*
4002 * Compute temperature (in degC) from last received statistics.
4003 */
4004static int
4005iwn4965_get_temperature(struct iwn_softc *sc)
4006{
4007 struct iwn_ucode_info *uc = &sc->ucode_info;
4008 int32_t r1, r2, r3, r4, temp;
4009
4010 r1 = le32toh(uc->temp[0].chan20MHz);
4011 r2 = le32toh(uc->temp[1].chan20MHz);
4012 r3 = le32toh(uc->temp[2].chan20MHz);
4013 r4 = le32toh(sc->rawtemp);
4014
4015 if (r1 == r3) /* Prevents division by 0 (should not happen.) */
4016 return 0;
4017
4018 /* Sign-extend 23-bit R4 value to 32-bit. */
4019 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
4020 /* Compute temperature in Kelvin. */
4021 temp = (259 * (r4 - r2)) / (r3 - r1);
4022 temp = (temp * 97) / 100 + 8;
4023
4024 DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
4025 IWN_KTOC(temp));
4026 return IWN_KTOC(temp);
4027}
4028
4029static int
4030iwn5000_get_temperature(struct iwn_softc *sc)
4031{
4032 int32_t temp;
4033
4034 /*
4035 * Temperature is not used by the driver for 5000 Series because
4036 * TX power calibration is handled by firmware. We export it to
4037 * users through the sensor framework though.
4038 */
4039 temp = le32toh(sc->rawtemp);
4040 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
4041 temp = (temp / -5) + sc->temp_off;
4042 temp = IWN_KTOC(temp);
4043 }
4044 return temp;
4045}
4046
4047/*
4048 * Initialize sensitivity calibration state machine.
4049 */
4050static int
4051iwn_init_sensitivity(struct iwn_softc *sc)
4052{
4053 const struct iwn_hal *hal = sc->sc_hal;
4054 struct iwn_calib_state *calib = &sc->calib;
4055 uint32_t flags;
4056 int error;
4057
4058 /* Reset calibration state machine. */
4059 memset(calib, 0, sizeof (*calib));
4060 calib->state = IWN_CALIB_STATE_INIT;
4061 calib->cck_state = IWN_CCK_STATE_HIFA;
4062 /* Set initial correlation values. */
4063 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
4064 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
4065 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
4066 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
4067 calib->cck_x4 = 125;
4068 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
4069 calib->energy_cck = sc->limits->energy_cck;
4070
4071 /* Write initial sensitivity. */
4072 error = iwn_send_sensitivity(sc);
4073 if (error != 0)
4074 return error;
4075
4076 /* Write initial gains. */
4077 error = hal->init_gains(sc);
4078 if (error != 0)
4079 return error;
4080
4081 /* Request statistics at each beacon interval. */
4082 flags = 0;
4083 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__);
4084 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
4085}
4086
4087/*
4088 * Collect noise and RSSI statistics for the first 20 beacons received
4089 * after association and use them to determine connected antennas and
4090 * to set differential gains.
4091 */
4092static void
4093iwn_collect_noise(struct iwn_softc *sc,
4094 const struct iwn_rx_general_stats *stats)
4095{
4096 const struct iwn_hal *hal = sc->sc_hal;
4097 struct iwn_calib_state *calib = &sc->calib;
4098 uint32_t val;
4099 int i;
4100
4101 /* Accumulate RSSI and noise for all 3 antennas. */
4102 for (i = 0; i < 3; i++) {
4103 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
4104 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
4105 }
4106 /* NB: We update differential gains only once after 20 beacons. */
4107 if (++calib->nbeacons < 20)
4108 return;
4109
4110 /* Determine highest average RSSI. */
4111 val = MAX(calib->rssi[0], calib->rssi[1]);
4112 val = MAX(calib->rssi[2], val);
4113
4114 /* Determine which antennas are connected. */
4115 sc->chainmask = sc->rxchainmask;
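 /*
  * NB: calib->rssi[] holds sums over the 20 beacons collected above, so
  * the "15 * 20" test below flags an antenna whose RSSI is, on average,
  * more than 15 units per beacon below the best antenna.
  */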
4116 for (i = 0; i < 3; i++)
4117 if (val - calib->rssi[i] > 15 * 20)
4118 sc->chainmask &= ~(1 << i);
4119 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4120 "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
4121 __func__, sc->rxchainmask, sc->chainmask);
4122
4123 /* If none of the TX antennas are connected, keep at least one. */
4124 if ((sc->chainmask & sc->txchainmask) == 0)
4125 sc->chainmask |= IWN_LSB(sc->txchainmask);
4126
4127 (void)hal->set_gains(sc);
4128 calib->state = IWN_CALIB_STATE_RUN;
4129
4130#ifdef notyet
4131 /* XXX Disable RX chains with no antennas connected. */
4132 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
4133 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4134#endif
4135
4136#if 0
4137 /* XXX: not yet */
4138 /* Enable power-saving mode if requested by user. */
4139 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
4140 (void)iwn_set_pslevel(sc, 0, 3, 1);
4141#endif
4142}
4143
4144static int
4145iwn4965_init_gains(struct iwn_softc *sc)
4146{
4147 struct iwn_phy_calib_gain cmd;
4148
4149 memset(&cmd, 0, sizeof cmd);
4150 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4151 /* Differential gains initially set to 0 for all 3 antennas. */
4152 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4153 "%s: setting initial differential gains\n", __func__);
4154 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4155}
4156
4157static int
4158iwn5000_init_gains(struct iwn_softc *sc)
4159{
4160 struct iwn_phy_calib cmd;
4161
4162 memset(&cmd, 0, sizeof cmd);
4163 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
4164 cmd.ngroups = 1;
4165 cmd.isvalid = 1;
4166 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4167 "%s: setting initial differential gains\n", __func__);
4168 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4169}
4170
4171static int
4172iwn4965_set_gains(struct iwn_softc *sc)
4173{
4174 struct iwn_calib_state *calib = &sc->calib;
4175 struct iwn_phy_calib_gain cmd;
4176 int i, delta, noise;
4177
4178 /* Get minimal noise among connected antennas. */
4179 noise = INT_MAX; /* NB: There's at least one antenna. */
4180 for (i = 0; i < 3; i++)
4181 if (sc->chainmask & (1 << i))
4182 noise = MIN(calib->noise[i], noise);
4183
4184 memset(&cmd, 0, sizeof cmd);
4185 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
4186 /* Set differential gains for connected antennas. */
4187 for (i = 0; i < 3; i++) {
4188 if (sc->chainmask & (1 << i)) {
4189 /* Compute attenuation (in units of 1.5dB). */
4190 delta = (noise - (int32_t)calib->noise[i]) / 30;
4191 /* NB: delta <= 0 */
4192 /* Limit to [-4.5dB,0]. */
4193 cmd.gain[i] = MIN(abs(delta), 3);
4194 if (delta < 0)
4195 cmd.gain[i] |= 1 << 2; /* sign bit */
4196 }
4197 }
4198 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4199 "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
4200 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
4201 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4202}
4203
4204static int
4205iwn5000_set_gains(struct iwn_softc *sc)
4206{
4207 struct iwn_calib_state *calib = &sc->calib;
4208 struct iwn_phy_calib_gain cmd;
4209 int i, ant, delta, div;
4210
4211 /* We collected 20 beacon samples; chips other than the 6050 need an extra 1.5 factor. */
4212 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
4213
4214 memset(&cmd, 0, sizeof cmd);
4215 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN;
4216 cmd.ngroups = 1;
4217 cmd.isvalid = 1;
4218 /* Get the first available RX antenna as the reference. */
4219 ant = IWN_LSB(sc->rxchainmask);
4220 /* Set differential gains for other antennas. */
4221 for (i = ant + 1; i < 3; i++) {
4222 if (sc->chainmask & (1 << i)) {
4223 /* The delta is relative to antenna "ant". */
4224 delta = ((int32_t)calib->noise[ant] -
4225 (int32_t)calib->noise[i]) / div;
4226 /* Limit to [-4.5dB,+4.5dB]. */
4227 cmd.gain[i - 1] = MIN(abs(delta), 3);
4228 if (delta < 0)
4229 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
4230 }
4231 }
4232 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4233 "setting differential gains Ant B/C: %x/%x (%x)\n",
4234 cmd.gain[0], cmd.gain[1], sc->chainmask);
4235 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
4236}
4237
4238/*
4239 * Tune RF RX sensitivity based on the number of false alarms detected
4240 * during the last beacon period.
4241 */
4242static void
4243iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
4244{
4245#define inc(val, inc, max) \
4246 if ((val) < (max)) { \
4247 if ((val) < (max) - (inc)) \
4248 (val) += (inc); \
4249 else \
4250 (val) = (max); \
4251 needs_update = 1; \
4252 }
4253#define dec(val, dec, min) \
4254 if ((val) > (min)) { \
4255 if ((val) > (min) + (dec)) \
4256 (val) -= (dec); \
4257 else \
4258 (val) = (min); \
4259 needs_update = 1; \
4260 }
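 /*
  * The inc()/dec() helpers above nudge a threshold towards the given
  * bound, saturate at that bound and set needs_update so that the new
  * values are pushed to the firmware via iwn_send_sensitivity() at the
  * end of this function.
  */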
4261
4262 const struct iwn_sensitivity_limits *limits = sc->limits;
4263 struct iwn_calib_state *calib = &sc->calib;
4264 uint32_t val, rxena, fa;
4265 uint32_t energy[3], energy_min;
4266 uint8_t noise[3], noise_ref;
4267 int i, needs_update = 0;
4268
4269 /* Check that we've been enabled long enough. */
4270 rxena = le32toh(stats->general.load);
4271 if (rxena == 0)
4272 return;
4273
4274 /* Compute number of false alarms since last call for OFDM. */
4275 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
4276 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
4277 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
4278
4279 /* Save counters values for next call. */
4280 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
4281 calib->fa_ofdm = le32toh(stats->ofdm.fa);
4282
4283 if (fa > 50 * rxena) {
4284 /* High false alarm count, decrease sensitivity. */
4285 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4286 "%s: OFDM high false alarm count: %u\n", __func__, fa);
4287 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
4288 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
4289 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
4290 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
4291
4292 } else if (fa < 5 * rxena) {
4293 /* Low false alarm count, increase sensitivity. */
4294 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4295 "%s: OFDM low false alarm count: %u\n", __func__, fa);
4296 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
4297 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
4298 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
4299 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
4300 }
4301
4302 /* Compute maximum noise among 3 receivers. */
4303 for (i = 0; i < 3; i++)
4304 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
4305 val = MAX(noise[0], noise[1]);
4306 val = MAX(noise[2], val);
4307 /* Insert it into our samples table. */
4308 calib->noise_samples[calib->cur_noise_sample] = val;
4309 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
4310
4311 /* Compute maximum noise among last 20 samples. */
4312 noise_ref = calib->noise_samples[0];
4313 for (i = 1; i < 20; i++)
4314 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
4315
4316 /* Compute maximum energy among 3 receivers. */
4317 for (i = 0; i < 3; i++)
4318 energy[i] = le32toh(stats->general.energy[i]);
4319 val = MIN(energy[0], energy[1]);
4320 val = MIN(energy[2], val);
4321 /* Insert it into our samples table. */
4322 calib->energy_samples[calib->cur_energy_sample] = val;
4323 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
4324
4325 /* Compute minimum energy among last 10 samples. */
4326 energy_min = calib->energy_samples[0];
4327 for (i = 1; i < 10; i++)
4328 energy_min = MAX(energy_min, calib->energy_samples[i]);
4329 energy_min += 6;
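	/*
	 * NB: the energy values reported by the firmware appear to use
	 * an inverted scale (smaller value = stronger signal), which is
	 * why MIN/MAX above look swapped with respect to the
	 * "maximum"/"minimum" comments.
	 */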
4330
4331 /* Compute number of false alarms since last call for CCK. */
4332 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
4333 fa += le32toh(stats->cck.fa) - calib->fa_cck;
4334 fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
4335
4336 /* Save counters values for next call. */
4337 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
4338 calib->fa_cck = le32toh(stats->cck.fa);
4339
4340 if (fa > 50 * rxena) {
4341 /* High false alarm count, decrease sensitivity. */
4342 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4343 "%s: CCK high false alarm count: %u\n", __func__, fa);
4344 calib->cck_state = IWN_CCK_STATE_HIFA;
4345 calib->low_fa = 0;
4346
4347 if (calib->cck_x4 > 160) {
4348 calib->noise_ref = noise_ref;
4349 if (calib->energy_cck > 2)
4350 dec(calib->energy_cck, 2, energy_min);
4351 }
4352 if (calib->cck_x4 < 160) {
4353 calib->cck_x4 = 161;
4354 needs_update = 1;
4355 } else
4356 inc(calib->cck_x4, 3, limits->max_cck_x4);
4357
4358 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4359
4360 } else if (fa < 5 * rxena) {
4361 /* Low false alarm count, increase sensitivity. */
4362 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4363 "%s: CCK low false alarm count: %u\n", __func__, fa);
4364 calib->cck_state = IWN_CCK_STATE_LOFA;
4365 calib->low_fa++;
4366
4367 if (calib->cck_state != IWN_CCK_STATE_INIT &&
4368 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4369 calib->low_fa > 100)) {
4370 inc(calib->energy_cck, 2, limits->min_energy_cck);
4371 dec(calib->cck_x4, 3, limits->min_cck_x4);
4372 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4373 }
4374 } else {
4375		/* Not worth increasing or decreasing sensitivity. */
4376 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4377 "%s: CCK normal false alarm count: %u\n", __func__, fa);
4378 calib->low_fa = 0;
4379 calib->noise_ref = noise_ref;
4380
4381 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4382 /* Previous interval had many false alarms. */
4383 dec(calib->energy_cck, 8, energy_min);
4384 }
4385 calib->cck_state = IWN_CCK_STATE_INIT;
4386 }
4387
4388 if (needs_update)
4389 (void)iwn_send_sensitivity(sc);
4390#undef dec
4391#undef inc
4392}
4393
4394static int
4395iwn_send_sensitivity(struct iwn_softc *sc)
4396{
4397 struct iwn_calib_state *calib = &sc->calib;
4398 struct iwn_sensitivity_cmd cmd;
4399
4400 memset(&cmd, 0, sizeof cmd);
4401 cmd.which = IWN_SENSITIVITY_WORKTBL;
4402 /* OFDM modulation. */
4403 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
4404 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
4405 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
4406 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
4407 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
4408 cmd.energy_ofdm_th = htole16(62);
4409 /* CCK modulation. */
4410 cmd.corr_cck_x4 = htole16(calib->cck_x4);
4411 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
4412 cmd.energy_cck = htole16(calib->energy_cck);
4413 /* Barker modulation: use default values. */
4414 cmd.corr_barker = htole16(190);
4415 cmd.corr_barker_mrc = htole16(390);
4416
4417 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
4418 "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
4419 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4420 calib->ofdm_mrc_x4, calib->cck_x4,
4421 calib->cck_mrc_x4, calib->energy_cck);
4422 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1);
4423}
4424
4425/*
4426 * Set STA mode power saving level (between 0 and 5).
4427 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4428 */
4429static int
4430iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4431{
4432 const struct iwn_pmgt *pmgt;
4433 struct iwn_pmgt_cmd cmd;
4434 uint32_t max, skip_dtim;
4435 uint32_t tmp;
4436 int i;
4437
4438 /* Select which PS parameters to use. */
4439 if (dtim <= 2)
4440 pmgt = &iwn_pmgt[0][level];
4441 else if (dtim <= 10)
4442 pmgt = &iwn_pmgt[1][level];
4443 else
4444 pmgt = &iwn_pmgt[2][level];
4445
4446 memset(&cmd, 0, sizeof cmd);
4447 if (level != 0) /* not CAM */
4448 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4449 if (level == 5)
4450 cmd.flags |= htole16(IWN_PS_FAST_PD);
4451 /* Retrieve PCIe Active State Power Management (ASPM). */
4452 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
4453 if (!(tmp & 0x1)) /* L0s Entry disabled. */
4454 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4455 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4456 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4457
4458 if (dtim == 0) {
4459 dtim = 1;
4460 skip_dtim = 0;
4461 } else
4462 skip_dtim = pmgt->skip_dtim;
4463 if (skip_dtim != 0) {
4464 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4465 max = pmgt->intval[4];
4466 if (max == (uint32_t)-1)
4467 max = dtim * (skip_dtim + 1);
4468 else if (max > dtim)
4469 max = (max / dtim) * dtim;
4470 } else
4471 max = dtim;
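	/*
	 * Clamp each of the five sleep intervals at "max": the DTIM
	 * period itself or, when sleeping over DTIM periods is allowed,
	 * a multiple of it.
	 */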
4472 for (i = 0; i < 5; i++)
4473 cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
4474
4475 DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
4476 level);
4477 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4478}
4479
4480static int
4481iwn_send_btcoex(struct iwn_softc *sc)
4482{
4483 struct iwn_bluetooth cmd;
4484
4485 memset(&cmd, 0, sizeof cmd);
4486 cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
4487 cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
4488 cmd.max_kill = IWN_BT_MAX_KILL_DEF;
4489 DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
4490 __func__);
4491 return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
4492}
4493
4494static int
4495iwn_config(struct iwn_softc *sc)
4496{
4497 const struct iwn_hal *hal = sc->sc_hal;
4498 struct ifnet *ifp = sc->sc_ifp;
4499 struct ieee80211com *ic = ifp->if_l2com;
4500 uint32_t txmask;
4501 int error;
4502 uint16_t rxchain;
4503
4504 /* Configure valid TX chains for 5000 Series. */
4505 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4506 txmask = htole32(sc->txchainmask);
4507 DPRINTF(sc, IWN_DEBUG_RESET,
4508 "%s: configuring valid TX chains 0x%x\n", __func__, txmask);
4509 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4510 sizeof txmask, 0);
4511 if (error != 0) {
4512 device_printf(sc->sc_dev,
4513 "%s: could not configure valid TX chains, "
4514 "error %d\n", __func__, error);
4515 return error;
4516 }
4517 }
4518
4519 /* Configure bluetooth coexistence. */
4520 error = iwn_send_btcoex(sc);
4521 if (error != 0) {
4522 device_printf(sc->sc_dev,
4523 "%s: could not configure bluetooth coexistence, error %d\n",
4524 __func__, error);
4525 return error;
4526 }
4527
4528 /* Set mode, channel, RX filter and enable RX. */
4529 memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4530 IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp));
4531 IEEE80211_ADDR_COPY(sc->rxon.wlap, IF_LLADDR(ifp));
4532 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
4533 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4534 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
4535 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4536 switch (ic->ic_opmode) {
4537 case IEEE80211_M_STA:
4538 sc->rxon.mode = IWN_MODE_STA;
4539 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4540 break;
4541 case IEEE80211_M_MONITOR:
4542 sc->rxon.mode = IWN_MODE_MONITOR;
4543 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4544 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4545 break;
4546 default:
4547		/* Should not get here. */
4548 break;
4549 }
4550 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
4551 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
4552 sc->rxon.ht_single_mask = 0xff;
4553 sc->rxon.ht_dual_mask = 0xff;
4554 sc->rxon.ht_triple_mask = 0xff;
4555 rxchain =
4556 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4557 IWN_RXCHAIN_MIMO_COUNT(2) |
4558 IWN_RXCHAIN_IDLE_COUNT(2);
4559 sc->rxon.rxchain = htole16(rxchain);
4560 DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration\n", __func__);
4561 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 0);
4562 if (error != 0) {
4563 device_printf(sc->sc_dev,
4564 "%s: RXON command failed\n", __func__);
4565 return error;
4566 }
4567
4568 error = iwn_add_broadcast_node(sc, 0);
4569 if (error != 0) {
4570 device_printf(sc->sc_dev,
4571 "%s: could not add broadcast node\n", __func__);
4572 return error;
4573 }
4574
4575 /* Configuration has changed, set TX power accordingly. */
4576 error = hal->set_txpower(sc, ic->ic_curchan, 0);
4577 if (error != 0) {
4578 device_printf(sc->sc_dev,
4579 "%s: could not set TX power\n", __func__);
4580 return error;
4581 }
4582
4583 error = iwn_set_critical_temp(sc);
4584 if (error != 0) {
4585 device_printf(sc->sc_dev,
4586		    "%s: could not set critical temperature\n", __func__);
4587 return error;
4588 }
4589
4590 /* Set power saving level to CAM during initialization. */
4591 error = iwn_set_pslevel(sc, 0, 0, 0);
4592 if (error != 0) {
4593 device_printf(sc->sc_dev,
4594 "%s: could not set power saving level\n", __func__);
4595 return error;
4596 }
4597 return 0;
4598}
4599
4600/*
4601 * Add an ssid element to a frame.
4602 */
4603static uint8_t *
4604ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
4605{
4606 *frm++ = IEEE80211_ELEMID_SSID;
4607 *frm++ = len;
4608 memcpy(frm, ssid, len);
4609 return frm + len;
4610}
4611
4612static int
4613iwn_scan(struct iwn_softc *sc)
4614{
4615 struct ifnet *ifp = sc->sc_ifp;
4616 struct ieee80211com *ic = ifp->if_l2com;
4617 struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/
4618 struct iwn_scan_hdr *hdr;
4619 struct iwn_cmd_data *tx;
4620 struct iwn_scan_essid *essid;
4621 struct iwn_scan_chan *chan;
4622 struct ieee80211_frame *wh;
4623 struct ieee80211_rateset *rs;
4624 struct ieee80211_channel *c;
4625 int buflen, error;
4626 uint16_t rxchain;
4627 uint8_t *buf, *frm, txant;
4628
4629 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4630 if (buf == NULL) {
4631 device_printf(sc->sc_dev,
4632 "%s: could not allocate buffer for scan command\n",
4633 __func__);
4634 return ENOMEM;
4635 }
4636 hdr = (struct iwn_scan_hdr *)buf;
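	/*
	 * The scan command is laid out in "buf" as a sequence of
	 * variable-size structures: the scan header, a TX command for
	 * the probe request, 20 ESSID slots, the probe request frame
	 * itself and one iwn_scan_chan entry per channel (a single one
	 * here, the current channel).
	 */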
4637
4638 /*
4639 * Move to the next channel if no frames are received within 10ms
4640 * after sending the probe request.
4641 */
4642 hdr->quiet_time = htole16(10); /* timeout in milliseconds */
4643 hdr->quiet_threshold = htole16(1); /* min # of packets */
4644
4645 /* Select antennas for scanning. */
4646 rxchain =
4647 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4648 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
4649 IWN_RXCHAIN_DRIVER_FORCE;
4650 if (IEEE80211_IS_CHAN_A(ic->ic_curchan) &&
4651 sc->hw_type == IWN_HW_REV_TYPE_4965) {
4652 /* Ant A must be avoided in 5GHz because of an HW bug. */
4653 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
4654 } else /* Use all available RX antennas. */
4655 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4656 hdr->rxchain = htole16(rxchain);
4657 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
4658
4659 tx = (struct iwn_cmd_data *)(hdr + 1);
4660 tx->flags = htole32(IWN_TX_AUTO_SEQ);
4661 tx->id = sc->sc_hal->broadcast_id;
4662 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4663
4664 if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) {
4665 /* Send probe requests at 6Mbps. */
4666 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4667 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4668 } else {
4669 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
4670 /* Send probe requests at 1Mbps. */
4671 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
4672 tx->rflags = IWN_RFLAG_CCK;
4673 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4674 }
4675 /* Use the first valid TX antenna. */
4676 txant = IWN_LSB(sc->txchainmask);
4677 tx->rflags |= IWN_RFLAG_ANT(txant);
4678
4679 essid = (struct iwn_scan_essid *)(tx + 1);
4680 if (ss->ss_ssid[0].len != 0) {
4681 essid[0].id = IEEE80211_ELEMID_SSID;
4682 essid[0].len = ss->ss_ssid[0].len;
4683 memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
4684 }
4685
4686 /*
4687 * Build a probe request frame. Most of the following code is a
4688 * copy & paste of what is done in net80211.
4689 */
4690 wh = (struct ieee80211_frame *)(essid + 20);
4691 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4692 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4693 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4694 IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
4695 IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
4696 IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
4697 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4698 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4699
4700 frm = (uint8_t *)(wh + 1);
4701 frm = ieee80211_add_ssid(frm, NULL, 0);
4702 frm = ieee80211_add_rates(frm, rs);
4703 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4704 frm = ieee80211_add_xrates(frm, rs);
4705#if 0 /* HT */
4706 if (ic->ic_flags & IEEE80211_F_HTON)
4707 frm = ieee80211_add_htcaps(frm, ic);
4708#endif
4709
4710 /* Set length of probe request. */
4711 tx->len = htole16(frm - (uint8_t *)wh);
4712
4713 c = ic->ic_curchan;
4714 chan = (struct iwn_scan_chan *)frm;
4715 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
4716 chan->flags = 0;
4717 if (ss->ss_nssid > 0)
4718 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
4719 chan->dsp_gain = 0x6e;
4720 if (IEEE80211_IS_CHAN_5GHZ(c) &&
4721 !(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
4722 chan->rf_gain = 0x3b;
4723 chan->active = htole16(24);
4724 chan->passive = htole16(110);
4725 chan->flags |= htole32(IWN_CHAN_ACTIVE);
4726 } else if (IEEE80211_IS_CHAN_5GHZ(c)) {
4727 chan->rf_gain = 0x3b;
4728 chan->active = htole16(24);
4729 if (sc->rxon.associd)
4730 chan->passive = htole16(78);
4731 else
4732 chan->passive = htole16(110);
4733 hdr->crc_threshold = 0xffff;
4734 } else if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) {
4735 chan->rf_gain = 0x28;
4736 chan->active = htole16(36);
4737 chan->passive = htole16(120);
4738 chan->flags |= htole32(IWN_CHAN_ACTIVE);
4739 } else {
4740 chan->rf_gain = 0x28;
4741 chan->active = htole16(36);
4742 if (sc->rxon.associd)
4743 chan->passive = htole16(88);
4744 else
4745 chan->passive = htole16(120);
4746 hdr->crc_threshold = 0xffff;
4747 }
4748
4749 DPRINTF(sc, IWN_DEBUG_STATE,
4750 "%s: chan %u flags 0x%x rf_gain 0x%x "
4751 "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__,
4752 chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
4753 chan->active, chan->passive);
4754
4755 hdr->nchan++;
4756 chan++;
4757 buflen = (uint8_t *)chan - buf;
4758 hdr->len = htole16(buflen);
4759
4760 DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
4761 hdr->nchan);
4762 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
4763 free(buf, M_DEVBUF);
4764 return error;
4765}
4766
4767static int
4768iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
4769{
4770 const struct iwn_hal *hal = sc->sc_hal;
4771 struct ifnet *ifp = sc->sc_ifp;
4772 struct ieee80211com *ic = ifp->if_l2com;
4773 struct ieee80211_node *ni = vap->iv_bss;
4774 int error;
4775
4776 /* Update adapter configuration. */
4777 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4778 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
4779 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4780 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
4781 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4782 if (ic->ic_flags & IEEE80211_F_SHSLOT)
4783 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4784 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4785 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4786 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
4787 sc->rxon.cck_mask = 0;
4788 sc->rxon.ofdm_mask = 0x15;
4789 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
4790 sc->rxon.cck_mask = 0x03;
4791 sc->rxon.ofdm_mask = 0;
4792 } else {
4793 /* XXX assume 802.11b/g */
4794 sc->rxon.cck_mask = 0x0f;
4795 sc->rxon.ofdm_mask = 0x15;
4796 }
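	/*
	 * NB: the rate masks above presumably follow the firmware's
	 * rate table order, i.e. cck_mask 0x0f would select 1/2/5.5/11
	 * Mbps and ofdm_mask 0x15 the mandatory 6/12/24 Mbps rates;
	 * this is an assumption, not something spelled out here.
	 */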
4797 DPRINTF(sc, IWN_DEBUG_STATE,
4798 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x "
4799 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x "
4800 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n",
4801 __func__,
4802 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags),
4803 sc->rxon.cck_mask, sc->rxon.ofdm_mask,
4804 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask,
4805 le16toh(sc->rxon.rxchain),
4806 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":",
4807 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter));
4808 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4809 if (error != 0) {
4810 device_printf(sc->sc_dev,
4811 "%s: RXON command failed, error %d\n", __func__, error);
4812 return error;
4813 }
4814
4815 /* Configuration has changed, set TX power accordingly. */
4816 error = hal->set_txpower(sc, ni->ni_chan, 1);
4817 if (error != 0) {
4818 device_printf(sc->sc_dev,
4819 "%s: could not set Tx power, error %d\n", __func__, error);
4820 return error;
4821 }
4822 /*
4823 * Reconfiguring RXON clears the firmware nodes table so we must
4824 * add the broadcast node again.
4825 */
4826 error = iwn_add_broadcast_node(sc, 1);
4827 if (error != 0) {
4828 device_printf(sc->sc_dev,
4829 "%s: could not add broadcast node, error %d\n",
4830 __func__, error);
4831 return error;
4832 }
4833 return 0;
4834}
4835
4836/*
4837 * Configure the adapter for associated state.
4838 */
4839static int
4840iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
4841{
4842#define MS(v,x) (((v) & x) >> x##_S)
4843 const struct iwn_hal *hal = sc->sc_hal;
4844 struct ifnet *ifp = sc->sc_ifp;
4845 struct ieee80211com *ic = ifp->if_l2com;
4846 struct ieee80211_node *ni = vap->iv_bss;
4847 struct iwn_node_info node;
4848 int error;
4849
4850 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4851 /* Link LED blinks while monitoring. */
4852 iwn_set_led(sc, IWN_LED_LINK, 20, 20);
4853 return 0;
4854 }
4855 error = iwn_set_timing(sc, ni);
4856 if (error != 0) {
4857 device_printf(sc->sc_dev,
4858 "%s: could not set timing, error %d\n", __func__, error);
4859 return error;
4860 }
4861
4862 /* Update adapter configuration. */
4863 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4864 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
4865 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
4866 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4867 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
4868 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4869 if (ic->ic_flags & IEEE80211_F_SHSLOT)
4870 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4871 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4872 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4873 if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
4874 sc->rxon.cck_mask = 0;
4875 sc->rxon.ofdm_mask = 0x15;
4876 } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
4877 sc->rxon.cck_mask = 0x03;
4878 sc->rxon.ofdm_mask = 0;
4879 } else {
4880 /* XXX assume 802.11b/g */
4881 sc->rxon.cck_mask = 0x0f;
4882 sc->rxon.ofdm_mask = 0x15;
4883 }
4884#if 0 /* HT */
4885 if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
4886 sc->rxon.flags &= ~htole32(IWN_RXON_HT);
4887 if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan))
4888 sc->rxon.flags |= htole32(IWN_RXON_HT40U);
4889 else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan))
4890 sc->rxon.flags |= htole32(IWN_RXON_HT40D);
4891 else
4892 sc->rxon.flags |= htole32(IWN_RXON_HT20);
4893 sc->rxon.rxchain = htole16(
4894 IWN_RXCHAIN_VALID(3)
4895 | IWN_RXCHAIN_MIMO_COUNT(3)
4896 | IWN_RXCHAIN_IDLE_COUNT(1)
4897 | IWN_RXCHAIN_MIMO_FORCE);
4898
4899 maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU);
4900 ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY);
4901 } else
4902 maxrxampdu = ampdudensity = 0;
4903#endif
4904 sc->rxon.filter |= htole32(IWN_FILTER_BSS);
4905
4906 DPRINTF(sc, IWN_DEBUG_STATE,
4907 "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x "
4908 "ht_single 0x%x ht_dual 0x%x rxchain 0x%x "
4909 "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n",
4910 __func__,
4911 le16toh(sc->rxon.chan), sc->rxon.mode, le32toh(sc->rxon.flags),
4912 sc->rxon.cck_mask, sc->rxon.ofdm_mask,
4913 sc->rxon.ht_single_mask, sc->rxon.ht_dual_mask,
4914 le16toh(sc->rxon.rxchain),
4915 sc->rxon.myaddr, ":", sc->rxon.wlap, ":", sc->rxon.bssid, ":",
4916 le16toh(sc->rxon.associd), le32toh(sc->rxon.filter));
4917 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, hal->rxonsz, 1);
4918 if (error != 0) {
4919 device_printf(sc->sc_dev,
4920 "%s: could not update configuration, error %d\n",
4921 __func__, error);
4922 return error;
4923 }
4924
4925 /* Configuration has changed, set TX power accordingly. */
4926 error = hal->set_txpower(sc, ni->ni_chan, 1);
4927 if (error != 0) {
4928 device_printf(sc->sc_dev,
4929 "%s: could not set Tx power, error %d\n", __func__, error);
4930 return error;
4931 }
4932
4933 /* Add BSS node. */
4934 memset(&node, 0, sizeof node);
4935 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
4936 node.id = IWN_ID_BSS;
4937#ifdef notyet
4938 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
4939 IWN_AMDPU_DENSITY(5)); /* 2us */
4940#endif
4941 DPRINTF(sc, IWN_DEBUG_STATE, "%s: add BSS node, id %d htflags 0x%x\n",
4942 __func__, node.id, le32toh(node.htflags));
4943 error = hal->add_node(sc, &node, 1);
4944 if (error != 0) {
4945 device_printf(sc->sc_dev, "could not add BSS node\n");
4946 return error;
4947 }
4948 DPRINTF(sc, IWN_DEBUG_STATE, "setting link quality for node %d\n",
4949 node.id);
4950 error = iwn_set_link_quality(sc, node.id, 1);
4951 if (error != 0) {
4952 device_printf(sc->sc_dev,
4953 "%s: could not setup MRR for node %d, error %d\n",
4954 __func__, node.id, error);
4955 return error;
4956 }
4957
4958 error = iwn_init_sensitivity(sc);
4959 if (error != 0) {
4960 device_printf(sc->sc_dev,
4961 "%s: could not set sensitivity, error %d\n",
4962 __func__, error);
4963 return error;
4964 }
4965
4966 /* Start periodic calibration timer. */
4967 sc->calib.state = IWN_CALIB_STATE_ASSOC;
4968 sc->calib_cnt = 0;
4969 callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
4970 sc);
4958
4959 /* Link LED always on while associated. */
4960 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
4961
4962 return 0;
4963#undef MS
4964}
4965
4966#if 0 /* HT */
4967/*
4968 * This function is called by upper layer when an ADDBA request is received
4969 * from another STA and before the ADDBA response is sent.
4970 */
4971static int
4972iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
4973 uint8_t tid)
4974{
4975 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
4976 struct iwn_softc *sc = ic->ic_softc;
4977 struct iwn_node *wn = (void *)ni;
4978 struct iwn_node_info node;
4979
4980 memset(&node, 0, sizeof node);
4981 node.id = wn->id;
4982 node.control = IWN_NODE_UPDATE;
4983 node.flags = IWN_FLAG_SET_ADDBA;
4984 node.addba_tid = tid;
4985 node.addba_ssn = htole16(ba->ba_winstart);
4986 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
4987	    wn->id, tid, ba->ba_winstart);
4988 return sc->sc_hal->add_node(sc, &node, 1);
4989}
4990
4991/*
4992 * This function is called by upper layer on teardown of an HT-immediate
4993 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
4994 */
4995static void
4996iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
4997 uint8_t tid)
4998{
4999 struct iwn_softc *sc = ic->ic_softc;
5000 struct iwn_node *wn = (void *)ni;
5001 struct iwn_node_info node;
5002
5003 memset(&node, 0, sizeof node);
5004 node.id = wn->id;
5005 node.control = IWN_NODE_UPDATE;
5006 node.flags = IWN_FLAG_SET_DELBA;
5007 node.delba_tid = tid;
5008 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5009 (void)sc->sc_hal->add_node(sc, &node, 1);
5010}
5011
5012/*
5013 * This function is called by upper layer when an ADDBA response is received
5014 * from another STA.
5015 */
5016static int
5017iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5018 uint8_t tid)
5019{
5020 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5021 struct iwn_softc *sc = ic->ic_softc;
5022 const struct iwn_hal *hal = sc->sc_hal;
5023 struct iwn_node *wn = (void *)ni;
5024 struct iwn_node_info node;
5025 int error;
5026
5027 /* Enable TX for the specified RA/TID. */
5028 wn->disable_tid &= ~(1 << tid);
5029 memset(&node, 0, sizeof node);
5030 node.id = wn->id;
5031 node.control = IWN_NODE_UPDATE;
5032 node.flags = IWN_FLAG_SET_DISABLE_TID;
5033 node.disable_tid = htole16(wn->disable_tid);
5034 error = hal->add_node(sc, &node, 1);
5035 if (error != 0)
5036 return error;
5037
5038 if ((error = iwn_nic_lock(sc)) != 0)
5039 return error;
5040 hal->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
5041 iwn_nic_unlock(sc);
5042 return 0;
5043}
5044
5045static void
5046iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5047 uint8_t tid)
5048{
5049 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5050 struct iwn_softc *sc = ic->ic_softc;
5051 int error;
5052
5053 error = iwn_nic_lock(sc);
5054 if (error != 0)
5055 return;
5056 sc->sc_hal->ampdu_tx_stop(sc, tid, ba->ba_winstart);
5057 iwn_nic_unlock(sc);
5058}
5059
5060static void
5061iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5062 uint8_t tid, uint16_t ssn)
5063{
5064 struct iwn_node *wn = (void *)ni;
5065 int qid = 7 + tid;
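	/*
	 * Queues 0-6 are the 4 EDCA queues, the command queue and the
	 * 2 HCCA queues (cf. iwn4965_post_alive()), so aggregation
	 * queues start at 7, one per TID.
	 */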
5066
5067 /* Stop TX scheduler while we're changing its configuration. */
5068 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5069 IWN4965_TXQ_STATUS_CHGACT);
5070
5071 /* Assign RA/TID translation to the queue. */
5072 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5073 wn->id << 4 | tid);
5074
5075 /* Enable chain-building mode for the queue. */
5076 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5077
5078 /* Set starting sequence number from the ADDBA request. */
5079 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5080 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5081
5082 /* Set scheduler window size. */
5083 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5084 IWN_SCHED_WINSZ);
5085 /* Set scheduler frame limit. */
5086 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5087 IWN_SCHED_LIMIT << 16);
5088
5089 /* Enable interrupts for the queue. */
5090 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5091
5092 /* Mark the queue as active. */
5093 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5094 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5095 iwn_tid2fifo[tid] << 1);
5096}
5097
5098static void
5099iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5100{
5101 int qid = 7 + tid;
5102
5103 /* Stop TX scheduler while we're changing its configuration. */
5104 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5105 IWN4965_TXQ_STATUS_CHGACT);
5106
5107 /* Set starting sequence number from the ADDBA request. */
5108 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5109 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5110
5111 /* Disable interrupts for the queue. */
5112 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5113
5114 /* Mark the queue as inactive. */
5115 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5116 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5117}
5118
5119static void
5120iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5121 uint8_t tid, uint16_t ssn)
5122{
5123 struct iwn_node *wn = (void *)ni;
5124 int qid = 10 + tid;
5125
5126 /* Stop TX scheduler while we're changing its configuration. */
5127 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5128 IWN5000_TXQ_STATUS_CHGACT);
5129
5130 /* Assign RA/TID translation to the queue. */
5131 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5132 wn->id << 4 | tid);
5133
5134 /* Enable chain-building mode for the queue. */
5135 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5136
5137 /* Enable aggregation for the queue. */
5138 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5139
5140 /* Set starting sequence number from the ADDBA request. */
5141 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5142 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5143
5144 /* Set scheduler window size and frame limit. */
5145 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5146 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5147
5148 /* Enable interrupts for the queue. */
5149 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5150
5151 /* Mark the queue as active. */
5152 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5153 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5154}
5155
5156static void
5157iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5158{
5159 int qid = 10 + tid;
5160
5161 /* Stop TX scheduler while we're changing its configuration. */
5162 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5163 IWN5000_TXQ_STATUS_CHGACT);
5164
5165 /* Disable aggregation for the queue. */
5166 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5167
5168 /* Set starting sequence number from the ADDBA request. */
5169 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5170 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5171
5172 /* Disable interrupts for the queue. */
5173 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5174
5175 /* Mark the queue as inactive. */
5176 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5177 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5178}
5179#endif
5180
5181/*
5182 * Send calibration results to the runtime firmware. These results were
5183 * obtained on first boot from the initialization firmware, or by reading
5184 * the EEPROM for crystal calibration.
5185 */
5186static int
5187iwn5000_send_calib_results(struct iwn_softc *sc)
5188{
5189 struct iwn_calib_info *calib_result;
5190 int idx, error;
5191
5192 for (idx = 0; idx < IWN_CALIB_NUM; idx++) {
5193 calib_result = &sc->calib_results[idx];
5194
5195 /* No support for this type of calibration. */
5196 if ((sc->calib_init & (1 << idx)) == 0)
5197 continue;
5198
5199 /* No calibration result available. */
5200 if (calib_result->buf == NULL)
5201 continue;
5202
5203 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5204 "%s: send calibration result idx=%d, len=%d\n",
5205 __func__, idx, calib_result->len);
5206
5207 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, calib_result->buf,
5208 calib_result->len, 0);
5209 if (error != 0) {
5210 device_printf(sc->sc_dev,
5211 "%s: could not send calibration result "
5212 "idx=%d, error=%d\n",
5213 __func__, idx, error);
5214 return error;
5215 }
5216 }
5217 return 0;
5218}
5219
5220/*
5221 * Save calibration result at the given index. The index determines
5222 * in which order the results are sent to the runtime firmware.
5223 */
5224static int
5225iwn5000_save_calib_result(struct iwn_softc *sc, struct iwn_phy_calib *calib,
5226 int len, int idx)
5227{
5228 struct iwn_calib_info *calib_result = &sc->calib_results[idx];
5229
5230 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5231 "%s: saving calibration result code=%d, idx=%d, len=%d\n",
5232 __func__, calib->code, idx, len);
5233
5234 if (calib_result->buf != NULL)
5235 free(calib_result->buf, M_DEVBUF);
5236
5237 calib_result->buf = malloc(len, M_DEVBUF, M_NOWAIT);
5238 if (calib_result->buf == NULL) {
5239 device_printf(sc->sc_dev,
5240 "%s: not enough memory for calibration result "
5241 "code=%d, len=%d\n", __func__, calib->code, len);
5242 return ENOMEM;
5243 }
5244
5245 calib_result->len = len;
5246 memcpy(calib_result->buf, calib, len);
5247 return 0;
5248}
5249
5250static void
5251iwn5000_free_calib_results(struct iwn_softc *sc)
5252{
5253 struct iwn_calib_info *calib_result;
5254 int idx;
5255
5256 for (idx = 0; idx < IWN_CALIB_NUM; idx++) {
5257 calib_result = &sc->calib_results[idx];
5258
5259 if (calib_result->buf != NULL)
5260 free(calib_result->buf, M_DEVBUF);
5261
5262 calib_result->buf = NULL;
5263 calib_result->len = 0;
5264 }
5265}
5266
5267/*
5268 * Obtain the crystal calibration result from the EEPROM.
5269 */
5270static int
5271iwn5000_chrystal_calib(struct iwn_softc *sc)
5272{
5273 struct iwn5000_phy_calib_crystal cmd;
5274 uint32_t base, crystal;
5275 uint16_t val;
5276
5277 /* Read crystal calibration. */
5278 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
5279 base = le16toh(val);
5280 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, &crystal,
5281 sizeof(uint32_t));
5282 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: crystal calibration=0x%08x\n",
5283 __func__, le32toh(crystal));
5284
5285 memset(&cmd, 0, sizeof cmd);
5286 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5287 cmd.ngroups = 1;
5288 cmd.isvalid = 1;
5289 cmd.cap_pin[0] = le32toh(crystal) & 0xff;
5290 cmd.cap_pin[1] = (le32toh(crystal) >> 16) & 0xff;
5291
5292 return iwn5000_save_calib_result(sc, (struct iwn_phy_calib *)&cmd,
5293 sizeof cmd, IWN_CALIB_IDX_XTAL);
5294}
5295
5296/*
5297 * Query calibration results from the initialization firmware. We do this
5298 * only once at first boot.
5299 */
5300static int
5301iwn5000_send_calib_query(struct iwn_softc *sc, uint32_t cfg)
5302{
5303#define CALIB_INIT_CFG 0xffffffff
5304 struct iwn5000_calib_config cmd;
5305 int error;
5306
5307 memset(&cmd, 0, sizeof cmd);
5308 cmd.ucode.once.enable = CALIB_INIT_CFG;
5309 if (cfg == 0) {
5310 cmd.ucode.once.start = CALIB_INIT_CFG;
5311 cmd.ucode.once.send = CALIB_INIT_CFG;
5312 cmd.ucode.flags = CALIB_INIT_CFG;
5313 } else
5314 cmd.ucode.once.start = cfg;
5315
5316 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5317 "%s: query calibration results, cfg %x\n", __func__, cfg);
5318
5319 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5320 if (error != 0)
5321 return error;
5322
5323 /* Wait at most two seconds for calibration to complete. */
5324 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5325 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 2 * hz);
5326
5327 return error;
5328#undef CALIB_INIT_CFG
5329}
5330
5331/*
5332 * Process a CALIBRATION_RESULT notification sent by the initialization
5333 * firmware on response to a CMD_CALIB_CONFIG command.
5334 * firmware in response to a CMD_CALIB_CONFIG command.
5335static int
5336iwn5000_rx_calib_result(struct iwn_softc *sc, struct iwn_rx_desc *desc,
5337 struct iwn_rx_data *data)
5338{
5339#define FRAME_SIZE_MASK 0x3fff
5340 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
5341 int len, idx;
5342
5343 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
5344 len = (le32toh(desc->len) & FRAME_SIZE_MASK);
5345
5346 /* Remove length field itself. */
5347 len -= 4;
5348
5349 /*
5350	 * Determine the order in which the results will be sent to the
5351 * runtime firmware.
5352 */
5353 switch (calib->code) {
5354 case IWN5000_PHY_CALIB_DC:
5355 idx = IWN_CALIB_IDX_DC;
5356 break;
5357 case IWN5000_PHY_CALIB_LO:
5358 idx = IWN_CALIB_IDX_LO;
5359 break;
5360 case IWN5000_PHY_CALIB_TX_IQ:
5361 idx = IWN_CALIB_IDX_TX_IQ;
5362 break;
5363 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
5364 idx = IWN_CALIB_IDX_TX_IQ_PERIODIC;
5365 break;
5366 case IWN5000_PHY_CALIB_BASE_BAND:
5367 idx = IWN_CALIB_IDX_BASE_BAND;
5368 break;
5369 default:
5370 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5371 "%s: unknown calibration code=%d\n", __func__, calib->code);
5372 return EINVAL;
5373 }
5374 return iwn5000_save_calib_result(sc, calib, len, idx);
5375#undef FRAME_SIZE_MASK
5376}
5377
5378static int
5379iwn5000_send_wimax_coex(struct iwn_softc *sc)
5380{
5381 struct iwn5000_wimax_coex wimax;
5382
5383#ifdef notyet
5384 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5385 /* Enable WiMAX coexistence for combo adapters. */
5386 wimax.flags =
5387 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5388 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5389 IWN_WIMAX_COEX_STA_TABLE_VALID |
5390 IWN_WIMAX_COEX_ENABLE;
5391 memcpy(wimax.events, iwn6050_wimax_events,
5392 sizeof iwn6050_wimax_events);
5393 } else
5394#endif
5395 {
5396 /* Disable WiMAX coexistence. */
5397 wimax.flags = 0;
5398 memset(wimax.events, 0, sizeof wimax.events);
5399 }
5400 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
5401 __func__);
5402 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
5403}
5404
5405/*
5406 * This function is called after the runtime firmware notifies us of its
5407 * readiness (called in a process context.)
5408 */
5409static int
5410iwn4965_post_alive(struct iwn_softc *sc)
5411{
5412 int error, qid;
5413
5414 if ((error = iwn_nic_lock(sc)) != 0)
5415 return error;
5416
5417 /* Clear TX scheduler state in SRAM. */
5418 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5419 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5420 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5421
5422 /* Set physical address of TX scheduler rings (1KB aligned.) */
5423 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5424
5425 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5426
5427 /* Disable chain mode for all our 16 queues. */
5428 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5429
5430 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5431 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5432 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5433
5434 /* Set scheduler window size. */
5435 iwn_mem_write(sc, sc->sched_base +
5436 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5437 /* Set scheduler frame limit. */
5438 iwn_mem_write(sc, sc->sched_base +
5439 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5440 IWN_SCHED_LIMIT << 16);
5441 }
5442
5443 /* Enable interrupts for all our 16 queues. */
5444 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5445 /* Identify TX FIFO rings (0-7). */
5446 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5447
5448 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5449 for (qid = 0; qid < 7; qid++) {
5450 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
5451 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5452 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
5453 }
5454 iwn_nic_unlock(sc);
5455 return 0;
5456}
5457
5458/*
5459 * This function is called after the initialization or runtime firmware
5460 * notifies us of its readiness (called in a process context.)
5461 */
5462static int
5463iwn5000_post_alive(struct iwn_softc *sc)
5464{
5465 int error, qid;
5466
5467 /* Switch to using ICT interrupt mode. */
5468 iwn5000_ict_reset(sc);
5469
5470 error = iwn_nic_lock(sc);
5471 if (error != 0)
5472 return error;
5473
5474 /* Clear TX scheduler state in SRAM. */
5475 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5476 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
5477 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
5478
5479 /* Set physical address of TX scheduler rings (1KB aligned.) */
5480 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5481
5482 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5483
5484 /* Enable chain mode for all queues, except command queue. */
5485 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
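	/* NB: 0xfffef leaves bit 4 clear, i.e. the command queue. */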
5486 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
5487
5488 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
5489 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
5490 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5491
5492 iwn_mem_write(sc, sc->sched_base +
5493 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
5494 /* Set scheduler window size and frame limit. */
5495 iwn_mem_write(sc, sc->sched_base +
5496 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5497 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5498 }
5499
5500 /* Enable interrupts for all our 20 queues. */
5501 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
5502 /* Identify TX FIFO rings (0-7). */
5503 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
5504
5505 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5506 for (qid = 0; qid < 7; qid++) {
5507 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
5508 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5509 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
5510 }
5511 iwn_nic_unlock(sc);
5512
5513 /* Configure WiMAX coexistence for combo adapters. */
5514 error = iwn5000_send_wimax_coex(sc);
5515 if (error != 0) {
5516 device_printf(sc->sc_dev,
5517 "%s: could not configure WiMAX coexistence, error %d\n",
5518 __func__, error);
5519 return error;
5520 }
5521
5522 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
5523 /*
5524		 * Start calibration by setting and sending the crystal
5525		 * calibration first; this must be done before we are able
5526		 * to query the other calibration results.
5527 */
5528 error = iwn5000_chrystal_calib(sc);
5529 if (error != 0) {
5530 device_printf(sc->sc_dev,
5531			    "%s: could not set crystal calibration, "
5532 "error=%d\n", __func__, error);
5533 return error;
5534 }
5535 error = iwn5000_send_calib_results(sc);
5536 if (error != 0) {
5537 device_printf(sc->sc_dev,
5538			    "%s: could not send crystal calibration, "
5539 "error=%d\n", __func__, error);
5540 return error;
5541 }
5542
5543 /*
5544 * Query other calibration results from the initialization
5545 * firmware.
5546 */
5547 error = iwn5000_send_calib_query(sc, 0);
5548 if (error != 0) {
5549 device_printf(sc->sc_dev,
5550 "%s: could not query calibration, error=%d\n",
5551 __func__, error);
5552 return error;
5553 }
5554
5555 /*
5556		 * We have the calibration results now; reboot with the
5557		 * runtime firmware (call ourselves recursively!)
5558 */
5559 iwn_hw_stop(sc);
5560 error = iwn_hw_init(sc);
5561 } else {
5562 /*
5563 * Send calibration results obtained from the initialization
5564 * firmware to the runtime firmware.
5565 */
5566 error = iwn5000_send_calib_results(sc);
5567
5568 /*
5569 * Tell the runtime firmware to do certain calibration types.
5570 */
5571 if (sc->calib_runtime != 0) {
5572 error = iwn5000_send_calib_query(sc, sc->calib_runtime);
5573 if (error != 0) {
5574 device_printf(sc->sc_dev,
5575			    "%s: could not send calibration query, "
5576 "error=%d, cfg=%x\n", __func__, error,
5577 sc->calib_runtime);
5578 }
5579 }
5580 }
5581 return error;
5582}
5583
5584/*
5585 * The firmware boot code is small and is intended to be copied directly into
5586 * the NIC internal memory (no DMA transfer.)
5587 */
5588static int
5589iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
5590{
5591 int error, ntries;
5592
5593 size /= sizeof (uint32_t);
5594
5595 error = iwn_nic_lock(sc);
5596 if (error != 0)
5597 return error;
5598
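	/*
	 * The bootstrap state machine (BSM) is given a source offset in
	 * its SRAM, a destination in instruction memory and a dword
	 * count; writing IWN_BSM_WR_CTRL_START below starts the copy
	 * and the bit clears itself once the transfer has completed.
	 */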
5599 /* Copy microcode image into NIC memory. */
5600 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
5601 (const uint32_t *)ucode, size);
5602
5603 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
5604 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
5605 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
5606
5607 /* Start boot load now. */
5608 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
5609
5610 /* Wait for transfer to complete. */
5611 for (ntries = 0; ntries < 1000; ntries++) {
5612 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
5613 IWN_BSM_WR_CTRL_START))
5614 break;
5615 DELAY(10);
5616 }
5617 if (ntries == 1000) {
5618 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5619 __func__);
5620 iwn_nic_unlock(sc);
5621 return ETIMEDOUT;
5622 }
5623
5624 /* Enable boot after power up. */
5625 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
5626
5627 iwn_nic_unlock(sc);
5628 return 0;
5629}
5630
5631static int
5632iwn4965_load_firmware(struct iwn_softc *sc)
5633{
5634 struct iwn_fw_info *fw = &sc->fw;
5635 struct iwn_dma_info *dma = &sc->fw_dma;
5636 int error;
5637
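	/*
	 * Boot flow on the 4965: stage the init image in the fw_dma
	 * area (data at offset 0, text at IWN4965_FW_DATA_MAXSZ), point
	 * the BSM at it, load the boot code and wait for the "alive"
	 * notification; the same area is then reused for the runtime
	 * image, with IWN_FW_UPDATED telling the BSM to reload it.
	 */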
5638 /* Copy initialization sections into pre-allocated DMA-safe memory. */
5639 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5640 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5641 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5642 fw->init.text, fw->init.textsz);
5643 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5644
5645 /* Tell adapter where to find initialization sections. */
5646 error = iwn_nic_lock(sc);
5647 if (error != 0)
5648 return error;
5649 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5650 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5651 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5652 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5653 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5654 iwn_nic_unlock(sc);
5655
5656 /* Load firmware boot code. */
5657 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5658 if (error != 0) {
5659 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5660 __func__);
5661 return error;
5662 }
5663 /* Now press "execute". */
5664 IWN_WRITE(sc, IWN_RESET, 0);
5665
5666 /* Wait at most one second for first alive notification. */
5667 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
5668 if (error) {
5669 device_printf(sc->sc_dev,
5670 "%s: timeout waiting for adapter to initialize, error %d\n",
5671 __func__, error);
5672 return error;
5673 }
5674
5675 /* Retrieve current temperature for initial TX power calibration. */
5676 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
5677 sc->temp = iwn4965_get_temperature(sc);
5678
5679 /* Copy runtime sections into pre-allocated DMA-safe memory. */
5680 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5681 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5682 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5683 fw->main.text, fw->main.textsz);
5684 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5685
5686 /* Tell adapter where to find runtime sections. */
5687 error = iwn_nic_lock(sc);
5688 if (error != 0)
5689 return error;
5690
5691 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5692 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5693 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5694 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5695 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
5696 IWN_FW_UPDATED | fw->main.textsz);
5697 iwn_nic_unlock(sc);
5698
5699 return 0;
5700}
5701
5702static int
5703iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
5704 const uint8_t *section, int size)
5705{
5706 struct iwn_dma_info *dma = &sc->fw_dma;
5707 int error;
5708
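	/*
	 * On the 5000 series the firmware is not loaded through the
	 * BSM: each section is copied into DMA-safe memory, transferred
	 * into SRAM over the flow handler's service channel one TFD at
	 * a time, and we then sleep until the transfer completes.
	 */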
5709 /* Copy firmware section into pre-allocated DMA-safe memory. */
5710 memcpy(dma->vaddr, section, size);
5711 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5712
5713 error = iwn_nic_lock(sc);
5714 if (error != 0)
5715 return error;
5716
5717 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5718 IWN_FH_TX_CONFIG_DMA_PAUSE);
5719
5720 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
5721 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
5722 IWN_LOADDR(dma->paddr));
5723 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
5724 IWN_HIADDR(dma->paddr) << 28 | size);
5725 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
5726 IWN_FH_TXBUF_STATUS_TBNUM(1) |
5727 IWN_FH_TXBUF_STATUS_TBIDX(1) |
5728 IWN_FH_TXBUF_STATUS_TFBD_VALID);
5729
5730 /* Kick Flow Handler to start DMA transfer. */
5731 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5732 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
5733
5734 iwn_nic_unlock(sc);
5735
5736 /* Wait at most five seconds for FH DMA transfer to complete. */
5737 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
5738}
5739
5740static int
5741iwn5000_load_firmware(struct iwn_softc *sc)
5742{
5743 struct iwn_fw_part *fw;
5744 int error;
5745
5746 /* Load the initialization firmware on first boot only. */
5747 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5748 &sc->fw.main : &sc->fw.init;
5749
5750 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
5751 fw->text, fw->textsz);
5752 if (error != 0) {
5753 device_printf(sc->sc_dev,
5754 "%s: could not load firmware %s section, error %d\n",
5755 __func__, ".text", error);
5756 return error;
5757 }
5758 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
5759 fw->data, fw->datasz);
5760 if (error != 0) {
5761 device_printf(sc->sc_dev,
5762 "%s: could not load firmware %s section, error %d\n",
5763 __func__, ".data", error);
5764 return error;
5765 }
5766
5767 /* Now press "execute". */
5768 IWN_WRITE(sc, IWN_RESET, 0);
5769 return 0;
5770}
5771
5772/*
5773 * Extract text and data sections from a legacy firmware image.
5774 */
5775static int
5776iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
5777{
5778 const uint32_t *ptr;
5779 size_t hdrlen = 24;
5780 uint32_t rev;
5781
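	/*
	 * Legacy images start with a small header: an API revision
	 * word, an optional build number (API >= 3) and the sizes of
	 * the five sections (main text/data, init text/data, boot
	 * text), immediately followed by the sections in that order.
	 */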
5782 ptr = (const uint32_t *)fw->data;
5783 rev = le32toh(*ptr++);
5784
5785 /* Check firmware API version. */
5786 if (IWN_FW_API(rev) <= 1) {
5787 device_printf(sc->sc_dev,
5788 "%s: bad firmware, need API version >=2\n", __func__);
5789 return EINVAL;
5790 }
5791 if (IWN_FW_API(rev) >= 3) {
5792 /* Skip build number (version 2 header). */
5793 hdrlen += 4;
5794 ptr++;
5795 }
5796 if (fw->size < hdrlen) {
5797 device_printf(sc->sc_dev,
5798 "%s: firmware file too short: %zu bytes\n",
5799 __func__, fw->size);
5800 return EINVAL;
5801 }
5802 fw->main.textsz = le32toh(*ptr++);
5803 fw->main.datasz = le32toh(*ptr++);
5804 fw->init.textsz = le32toh(*ptr++);
5805 fw->init.datasz = le32toh(*ptr++);
5806 fw->boot.textsz = le32toh(*ptr++);
5807
5808 /* Check that all firmware sections fit. */
5809 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
5810 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5811 device_printf(sc->sc_dev,
5812 "%s: firmware file too short: %zu bytes\n",
5813 __func__, fw->size);
5814 return EINVAL;
5815 }
5816
5817 /* Get pointers to firmware sections. */
5818 fw->main.text = (const uint8_t *)ptr;
5819 fw->main.data = fw->main.text + fw->main.textsz;
5820 fw->init.text = fw->main.data + fw->main.datasz;
5821 fw->init.data = fw->init.text + fw->init.textsz;
5822 fw->boot.text = fw->init.data + fw->init.datasz;
5823
5824 return 0;
5825}
5826
5827/*
5828 * Extract text and data sections from a TLV firmware image.
5829 */
5830static int
5831iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
5832 uint16_t alt)
5833{
5834 const struct iwn_fw_tlv_hdr *hdr;
5835 const struct iwn_fw_tlv *tlv;
5836 const uint8_t *ptr, *end;
5837 uint64_t altmask;
5838 uint32_t len;
5839
5840 if (fw->size < sizeof (*hdr)) {
5841 device_printf(sc->sc_dev,
5842 "%s: firmware file too short: %zu bytes\n",
5843 __func__, fw->size);
5844 return EINVAL;
5845 }
5846 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
5847 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
5848 device_printf(sc->sc_dev,
5849 "%s: bad firmware file signature 0x%08x\n",
5850 __func__, le32toh(hdr->signature));
5851 return EINVAL;
5852 }
5853
5854 /*
5855 * Select the closest supported alternative that is less than
5856 * or equal to the specified one.
5857 */
5858 altmask = le64toh(hdr->altmask);
5859 while (alt > 0 && !(altmask & (1ULL << alt)))
5860 alt--; /* Downgrade. */
5861
5862 ptr = (const uint8_t *)(hdr + 1);
5863 end = (const uint8_t *)(fw->data + fw->size);
5864
5865 /* Parse type-length-value fields. */
5866 while (ptr + sizeof (*tlv) <= end) {
5867 tlv = (const struct iwn_fw_tlv *)ptr;
5868 len = le32toh(tlv->len);
5869
5870 ptr += sizeof (*tlv);
5871 if (ptr + len > end) {
5872 device_printf(sc->sc_dev,
5873 "%s: firmware file too short: %zu bytes\n",
5874 __func__, fw->size);
5875 return EINVAL;
5876 }
5877 /* Skip other alternatives. */
5878 if (tlv->alt != 0 && tlv->alt != htole16(alt))
5879 goto next;
5880
5881 switch (le16toh(tlv->type)) {
5882 case IWN_FW_TLV_MAIN_TEXT:
5883 fw->main.text = ptr;
5884 fw->main.textsz = len;
5885 break;
5886 case IWN_FW_TLV_MAIN_DATA:
5887 fw->main.data = ptr;
5888 fw->main.datasz = len;
5889 break;
5890 case IWN_FW_TLV_INIT_TEXT:
5891 fw->init.text = ptr;
5892 fw->init.textsz = len;
5893 break;
5894 case IWN_FW_TLV_INIT_DATA:
5895 fw->init.data = ptr;
5896 fw->init.datasz = len;
5897 break;
5898 case IWN_FW_TLV_BOOT_TEXT:
5899 fw->boot.text = ptr;
5900 fw->boot.textsz = len;
5901 break;
5902 default:
5903 DPRINTF(sc, IWN_DEBUG_RESET,
5904 "%s: TLV type %d not handled\n",
5905 __func__, le16toh(tlv->type));
5906 break;
5907 }
5908next: /* TLV fields are 32-bit aligned. */
5909 ptr += (len + 3) & ~3;
5910 }
5911 return 0;
5912}
5913
5914static int
5915iwn_read_firmware(struct iwn_softc *sc)
5916{
5917 const struct iwn_hal *hal = sc->sc_hal;
5918 struct iwn_fw_info *fw = &sc->fw;
5919 int error;
5920
5921 IWN_UNLOCK(sc);
5922
5923 memset(fw, 0, sizeof (*fw));
5924
5925 /* Read firmware image from filesystem. */
5926 sc->fw_fp = firmware_get(sc->fwname);
5927 if (sc->fw_fp == NULL) {
5928 device_printf(sc->sc_dev,
5929 "%s: could not load firmare image \"%s\"\n", __func__,
5930 sc->fwname);
5931 IWN_LOCK(sc);
5932 return EINVAL;
5933 }
5934 IWN_LOCK(sc);
5935
5936 fw->size = sc->fw_fp->datasize;
5937 fw->data = (const uint8_t *)sc->fw_fp->data;
5938 if (fw->size < sizeof (uint32_t)) {
5939 device_printf(sc->sc_dev,
5940 "%s: firmware file too short: %zu bytes\n",
5941 __func__, fw->size);
5942 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5943 sc->fw_fp = NULL;
5944 return EINVAL;
5945 }
5946
5947 /* Retrieve text and data sections. */
5948 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
5949 error = iwn_read_firmware_leg(sc, fw);
5950 else
5951 error = iwn_read_firmware_tlv(sc, fw, 1);
5952 if (error != 0) {
5953 device_printf(sc->sc_dev,
5954 "%s: could not read firmware sections\n", __func__);
5955 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5956 sc->fw_fp = NULL;
5957 return error;
5958 }
5959
5960 /* Make sure text and data sections fit in hardware memory. */
5961 if (fw->main.textsz > hal->fw_text_maxsz ||
5962 fw->main.datasz > hal->fw_data_maxsz ||
5963 fw->init.textsz > hal->fw_text_maxsz ||
5964 fw->init.datasz > hal->fw_data_maxsz ||
5965 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
5966 (fw->boot.textsz & 3) != 0) {
5967 device_printf(sc->sc_dev,
5968 "%s: firmware sections too large\n", __func__);
5969 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5970 sc->fw_fp = NULL;
5971 return EINVAL;
5972 }
5973
5974 /* We can proceed with loading the firmware. */
5975 return 0;
5976}
5977
5978static int
5979iwn_clock_wait(struct iwn_softc *sc)
5980{
5981 int ntries;
5982
5983 /* Set "initialization complete" bit. */
5984 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
5985
5986 /* Wait for clock stabilization. */
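	/* Poll for up to 2500 x 10 us, i.e. roughly 25 ms. */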
5987 for (ntries = 0; ntries < 2500; ntries++) {
5988 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
5989 return 0;
5990 DELAY(10);
5991 }
5992 device_printf(sc->sc_dev,
5993 "%s: timeout waiting for clock stabilization\n", __func__);
5994 return ETIMEDOUT;
5995}
5996
5997static int
5998iwn_apm_init(struct iwn_softc *sc)
5999{
6000 uint32_t tmp;
6001 int error;
6002
6003 /* Disable L0s exit timer (NMI bug workaround.) */
6004 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6005 /* Don't wait for ICH L0s (ICH bug workaround.) */
6006 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6007
6008 /* Set FH wait threshold to max (HW bug under stress workaround.) */
6009 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6010
6011 /* Enable HAP INTA to move adapter from L1a to L0s. */
6012 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6013
6014 /* Retrieve PCIe Active State Power Management (ASPM). */
6015 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
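	/* (sc_cap_off + 0x10 is the PCI Express Link Control register.) */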
6016 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6017 if (tmp & 0x02) /* L1 Entry enabled. */
6018 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6019 else
6020 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6021
6022 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6023 sc->hw_type <= IWN_HW_REV_TYPE_1000)
6024 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6025
6026 /* Wait for clock stabilization before accessing prph. */
6027 error = iwn_clock_wait(sc);
6028 if (error != 0)
6029 return error;
6030
6031 error = iwn_nic_lock(sc);
6032 if (error != 0)
6033 return error;
6034
6035 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6036 /* Enable DMA and BSM (Bootstrap State Machine.) */
6037 iwn_prph_write(sc, IWN_APMG_CLK_EN,
6038 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6039 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6040 } else {
6041 /* Enable DMA. */
6042 iwn_prph_write(sc, IWN_APMG_CLK_EN,
6043 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6044 }
6045 DELAY(20);
6046
6047 /* Disable L1-Active. */
6048 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6049 iwn_nic_unlock(sc);
6050
6051 return 0;
6052}
6053
6054static void
6055iwn_apm_stop_master(struct iwn_softc *sc)
6056{
6057 int ntries;
6058
6059 /* Stop busmaster DMA activity. */
6060 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6061 for (ntries = 0; ntries < 100; ntries++) {
6062 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6063 return;
6064 DELAY(10);
6065 }
6066 device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
6067 __func__);
6068}
6069
6070static void
6071iwn_apm_stop(struct iwn_softc *sc)
6072{
6073 iwn_apm_stop_master(sc);
6074
6075 /* Reset the entire device. */
6076 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6077 DELAY(10);
6078 /* Clear "initialization complete" bit. */
6079 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6080}
6081
6082static int
6083iwn4965_nic_config(struct iwn_softc *sc)
6084{
6085 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6086 /*
6087 * I don't believe this to be correct but this is what the
6088 * vendor driver is doing. Probably the bits should not be
6089 * shifted in IWN_RFCFG_*.
6090 */
6091 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6092 IWN_RFCFG_TYPE(sc->rfcfg) |
6093 IWN_RFCFG_STEP(sc->rfcfg) |
6094 IWN_RFCFG_DASH(sc->rfcfg));
6095 }
6096 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6097 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6098 return 0;
6099}
6100
6101static int
6102iwn5000_nic_config(struct iwn_softc *sc)
6103{
6104 uint32_t tmp;
6105 int error;
6106
6107 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6108 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6109 IWN_RFCFG_TYPE(sc->rfcfg) |
6110 IWN_RFCFG_STEP(sc->rfcfg) |
6111 IWN_RFCFG_DASH(sc->rfcfg));
6112 }
6113 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6114 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6115
6116 error = iwn_nic_lock(sc);
6117 if (error != 0)
6118 return error;
6119 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6120
6121 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6122 /*
6123 * Select first Switching Voltage Regulator (1.32V) to
6124 * solve a stability issue related to noisy DC2DC line
6125 * in the silicon of 1000 Series.
6126 */
6127 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6128 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6129 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6130 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6131 }
6132 iwn_nic_unlock(sc);
6133
6134 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6135 /* Use internal power amplifier only. */
6136 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6137 }
6138 if (sc->hw_type == IWN_HW_REV_TYPE_6050 && sc->calib_ver >= 6) {
6139 /* Indicate that ROM calibration version is >=6. */
6140 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6141 }
6142 return 0;
6143}
6144
6145/*
6146 * Take ownership of the NIC from Intel Active Management Technology (AMT).
6147 */
6148static int
6149iwn_hw_prepare(struct iwn_softc *sc)
6150{
6151 int ntries;
6152
6153 /* Check if hardware is ready. */
6154 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6155 for (ntries = 0; ntries < 5; ntries++) {
6156 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6157 IWN_HW_IF_CONFIG_NIC_READY)
6158 return 0;
6159 DELAY(10);
6160 }
6161
6162 /* Hardware not ready, force into ready state. */
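	/* Poll for up to 15000 x 10 us (~150 ms) for the PREPARE_DONE bit to clear. */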
6163 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6164 for (ntries = 0; ntries < 15000; ntries++) {
6165 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6166 IWN_HW_IF_CONFIG_PREPARE_DONE))
6167 break;
6168 DELAY(10);
6169 }
6170 if (ntries == 15000)
6171 return ETIMEDOUT;
6172
6173 /* Hardware should be ready now. */
6174 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6175 for (ntries = 0; ntries < 5; ntries++) {
6176 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6177 IWN_HW_IF_CONFIG_NIC_READY)
6178 return 0;
6179 DELAY(10);
6180 }
6181 return ETIMEDOUT;
6182}
6183
6184static int
6185iwn_hw_init(struct iwn_softc *sc)
6186{
6187 const struct iwn_hal *hal = sc->sc_hal;
6188 int error, chnl, qid;
6189
6190 /* Clear pending interrupts. */
6191 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6192
6193 error = iwn_apm_init(sc);
6194 if (error != 0) {
6195 device_printf(sc->sc_dev,
6196 "%s: could not power ON adapter, error %d\n",
6197 __func__, error);
6198 return error;
6199 }
6200
6201 /* Select VMAIN power source. */
6202 error = iwn_nic_lock(sc);
6203 if (error != 0)
6204 return error;
6205 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6206 iwn_nic_unlock(sc);
6207
6208 /* Perform adapter-specific initialization. */
6209 error = hal->nic_config(sc);
6210 if (error != 0)
6211 return error;
6212
6213 /* Initialize RX ring. */
6214 error = iwn_nic_lock(sc);
6215 if (error != 0)
6216 return error;
6217 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6218 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6219 /* Set physical address of RX ring (256-byte aligned.) */
6220 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6221 /* Set physical address of RX status (16-byte aligned.) */
6222 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6223 /* Enable RX. */
6224 IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6225 IWN_FH_RX_CONFIG_ENA |
6226 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
6227 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
6228 IWN_FH_RX_CONFIG_SINGLE_FRAME |
6229 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
6230 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6231 iwn_nic_unlock(sc);
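	/* Set the initial RX write pointer; the & ~7 mask keeps it a multiple of 8. */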
6232 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6233
6234 error = iwn_nic_lock(sc);
6235 if (error != 0)
6236 return error;
6237
6238 /* Initialize TX scheduler. */
6239 iwn_prph_write(sc, hal->sched_txfact_addr, 0);
6240
6241 /* Set physical address of "keep warm" page (16-byte aligned.) */
6242 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6243
6244 /* Initialize TX rings. */
6245 for (qid = 0; qid < hal->ntxqs; qid++) {
6246 struct iwn_tx_ring *txq = &sc->txq[qid];
6247
6248 /* Set physical address of TX ring (256-byte aligned.) */
6249 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6250 txq->desc_dma.paddr >> 8);
6251 }
6252 iwn_nic_unlock(sc);
6253
6254 /* Enable DMA channels. */
6255 for (chnl = 0; chnl < hal->ndmachnls; chnl++) {
6256 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6257 IWN_FH_TX_CONFIG_DMA_ENA |
6258 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6259 }
6260
6261 /* Clear "radio off" and "commands blocked" bits. */
6262 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6263 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6264
6265 /* Clear pending interrupts. */
6266 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6267 /* Enable interrupt coalescing. */
6268 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6269 /* Enable interrupts. */
6270 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6271
6272 /* _Really_ make sure "radio off" bit is cleared! */
6273 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6274 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6275
6276 error = hal->load_firmware(sc);
6277 if (error != 0) {
6278 device_printf(sc->sc_dev,
6279 "%s: could not load firmware, error %d\n",
6280 __func__, error);
6281 return error;
6282 }
6283 /* Wait at most one second for firmware alive notification. */
6284 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
6285 if (error != 0) {
6286 device_printf(sc->sc_dev,
6287 "%s: timeout waiting for adapter to initialize, error %d\n",
6288 __func__, error);
6289 return error;
6290 }
6291 /* Do post-firmware initialization. */
6292 return hal->post_alive(sc);
6293}
6294
6295static void
6296iwn_hw_stop(struct iwn_softc *sc)
6297{
6298 const struct iwn_hal *hal = sc->sc_hal;
6299 int chnl, qid, ntries;
6300
6301 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6302
6303 /* Disable interrupts. */
6304 IWN_WRITE(sc, IWN_INT_MASK, 0);
6305 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6306 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6307 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6308
6309 /* Make sure we no longer hold the NIC lock. */
6310 iwn_nic_unlock(sc);
6311
6312 /* Stop TX scheduler. */
6313 iwn_prph_write(sc, hal->sched_txfact_addr, 0);
6314
6315 /* Stop all DMA channels. */
6316 if (iwn_nic_lock(sc) == 0) {
6317 for (chnl = 0; chnl < hal->ndmachnls; chnl++) {
6318 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
6319 for (ntries = 0; ntries < 200; ntries++) {
6320 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
6321 IWN_FH_TX_STATUS_IDLE(chnl))
6322 break;
6323 DELAY(10);
6324 }
6325 }
6326 iwn_nic_unlock(sc);
6327 }
6328
6329 /* Stop RX ring. */
6330 iwn_reset_rx_ring(sc, &sc->rxq);
6331
6332 /* Reset all TX rings. */
6333 for (qid = 0; qid < hal->ntxqs; qid++)
6334 iwn_reset_tx_ring(sc, &sc->txq[qid]);
6335
6336 if (iwn_nic_lock(sc) == 0) {
6337 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6338 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6339 iwn_nic_unlock(sc);
6340 }
6341 DELAY(5);
6342
6343 /* Power OFF adapter. */
6344 iwn_apm_stop(sc);
6345}
6346
6347static void
6348iwn_init_locked(struct iwn_softc *sc)
6349{
6350 struct ifnet *ifp = sc->sc_ifp;
6351 int error;
6352
6353 IWN_LOCK_ASSERT(sc);
6354
6355 error = iwn_hw_prepare(sc);
6356 if (error != 0) {
6357 device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
6358 __func__, error);
6359 goto fail;
6360 }
6361
6362 /* Initialize interrupt mask to default value. */
6363 sc->int_mask = IWN_INT_MASK_DEF;
6364 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6365
6366 /* Check that the radio is not disabled by hardware switch. */
6367 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6368 device_printf(sc->sc_dev,
6369 "radio is disabled by hardware switch\n");
6370
6371 /* Enable interrupts to get RF toggle notifications. */
6372 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6373 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6374 return;
6375 }
6376
6377 /* Read firmware images from the filesystem. */
6378 error = iwn_read_firmware(sc);
6379 if (error != 0) {
6380 device_printf(sc->sc_dev,
6381 "%s: could not read firmware, error %d\n",
6382 __func__, error);
6383 goto fail;
6384 }
6385
6386 /* Initialize hardware and upload firmware. */
6387 error = iwn_hw_init(sc);
6388 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6389 sc->fw_fp = NULL;
6390 if (error != 0) {
6391 device_printf(sc->sc_dev,
6392 "%s: could not initialize hardware, error %d\n",
6393 __func__, error);
6394 goto fail;
6395 }
6396
6397 /* Configure adapter now that it is ready. */
6398 error = iwn_config(sc);
6399 if (error != 0) {
6400 device_printf(sc->sc_dev,
6401 "%s: could not configure device, error %d\n",
6402 __func__, error);
6403 goto fail;
6404 }
6405
6406 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6407 ifp->if_drv_flags |= IFF_DRV_RUNNING;
6408
4971
4972 /* Link LED always on while associated. */
4973 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
4974
4975 return 0;
4976#undef MS
4977}
4978
4979#if 0 /* HT */
4980/*
4981 * This function is called by the upper layer when an ADDBA request is received
4982 * from another STA and before the ADDBA response is sent.
4983 */
4984static int
4985iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
4986 uint8_t tid)
4987{
4988 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
4989 struct iwn_softc *sc = ic->ic_softc;
4990 struct iwn_node *wn = (void *)ni;
4991 struct iwn_node_info node;
4992
4993 memset(&node, 0, sizeof node);
4994 node.id = wn->id;
4995 node.control = IWN_NODE_UPDATE;
4996 node.flags = IWN_FLAG_SET_ADDBA;
4997 node.addba_tid = tid;
4998 node.addba_ssn = htole16(ba->ba_winstart);
4999 DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
5000 wn->id, tid, ba->ba_winstart);
5001 return sc->sc_hal->add_node(sc, &node, 1);
5002}
5003
5004/*
5005 * This function is called by the upper layer on teardown of an HT-immediate
5006 * Block Ack agreement (e.g. upon receipt of a DELBA frame.)
5007 */
5008static void
5009iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5010 uint8_t tid)
5011{
5012 struct iwn_softc *sc = ic->ic_softc;
5013 struct iwn_node *wn = (void *)ni;
5014 struct iwn_node_info node;
5015
5016 memset(&node, 0, sizeof node);
5017 node.id = wn->id;
5018 node.control = IWN_NODE_UPDATE;
5019 node.flags = IWN_FLAG_SET_DELBA;
5020 node.delba_tid = tid;
5021 DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
5022 (void)sc->sc_hal->add_node(sc, &node, 1);
5023}
5024
5025/*
5026 * This function is called by the upper layer when an ADDBA response is received
5027 * from another STA.
5028 */
5029static int
5030iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
5031 uint8_t tid)
5032{
5033 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5034 struct iwn_softc *sc = ic->ic_softc;
5035 const struct iwn_hal *hal = sc->sc_hal;
5036 struct iwn_node *wn = (void *)ni;
5037 struct iwn_node_info node;
5038 int error;
5039
5040 /* Enable TX for the specified RA/TID. */
5041 wn->disable_tid &= ~(1 << tid);
5042 memset(&node, 0, sizeof node);
5043 node.id = wn->id;
5044 node.control = IWN_NODE_UPDATE;
5045 node.flags = IWN_FLAG_SET_DISABLE_TID;
5046 node.disable_tid = htole16(wn->disable_tid);
5047 error = hal->add_node(sc, &node, 1);
5048 if (error != 0)
5049 return error;
5050
5051 if ((error = iwn_nic_lock(sc)) != 0)
5052 return error;
5053 hal->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
5054 iwn_nic_unlock(sc);
5055 return 0;
5056}
5057
5058static void
5059iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
5060 uint8_t tid)
5061{
5062 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
5063 struct iwn_softc *sc = ic->ic_softc;
5064 int error;
5065
5066 error = iwn_nic_lock(sc);
5067 if (error != 0)
5068 return;
5069 sc->sc_hal->ampdu_tx_stop(sc, tid, ba->ba_winstart);
5070 iwn_nic_unlock(sc);
5071}
5072
5073static void
5074iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5075 uint8_t tid, uint16_t ssn)
5076{
5077 struct iwn_node *wn = (void *)ni;
5078 int qid = 7 + tid;
5079
5080 /* Stop TX scheduler while we're changing its configuration. */
5081 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5082 IWN4965_TXQ_STATUS_CHGACT);
5083
5084 /* Assign RA/TID translation to the queue. */
5085 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
5086 wn->id << 4 | tid);
5087
5088 /* Enable chain-building mode for the queue. */
5089 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
5090
5091 /* Set starting sequence number from the ADDBA request. */
5092 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5093 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5094
5095 /* Set scheduler window size. */
5096 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
5097 IWN_SCHED_WINSZ);
5098 /* Set scheduler frame limit. */
5099 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5100 IWN_SCHED_LIMIT << 16);
5101
5102 /* Enable interrupts for the queue. */
5103 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5104
5105 /* Mark the queue as active. */
5106 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5107 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
5108 iwn_tid2fifo[tid] << 1);
5109}
5110
5111static void
5112iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5113{
5114 int qid = 7 + tid;
5115
5116 /* Stop TX scheduler while we're changing its configuration. */
5117 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5118 IWN4965_TXQ_STATUS_CHGACT);
5119
5120 /* Set starting sequence number from the ADDBA request. */
5121 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5122 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
5123
5124 /* Disable interrupts for the queue. */
5125 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
5126
5127 /* Mark the queue as inactive. */
5128 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5129 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
5130}
5131
5132static void
5133iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
5134 uint8_t tid, uint16_t ssn)
5135{
5136 struct iwn_node *wn = (void *)ni;
5137 int qid = 10 + tid;
5138
5139 /* Stop TX scheduler while we're changing its configuration. */
5140 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5141 IWN5000_TXQ_STATUS_CHGACT);
5142
5143 /* Assign RA/TID translation to the queue. */
5144 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
5145 wn->id << 4 | tid);
5146
5147 /* Enable chain-building mode for the queue. */
5148 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
5149
5150 /* Enable aggregation for the queue. */
5151 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5152
5153 /* Set starting sequence number from the ADDBA request. */
5154 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5155 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5156
5157 /* Set scheduler window size and frame limit. */
5158 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5159 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5160
5161 /* Enable interrupts for the queue. */
5162 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5163
5164 /* Mark the queue as active. */
5165 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5166 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
5167}
5168
5169static void
5170iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
5171{
5172 int qid = 10 + tid;
5173
5174 /* Stop TX scheduler while we're changing its configuration. */
5175 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5176 IWN5000_TXQ_STATUS_CHGACT);
5177
5178 /* Disable aggregation for the queue. */
5179 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
5180
5181 /* Set starting sequence number from the ADDBA request. */
5182 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
5183 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
5184
5185 /* Disable interrupts for the queue. */
5186 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
5187
5188 /* Mark the queue as inactive. */
5189 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5190 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
5191}
5192#endif
5193
5194/*
5195 * Send calibration results to the runtime firmware. These results were
5196 * obtained on first boot from the initialization firmware, or by reading
5197 * the EEPROM for crystal calibration.
5198 */
5199static int
5200iwn5000_send_calib_results(struct iwn_softc *sc)
5201{
5202 struct iwn_calib_info *calib_result;
5203 int idx, error;
5204
5205 for (idx = 0; idx < IWN_CALIB_NUM; idx++) {
5206 calib_result = &sc->calib_results[idx];
5207
5208 /* No support for this type of calibration. */
5209 if ((sc->calib_init & (1 << idx)) == 0)
5210 continue;
5211
5212 /* No calibration result available. */
5213 if (calib_result->buf == NULL)
5214 continue;
5215
5216 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5217 "%s: send calibration result idx=%d, len=%d\n",
5218 __func__, idx, calib_result->len);
5219
5220 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, calib_result->buf,
5221 calib_result->len, 0);
5222 if (error != 0) {
5223 device_printf(sc->sc_dev,
5224 "%s: could not send calibration result "
5225 "idx=%d, error=%d\n",
5226 __func__, idx, error);
5227 return error;
5228 }
5229 }
5230 return 0;
5231}
5232
5233/*
5234 * Save calibration result at the given index. The index determines
5235 * in which order the results are sent to the runtime firmware.
5236 */
5237static int
5238iwn5000_save_calib_result(struct iwn_softc *sc, struct iwn_phy_calib *calib,
5239 int len, int idx)
5240{
5241 struct iwn_calib_info *calib_result = &sc->calib_results[idx];
5242
5243 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5244 "%s: saving calibration result code=%d, idx=%d, len=%d\n",
5245 __func__, calib->code, idx, len);
5246
5247 if (calib_result->buf != NULL)
5248 free(calib_result->buf, M_DEVBUF);
5249
5250 calib_result->buf = malloc(len, M_DEVBUF, M_NOWAIT);
5251 if (calib_result->buf == NULL) {
5252 device_printf(sc->sc_dev,
5253 "%s: not enough memory for calibration result "
5254 "code=%d, len=%d\n", __func__, calib->code, len);
5255 return ENOMEM;
5256 }
5257
5258 calib_result->len = len;
5259 memcpy(calib_result->buf, calib, len);
5260 return 0;
5261}
5262
5263static void
5264iwn5000_free_calib_results(struct iwn_softc *sc)
5265{
5266 struct iwn_calib_info *calib_result;
5267 int idx;
5268
5269 for (idx = 0; idx < IWN_CALIB_NUM; idx++) {
5270 calib_result = &sc->calib_results[idx];
5271
5272 if (calib_result->buf != NULL)
5273 free(calib_result->buf, M_DEVBUF);
5274
5275 calib_result->buf = NULL;
5276 calib_result->len = 0;
5277 }
5278}
5279
5280/*
5281 * Obtain the crystal calibration result from the EEPROM.
5282 */
5283static int
5284iwn5000_chrystal_calib(struct iwn_softc *sc)
5285{
5286 struct iwn5000_phy_calib_crystal cmd;
5287 uint32_t base, crystal;
5288 uint16_t val;
5289
5290 /* Read crystal calibration. */
5291 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
5292 base = le16toh(val);
5293 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, &crystal,
5294 sizeof(uint32_t));
5295 DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: crystal calibration=0x%08x\n",
5296 __func__, le32toh(crystal));
5297
5298 memset(&cmd, 0, sizeof cmd);
5299 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5300 cmd.ngroups = 1;
5301 cmd.isvalid = 1;
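	/* cap_pin[0] and cap_pin[1] come from bytes 0 and 2 of the EEPROM crystal word. */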
5302 cmd.cap_pin[0] = le32toh(crystal) & 0xff;
5303 cmd.cap_pin[1] = (le32toh(crystal) >> 16) & 0xff;
5304
5305 return iwn5000_save_calib_result(sc, (struct iwn_phy_calib *)&cmd,
5306 sizeof cmd, IWN_CALIB_IDX_XTAL);
5307}
5308
5309/*
5310 * Query calibration results from the initialization firmware. We do this
5311 * only once at first boot.
5312 */
5313static int
5314iwn5000_send_calib_query(struct iwn_softc *sc, uint32_t cfg)
5315{
5316#define CALIB_INIT_CFG 0xffffffff
5317 struct iwn5000_calib_config cmd;
5318 int error;
5319
5320 memset(&cmd, 0, sizeof cmd);
5321 cmd.ucode.once.enable = CALIB_INIT_CFG;
5322 if (cfg == 0) {
5323 cmd.ucode.once.start = CALIB_INIT_CFG;
5324 cmd.ucode.once.send = CALIB_INIT_CFG;
5325 cmd.ucode.flags = CALIB_INIT_CFG;
5326 } else
5327 cmd.ucode.once.start = cfg;
5328
5329 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5330 "%s: query calibration results, cfg %x\n", __func__, cfg);
5331
5332 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
5333 if (error != 0)
5334 return error;
5335
5336 /* Wait at most two seconds for calibration to complete. */
5337 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
5338 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 2 * hz);
5339
5340 return error;
5341#undef CALIB_INIT_CFG
5342}
5343
5344/*
5345 * Process a CALIBRATION_RESULT notification sent by the initialization
5346 * firmware in response to a CMD_CALIB_CONFIG command.
5347 */
5348static int
5349iwn5000_rx_calib_result(struct iwn_softc *sc, struct iwn_rx_desc *desc,
5350 struct iwn_rx_data *data)
5351{
5352#define FRAME_SIZE_MASK 0x3fff
5353 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
5354 int len, idx;
5355
5356 bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
5357 len = (le32toh(desc->len) & FRAME_SIZE_MASK);
5358
5359 /* Remove length field itself. */
5360 len -= 4;
5361
5362 /*
5363 * Determine the order in which the results will be sent to the
5364 * runtime firmware.
5365 */
5366 switch (calib->code) {
5367 case IWN5000_PHY_CALIB_DC:
5368 idx = IWN_CALIB_IDX_DC;
5369 break;
5370 case IWN5000_PHY_CALIB_LO:
5371 idx = IWN_CALIB_IDX_LO;
5372 break;
5373 case IWN5000_PHY_CALIB_TX_IQ:
5374 idx = IWN_CALIB_IDX_TX_IQ;
5375 break;
5376 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
5377 idx = IWN_CALIB_IDX_TX_IQ_PERIODIC;
5378 break;
5379 case IWN5000_PHY_CALIB_BASE_BAND:
5380 idx = IWN_CALIB_IDX_BASE_BAND;
5381 break;
5382 default:
5383 DPRINTF(sc, IWN_DEBUG_CALIBRATE,
5384 "%s: unknown calibration code=%d\n", __func__, calib->code);
5385 return EINVAL;
5386 }
5387 return iwn5000_save_calib_result(sc, calib, len, idx);
5388#undef FRAME_SIZE_MASK
5389}
5390
5391static int
5392iwn5000_send_wimax_coex(struct iwn_softc *sc)
5393{
5394 struct iwn5000_wimax_coex wimax;
5395
5396#ifdef notyet
5397 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
5398 /* Enable WiMAX coexistence for combo adapters. */
5399 wimax.flags =
5400 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
5401 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
5402 IWN_WIMAX_COEX_STA_TABLE_VALID |
5403 IWN_WIMAX_COEX_ENABLE;
5404 memcpy(wimax.events, iwn6050_wimax_events,
5405 sizeof iwn6050_wimax_events);
5406 } else
5407#endif
5408 {
5409 /* Disable WiMAX coexistence. */
5410 wimax.flags = 0;
5411 memset(wimax.events, 0, sizeof wimax.events);
5412 }
5413 DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
5414 __func__);
5415 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
5416}
5417
5418/*
5419 * This function is called after the runtime firmware notifies us of its
5420 * readiness (called in a process context.)
5421 */
5422static int
5423iwn4965_post_alive(struct iwn_softc *sc)
5424{
5425 int error, qid;
5426
5427 if ((error = iwn_nic_lock(sc)) != 0)
5428 return error;
5429
5430 /* Clear TX scheduler state in SRAM. */
5431 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5432 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5433 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5434
5435 /* Set physical address of TX scheduler rings (1KB aligned.) */
5436 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5437
5438 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5439
5440 /* Disable chain mode for all our 16 queues. */
5441 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5442
5443 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5444 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5445 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5446
5447 /* Set scheduler window size. */
5448 iwn_mem_write(sc, sc->sched_base +
5449 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5450 /* Set scheduler frame limit. */
5451 iwn_mem_write(sc, sc->sched_base +
5452 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5453 IWN_SCHED_LIMIT << 16);
5454 }
5455
5456 /* Enable interrupts for all our 16 queues. */
5457 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5458 /* Identify TX FIFO rings (0-7). */
5459 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5460
5461 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5462 for (qid = 0; qid < 7; qid++) {
5463 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
5464 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5465 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
5466 }
5467 iwn_nic_unlock(sc);
5468 return 0;
5469}
5470
5471/*
5472 * This function is called after the initialization or runtime firmware
5473 * notifies us of its readiness (called in a process context.)
5474 */
5475static int
5476iwn5000_post_alive(struct iwn_softc *sc)
5477{
5478 int error, qid;
5479
5480 /* Switch to using ICT interrupt mode. */
5481 iwn5000_ict_reset(sc);
5482
5483 error = iwn_nic_lock(sc);
5484 if (error != 0)
5485 return error;
5486
5487 /* Clear TX scheduler state in SRAM. */
5488 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5489 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
5490 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
5491
5492 /* Set physical address of TX scheduler rings (1KB aligned.) */
5493 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5494
5495 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5496
5497 /* Enable chain mode for all queues, except command queue. */
5498 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
5499 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
5500
5501 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
5502 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
5503 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5504
5505 iwn_mem_write(sc, sc->sched_base +
5506 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
5507 /* Set scheduler window size and frame limit. */
5508 iwn_mem_write(sc, sc->sched_base +
5509 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5510 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5511 }
5512
5513 /* Enable interrupts for all our 20 queues. */
5514 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
5515 /* Identify TX FIFO rings (0-7). */
5516 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
5517
5518 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5519 for (qid = 0; qid < 7; qid++) {
5520 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
5521 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5522 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
5523 }
5524 iwn_nic_unlock(sc);
5525
5526 /* Configure WiMAX coexistence for combo adapters. */
5527 error = iwn5000_send_wimax_coex(sc);
5528 if (error != 0) {
5529 device_printf(sc->sc_dev,
5530 "%s: could not configure WiMAX coexistence, error %d\n",
5531 __func__, error);
5532 return error;
5533 }
5534
5535 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
5536 /*
5537 * Start calibration by setting and sending the crystal
5538 * calibration first; this must be done before we are able
5539 * to query the other calibration results.
5540 */
5541 error = iwn5000_chrystal_calib(sc);
5542 if (error != 0) {
5543 device_printf(sc->sc_dev,
5544 "%s: could not set chrystal calibration, "
5545 "error=%d\n", __func__, error);
5546 return error;
5547 }
5548 error = iwn5000_send_calib_results(sc);
5549 if (error != 0) {
5550 device_printf(sc->sc_dev,
5551 "%s: could not send chrystal calibration, "
5552 "error=%d\n", __func__, error);
5553 return error;
5554 }
5555
5556 /*
5557 * Query other calibration results from the initialization
5558 * firmware.
5559 */
5560 error = iwn5000_send_calib_query(sc, 0);
5561 if (error != 0) {
5562 device_printf(sc->sc_dev,
5563 "%s: could not query calibration, error=%d\n",
5564 __func__, error);
5565 return error;
5566 }
5567
5568 /*
5569 * We have the calibration results now; reboot with the
5570 * runtime firmware (call ourselves recursively!)
5571 */
5572 iwn_hw_stop(sc);
5573 error = iwn_hw_init(sc);
5574 } else {
5575 /*
5576 * Send calibration results obtained from the initialization
5577 * firmware to the runtime firmware.
5578 */
5579 error = iwn5000_send_calib_results(sc);
5580
5581 /*
5582 * Tell the runtime firmware to do certain calibration types.
5583 */
5584 if (sc->calib_runtime != 0) {
5585 error = iwn5000_send_calib_query(sc, sc->calib_runtime);
5586 if (error != 0) {
5587 device_printf(sc->sc_dev,
5588 "%s: could not send query calibration, "
5589 "error=%d, cfg=%x\n", __func__, error,
5590 sc->calib_runtime);
5591 }
5592 }
5593 }
5594 return error;
5595}
5596
5597/*
5598 * The firmware boot code is small and is intended to be copied directly into
5599 * the NIC internal memory (no DMA transfer.)
5600 */
5601static int
5602iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
5603{
5604 int error, ntries;
5605
5606 size /= sizeof (uint32_t);
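	/* The BSM copies 32-bit words, so size now counts words, not bytes. */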
5607
5608 error = iwn_nic_lock(sc);
5609 if (error != 0)
5610 return error;
5611
5612 /* Copy microcode image into NIC memory. */
5613 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
5614 (const uint32_t *)ucode, size);
5615
5616 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
5617 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
5618 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
5619
5620 /* Start boot load now. */
5621 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
5622
5623 /* Wait for transfer to complete. */
5624 for (ntries = 0; ntries < 1000; ntries++) {
5625 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
5626 IWN_BSM_WR_CTRL_START))
5627 break;
5628 DELAY(10);
5629 }
5630 if (ntries == 1000) {
5631 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5632 __func__);
5633 iwn_nic_unlock(sc);
5634 return ETIMEDOUT;
5635 }
5636
5637 /* Enable boot after power up. */
5638 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
5639
5640 iwn_nic_unlock(sc);
5641 return 0;
5642}
5643
5644static int
5645iwn4965_load_firmware(struct iwn_softc *sc)
5646{
5647 struct iwn_fw_info *fw = &sc->fw;
5648 struct iwn_dma_info *dma = &sc->fw_dma;
5649 int error;
5650
5651 /* Copy initialization sections into pre-allocated DMA-safe memory. */
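	/*
	 * Layout of the fw_dma area: the .data image lives at offset 0 and
	 * the .text image at offset IWN4965_FW_DATA_MAXSZ, matching the
	 * BSM_DRAM_*_ADDR registers programmed below.
	 */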
5652 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5653 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5654 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5655 fw->init.text, fw->init.textsz);
5656 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5657
5658 /* Tell adapter where to find initialization sections. */
5659 error = iwn_nic_lock(sc);
5660 if (error != 0)
5661 return error;
5662 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5663 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5664 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5665 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5666 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5667 iwn_nic_unlock(sc);
5668
5669 /* Load firmware boot code. */
5670 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5671 if (error != 0) {
5672 device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
5673 __func__);
5674 return error;
5675 }
5676 /* Now press "execute". */
5677 IWN_WRITE(sc, IWN_RESET, 0);
5678
5679 /* Wait at most one second for first alive notification. */
5680 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
5681 if (error) {
5682 device_printf(sc->sc_dev,
5683 "%s: timeout waiting for adapter to initialize, error %d\n",
5684 __func__, error);
5685 return error;
5686 }
5687
5688 /* Retrieve current temperature for initial TX power calibration. */
5689 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
5690 sc->temp = iwn4965_get_temperature(sc);
5691
5692 /* Copy runtime sections into pre-allocated DMA-safe memory. */
5693 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5694 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5695 memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5696 fw->main.text, fw->main.textsz);
5697 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5698
5699 /* Tell adapter where to find runtime sections. */
5700 error = iwn_nic_lock(sc);
5701 if (error != 0)
5702 return error;
5703
5704 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5705 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5706 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5707 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5708 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
5709 IWN_FW_UPDATED | fw->main.textsz);
5710 iwn_nic_unlock(sc);
5711
5712 return 0;
5713}
5714
5715static int
5716iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
5717 const uint8_t *section, int size)
5718{
5719 struct iwn_dma_info *dma = &sc->fw_dma;
5720 int error;
5721
5722 /* Copy firmware section into pre-allocated DMA-safe memory. */
5723 memcpy(dma->vaddr, section, size);
5724 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
5725
5726 error = iwn_nic_lock(sc);
5727 if (error != 0)
5728 return error;
5729
5730 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5731 IWN_FH_TX_CONFIG_DMA_PAUSE);
5732
5733 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
5734 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
5735 IWN_LOADDR(dma->paddr));
5736 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
5737 IWN_HIADDR(dma->paddr) << 28 | size);
5738 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
5739 IWN_FH_TXBUF_STATUS_TBNUM(1) |
5740 IWN_FH_TXBUF_STATUS_TBIDX(1) |
5741 IWN_FH_TXBUF_STATUS_TFBD_VALID);
5742
5743 /* Kick Flow Handler to start DMA transfer. */
5744 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5745 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
5746
5747 iwn_nic_unlock(sc);
5748
5749 /* Wait at most five seconds for FH DMA transfer to complete. */
5750 return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
5751}
5752
5753static int
5754iwn5000_load_firmware(struct iwn_softc *sc)
5755{
5756 struct iwn_fw_part *fw;
5757 int error;
5758
5759 /* Load the initialization firmware on first boot only. */
5760 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5761 &sc->fw.main : &sc->fw.init;
5762
5763 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
5764 fw->text, fw->textsz);
5765 if (error != 0) {
5766 device_printf(sc->sc_dev,
5767 "%s: could not load firmware %s section, error %d\n",
5768 __func__, ".text", error);
5769 return error;
5770 }
5771 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
5772 fw->data, fw->datasz);
5773 if (error != 0) {
5774 device_printf(sc->sc_dev,
5775 "%s: could not load firmware %s section, error %d\n",
5776 __func__, ".data", error);
5777 return error;
5778 }
5779
5780 /* Now press "execute". */
5781 IWN_WRITE(sc, IWN_RESET, 0);
5782 return 0;
5783}
5784
5785/*
5786 * Extract text and data sections from a legacy firmware image.
5787 */
5788static int
5789iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
5790{
5791 const uint32_t *ptr;
5792 size_t hdrlen = 24;
5793 uint32_t rev;
5794
5795 ptr = (const uint32_t *)fw->data;
5796 rev = le32toh(*ptr++);
5797
5798 /* Check firmware API version. */
5799 if (IWN_FW_API(rev) <= 1) {
5800 device_printf(sc->sc_dev,
5801 "%s: bad firmware, need API version >=2\n", __func__);
5802 return EINVAL;
5803 }
5804 if (IWN_FW_API(rev) >= 3) {
5805 /* Skip build number (version 2 header). */
5806 hdrlen += 4;
5807 ptr++;
5808 }
5809 if (fw->size < hdrlen) {
5810 device_printf(sc->sc_dev,
5811 "%s: firmware file too short: %zu bytes\n",
5812 __func__, fw->size);
5813 return EINVAL;
5814 }
5815 fw->main.textsz = le32toh(*ptr++);
5816 fw->main.datasz = le32toh(*ptr++);
5817 fw->init.textsz = le32toh(*ptr++);
5818 fw->init.datasz = le32toh(*ptr++);
5819 fw->boot.textsz = le32toh(*ptr++);
5820
5821 /* Check that all firmware sections fit. */
5822 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
5823 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5824 device_printf(sc->sc_dev,
5825 "%s: firmware file too short: %zu bytes\n",
5826 __func__, fw->size);
5827 return EINVAL;
5828 }
5829
5830 /* Get pointers to firmware sections. */
5831 fw->main.text = (const uint8_t *)ptr;
5832 fw->main.data = fw->main.text + fw->main.textsz;
5833 fw->init.text = fw->main.data + fw->main.datasz;
5834 fw->init.data = fw->init.text + fw->init.textsz;
5835 fw->boot.text = fw->init.data + fw->init.datasz;
5836
5837 return 0;
5838}
5839
5840/*
5841 * Extract text and data sections from a TLV firmware image.
5842 */
5843static int
5844iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
5845 uint16_t alt)
5846{
5847 const struct iwn_fw_tlv_hdr *hdr;
5848 const struct iwn_fw_tlv *tlv;
5849 const uint8_t *ptr, *end;
5850 uint64_t altmask;
5851 uint32_t len;
5852
5853 if (fw->size < sizeof (*hdr)) {
5854 device_printf(sc->sc_dev,
5855 "%s: firmware file too short: %zu bytes\n",
5856 __func__, fw->size);
5857 return EINVAL;
5858 }
5859 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
5860 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
5861 device_printf(sc->sc_dev,
5862 "%s: bad firmware file signature 0x%08x\n",
5863 __func__, le32toh(hdr->signature));
5864 return EINVAL;
5865 }
5866
5867 /*
5868 * Select the closest supported alternative that is less than
5869 * or equal to the specified one.
5870 */
5871 altmask = le64toh(hdr->altmask);
5872 while (alt > 0 && !(altmask & (1ULL << alt)))
5873 alt--; /* Downgrade. */
5874
5875 ptr = (const uint8_t *)(hdr + 1);
5876 end = (const uint8_t *)(fw->data + fw->size);
5877
5878 /* Parse type-length-value fields. */
5879 while (ptr + sizeof (*tlv) <= end) {
5880 tlv = (const struct iwn_fw_tlv *)ptr;
5881 len = le32toh(tlv->len);
5882
5883 ptr += sizeof (*tlv);
5884 if (ptr + len > end) {
5885 device_printf(sc->sc_dev,
5886 "%s: firmware file too short: %zu bytes\n",
5887 __func__, fw->size);
5888 return EINVAL;
5889 }
5890 /* Skip other alternatives. */
5891 if (tlv->alt != 0 && tlv->alt != htole16(alt))
5892 goto next;
5893
5894 switch (le16toh(tlv->type)) {
5895 case IWN_FW_TLV_MAIN_TEXT:
5896 fw->main.text = ptr;
5897 fw->main.textsz = len;
5898 break;
5899 case IWN_FW_TLV_MAIN_DATA:
5900 fw->main.data = ptr;
5901 fw->main.datasz = len;
5902 break;
5903 case IWN_FW_TLV_INIT_TEXT:
5904 fw->init.text = ptr;
5905 fw->init.textsz = len;
5906 break;
5907 case IWN_FW_TLV_INIT_DATA:
5908 fw->init.data = ptr;
5909 fw->init.datasz = len;
5910 break;
5911 case IWN_FW_TLV_BOOT_TEXT:
5912 fw->boot.text = ptr;
5913 fw->boot.textsz = len;
5914 break;
5915 default:
5916 DPRINTF(sc, IWN_DEBUG_RESET,
5917 "%s: TLV type %d not handled\n",
5918 __func__, le16toh(tlv->type));
5919 break;
5920 }
5921next: /* TLV fields are 32-bit aligned. */
5922 ptr += (len + 3) & ~3;
5923 }
5924 return 0;
5925}
5926
5927static int
5928iwn_read_firmware(struct iwn_softc *sc)
5929{
5930 const struct iwn_hal *hal = sc->sc_hal;
5931 struct iwn_fw_info *fw = &sc->fw;
5932 int error;
5933
5934 IWN_UNLOCK(sc);
5935
5936 memset(fw, 0, sizeof (*fw));
5937
5938 /* Read firmware image from filesystem. */
5939 sc->fw_fp = firmware_get(sc->fwname);
5940 if (sc->fw_fp == NULL) {
5941 device_printf(sc->sc_dev,
5942 "%s: could not load firmare image \"%s\"\n", __func__,
5943 sc->fwname);
5944 IWN_LOCK(sc);
5945 return EINVAL;
5946 }
5947 IWN_LOCK(sc);
5948
5949 fw->size = sc->fw_fp->datasize;
5950 fw->data = (const uint8_t *)sc->fw_fp->data;
5951 if (fw->size < sizeof (uint32_t)) {
5952 device_printf(sc->sc_dev,
5953 "%s: firmware file too short: %zu bytes\n",
5954 __func__, fw->size);
5955 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5956 sc->fw_fp = NULL;
5957 return EINVAL;
5958 }
5959
5960 /* Retrieve text and data sections. */
5961 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
5962 error = iwn_read_firmware_leg(sc, fw);
5963 else
5964 error = iwn_read_firmware_tlv(sc, fw, 1);
5965 if (error != 0) {
5966 device_printf(sc->sc_dev,
5967 "%s: could not read firmware sections\n", __func__);
5968 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5969 sc->fw_fp = NULL;
5970 return error;
5971 }
5972
5973 /* Make sure text and data sections fit in hardware memory. */
5974 if (fw->main.textsz > hal->fw_text_maxsz ||
5975 fw->main.datasz > hal->fw_data_maxsz ||
5976 fw->init.textsz > hal->fw_text_maxsz ||
5977 fw->init.datasz > hal->fw_data_maxsz ||
5978 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
5979 (fw->boot.textsz & 3) != 0) {
5980 device_printf(sc->sc_dev,
5981 "%s: firmware sections too large\n", __func__);
5982 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
5983 sc->fw_fp = NULL;
5984 return EINVAL;
5985 }
5986
5987 /* We can proceed with loading the firmware. */
5988 return 0;
5989}
5990
5991static int
5992iwn_clock_wait(struct iwn_softc *sc)
5993{
5994 int ntries;
5995
5996 /* Set "initialization complete" bit. */
5997 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
5998
5999 /* Wait for clock stabilization. */
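	/* Poll for up to 2500 x 10 us, i.e. roughly 25 ms. */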
6000 for (ntries = 0; ntries < 2500; ntries++) {
6001 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
6002 return 0;
6003 DELAY(10);
6004 }
6005 device_printf(sc->sc_dev,
6006 "%s: timeout waiting for clock stabilization\n", __func__);
6007 return ETIMEDOUT;
6008}
6009
6010static int
6011iwn_apm_init(struct iwn_softc *sc)
6012{
6013 uint32_t tmp;
6014 int error;
6015
6016 /* Disable L0s exit timer (NMI bug workaround.) */
6017 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
6018 /* Don't wait for ICH L0s (ICH bug workaround.) */
6019 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
6020
6021 /* Set FH wait threshold to max (HW bug under stress workaround.) */
6022 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
6023
6024 /* Enable HAP INTA to move adapter from L1a to L0s. */
6025 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
6026
6027 /* Retrieve PCIe Active State Power Management (ASPM). */
6028 tmp = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
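	/* (sc_cap_off + 0x10 is the PCI Express Link Control register.) */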
6029 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
6030 if (tmp & 0x02) /* L1 Entry enabled. */
6031 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6032 else
6033 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
6034
6035 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
6036 sc->hw_type <= IWN_HW_REV_TYPE_1000)
6037 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
6038
6039 /* Wait for clock stabilization before accessing prph. */
6040 error = iwn_clock_wait(sc);
6041 if (error != 0)
6042 return error;
6043
6044 error = iwn_nic_lock(sc);
6045 if (error != 0)
6046 return error;
6047
6048 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
6049 /* Enable DMA and BSM (Bootstrap State Machine.) */
6050 iwn_prph_write(sc, IWN_APMG_CLK_EN,
6051 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
6052 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
6053 } else {
6054 /* Enable DMA. */
6055 iwn_prph_write(sc, IWN_APMG_CLK_EN,
6056 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6057 }
6058 DELAY(20);
6059
6060 /* Disable L1-Active. */
6061 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
6062 iwn_nic_unlock(sc);
6063
6064 return 0;
6065}
6066
6067static void
6068iwn_apm_stop_master(struct iwn_softc *sc)
6069{
6070 int ntries;
6071
6072 /* Stop busmaster DMA activity. */
6073 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
6074 for (ntries = 0; ntries < 100; ntries++) {
6075 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
6076 return;
6077 DELAY(10);
6078 }
6079 device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
6080 __func__);
6081}
6082
6083static void
6084iwn_apm_stop(struct iwn_softc *sc)
6085{
6086 iwn_apm_stop_master(sc);
6087
6088 /* Reset the entire device. */
6089 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
6090 DELAY(10);
6091 /* Clear "initialization complete" bit. */
6092 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
6093}
6094
6095static int
6096iwn4965_nic_config(struct iwn_softc *sc)
6097{
6098 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
6099 /*
6100 * I don't believe this to be correct but this is what the
6101 * vendor driver is doing. Probably the bits should not be
6102 * shifted in IWN_RFCFG_*.
6103 */
6104 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6105 IWN_RFCFG_TYPE(sc->rfcfg) |
6106 IWN_RFCFG_STEP(sc->rfcfg) |
6107 IWN_RFCFG_DASH(sc->rfcfg));
6108 }
6109 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6110 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6111 return 0;
6112}
6113
6114static int
6115iwn5000_nic_config(struct iwn_softc *sc)
6116{
6117 uint32_t tmp;
6118 int error;
6119
6120 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
6121 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6122 IWN_RFCFG_TYPE(sc->rfcfg) |
6123 IWN_RFCFG_STEP(sc->rfcfg) |
6124 IWN_RFCFG_DASH(sc->rfcfg));
6125 }
6126 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
6127 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
6128
6129 error = iwn_nic_lock(sc);
6130 if (error != 0)
6131 return error;
6132 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
6133
6134 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
6135 /*
6136 * Select first Switching Voltage Regulator (1.32V) to
6137 * solve a stability issue related to noisy DC2DC line
6138 * in the silicon of 1000 Series.
6139 */
6140 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
6141 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
6142 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
6143 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
6144 }
6145 iwn_nic_unlock(sc);
6146
6147 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
6148 /* Use internal power amplifier only. */
6149 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
6150 }
6151 if (sc->hw_type == IWN_HW_REV_TYPE_6050 && sc->calib_ver >= 6) {
6152 /* Indicate that ROM calibration version is >=6. */
6153 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
6154 }
6155 return 0;
6156}
6157
6158/*
6159 * Take ownership of the NIC from Intel Active Management Technology (AMT).
6160 */
6161static int
6162iwn_hw_prepare(struct iwn_softc *sc)
6163{
6164 int ntries;
6165
6166 /* Check if hardware is ready. */
6167 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6168 for (ntries = 0; ntries < 5; ntries++) {
6169 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6170 IWN_HW_IF_CONFIG_NIC_READY)
6171 return 0;
6172 DELAY(10);
6173 }
6174
6175 /* Hardware not ready, force into ready state. */
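	/* Poll for up to 15000 x 10 us (~150 ms) for the PREPARE_DONE bit to clear. */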
6176 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
6177 for (ntries = 0; ntries < 15000; ntries++) {
6178 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
6179 IWN_HW_IF_CONFIG_PREPARE_DONE))
6180 break;
6181 DELAY(10);
6182 }
6183 if (ntries == 15000)
6184 return ETIMEDOUT;
6185
6186 /* Hardware should be ready now. */
6187 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
6188 for (ntries = 0; ntries < 5; ntries++) {
6189 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
6190 IWN_HW_IF_CONFIG_NIC_READY)
6191 return 0;
6192 DELAY(10);
6193 }
6194 return ETIMEDOUT;
6195}
6196
6197static int
6198iwn_hw_init(struct iwn_softc *sc)
6199{
6200 const struct iwn_hal *hal = sc->sc_hal;
6201 int error, chnl, qid;
6202
6203 /* Clear pending interrupts. */
6204 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6205
6206 error = iwn_apm_init(sc);
6207 if (error != 0) {
6208 device_printf(sc->sc_dev,
6209 "%s: could not power ON adapter, error %d\n",
6210 __func__, error);
6211 return error;
6212 }
6213
6214 /* Select VMAIN power source. */
6215 error = iwn_nic_lock(sc);
6216 if (error != 0)
6217 return error;
6218 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
6219 iwn_nic_unlock(sc);
6220
6221 /* Perform adapter-specific initialization. */
6222 error = hal->nic_config(sc);
6223 if (error != 0)
6224 return error;
6225
6226 /* Initialize RX ring. */
6227 error = iwn_nic_lock(sc);
6228 if (error != 0)
6229 return error;
6230 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
6231 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
6232 /* Set physical address of RX ring (256-byte aligned.) */
6233 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
6234 /* Set physical address of RX status (16-byte aligned.) */
6235 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
6236 /* Enable RX. */
6237 IWN_WRITE(sc, IWN_FH_RX_CONFIG,
6238 IWN_FH_RX_CONFIG_ENA |
6239 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
6240 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
6241 IWN_FH_RX_CONFIG_SINGLE_FRAME |
6242 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
6243 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
6244 iwn_nic_unlock(sc);
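	/*
	 * Set the initial RX write pointer; the hardware apparently expects
	 * the value to stay a multiple of 8, hence the "& ~7" below.
	 */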
6245 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
6246
6247 error = iwn_nic_lock(sc);
6248 if (error != 0)
6249 return error;
6250
6251 /* Initialize TX scheduler. */
6252 iwn_prph_write(sc, hal->sched_txfact_addr, 0);
6253
6254 /* Set physical address of "keep warm" page (16-byte aligned.) */
6255 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
6256
6257 /* Initialize TX rings. */
6258 for (qid = 0; qid < hal->ntxqs; qid++) {
6259 struct iwn_tx_ring *txq = &sc->txq[qid];
6260
6261 /* Set physical address of TX ring (256-byte aligned.) */
6262 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
6263 txq->desc_dma.paddr >> 8);
6264 }
6265 iwn_nic_unlock(sc);
6266
6267 /* Enable DMA channels. */
6268 for (chnl = 0; chnl < hal->ndmachnls; chnl++) {
6269 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
6270 IWN_FH_TX_CONFIG_DMA_ENA |
6271 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
6272 }
6273
6274 /* Clear "radio off" and "commands blocked" bits. */
6275 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6276 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
6277
6278 /* Clear pending interrupts. */
6279 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6280 /* Enable interrupt coalescing. */
6281 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
6282 /* Enable interrupts. */
6283 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6284
6285 /* _Really_ make sure "radio off" bit is cleared! */
6286 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6287 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
6288
6289 error = hal->load_firmware(sc);
6290 if (error != 0) {
6291 device_printf(sc->sc_dev,
6292 "%s: could not load firmware, error %d\n",
6293 __func__, error);
6294 return error;
6295 }
6296 /* Wait at most one second for firmware alive notification. */
6297 error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz);
6298 if (error != 0) {
6299 device_printf(sc->sc_dev,
6300 "%s: timeout waiting for adapter to initialize, error %d\n",
6301 __func__, error);
6302 return error;
6303 }
6304 /* Do post-firmware initialization. */
6305 return hal->post_alive(sc);
6306}
6307
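/*
 * Undo iwn_hw_init(): disable interrupts, stop the TX scheduler and DMA
 * channels, reset all rings and power the adapter off.
 */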
6308static void
6309iwn_hw_stop(struct iwn_softc *sc)
6310{
6311 const struct iwn_hal *hal = sc->sc_hal;
6312 int chnl, qid, ntries;
6313
6314 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
6315
6316 /* Disable interrupts. */
6317 IWN_WRITE(sc, IWN_INT_MASK, 0);
6318 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6319 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
6320 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6321
6322 /* Make sure we no longer hold the NIC lock. */
6323 iwn_nic_unlock(sc);
6324
6325 /* Stop TX scheduler. */
6326 iwn_prph_write(sc, hal->sched_txfact_addr, 0);
6327
6328 /* Stop all DMA channels. */
6329 if (iwn_nic_lock(sc) == 0) {
6330 for (chnl = 0; chnl < hal->ndmachnls; chnl++) {
6331 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
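			/* Give the channel up to ~2 ms (200 x 10 us) to go idle. */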
6332 for (ntries = 0; ntries < 200; ntries++) {
6333 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
6334 IWN_FH_TX_STATUS_IDLE(chnl))
6335 break;
6336 DELAY(10);
6337 }
6338 }
6339 iwn_nic_unlock(sc);
6340 }
6341
6342 /* Stop RX ring. */
6343 iwn_reset_rx_ring(sc, &sc->rxq);
6344
6345 /* Reset all TX rings. */
6346 for (qid = 0; qid < hal->ntxqs; qid++)
6347 iwn_reset_tx_ring(sc, &sc->txq[qid]);
6348
6349 if (iwn_nic_lock(sc) == 0) {
6350 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
6351 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
6352 iwn_nic_unlock(sc);
6353 }
6354 DELAY(5);
6355
6356 /* Power OFF adapter. */
6357 iwn_apm_stop(sc);
6358}
6359
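/*
 * Bring the hardware up, load the firmware and configure the adapter.
 * Called with the driver lock held.
 */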
6360static void
6361iwn_init_locked(struct iwn_softc *sc)
6362{
6363 struct ifnet *ifp = sc->sc_ifp;
6364 int error;
6365
6366 IWN_LOCK_ASSERT(sc);
6367
6368 error = iwn_hw_prepare(sc);
6369 if (error != 0) {
6370		device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
6371 __func__, error);
6372 goto fail;
6373 }
6374
6375 /* Initialize interrupt mask to default value. */
6376 sc->int_mask = IWN_INT_MASK_DEF;
6377 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
6378
6379 /* Check that the radio is not disabled by hardware switch. */
6380 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
6381 device_printf(sc->sc_dev,
6382 "radio is disabled by hardware switch\n");
6383
6384 /* Enable interrupts to get RF toggle notifications. */
6385 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6386 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6387 return;
6388 }
6389
6390 /* Read firmware images from the filesystem. */
6391 error = iwn_read_firmware(sc);
6392 if (error != 0) {
6393 device_printf(sc->sc_dev,
6394 "%s: could not read firmware, error %d\n",
6395 __func__, error);
6396 goto fail;
6397 }
6398
6399 /* Initialize hardware and upload firmware. */
6400 error = iwn_hw_init(sc);
6401 firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
6402 sc->fw_fp = NULL;
6403 if (error != 0) {
6404 device_printf(sc->sc_dev,
6405 "%s: could not initialize hardware, error %d\n",
6406 __func__, error);
6407 goto fail;
6408 }
6409
6410 /* Configure adapter now that it is ready. */
6411 error = iwn_config(sc);
6412 if (error != 0) {
6413 device_printf(sc->sc_dev,
6414 "%s: could not configure device, error %d\n",
6415 __func__, error);
6416 goto fail;
6417 }
6418
6419 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
6420 ifp->if_drv_flags |= IFF_DRV_RUNNING;
6421
6422 callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
6423 return;
6424
6425fail:
6426 iwn_stop_locked(sc);
6427}
6428
6429static void
6430iwn_init(void *arg)
6431{
6432 struct iwn_softc *sc = arg;
6433 struct ifnet *ifp = sc->sc_ifp;
6434 struct ieee80211com *ic = ifp->if_l2com;
6435
6436 IWN_LOCK(sc);
6437 iwn_init_locked(sc);
6438 IWN_UNLOCK(sc);
6439
6440 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6441 ieee80211_start_all(ic);
6442}
6443
6444static void
6445iwn_stop_locked(struct iwn_softc *sc)
6446{
6447 struct ifnet *ifp = sc->sc_ifp;
6448
6449 IWN_LOCK_ASSERT(sc);
6450
6451 sc->sc_tx_timer = 0;
6438 callout_stop(&sc->sc_timer_to);
6452 callout_stop(&sc->watchdog_to);
6453 callout_stop(&sc->calib_to);
6454 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
6455
6456 /* Power OFF hardware. */
6457 iwn_hw_stop(sc);
6458}
6459
6460static void
6461iwn_stop(struct iwn_softc *sc)
6462{
6463 IWN_LOCK(sc);
6464 iwn_stop_locked(sc);
6465 IWN_UNLOCK(sc);
6466}
6467
6468/*
6469 * Callback from net80211 to start a scan.
6470 */
6471static void
6472iwn_scan_start(struct ieee80211com *ic)
6473{
6474 struct ifnet *ifp = ic->ic_ifp;
6475 struct iwn_softc *sc = ifp->if_softc;
6476
6477 IWN_LOCK(sc);
6478 /* make the link LED blink while we're scanning */
6479 iwn_set_led(sc, IWN_LED_LINK, 20, 2);
6480 IWN_UNLOCK(sc);
6481}
6482
6483/*
6484 * Callback from net80211 to terminate a scan.
6485 */
6486static void
6487iwn_scan_end(struct ieee80211com *ic)
6488{
6489 struct ifnet *ifp = ic->ic_ifp;
6490 struct iwn_softc *sc = ifp->if_softc;
6491 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6492
6493 IWN_LOCK(sc);
6494 if (vap->iv_state == IEEE80211_S_RUN) {
6495 /* Set link LED to ON status if we are associated */
6496 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
6497 }
6498 IWN_UNLOCK(sc);
6499}
6500
6501/*
6502 * Callback from net80211 to force a channel change.
6503 */
6504static void
6505iwn_set_channel(struct ieee80211com *ic)
6506{
6507 const struct ieee80211_channel *c = ic->ic_curchan;
6508 struct ifnet *ifp = ic->ic_ifp;
6509 struct iwn_softc *sc = ifp->if_softc;
6510
6511 IWN_LOCK(sc);
6512 sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
6513 sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
6514 sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
6515 sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
6516 IWN_UNLOCK(sc);
6517}
6518
6519/*
6520 * Callback from net80211 to start scanning of the current channel.
6521 */
6522static void
6523iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6524{
6525 struct ieee80211vap *vap = ss->ss_vap;
6526 struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6527 int error;
6528
6529 IWN_LOCK(sc);
6530 error = iwn_scan(sc);
6531 IWN_UNLOCK(sc);
6532 if (error != 0)
6533 ieee80211_cancel_scan(vap);
6534}
6535
6536/*
6537 * Callback from net80211 to handle the minimum dwell time being met.
6538 * The intent is to terminate the scan, but we just let the firmware
6539 * notify us when it's finished, as we have no safe way to abort it.
6540 */
6541static void
6542iwn_scan_mindwell(struct ieee80211_scan_state *ss)
6543{
6544 /* NB: don't try to abort scan; wait for firmware to finish */
6545}
6546
6547static struct iwn_eeprom_chan *
6548iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
6549{
6550 int i, j;
6551
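	/* Search the band groups defined in iwn_bands[] (7 entries). */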
6552 for (j = 0; j < 7; j++) {
6553 for (i = 0; i < iwn_bands[j].nchan; i++) {
6554 if (iwn_bands[j].chan[i] == c->ic_ieee)
6555 return &sc->eeprom_channels[j][i];
6556 }
6557 }
6558
6559 return NULL;
6560}
6561
6562/*
6563 * Enforce flags read from EEPROM.
6564 */
6565static int
6566iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
6567 int nchan, struct ieee80211_channel chans[])
6568{
6569 struct iwn_softc *sc = ic->ic_ifp->if_softc;
6570 int i;
6571
6572 for (i = 0; i < nchan; i++) {
6573 struct ieee80211_channel *c = &chans[i];
6574 struct iwn_eeprom_chan *channel;
6575
6576 channel = iwn_find_eeprom_channel(sc, c);
6577 if (channel == NULL) {
6578 if_printf(ic->ic_ifp,
6579 "%s: invalid channel %u freq %u/0x%x\n",
6580 __func__, c->ic_ieee, c->ic_freq, c->ic_flags);
6581 return EINVAL;
6582 }
6583 c->ic_flags |= iwn_eeprom_channel_flags(channel);
6584 }
6585
6586 return 0;
6587}
6588
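/*
 * Task callback: restart the hardware from scratch and notify net80211
 * that the radio is operational again.
 */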
6589static void
6590iwn_hw_reset(void *arg0, int pending)
6591{
6592 struct iwn_softc *sc = arg0;
6593 struct ifnet *ifp = sc->sc_ifp;
6594 struct ieee80211com *ic = ifp->if_l2com;
6595
6596 iwn_stop(sc);
6597 iwn_init(sc);
6598 ieee80211_notify_radio(ic, 1);
6599}
6600
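/*
 * Task callback run when the radio is switched back on: bring the
 * interface up again if a vap exists.
 */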
6601static void
6602iwn_radio_on(void *arg0, int pending)
6603{
6604 struct iwn_softc *sc = arg0;
6605 struct ifnet *ifp = sc->sc_ifp;
6606 struct ieee80211com *ic = ifp->if_l2com;
6607 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6608
6609 if (vap != NULL) {
6610 iwn_init(sc);
6611 ieee80211_init(vap);
6612 }
6613}
6614
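/*
 * Task callback run when the RF kill switch disables the radio: stop the
 * interface but leave interrupts enabled so we notice when the switch is
 * toggled back on.
 */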
6615static void
6616iwn_radio_off(void *arg0, int pending)
6617{
6618 struct iwn_softc *sc = arg0;
6619 struct ifnet *ifp = sc->sc_ifp;
6620 struct ieee80211com *ic = ifp->if_l2com;
6621 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6622
6623 iwn_stop(sc);
6624 if (vap != NULL)
6625 ieee80211_stop(vap);
6626
6627 /* Enable interrupts to get RF toggle notification. */
6628 IWN_LOCK(sc);
6629 IWN_WRITE(sc, IWN_INT, 0xffffffff);
6630 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
6631 IWN_UNLOCK(sc);
6632}
6633
6634static void
6635iwn_sysctlattach(struct iwn_softc *sc)
6636{
6637 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
6638 struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
6639
6640#ifdef IWN_DEBUG
6641 sc->sc_debug = 0;
6642 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
6643 "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
6644#endif
6645}
6646
6647static int
6648iwn_shutdown(device_t dev)
6649{
6650 struct iwn_softc *sc = device_get_softc(dev);
6651
6652 iwn_stop(sc);
6653 return 0;
6654}
6655
6656static int
6657iwn_suspend(device_t dev)
6658{
6659 struct iwn_softc *sc = device_get_softc(dev);
6660 struct ifnet *ifp = sc->sc_ifp;
6661 struct ieee80211com *ic = ifp->if_l2com;
6662 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6663
6664 iwn_stop(sc);
6665 if (vap != NULL)
6666 ieee80211_stop(vap);
6667 return 0;
6668}
6669
6670static int
6671iwn_resume(device_t dev)
6672{
6673 struct iwn_softc *sc = device_get_softc(dev);
6674 struct ifnet *ifp = sc->sc_ifp;
6675 struct ieee80211com *ic = ifp->if_l2com;
6676 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6677
6678 /* Clear device-specific "PCI retry timeout" register (41h). */
6679 pci_write_config(dev, 0x41, 0, 1);
6680
6681 if (ifp->if_flags & IFF_UP) {
6682 iwn_init(sc);
6683 if (vap != NULL)
6684 ieee80211_init(vap);
6685 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
6686 iwn_start(ifp);
6687 }
6688 return 0;
6689}
6690
6691#ifdef IWN_DEBUG
6692static const char *
6693iwn_intr_str(uint8_t cmd)
6694{
6695 switch (cmd) {
6696 /* Notifications */
6697 case IWN_UC_READY: return "UC_READY";
6698 case IWN_ADD_NODE_DONE: return "ADD_NODE_DONE";
6699 case IWN_TX_DONE: return "TX_DONE";
6700 case IWN_START_SCAN: return "START_SCAN";
6701 case IWN_STOP_SCAN: return "STOP_SCAN";
6702 case IWN_RX_STATISTICS: return "RX_STATS";
6703 case IWN_BEACON_STATISTICS: return "BEACON_STATS";
6704 case IWN_STATE_CHANGED: return "STATE_CHANGED";
6705 case IWN_BEACON_MISSED: return "BEACON_MISSED";
6706 case IWN_RX_PHY: return "RX_PHY";
6707 case IWN_MPDU_RX_DONE: return "MPDU_RX_DONE";
6708 case IWN_RX_DONE: return "RX_DONE";
6709
6710 /* Command Notifications */
6711 case IWN_CMD_RXON: return "IWN_CMD_RXON";
6712 case IWN_CMD_RXON_ASSOC: return "IWN_CMD_RXON_ASSOC";
6713 case IWN_CMD_EDCA_PARAMS: return "IWN_CMD_EDCA_PARAMS";
6714 case IWN_CMD_TIMING: return "IWN_CMD_TIMING";
6715 case IWN_CMD_LINK_QUALITY: return "IWN_CMD_LINK_QUALITY";
6716 case IWN_CMD_SET_LED: return "IWN_CMD_SET_LED";
6717 case IWN5000_CMD_WIMAX_COEX: return "IWN5000_CMD_WIMAX_COEX";
6718 case IWN5000_CMD_CALIB_CONFIG: return "IWN5000_CMD_CALIB_CONFIG";
6719 case IWN5000_CMD_CALIB_RESULT: return "IWN5000_CMD_CALIB_RESULT";
6720 case IWN5000_CMD_CALIB_COMPLETE: return "IWN5000_CMD_CALIB_COMPLETE";
6721 case IWN_CMD_SET_POWER_MODE: return "IWN_CMD_SET_POWER_MODE";
6722 case IWN_CMD_SCAN: return "IWN_CMD_SCAN";
6723 case IWN_CMD_SCAN_RESULTS: return "IWN_CMD_SCAN_RESULTS";
6724 case IWN_CMD_TXPOWER: return "IWN_CMD_TXPOWER";
6725 case IWN_CMD_TXPOWER_DBM: return "IWN_CMD_TXPOWER_DBM";
6726 case IWN5000_CMD_TX_ANT_CONFIG: return "IWN5000_CMD_TX_ANT_CONFIG";
6727 case IWN_CMD_BT_COEX: return "IWN_CMD_BT_COEX";
6728 case IWN_CMD_SET_CRITICAL_TEMP: return "IWN_CMD_SET_CRITICAL_TEMP";
6729 case IWN_CMD_SET_SENSITIVITY: return "IWN_CMD_SET_SENSITIVITY";
6730 case IWN_CMD_PHY_CALIB: return "IWN_CMD_PHY_CALIB";
6731 }
6732 return "UNKNOWN INTR NOTIF/CMD";
6733}
6734#endif /* IWN_DEBUG */
6735
6736static device_method_t iwn_methods[] = {
6737 /* Device interface */
6738 DEVMETHOD(device_probe, iwn_probe),
6739 DEVMETHOD(device_attach, iwn_attach),
6740 DEVMETHOD(device_detach, iwn_detach),
6741 DEVMETHOD(device_shutdown, iwn_shutdown),
6742 DEVMETHOD(device_suspend, iwn_suspend),
6743 DEVMETHOD(device_resume, iwn_resume),
6744 { 0, 0 }
6745};
6746
6747static driver_t iwn_driver = {
6748 "iwn",
6749 iwn_methods,
6750 sizeof (struct iwn_softc)
6751};
6752static devclass_t iwn_devclass;
6753
6754DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0);
6755MODULE_DEPEND(iwn, pci, 1, 1, 1);
6756MODULE_DEPEND(iwn, firmware, 1, 1, 1);
6757MODULE_DEPEND(iwn, wlan, 1, 1, 1);