1/*	$OpenBSD: if_iwx.c,v 1.181 2024/02/16 11:44:52 stsp Exp $	*/
2
3/*
4 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
5 *   Author: Stefan Sperling <stsp@openbsd.org>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22/*-
23 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 * which were used as the reference documentation for this implementation.
25 *
26 ******************************************************************************
27 *
28 * This file is provided under a dual BSD/GPLv2 license.  When using or
29 * redistributing this file, you may do so under either license.
30 *
31 * GPL LICENSE SUMMARY
32 *
33 * Copyright(c) 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43 * General Public License for more details.
44 *
45 * BSD LICENSE
46 *
47 * Copyright(c) 2017 Intel Deutschland GmbH
48 * Copyright(c) 2018 - 2019 Intel Corporation
49 * All rights reserved.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 *
55 *  * Redistributions of source code must retain the above copyright
56 *    notice, this list of conditions and the following disclaimer.
57 *  * Redistributions in binary form must reproduce the above copyright
58 *    notice, this list of conditions and the following disclaimer in
59 *    the documentation and/or other materials provided with the
60 *    distribution.
61 *  * Neither the name Intel Corporation nor the names of its
62 *    contributors may be used to endorse or promote products derived
63 *    from this software without specific prior written permission.
64 *
65 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
66 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
67 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
68 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
69 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
70 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
71 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
75 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76 *
77 *****************************************************************************
78 */
79
80/*-
81 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
82 *
83 * Permission to use, copy, modify, and distribute this software for any
84 * purpose with or without fee is hereby granted, provided that the above
85 * copyright notice and this permission notice appear in all copies.
86 *
87 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
88 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
89 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
90 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
91 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
92 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
93 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
94 */
95
96//#include "bpfilter.h"
97
98#include <sys/param.h>
99#include <sys/conf.h>
100#include <sys/kernel.h>
101#include <sys/malloc.h>
102#include <sys/mbuf.h>
103#include <sys/mutex.h>
104#include <sys/proc.h>
105#include <sys/rwlock.h>
106#include <sys/socket.h>
107#include <sys/sockio.h>
108#include <sys/systm.h>
109#include <sys/endian.h>
110
111#include <sys/refcnt.h>
112#include <sys/task.h>
113#include <machine/bus.h>
114//#include <machine/intr.h>
115
116#include <dev/pci/pcireg.h>
117#include <dev/pci/pcivar.h>
118//#include <dev/pci/pcidevs.h>
119
120#if NBPFILTER > 0
121#include <net/bpf.h>
122#endif
123#include <net/if.h>
124#include <net/if_dl.h>
125#include <net/if_media.h>
126#include <net/if_types.h>
127
128#include <netinet/in.h>
129#include <netinet/if_ether.h>
130
131#include <net80211/ieee80211_var.h>
132#include <net80211/ieee80211_radiotap.h>
133#include <net80211/ieee80211_priv.h> /* for SEQ_LT */
134#undef DPRINTF /* defined in ieee80211_priv.h */
135
136#ifdef __FreeBSD_version
137#include <sys/device.h>
138#include <net/ifq.h>
139#define DEVNAME(_s) gDriverName
140#define SC_DEV_FOR_PCI sc->sc_dev
141#define mallocarray(nmemb, size, type, flags) malloc((size) * (nmemb), (type), (flags))
142#else
143#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
144#endif
145
146#define IC2IFP(_ic_) (&(_ic_)->ic_if)
147
148#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
149#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
150
151#ifdef IWX_DEBUG
152#define DPRINTF(x)	do { if (iwx_debug > 0) printf x; } while (0)
153#define DPRINTFN(n, x)	do { if (iwx_debug >= (n)) printf x; } while (0)
154int iwx_debug = 1;
155#else
156#define DPRINTF(x)	do { ; } while (0)
157#define DPRINTFN(n, x)	do { ; } while (0)
158#endif
159
160#include <dev/pci/if_iwxreg.h>
161#include <dev/pci/if_iwxvar.h>
162
163const uint8_t iwx_nvm_channels_8000[] = {
164	/* 2.4 GHz */
165	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
166	/* 5 GHz */
167	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
168	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
169	149, 153, 157, 161, 165, 169, 173, 177, 181
170};
171
172static const uint8_t iwx_nvm_channels_uhb[] = {
173	/* 2.4 GHz */
174	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
175	/* 5 GHz */
176	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
177	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
178	149, 153, 157, 161, 165, 169, 173, 177, 181,
179	/* 6-7 GHz */
180	1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
181	73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
182	133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
183	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
184};
185
186#define IWX_NUM_2GHZ_CHANNELS	14
187#define IWX_NUM_5GHZ_CHANNELS	37
188
189const struct iwx_rate {
190	uint16_t rate;
191	uint8_t plcp;
192	uint8_t ht_plcp;
193} iwx_rates[] = {
194		/* Legacy */		/* HT */
195	{   2,	IWX_RATE_1M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
196	{   4,	IWX_RATE_2M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
197	{  11,	IWX_RATE_5M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
198	{  22,	IWX_RATE_11M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP },
199	{  12,	IWX_RATE_6M_PLCP,	IWX_RATE_HT_SISO_MCS_0_PLCP },
200	{  18,	IWX_RATE_9M_PLCP,	IWX_RATE_HT_SISO_MCS_INV_PLCP  },
201	{  24,	IWX_RATE_12M_PLCP,	IWX_RATE_HT_SISO_MCS_1_PLCP },
202	{  26,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_8_PLCP },
203	{  36,	IWX_RATE_18M_PLCP,	IWX_RATE_HT_SISO_MCS_2_PLCP },
204	{  48,	IWX_RATE_24M_PLCP,	IWX_RATE_HT_SISO_MCS_3_PLCP },
205	{  52,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_9_PLCP },
206	{  72,	IWX_RATE_36M_PLCP,	IWX_RATE_HT_SISO_MCS_4_PLCP },
207	{  78,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_10_PLCP },
208	{  96,	IWX_RATE_48M_PLCP,	IWX_RATE_HT_SISO_MCS_5_PLCP },
209	{ 104,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_11_PLCP },
210	{ 108,	IWX_RATE_54M_PLCP,	IWX_RATE_HT_SISO_MCS_6_PLCP },
211	{ 128,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_SISO_MCS_7_PLCP },
212	{ 156,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_12_PLCP },
213	{ 208,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_13_PLCP },
214	{ 234,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_14_PLCP },
215	{ 260,	IWX_RATE_INVM_PLCP,	IWX_RATE_HT_MIMO2_MCS_15_PLCP },
216};
217#define IWX_RIDX_CCK	0
218#define IWX_RIDX_OFDM	4
219#define IWX_RIDX_MAX	(nitems(iwx_rates)-1)
220#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
221#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
222#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
223
224/* Convert an MCS index into an iwx_rates[] index. */
225const int iwx_mcs2ridx[] = {
226	IWX_RATE_MCS_0_INDEX,
227	IWX_RATE_MCS_1_INDEX,
228	IWX_RATE_MCS_2_INDEX,
229	IWX_RATE_MCS_3_INDEX,
230	IWX_RATE_MCS_4_INDEX,
231	IWX_RATE_MCS_5_INDEX,
232	IWX_RATE_MCS_6_INDEX,
233	IWX_RATE_MCS_7_INDEX,
234	IWX_RATE_MCS_8_INDEX,
235	IWX_RATE_MCS_9_INDEX,
236	IWX_RATE_MCS_10_INDEX,
237	IWX_RATE_MCS_11_INDEX,
238	IWX_RATE_MCS_12_INDEX,
239	IWX_RATE_MCS_13_INDEX,
240	IWX_RATE_MCS_14_INDEX,
241	IWX_RATE_MCS_15_INDEX,
242};
243
244uint8_t	iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
245uint8_t	iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
246int	iwx_is_mimo_ht_plcp(uint8_t);
247int	iwx_store_cscheme(struct iwx_softc *, uint8_t *, size_t);
248int	iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
249int	iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
250int	iwx_apply_debug_destination(struct iwx_softc *);
251void	iwx_set_ltr(struct iwx_softc *);
252int	iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
253int	iwx_ctxt_info_gen3_init(struct iwx_softc *,
254	    const struct iwx_fw_sects *);
255void	iwx_ctxt_info_free_fw_img(struct iwx_softc *);
256void	iwx_ctxt_info_free_paging(struct iwx_softc *);
257int	iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
258	    struct iwx_context_info_dram *);
259void	iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
260int	iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
261	    uint8_t *, size_t);
262int	iwx_set_default_calib(struct iwx_softc *, const void *);
263void	iwx_fw_info_free(struct iwx_fw_info *);
264int	iwx_read_firmware(struct iwx_softc *);
265uint32_t iwx_prph_addr_mask(struct iwx_softc *);
266uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
267uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
268void	iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
269void	iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
270uint32_t iwx_read_umac_prph_unlocked(struct iwx_softc *, uint32_t);
271uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
272void	iwx_write_umac_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
273void	iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
274int	iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
275int	iwx_write_mem(struct iwx_softc *, uint32_t, const void *, int);
276int	iwx_write_mem32(struct iwx_softc *, uint32_t, uint32_t);
277int	iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
278int	iwx_nic_lock(struct iwx_softc *);
279void	iwx_nic_assert_locked(struct iwx_softc *);
280void	iwx_nic_unlock(struct iwx_softc *);
281int	iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
282	    uint32_t);
283int	iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
284int	iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
285int	iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *, bus_size_t,
286	    bus_size_t);
287void	iwx_dma_contig_free(struct iwx_dma_info *);
288int	iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
289void	iwx_disable_rx_dma(struct iwx_softc *);
290void	iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
291void	iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
292int	iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
293void	iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
294void	iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
295void	iwx_enable_rfkill_int(struct iwx_softc *);
296int	iwx_check_rfkill(struct iwx_softc *);
297void	iwx_enable_interrupts(struct iwx_softc *);
298void	iwx_enable_fwload_interrupt(struct iwx_softc *);
299void	iwx_restore_interrupts(struct iwx_softc *);
300void	iwx_disable_interrupts(struct iwx_softc *);
301void	iwx_ict_reset(struct iwx_softc *);
302int	iwx_set_hw_ready(struct iwx_softc *);
303int	iwx_prepare_card_hw(struct iwx_softc *);
304int	iwx_force_power_gating(struct iwx_softc *);
305void	iwx_apm_config(struct iwx_softc *);
306int	iwx_apm_init(struct iwx_softc *);
307void	iwx_apm_stop(struct iwx_softc *);
308int	iwx_allow_mcast(struct iwx_softc *);
309void	iwx_init_msix_hw(struct iwx_softc *);
310void	iwx_conf_msix_hw(struct iwx_softc *, int);
311int	iwx_clear_persistence_bit(struct iwx_softc *);
312int	iwx_start_hw(struct iwx_softc *);
313void	iwx_stop_device(struct iwx_softc *);
314void	iwx_nic_config(struct iwx_softc *);
315int	iwx_nic_rx_init(struct iwx_softc *);
316int	iwx_nic_init(struct iwx_softc *);
317int	iwx_enable_txq(struct iwx_softc *, int, int, int, int);
318int	iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
319void	iwx_post_alive(struct iwx_softc *);
320int	iwx_schedule_session_protection(struct iwx_softc *, struct iwx_node *,
321	    uint32_t);
322void	iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
323void	iwx_init_channel_map(struct iwx_softc *, uint16_t *, uint32_t *, int);
324void	iwx_setup_ht_rates(struct iwx_softc *);
325void	iwx_setup_vht_rates(struct iwx_softc *);
326int	iwx_mimo_enabled(struct iwx_softc *);
327void	iwx_mac_ctxt_task(void *);
328void	iwx_phy_ctxt_task(void *);
329void	iwx_updatechan(struct ieee80211com *);
330void	iwx_updateprot(struct ieee80211com *);
331void	iwx_updateslot(struct ieee80211com *);
332void	iwx_updateedca(struct ieee80211com *);
333void	iwx_updatedtim(struct ieee80211com *);
334void	iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
335	    uint16_t);
336void	iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
337int	iwx_ampdu_rx_start(struct ieee80211com *, struct ieee80211_node *,
338	    uint8_t);
339void	iwx_ampdu_rx_stop(struct ieee80211com *, struct ieee80211_node *,
340	    uint8_t);
341int	iwx_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *,
342	    uint8_t);
343void	iwx_rx_ba_session_expired(void *);
344void	iwx_rx_bar_frame_release(struct iwx_softc *, struct iwx_rx_packet *,
345	    struct mbuf_list *);
346void	iwx_reorder_timer_expired(void *);
347void	iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
348	    uint16_t, uint16_t, int, int);
349void	iwx_sta_tx_agg_start(struct iwx_softc *, struct ieee80211_node *,
350	    uint8_t);
351void	iwx_ba_task(void *);
352
353void	iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
354int	iwx_is_valid_mac_addr(const uint8_t *);
355void	iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
356int	iwx_nvm_get(struct iwx_softc *);
357int	iwx_load_firmware(struct iwx_softc *);
358int	iwx_start_fw(struct iwx_softc *);
359int	iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
360int	iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
361void	iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
362int	iwx_load_pnvm(struct iwx_softc *);
363int	iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
364int	iwx_send_phy_cfg_cmd(struct iwx_softc *);
365int	iwx_load_ucode_wait_alive(struct iwx_softc *);
366int	iwx_send_dqa_cmd(struct iwx_softc *);
367int	iwx_run_init_mvm_ucode(struct iwx_softc *, int);
368int	iwx_config_ltr(struct iwx_softc *);
369void	iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int);
370int	iwx_rx_addbuf(struct iwx_softc *, int, int);
371int	iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
372void	iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
373	    struct iwx_rx_data *);
374int	iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
375int	iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t,
376	    struct ieee80211_rxinfo *);
377int	iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
378	    struct ieee80211_node *, struct ieee80211_rxinfo *);
379void	iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t, int, int,
380	    uint32_t, struct ieee80211_rxinfo *, struct mbuf_list *);
381void	iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
382void	iwx_txd_done(struct iwx_softc *, struct iwx_tx_data *);
383void	iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
384void	iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
385	    struct iwx_rx_data *);
386void	iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
387void	iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
388	    struct iwx_rx_data *);
389int	iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
390uint8_t	iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
391int	iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
392	    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
393int	iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
394	    uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
395int	iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *, uint8_t,
396	    uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
397int	iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
398int	iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
399	    const void *);
400int	iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
401	    uint32_t *);
402int	iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
403	    const void *, uint32_t *);
404void	iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
405void	iwx_cmd_done(struct iwx_softc *, int, int, int);
406uint32_t iwx_fw_rateidx_ofdm(uint8_t);
407uint32_t iwx_fw_rateidx_cck(uint8_t);
408const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *, struct iwx_node *,
409	    struct ieee80211_frame *, uint16_t *, uint32_t *);
410void	iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
411	    uint16_t, uint16_t);
412int	iwx_tx(struct iwx_softc *, struct mbuf *, struct ieee80211_node *);
413int	iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
414int	iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
415int	iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
416int	iwx_beacon_filter_send_cmd(struct iwx_softc *,
417	    struct iwx_beacon_filter_cmd *);
418int	iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *, int);
419void	iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
420	    struct iwx_mac_power_cmd *);
421int	iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
422int	iwx_power_update_device(struct iwx_softc *);
423int	iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
424int	iwx_disable_beacon_filter(struct iwx_softc *);
425int	iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
426int	iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
427int	iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
428int	iwx_fill_probe_req(struct iwx_softc *, struct iwx_scan_probe_req *);
429int	iwx_config_umac_scan_reduced(struct iwx_softc *);
430uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
431void	iwx_scan_umac_dwell_v10(struct iwx_softc *,
432	    struct iwx_scan_general_params_v10 *, int);
433void	iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
434	    struct iwx_scan_general_params_v10 *, uint16_t, int);
435void	iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
436	    struct iwx_scan_channel_params_v6 *, uint32_t, int);
437int	iwx_umac_scan_v14(struct iwx_softc *, int);
438void	iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
439uint8_t	iwx_ridx2rate(struct ieee80211_rateset *, int);
440int	iwx_rval2ridx(int);
441void	iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *, int *);
442void	iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
443	    struct iwx_mac_ctx_cmd *, uint32_t);
444void	iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
445	    struct iwx_mac_data_sta *, int);
446int	iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *, uint32_t, int);
447int	iwx_clear_statistics(struct iwx_softc *);
448void	iwx_add_task(struct iwx_softc *, struct taskq *, struct task *);
449void	iwx_del_task(struct iwx_softc *, struct taskq *, struct task *);
450int	iwx_scan(struct iwx_softc *);
451int	iwx_bgscan(struct ieee80211com *);
452void	iwx_bgscan_done(struct ieee80211com *,
453	    struct ieee80211_node_switch_bss_arg *, size_t);
454void	iwx_bgscan_done_task(void *);
455int	iwx_umac_scan_abort(struct iwx_softc *);
456int	iwx_scan_abort(struct iwx_softc *);
457int	iwx_enable_mgmt_queue(struct iwx_softc *);
458int	iwx_disable_mgmt_queue(struct iwx_softc *);
459int	iwx_rs_rval2idx(uint8_t);
460uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *, int);
461uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
462int	iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
463int	iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
464int	iwx_rs_init(struct iwx_softc *, struct iwx_node *);
465int	iwx_enable_data_tx_queues(struct iwx_softc *);
466int	iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
467	    uint8_t, uint8_t);
468int	iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
469	    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
470	    uint8_t);
471int	iwx_auth(struct iwx_softc *);
472int	iwx_deauth(struct iwx_softc *);
473int	iwx_run(struct iwx_softc *);
474int	iwx_run_stop(struct iwx_softc *);
475struct ieee80211_node *iwx_node_alloc(struct ieee80211com *);
476int	iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
477	    struct ieee80211_key *);
478void	iwx_setkey_task(void *);
479void	iwx_delete_key(struct ieee80211com *,
480	    struct ieee80211_node *, struct ieee80211_key *);
481int	iwx_media_change(struct ifnet *);
482void	iwx_newstate_task(void *);
483int	iwx_newstate(struct ieee80211com *, enum ieee80211_state, int);
484void	iwx_endscan(struct iwx_softc *);
485void	iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
486	    struct ieee80211_node *);
487int	iwx_sf_config(struct iwx_softc *, int);
488int	iwx_send_bt_init_conf(struct iwx_softc *);
489int	iwx_send_soc_conf(struct iwx_softc *);
490int	iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
491int	iwx_send_temp_report_ths_cmd(struct iwx_softc *);
492int	iwx_init_hw(struct iwx_softc *);
493int	iwx_init(struct ifnet *);
494void	iwx_start(struct ifnet *);
495void	iwx_stop(struct ifnet *);
496void	iwx_watchdog(struct ifnet *);
497int	iwx_ioctl(struct ifnet *, u_long, caddr_t);
498const char *iwx_desc_lookup(uint32_t);
499void	iwx_nic_error(struct iwx_softc *);
500void	iwx_dump_driver_status(struct iwx_softc *);
501void	iwx_nic_umac_error(struct iwx_softc *);
502int	iwx_detect_duplicate(struct iwx_softc *, struct mbuf *,
503	    struct iwx_rx_mpdu_desc *, struct ieee80211_rxinfo *);
504int	iwx_is_sn_less(uint16_t, uint16_t, uint16_t);
505void	iwx_release_frames(struct iwx_softc *, struct ieee80211_node *,
506	    struct iwx_rxba_data *, struct iwx_reorder_buffer *, uint16_t,
507	    struct mbuf_list *);
508int	iwx_oldsn_workaround(struct iwx_softc *, struct ieee80211_node *,
509	    int, struct iwx_reorder_buffer *, uint32_t, uint32_t);
510int	iwx_rx_reorder(struct iwx_softc *, struct mbuf *, int,
511	    struct iwx_rx_mpdu_desc *, int, int, uint32_t,
512	    struct ieee80211_rxinfo *, struct mbuf_list *);
513void	iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t,
514	    struct mbuf_list *);
515int	iwx_rx_pkt_valid(struct iwx_rx_packet *);
516void	iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
517	    struct mbuf_list *);
518void	iwx_notif_intr(struct iwx_softc *);
519int	iwx_intr(void *);
520int	iwx_intr_msix(void *);
521int	iwx_match(struct device *, void *, void *);
522int	iwx_preinit(struct iwx_softc *);
523void	iwx_attach_hook(struct device *);
524const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
525//void	iwx_attach(struct device *, struct device *, void *);
526void	iwx_init_task(void *);
527int	iwx_activate(struct device *, int);
528void	iwx_resume(struct iwx_softc *);
529int	iwx_wakeup(struct iwx_softc *);
530
531#if NBPFILTER > 0
532void	iwx_radiotap_attach(struct iwx_softc *);
533#endif
534
535uint8_t
536iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
537{
538	const struct iwx_fw_cmd_version *entry;
539	int i;
540
541	for (i = 0; i < sc->n_cmd_versions; i++) {
542		entry = &sc->cmd_versions[i];
543		if (entry->group == grp && entry->cmd == cmd)
544			return entry->cmd_ver;
545	}
546
547	return IWX_FW_CMD_VER_UNKNOWN;
548}
549
550uint8_t
551iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
552{
553	const struct iwx_fw_cmd_version *entry;
554	int i;
555
556	for (i = 0; i < sc->n_cmd_versions; i++) {
557		entry = &sc->cmd_versions[i];
558		if (entry->group == grp && entry->cmd == cmd)
559			return entry->notif_ver;
560	}
561
562	return IWX_FW_CMD_VER_UNKNOWN;
563}
564
565int
566iwx_is_mimo_ht_plcp(uint8_t ht_plcp)
567{
568	switch (ht_plcp) {
569	case IWX_RATE_HT_MIMO2_MCS_8_PLCP:
570	case IWX_RATE_HT_MIMO2_MCS_9_PLCP:
571	case IWX_RATE_HT_MIMO2_MCS_10_PLCP:
572	case IWX_RATE_HT_MIMO2_MCS_11_PLCP:
573	case IWX_RATE_HT_MIMO2_MCS_12_PLCP:
574	case IWX_RATE_HT_MIMO2_MCS_13_PLCP:
575	case IWX_RATE_HT_MIMO2_MCS_14_PLCP:
576	case IWX_RATE_HT_MIMO2_MCS_15_PLCP:
577		return 1;
578	default:
579		break;
580	}
581
582	return 0;
583}
584
585int
586iwx_store_cscheme(struct iwx_softc *sc, uint8_t *data, size_t dlen)
587{
588	struct iwx_fw_cscheme_list *l = (void *)data;
589
590	if (dlen < sizeof(*l) ||
591	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
592		return EINVAL;
593
594	/* we don't actually store anything for now, always use s/w crypto */
595
596	return 0;
597}
598
599int
600iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
601    const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
602{
603	int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 0);
604	if (err) {
605		printf("%s: could not allocate context info DMA memory\n",
606		    DEVNAME(sc));
607		return err;
608	}
609
610	memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
611
612	return 0;
613}
614
615void iwx_ctxt_info_free_paging(struct iwx_softc *sc)
616{
617	struct iwx_self_init_dram *dram = &sc->init_dram;
618	int i;
619
620	if (!dram->paging)
621		return;
622
623	/* free paging*/
624	for (i = 0; i < dram->paging_cnt; i++)
625		iwx_dma_contig_free(&dram->paging[i]);
626
627	free(dram->paging, M_DEVBUF, dram->paging_cnt * sizeof(*dram->paging));
628	dram->paging_cnt = 0;
629	dram->paging = NULL;
630}
631
632int
633iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
634{
635	int i = 0;
636
637	while (start < fws->fw_count &&
638	       fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
639	       fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
640		start++;
641		i++;
642	}
643
644	return i;
645}
646
647int
648iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
649    struct iwx_context_info_dram *ctxt_dram)
650{
651	struct iwx_self_init_dram *dram = &sc->init_dram;
652	int i, ret, fw_cnt = 0;
653
654	KASSERT(dram->paging == NULL);
655
656	dram->lmac_cnt = iwx_get_num_sections(fws, 0);
657	/* add 1 due to separator */
658	dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
659	/* add 2 due to separators */
660	dram->paging_cnt = iwx_get_num_sections(fws,
661	    dram->lmac_cnt + dram->umac_cnt + 2);
662
663	dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
664	    sizeof(*dram->fw), M_DEVBUF,  M_ZERO | M_NOWAIT);
665	if (!dram->fw) {
666		printf("%s: could not allocate memory for firmware sections\n",
667		    DEVNAME(sc));
668		return ENOMEM;
669	}
670
671	dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
672	    M_DEVBUF, M_ZERO | M_NOWAIT);
673	if (!dram->paging) {
674		printf("%s: could not allocate memory for firmware paging\n",
675		    DEVNAME(sc));
676		return ENOMEM;
677	}
678
679	/* initialize lmac sections */
680	for (i = 0; i < dram->lmac_cnt; i++) {
681		ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
682						   &dram->fw[fw_cnt]);
683		if (ret)
684			return ret;
685		ctxt_dram->lmac_img[i] =
686			htole64(dram->fw[fw_cnt].paddr);
687		DPRINTF(("%s: firmware LMAC section %d at 0x%llx size %lld\n", __func__, i,
688		    (unsigned long long)dram->fw[fw_cnt].paddr,
689		    (unsigned long long)dram->fw[fw_cnt].size));
690		fw_cnt++;
691	}
692
693	/* initialize umac sections */
694	for (i = 0; i < dram->umac_cnt; i++) {
695		/* access FW with +1 to make up for lmac separator */
696		ret = iwx_ctxt_info_alloc_dma(sc,
697		    &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
698		if (ret)
699			return ret;
700		ctxt_dram->umac_img[i] =
701			htole64(dram->fw[fw_cnt].paddr);
702		DPRINTF(("%s: firmware UMAC section %d at 0x%llx size %lld\n", __func__, i,
703			(unsigned long long)dram->fw[fw_cnt].paddr,
704			(unsigned long long)dram->fw[fw_cnt].size));
705		fw_cnt++;
706	}
707
708	/*
709	 * Initialize paging.
710	 * Paging memory isn't stored in dram->fw as the umac and lmac - it is
711	 * stored separately.
712	 * This is since the timing of its release is different -
713	 * while fw memory can be released on alive, the paging memory can be
714	 * freed only when the device goes down.
715	 * Given that, the logic here in accessing the fw image is a bit
716	 * different - fw_cnt isn't changing so loop counter is added to it.
717	 */
718	for (i = 0; i < dram->paging_cnt; i++) {
719		/* access FW with +2 to make up for lmac & umac separators */
720		int fw_idx = fw_cnt + i + 2;
721
722		ret = iwx_ctxt_info_alloc_dma(sc,
723		    &fws->fw_sect[fw_idx], &dram->paging[i]);
724		if (ret)
725			return ret;
726
727		ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
728		DPRINTF(("%s: firmware paging section %d at 0x%llx size %lld\n", __func__, i,
729		    (unsigned long long)dram->paging[i].paddr,
730		    (unsigned long long)dram->paging[i].size));
731	}
732
733	return 0;
734}
735
736void
737iwx_fw_version_str(char *buf, size_t bufsize,
738    uint32_t major, uint32_t minor, uint32_t api)
739{
740	/*
741	 * Starting with major version 35 the Linux driver prints the minor
742	 * version in hexadecimal.
743	 */
744	if (major >= 35)
745		snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
746	else
747		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
748}
749
750int
751iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
752    uint8_t min_power)
753{
754	struct iwx_dma_info *fw_mon = &sc->fw_mon;
755	uint32_t size = 0;
756	uint8_t power;
757	int err;
758
759	if (fw_mon->size)
760		return 0;
761
762	for (power = max_power; power >= min_power; power--) {
763		size = (1 << power);
764
765		err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
766		if (err)
767			continue;
768
769		DPRINTF(("%s: allocated 0x%08x bytes for firmware monitor.\n",
770			 DEVNAME(sc), size));
771		break;
772	}
773
774	if (err) {
775		fw_mon->size = 0;
776		return err;
777	}
778
779	if (power != max_power)
780		DPRINTF(("%s: Sorry - debug buffer is only %luK while you requested %luK\n",
781			DEVNAME(sc), (unsigned long)(1 << (power - 10)),
782			(unsigned long)(1 << (max_power - 10))));
783
784	return 0;
785}
786
787int
788iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
789{
790	if (!max_power) {
791		/* default max_power is maximum */
792		max_power = 26;
793	} else {
794		max_power += 11;
795	}
796
797	if (max_power > 26) {
798		 DPRINTF(("%s: External buffer size for monitor is too big %d, "
799		     "check the FW TLV\n", DEVNAME(sc), max_power));
800		return 0;
801	}
802
803	if (sc->fw_mon.size)
804		return 0;
805
806	return iwx_alloc_fw_monitor_block(sc, max_power, 11);
807}
808
809int
810iwx_apply_debug_destination(struct iwx_softc *sc)
811{
812	struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
813	int i, err;
814	uint8_t mon_mode, size_power, base_shift, end_shift;
815	uint32_t base_reg, end_reg;
816
817	dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
818	mon_mode = dest_v1->monitor_mode;
819	size_power = dest_v1->size_power;
820	base_reg = le32toh(dest_v1->base_reg);
821	end_reg = le32toh(dest_v1->end_reg);
822	base_shift = dest_v1->base_shift;
823	end_shift = dest_v1->end_shift;
824
825	DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
826
827	if (mon_mode == EXTERNAL_MODE) {
828		err = iwx_alloc_fw_monitor(sc, size_power);
829		if (err)
830			return err;
831	}
832
833	if (!iwx_nic_lock(sc))
834		return EBUSY;
835
836	for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
837		uint32_t addr, val;
838		uint8_t op;
839
840		addr = le32toh(dest_v1->reg_ops[i].addr);
841		val = le32toh(dest_v1->reg_ops[i].val);
842		op = dest_v1->reg_ops[i].op;
843
844		DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
845		switch (op) {
846		case CSR_ASSIGN:
847			IWX_WRITE(sc, addr, val);
848			break;
849		case CSR_SETBIT:
850			IWX_SETBITS(sc, addr, (1 << val));
851			break;
852		case CSR_CLEARBIT:
853			IWX_CLRBITS(sc, addr, (1 << val));
854			break;
855		case PRPH_ASSIGN:
856			iwx_write_prph(sc, addr, val);
857			break;
858		case PRPH_SETBIT:
859			err = iwx_set_bits_prph(sc, addr, (1 << val));
860			if (err)
861				return err;
862			break;
863		case PRPH_CLEARBIT:
864			err = iwx_clear_bits_prph(sc, addr, (1 << val));
865			if (err)
866				return err;
867			break;
868		case PRPH_BLOCKBIT:
869			if (iwx_read_prph(sc, addr) & (1 << val))
870				goto monitor;
871			break;
872		default:
873			DPRINTF(("%s: FW debug - unknown OP %d\n",
874			    DEVNAME(sc), op));
875			break;
876		}
877	}
878
879monitor:
880	if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
881		iwx_write_prph(sc, le32toh(base_reg),
882		    sc->fw_mon.paddr >> base_shift);
883		iwx_write_prph(sc, end_reg,
884		    (sc->fw_mon.paddr + sc->fw_mon.size - 256)
885		    >> end_shift);
886	}
887
888	iwx_nic_unlock(sc);
889	return 0;
890}
891
892void
893iwx_set_ltr(struct iwx_softc *sc)
894{
895	uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
896	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
897	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
898	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
899	    ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
900	    IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
901	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
902	    ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
903	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
904	    IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
905	    (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
906
907	/*
908	 * To workaround hardware latency issues during the boot process,
909	 * initialize the LTR to ~250 usec (see ltr_val above).
910	 * The firmware initializes this again later (to a smaller value).
911	 */
912	if (!sc->sc_integrated) {
913		IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
914	} else if (sc->sc_integrated &&
915		   sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
916		iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
917		    IWX_HPM_MAC_LRT_ENABLE_ALL);
918		iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
919	}
920}
921
922int
923iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
924{
925	struct iwx_context_info *ctxt_info;
926	struct iwx_context_info_rbd_cfg *rx_cfg;
927	uint32_t control_flags = 0;
928	uint64_t paddr;
929	int err;
930
931	ctxt_info = sc->ctxt_info_dma.vaddr;
932	memset(ctxt_info, 0, sizeof(*ctxt_info));
933
934	ctxt_info->version.version = 0;
935	ctxt_info->version.mac_id =
936		htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
937	/* size is in DWs */
938	ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
939
940	KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF);
941	control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
942			(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
943			 IWX_CTXT_INFO_RB_CB_SIZE_POS) |
944			(IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
945	ctxt_info->control.control_flags = htole32(control_flags);
946
947	/* initialize RX default queue */
948	rx_cfg = &ctxt_info->rbd_cfg;
949	rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
950	rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
951	rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
952
953	/* initialize TX command queue */
954	ctxt_info->hcmd_cfg.cmd_queue_addr =
955	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
956	ctxt_info->hcmd_cfg.cmd_queue_size =
957		IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
958
959	/* allocate ucode sections in dram and set addresses */
960	err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
961	if (err) {
962		iwx_ctxt_info_free_fw_img(sc);
963		return err;
964	}
965
966	/* Configure debug, if exists */
967	if (sc->sc_fw.dbg_dest_tlv_v1) {
968		err = iwx_apply_debug_destination(sc);
969		if (err) {
970			iwx_ctxt_info_free_fw_img(sc);
971			return err;
972		}
973	}
974
975	/*
976	 * Write the context info DMA base address. The device expects a
977	 * 64-bit address but a simple bus_space_write_8 to this register
978	 * won't work on some devices, such as the AX201.
979	 */
980	paddr = sc->ctxt_info_dma.paddr;
981	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
982	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
983
984	/* kick FW self load */
985	if (!iwx_nic_lock(sc)) {
986		iwx_ctxt_info_free_fw_img(sc);
987		return EBUSY;
988	}
989
990	iwx_set_ltr(sc);
991	iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
992	iwx_nic_unlock(sc);
993
994	/* Context info will be released upon alive or failure to get one */
995
996	return 0;
997}
998
999int
1000iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
1001{
1002	struct iwx_context_info_gen3 *ctxt_info_gen3;
1003	struct iwx_prph_scratch *prph_scratch;
1004	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
1005	uint16_t cb_size;
1006	uint32_t control_flags, scratch_size;
1007	uint64_t paddr;
1008	int err;
1009
1010	if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
1011		printf("%s: no image loader found in firmware file\n",
1012		    DEVNAME(sc));
1013		iwx_ctxt_info_free_fw_img(sc);
1014		return EINVAL;
1015	}
1016
1017	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
1018	    sc->sc_fw.iml_len, 0);
1019	if (err) {
1020		printf("%s: could not allocate DMA memory for "
1021		    "firmware image loader\n", DEVNAME(sc));
1022		iwx_ctxt_info_free_fw_img(sc);
1023		return ENOMEM;
1024	}
1025
1026	prph_scratch = sc->prph_scratch_dma.vaddr;
1027	memset(prph_scratch, 0, sizeof(*prph_scratch));
1028	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
1029	prph_sc_ctrl->version.version = 0;
1030	prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
1031	prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);
1032
1033	control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
1034	    IWX_PRPH_SCRATCH_MTR_MODE |
1035	    (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
1036	if (sc->sc_imr_enabled)
1037		control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
1038	prph_sc_ctrl->control.control_flags = htole32(control_flags);
1039
1040	/* initialize RX default queue */
1041	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
1042	    htole64(sc->rxq.free_desc_dma.paddr);
1043
1044	/* allocate ucode sections in dram and set addresses */
1045	err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
1046	if (err) {
1047		iwx_dma_contig_free(&sc->iml_dma);
1048		iwx_ctxt_info_free_fw_img(sc);
1049		return err;
1050	}
1051
1052	ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
1053	memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
1054	ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
1055	ctxt_info_gen3->prph_scratch_base_addr =
1056	    htole64(sc->prph_scratch_dma.paddr);
1057	scratch_size = sizeof(*prph_scratch);
1058	ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
1059	ctxt_info_gen3->cr_head_idx_arr_base_addr =
1060	    htole64(sc->rxq.stat_dma.paddr);
1061	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
1062	    htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
1063	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
1064	    htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
1065	ctxt_info_gen3->mtr_base_addr =
1066	    htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
1067	ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
1068	cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
1069	ctxt_info_gen3->mtr_size = htole16(cb_size);
1070	cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
1071	ctxt_info_gen3->mcr_size = htole16(cb_size);
1072
1073	memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);
1074
1075	paddr = sc->ctxt_info_dma.paddr;
1076	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
1077	IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);
1078
1079	paddr = sc->iml_dma.paddr;
1080	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
1081	IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
1082	IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);
1083
1084	IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
1085		    IWX_CSR_AUTO_FUNC_BOOT_ENA);
1086
1087	/* kick FW self load */
1088	if (!iwx_nic_lock(sc)) {
1089		iwx_dma_contig_free(&sc->iml_dma);
1090		iwx_ctxt_info_free_fw_img(sc);
1091		return EBUSY;
1092	}
1093	iwx_set_ltr(sc);
1094	iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
1095	iwx_nic_unlock(sc);
1096
1097	/* Context info will be released upon alive or failure to get one */
1098	return 0;
1099}
1100
1101void
1102iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
1103{
1104	struct iwx_self_init_dram *dram = &sc->init_dram;
1105	int i;
1106
1107	if (!dram->fw)
1108		return;
1109
1110	for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
1111		iwx_dma_contig_free(&dram->fw[i]);
1112
1113	free(dram->fw, M_DEVBUF,
1114	    (dram->lmac_cnt + dram->umac_cnt) * sizeof(*dram->fw));
1115	dram->lmac_cnt = 0;
1116	dram->umac_cnt = 0;
1117	dram->fw = NULL;
1118}
1119
1120int
1121iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
1122    uint8_t *data, size_t dlen)
1123{
1124	struct iwx_fw_sects *fws;
1125	struct iwx_fw_onesect *fwone;
1126
1127	if (type >= IWX_UCODE_TYPE_MAX)
1128		return EINVAL;
1129	if (dlen < sizeof(uint32_t))
1130		return EINVAL;
1131
1132	fws = &sc->sc_fw.fw_sects[type];
1133	DPRINTF(("%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count));
1134	if (fws->fw_count >= IWX_UCODE_SECT_MAX)
1135		return EINVAL;
1136
1137	fwone = &fws->fw_sect[fws->fw_count];
1138
1139	/* first 32bit are device load offset */
1140	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
1141
1142	/* rest is data */
1143	fwone->fws_data = data + sizeof(uint32_t);
1144	fwone->fws_len = dlen - sizeof(uint32_t);
1145
1146	fws->fw_count++;
1147	fws->fw_totlen += fwone->fws_len;
1148
1149	return 0;
1150}
1151
1152#define IWX_DEFAULT_SCAN_CHANNELS	40
1153/* Newer firmware might support more channels. Raise this value if needed. */
1154#define IWX_MAX_SCAN_CHANNELS		67 /* as of iwx-cc-a0-62 firmware */
1155
1156struct iwx_tlv_calib_data {
1157	uint32_t ucode_type;
1158	struct iwx_tlv_calib_ctrl calib;
1159} __packed;
1160
1161int
1162iwx_set_default_calib(struct iwx_softc *sc, const void *data)
1163{
1164	const struct iwx_tlv_calib_data *def_calib = data;
1165	uint32_t ucode_type = le32toh(def_calib->ucode_type);
1166
1167	if (ucode_type >= IWX_UCODE_TYPE_MAX)
1168		return EINVAL;
1169
1170	sc->sc_default_calib[ucode_type].flow_trigger =
1171	    def_calib->calib.flow_trigger;
1172	sc->sc_default_calib[ucode_type].event_trigger =
1173	    def_calib->calib.event_trigger;
1174
1175	return 0;
1176}
1177
1178void
1179iwx_fw_info_free(struct iwx_fw_info *fw)
1180{
1181	free(fw->fw_rawdata, M_DEVBUF, fw->fw_rawsize);
1182	fw->fw_rawdata = NULL;
1183	fw->fw_rawsize = 0;
1184	/* don't touch fw->fw_status */
1185	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
1186	free(fw->iml, M_DEVBUF, fw->iml_len);
1187	fw->iml = NULL;
1188	fw->iml_len = 0;
1189}
1190
1191#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
1192
1193int
1194iwx_read_firmware(struct iwx_softc *sc)
1195{
1196	struct ieee80211com *ic = &sc->sc_ic;
1197	struct iwx_fw_info *fw = &sc->sc_fw;
1198	struct iwx_tlv_ucode_header *uhdr;
1199	struct iwx_ucode_tlv tlv;
1200	uint32_t tlv_type;
1201	uint8_t *data;
1202	int err;
1203	size_t len;
1204
1205	if (fw->fw_status == IWX_FW_STATUS_DONE)
1206		return 0;
1207
1208	while (fw->fw_status == IWX_FW_STATUS_INPROGRESS)
1209		tsleep_nsec(&sc->sc_fw, 0, "iwxfwp", INFSLP);
1210	fw->fw_status = IWX_FW_STATUS_INPROGRESS;
1211
1212	if (fw->fw_rawdata != NULL)
1213		iwx_fw_info_free(fw);
1214
1215	err = loadfirmware(sc->sc_fwname,
1216	    (u_char **)&fw->fw_rawdata, &fw->fw_rawsize);
1217	if (err) {
1218		printf("%s: could not read firmware %s (error %d)\n",
1219		    DEVNAME(sc), sc->sc_fwname, err);
1220		goto out;
1221	}
1222
1223	if (ic->ic_if.if_flags & IFF_DEBUG)
1224		printf("%s: using firmware %s\n", DEVNAME(sc), sc->sc_fwname);
1225
1226	sc->sc_capaflags = 0;
1227	sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
1228	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
1229	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
1230	sc->n_cmd_versions = 0;
1231
1232	uhdr = (void *)fw->fw_rawdata;
1233	if (*(uint32_t *)fw->fw_rawdata != 0
1234	    || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
1235		printf("%s: invalid firmware %s\n",
1236		    DEVNAME(sc), sc->sc_fwname);
1237		err = EINVAL;
1238		goto out;
1239	}
1240
1241	iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1242	    IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
1243	    IWX_UCODE_MINOR(le32toh(uhdr->ver)),
1244	    IWX_UCODE_API(le32toh(uhdr->ver)));
1245
1246	data = uhdr->data;
1247	len = fw->fw_rawsize - sizeof(*uhdr);
1248
1249	while (len >= sizeof(tlv)) {
1250		size_t tlv_len;
1251		void *tlv_data;
1252
1253		memcpy(&tlv, data, sizeof(tlv));
1254		tlv_len = le32toh(tlv.length);
1255		tlv_type = le32toh(tlv.type);
1256
1257		len -= sizeof(tlv);
1258		data += sizeof(tlv);
1259		tlv_data = data;
1260
1261		if (len < tlv_len) {
1262			printf("%s: firmware too short: %zu bytes\n",
1263			    DEVNAME(sc), len);
1264			err = EINVAL;
1265			goto parse_out;
1266		}
1267
1268		switch (tlv_type) {
1269		case IWX_UCODE_TLV_PROBE_MAX_LEN:
1270			if (tlv_len < sizeof(uint32_t)) {
1271				err = EINVAL;
1272				goto parse_out;
1273			}
1274			sc->sc_capa_max_probe_len
1275			    = le32toh(*(uint32_t *)tlv_data);
1276			if (sc->sc_capa_max_probe_len >
1277			    IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
1278				err = EINVAL;
1279				goto parse_out;
1280			}
1281			break;
1282		case IWX_UCODE_TLV_PAN:
1283			if (tlv_len) {
1284				err = EINVAL;
1285				goto parse_out;
1286			}
1287			sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
1288			break;
1289		case IWX_UCODE_TLV_FLAGS:
1290			if (tlv_len < sizeof(uint32_t)) {
1291				err = EINVAL;
1292				goto parse_out;
1293			}
1294			/*
1295			 * Apparently there can be many flags, but Linux driver
1296			 * parses only the first one, and so do we.
1297			 *
1298			 * XXX: why does this override IWX_UCODE_TLV_PAN?
1299			 * Intentional or a bug?  Observations from
1300			 * current firmware file:
1301			 *  1) TLV_PAN is parsed first
1302			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
1303			 * ==> this resets TLV_PAN to itself... hnnnk
1304			 */
1305			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
1306			break;
1307		case IWX_UCODE_TLV_CSCHEME:
1308			err = iwx_store_cscheme(sc, tlv_data, tlv_len);
1309			if (err)
1310				goto parse_out;
1311			break;
1312		case IWX_UCODE_TLV_NUM_OF_CPU: {
1313			uint32_t num_cpu;
1314			if (tlv_len != sizeof(uint32_t)) {
1315				err = EINVAL;
1316				goto parse_out;
1317			}
1318			num_cpu = le32toh(*(uint32_t *)tlv_data);
1319			if (num_cpu < 1 || num_cpu > 2) {
1320				err = EINVAL;
1321				goto parse_out;
1322			}
1323			break;
1324		}
1325		case IWX_UCODE_TLV_SEC_RT:
1326			err = iwx_firmware_store_section(sc,
1327			    IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
1328			if (err)
1329				goto parse_out;
1330			break;
1331		case IWX_UCODE_TLV_SEC_INIT:
1332			err = iwx_firmware_store_section(sc,
1333			    IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
1334			if (err)
1335				goto parse_out;
1336			break;
1337		case IWX_UCODE_TLV_SEC_WOWLAN:
1338			err = iwx_firmware_store_section(sc,
1339			    IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
1340			if (err)
1341				goto parse_out;
1342			break;
1343		case IWX_UCODE_TLV_DEF_CALIB:
1344			if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
1345				err = EINVAL;
1346				goto parse_out;
1347			}
1348			err = iwx_set_default_calib(sc, tlv_data);
1349			if (err)
1350				goto parse_out;
1351			break;
1352		case IWX_UCODE_TLV_PHY_SKU:
1353			if (tlv_len != sizeof(uint32_t)) {
1354				err = EINVAL;
1355				goto parse_out;
1356			}
1357			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
1358			break;
1359
1360		case IWX_UCODE_TLV_API_CHANGES_SET: {
1361			struct iwx_ucode_api *api;
1362			int idx, i;
1363			if (tlv_len != sizeof(*api)) {
1364				err = EINVAL;
1365				goto parse_out;
1366			}
1367			api = (struct iwx_ucode_api *)tlv_data;
1368			idx = le32toh(api->api_index);
1369			if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
1370				err = EINVAL;
1371				goto parse_out;
1372			}
1373			for (i = 0; i < 32; i++) {
1374				if ((le32toh(api->api_flags) & (1 << i)) == 0)
1375					continue;
1376				setbit(sc->sc_ucode_api, i + (32 * idx));
1377			}
1378			break;
1379		}
1380
1381		case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
1382			struct iwx_ucode_capa *capa;
1383			int idx, i;
1384			if (tlv_len != sizeof(*capa)) {
1385				err = EINVAL;
1386				goto parse_out;
1387			}
1388			capa = (struct iwx_ucode_capa *)tlv_data;
1389			idx = le32toh(capa->api_index);
1390			if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
1391				goto parse_out;
1392			}
1393			for (i = 0; i < 32; i++) {
1394				if ((le32toh(capa->api_capa) & (1 << i)) == 0)
1395					continue;
1396				setbit(sc->sc_enabled_capa, i + (32 * idx));
1397			}
1398			break;
1399		}
1400
1401		case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
1402		case IWX_UCODE_TLV_FW_GSCAN_CAPA:
1403			/* ignore, not used by current driver */
1404			break;
1405
1406		case IWX_UCODE_TLV_SEC_RT_USNIFFER:
1407			err = iwx_firmware_store_section(sc,
1408			    IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
1409			    tlv_len);
1410			if (err)
1411				goto parse_out;
1412			break;
1413
1414		case IWX_UCODE_TLV_PAGING:
1415			if (tlv_len != sizeof(uint32_t)) {
1416				err = EINVAL;
1417				goto parse_out;
1418			}
1419			break;
1420
1421		case IWX_UCODE_TLV_N_SCAN_CHANNELS:
1422			if (tlv_len != sizeof(uint32_t)) {
1423				err = EINVAL;
1424				goto parse_out;
1425			}
1426			sc->sc_capa_n_scan_channels =
1427			  le32toh(*(uint32_t *)tlv_data);
1428			if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
1429				err = ERANGE;
1430				goto parse_out;
1431			}
1432			break;
1433
1434		case IWX_UCODE_TLV_FW_VERSION:
1435			if (tlv_len != sizeof(uint32_t) * 3) {
1436				err = EINVAL;
1437				goto parse_out;
1438			}
1439
1440			iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
1441			    le32toh(((uint32_t *)tlv_data)[0]),
1442			    le32toh(((uint32_t *)tlv_data)[1]),
1443			    le32toh(((uint32_t *)tlv_data)[2]));
1444			break;
1445
1446		case IWX_UCODE_TLV_FW_DBG_DEST: {
1447			struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
1448
1449			fw->dbg_dest_ver = (uint8_t *)tlv_data;
1450			if (*fw->dbg_dest_ver != 0) {
1451				err = EINVAL;
1452				goto parse_out;
1453			}
1454
1455			if (fw->dbg_dest_tlv_init)
1456				break;
1457			fw->dbg_dest_tlv_init = true;
1458
1459			dest_v1 = (void *)tlv_data;
1460			fw->dbg_dest_tlv_v1 = dest_v1;
1461			fw->n_dest_reg = tlv_len -
1462			    offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
1463			fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
1464			DPRINTF(("%s: found debug dest; n_dest_reg=%d\n", __func__, fw->n_dest_reg));
1465			break;
1466		}
1467
1468		case IWX_UCODE_TLV_FW_DBG_CONF: {
1469			struct iwx_fw_dbg_conf_tlv *conf = (void *)tlv_data;
1470
1471			if (!fw->dbg_dest_tlv_init ||
1472			    conf->id >= nitems(fw->dbg_conf_tlv) ||
1473			    fw->dbg_conf_tlv[conf->id] != NULL)
1474				break;
1475
1476			DPRINTF(("Found debug configuration: %d\n", conf->id));
1477			fw->dbg_conf_tlv[conf->id] = conf;
1478			fw->dbg_conf_tlv_len[conf->id] = tlv_len;
1479			break;
1480		}
1481
1482		case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
1483			struct iwx_umac_debug_addrs *dbg_ptrs =
1484				(void *)tlv_data;
1485
1486			if (tlv_len != sizeof(*dbg_ptrs)) {
1487				err = EINVAL;
1488				goto parse_out;
1489			}
1490			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1491				break;
1492			sc->sc_uc.uc_umac_error_event_table =
1493				le32toh(dbg_ptrs->error_info_addr) &
1494				~IWX_FW_ADDR_CACHE_CONTROL;
1495			sc->sc_uc.error_event_table_tlv_status |=
1496				IWX_ERROR_EVENT_TABLE_UMAC;
1497			break;
1498		}
1499
1500		case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
1501			struct iwx_lmac_debug_addrs *dbg_ptrs =
1502				(void *)tlv_data;
1503
1504			if (tlv_len != sizeof(*dbg_ptrs)) {
1505				err = EINVAL;
1506				goto parse_out;
1507			}
1508			if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
1509				break;
1510			sc->sc_uc.uc_lmac_error_event_table[0] =
1511				le32toh(dbg_ptrs->error_event_table_ptr) &
1512				~IWX_FW_ADDR_CACHE_CONTROL;
1513			sc->sc_uc.error_event_table_tlv_status |=
1514				IWX_ERROR_EVENT_TABLE_LMAC1;
1515			break;
1516		}
1517
1518		case IWX_UCODE_TLV_FW_MEM_SEG:
1519			break;
1520
1521		case IWX_UCODE_TLV_IML:
1522			if (sc->sc_fw.iml != NULL) {
1523				free(fw->iml, M_DEVBUF, fw->iml_len);
1524				fw->iml_len = 0;
1525			}
1526			sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
1527			    M_WAIT | M_CANFAIL | M_ZERO);
1528			if (sc->sc_fw.iml == NULL) {
1529				err = ENOMEM;
1530				goto parse_out;
1531			}
1532			memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
1533			sc->sc_fw.iml_len = tlv_len;
1534			break;
1535
1536		case IWX_UCODE_TLV_CMD_VERSIONS:
1537			if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
1538				tlv_len /= sizeof(struct iwx_fw_cmd_version);
1539				tlv_len *= sizeof(struct iwx_fw_cmd_version);
1540			}
1541			if (sc->n_cmd_versions != 0) {
1542				err = EINVAL;
1543				goto parse_out;
1544			}
1545			if (tlv_len > sizeof(sc->cmd_versions)) {
1546				err = EINVAL;
1547				goto parse_out;
1548			}
1549			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
1550			sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
1551			break;
1552
1553		case IWX_UCODE_TLV_FW_RECOVERY_INFO:
1554			break;
1555
1556		case IWX_UCODE_TLV_FW_FSEQ_VERSION:
1557		case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
1558		case IWX_UCODE_TLV_FW_NUM_STATIONS:
1559		case IWX_UCODE_TLV_FW_NUM_BEACONS:
1560			break;
1561
1562		/* undocumented TLVs found in iwx-cc-a0-46 image */
1563		case 58:
1564		case 0x1000003:
1565		case 0x1000004:
1566			break;
1567
1568		/* undocumented TLVs found in iwx-cc-a0-48 image */
1569		case 0x1000000:
1570		case 0x1000002:
1571			break;
1572
1573		case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
1574		case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1575		case IWX_UCODE_TLV_TYPE_HCMD:
1576		case IWX_UCODE_TLV_TYPE_REGIONS:
1577		case IWX_UCODE_TLV_TYPE_TRIGGERS:
1578		case IWX_UCODE_TLV_TYPE_CONF_SET:
1579		case IWX_UCODE_TLV_SEC_TABLE_ADDR:
1580		case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
1581		case IWX_UCODE_TLV_CURRENT_PC:
1582			break;
1583
1584		/* undocumented TLV found in iwx-cc-a0-67 image */
1585		case 0x100000b:
1586			break;
1587
1588		/* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
1589		case 0x101:
1590			break;
1591
1592		/* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
1593		case 0x100000c:
1594			break;
1595
1596		default:
1597			err = EINVAL;
1598			goto parse_out;
1599		}
1600
1601		/*
1602		 * Check for size_t overflow and ignore missing padding at
1603		 * end of firmware file.
1604		 */
1605		if (roundup(tlv_len, 4) > len)
1606			break;
1607
1608		len -= roundup(tlv_len, 4);
1609		data += roundup(tlv_len, 4);
1610	}
1611
1612	KASSERT(err == 0);
1613
1614 parse_out:
1615	if (err) {
1616		printf("%s: firmware parse error %d, "
1617		    "section type %d\n", DEVNAME(sc), err, tlv_type);
1618	}
1619
1620 out:
1621	if (err) {
1622		fw->fw_status = IWX_FW_STATUS_NONE;
1623		if (fw->fw_rawdata != NULL)
1624			iwx_fw_info_free(fw);
1625	} else
1626		fw->fw_status = IWX_FW_STATUS_DONE;
1627	wakeup(&sc->sc_fw);
1628
1629	return err;
1630}
1631
1632uint32_t
1633iwx_prph_addr_mask(struct iwx_softc *sc)
1634{
1635	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1636		return 0x00ffffff;
1637	else
1638		return 0x000fffff;
1639}
1640
1641uint32_t
1642iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1643{
1644	uint32_t mask = iwx_prph_addr_mask(sc);
1645	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
1646	IWX_BARRIER_READ_WRITE(sc);
1647	return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
1648}
1649
1650uint32_t
1651iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
1652{
1653	iwx_nic_assert_locked(sc);
1654	return iwx_read_prph_unlocked(sc, addr);
1655}
1656
1657void
1658iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1659{
1660	uint32_t mask = iwx_prph_addr_mask(sc);
1661	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
1662	IWX_BARRIER_WRITE(sc);
1663	IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
1664}
1665
1666void
1667iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1668{
1669	iwx_nic_assert_locked(sc);
1670	iwx_write_prph_unlocked(sc, addr, val);
1671}
1672
1673void
1674iwx_write_prph64(struct iwx_softc *sc, uint64_t addr, uint64_t val)
1675{
1676	iwx_write_prph(sc, (uint32_t)addr, val & 0xffffffff);
1677	iwx_write_prph(sc, (uint32_t)addr + 4, val >> 32);
1678}
1679
1680uint32_t
1681iwx_read_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
1682{
1683	return iwx_read_prph_unlocked(sc, addr + sc->sc_umac_prph_offset);
1684}
1685
1686uint32_t
1687iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
1688{
1689	return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
1690}
1691
1692void
1693iwx_write_umac_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1694{
1695	iwx_write_prph_unlocked(sc, addr + sc->sc_umac_prph_offset, val);
1696}
1697
1698void
1699iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1700{
1701	iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
1702}
1703
1704int
1705iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
1706{
1707	int offs, err = 0;
1708	uint32_t *vals = buf;
1709
1710	if (iwx_nic_lock(sc)) {
1711		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
1712		for (offs = 0; offs < dwords; offs++)
1713			vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
1714		iwx_nic_unlock(sc);
1715	} else {
1716		err = EBUSY;
1717	}
1718	return err;
1719}
1720
1721int
1722iwx_write_mem(struct iwx_softc *sc, uint32_t addr, const void *buf, int dwords)
1723{
1724	int offs;
1725	const uint32_t *vals = buf;
1726
1727	if (iwx_nic_lock(sc)) {
1728		IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WADDR, addr);
1729		/* WADDR auto-increments */
1730		for (offs = 0; offs < dwords; offs++) {
1731			uint32_t val = vals ? vals[offs] : 0;
1732			IWX_WRITE(sc, IWX_HBUS_TARG_MEM_WDAT, val);
1733		}
1734		iwx_nic_unlock(sc);
1735	} else {
1736		return EBUSY;
1737	}
1738	return 0;
1739}
1740
1741int
1742iwx_write_mem32(struct iwx_softc *sc, uint32_t addr, uint32_t val)
1743{
1744	return iwx_write_mem(sc, addr, &val, 1);
1745}
1746
1747int
1748iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
1749    int timo)
1750{
1751	for (;;) {
1752		if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
1753			return 1;
1754		}
1755		if (timo < 10) {
1756			return 0;
1757		}
1758		timo -= 10;
1759		DELAY(10);
1760	}
1761}
1762
1763int
1764iwx_nic_lock(struct iwx_softc *sc)
1765{
1766	if (sc->sc_nic_locks > 0) {
1767		iwx_nic_assert_locked(sc);
1768		sc->sc_nic_locks++;
1769		return 1; /* already locked */
1770	}
1771
1772	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
1773	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1774
1775	DELAY(2);
1776
1777	if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
1778	    IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1779	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
1780	     | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
1781		sc->sc_nic_locks++;
1782		return 1;
1783	}
1784
1785	printf("%s: acquiring device failed\n", DEVNAME(sc));
1786	return 0;
1787}
1788
1789void
1790iwx_nic_assert_locked(struct iwx_softc *sc)
1791{
1792	if (sc->sc_nic_locks <= 0)
1793		panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
1794}
1795
1796void
1797iwx_nic_unlock(struct iwx_softc *sc)
1798{
1799	if (sc->sc_nic_locks > 0) {
1800		if (--sc->sc_nic_locks == 0)
1801			IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
1802			    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1803	} else
1804		printf("%s: NIC already unlocked\n", DEVNAME(sc));
1805}
1806
1807int
1808iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
1809    uint32_t mask)
1810{
1811	uint32_t val;
1812
1813	if (iwx_nic_lock(sc)) {
1814		val = iwx_read_prph(sc, reg) & mask;
1815		val |= bits;
1816		iwx_write_prph(sc, reg, val);
1817		iwx_nic_unlock(sc);
1818		return 0;
1819	}
1820	return EBUSY;
1821}
1822
1823int
1824iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1825{
1826	return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
1827}
1828
1829int
1830iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
1831{
1832	return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
1833}
1834
1835int
1836iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
1837    bus_size_t size, bus_size_t alignment)
1838{
1839	int nsegs, err;
1840	caddr_t va;
1841
1842	dma->tag = tag;
1843	dma->size = size;
1844
1845	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1846	    &dma->map);
1847	if (err)
1848		goto fail;
1849
1850	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1851	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1852	if (err)
1853		goto fail;
1854
1855	if (nsegs > 1) {
1856		err = ENOMEM;
1857		goto fail;
1858	}
1859
1860	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
1861	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1862	if (err)
1863		goto fail;
1864	dma->vaddr = va;
1865
1866	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1867	    BUS_DMA_NOWAIT);
1868	if (err)
1869		goto fail;
1870
1871	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1872	dma->paddr = dma->map->dm_segs[0].ds_addr;
1873
1874	return 0;
1875
1876fail:	iwx_dma_contig_free(dma);
1877	return err;
1878}
1879
1880void
1881iwx_dma_contig_free(struct iwx_dma_info *dma)
1882{
1883	if (dma->map != NULL) {
1884		if (dma->vaddr != NULL) {
1885			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1886			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1887			bus_dmamap_unload(dma->tag, dma->map);
1888			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1889			bus_dmamem_free(dma->tag, &dma->seg, 1);
1890			dma->vaddr = NULL;
1891		}
1892		bus_dmamap_destroy(dma->tag, dma->map);
1893		dma->map = NULL;
1894	}
1895}
1896
1897int
1898iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1899{
1900	bus_size_t size;
1901	int i, err;
1902
1903	ring->cur = 0;
1904
1905	/* Allocate RX descriptors (256-byte aligned). */
1906	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1907		size = sizeof(struct iwx_rx_transfer_desc);
1908	else
1909		size = sizeof(uint64_t);
1910	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
1911	    size * IWX_RX_MQ_RING_COUNT, 256);
1912	if (err) {
1913		printf("%s: could not allocate RX ring DMA memory\n",
1914		    DEVNAME(sc));
1915		goto fail;
1916	}
1917	ring->desc = ring->free_desc_dma.vaddr;
1918
1919	/* Allocate RX status area (16-byte aligned). */
1920	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1921		size = sizeof(uint16_t);
1922	else
1923		size = sizeof(*ring->stat);
1924	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
1925	if (err) {
1926		printf("%s: could not allocate RX status DMA memory\n",
1927		    DEVNAME(sc));
1928		goto fail;
1929	}
1930	ring->stat = ring->stat_dma.vaddr;
1931
1932	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1933		size = sizeof(struct iwx_rx_completion_desc);
1934	else
1935		size = sizeof(uint32_t);
1936	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
1937	    size * IWX_RX_MQ_RING_COUNT, 256);
1938	if (err) {
1939		printf("%s: could not allocate RX ring DMA memory\n",
1940		    DEVNAME(sc));
1941		goto fail;
1942	}
1943
1944	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
1945		struct iwx_rx_data *data = &ring->data[i];
1946
1947		memset(data, 0, sizeof(*data));
1948		err = bus_dmamap_create(sc->sc_dmat, IWX_RBUF_SIZE, 1,
1949		    IWX_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1950		    &data->map);
1951		if (err) {
1952			printf("%s: could not create RX buf DMA map\n",
1953			    DEVNAME(sc));
1954			goto fail;
1955		}
1956
1957		err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
1958		if (err)
1959			goto fail;
1960	}
1961	return 0;
1962
1963fail:	iwx_free_rx_ring(sc, ring);
1964	return err;
1965}
1966
1967void
1968iwx_disable_rx_dma(struct iwx_softc *sc)
1969{
1970	int ntries;
1971
1972	if (iwx_nic_lock(sc)) {
1973		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
1974			iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
1975		else
1976			iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
1977		for (ntries = 0; ntries < 1000; ntries++) {
1978			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
1979				if (iwx_read_umac_prph(sc,
1980				    IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
1981					break;
1982			} else {
1983				if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
1984				    IWX_RXF_DMA_IDLE)
1985					break;
1986			}
1987			DELAY(10);
1988		}
1989		iwx_nic_unlock(sc);
1990	}
1991}
1992
1993void
1994iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
1995{
1996	ring->cur = 0;
1997	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1998	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
1999	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2000		uint16_t *status = sc->rxq.stat_dma.vaddr;
2001		*status = 0;
2002	} else
2003		memset(ring->stat, 0, sizeof(*ring->stat));
2004	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
2005	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
2006
2007}
2008
2009void
2010iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
2011{
2012	int i;
2013
2014	iwx_dma_contig_free(&ring->free_desc_dma);
2015	iwx_dma_contig_free(&ring->stat_dma);
2016	iwx_dma_contig_free(&ring->used_desc_dma);
2017
2018	for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
2019		struct iwx_rx_data *data = &ring->data[i];
2020
2021		if (data->m != NULL) {
2022			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2023			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2024			bus_dmamap_unload(sc->sc_dmat, data->map);
2025			m_freem(data->m);
2026			data->m = NULL;
2027		}
2028		if (data->map != NULL)
2029			bus_dmamap_destroy(sc->sc_dmat, data->map);
2030	}
2031}
2032
2033int
2034iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
2035{
2036	bus_addr_t paddr;
2037	bus_size_t size;
2038	int i, err;
2039	size_t bc_tbl_size;
2040	bus_size_t bc_align;
2041
2042	ring->qid = qid;
2043	ring->queued = 0;
2044	ring->cur = 0;
2045	ring->cur_hw = 0;
2046	ring->tail = 0;
2047	ring->tail_hw = 0;
2048
2049	/* Allocate TX descriptors (256-byte aligned). */
2050	size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
2051	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
2052	if (err) {
2053		printf("%s: could not allocate TX ring DMA memory\n",
2054		    DEVNAME(sc));
2055		goto fail;
2056	}
2057	ring->desc = ring->desc_dma.vaddr;
2058
2059	/*
2060	 * The hardware supports up to 512 Tx rings which is more
2061	 * than we currently need.
2062	 *
2063	 * In DQA mode we use 1 command queue + 1 default queue for
2064	 * management, control, and non-QoS data frames.
2065	 * The command is queue sc->txq[0], our default queue is sc->txq[1].
2066	 *
2067	 * Tx aggregation requires additional queues, one queue per TID for
2068	 * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
2069	 * Firmware may assign its own internal IDs for these queues
2070	 * depending on which TID gets aggregation enabled first.
2071	 * The driver maintains a table mapping driver-side queue IDs
2072	 * to firmware-side queue IDs.
2073	 */
2074
2075	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
2076		bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
2077		    IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
2078		bc_align = 128;
2079	} else {
2080		bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
2081		bc_align = 64;
2082	}
2083	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
2084	    bc_align);
2085	if (err) {
2086		printf("%s: could not allocate byte count table DMA memory\n",
2087		    DEVNAME(sc));
2088		goto fail;
2089	}
2090
2091	size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
2092	err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
2093	    IWX_FIRST_TB_SIZE_ALIGN);
2094	if (err) {
2095		printf("%s: could not allocate cmd DMA memory\n", DEVNAME(sc));
2096		goto fail;
2097	}
2098	ring->cmd = ring->cmd_dma.vaddr;
2099
2100	paddr = ring->cmd_dma.paddr;
2101	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2102		struct iwx_tx_data *data = &ring->data[i];
2103		size_t mapsize;
2104
2105		data->cmd_paddr = paddr;
2106		paddr += sizeof(struct iwx_device_cmd);
2107
2108		/* FW commands may require more mapped space than packets. */
2109		if (qid == IWX_DQA_CMD_QUEUE)
2110			mapsize = (sizeof(struct iwx_cmd_header) +
2111			    IWX_MAX_CMD_PAYLOAD_SIZE);
2112		else
2113			mapsize = MCLBYTES;
2114		err = bus_dmamap_create(sc->sc_dmat, mapsize,
2115		    IWX_TFH_NUM_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
2116		    &data->map);
2117		if (err) {
2118			printf("%s: could not create TX buf DMA map\n",
2119			    DEVNAME(sc));
2120			goto fail;
2121		}
2122	}
2123	KASSERT(paddr == ring->cmd_dma.paddr + size);
2124	return 0;
2125
2126fail:	iwx_free_tx_ring(sc, ring);
2127	return err;
2128}
2129
2130void
2131iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2132{
2133	int i;
2134
2135	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2136		struct iwx_tx_data *data = &ring->data[i];
2137
2138		if (data->m != NULL) {
2139			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2140			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2141			bus_dmamap_unload(sc->sc_dmat, data->map);
2142			m_freem(data->m);
2143			data->m = NULL;
2144		}
2145	}
2146
2147	/* Clear byte count table. */
2148	memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
2149
2150	/* Clear TX descriptors. */
2151	memset(ring->desc, 0, ring->desc_dma.size);
2152	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
2153	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
2154	sc->qfullmsk &= ~(1 << ring->qid);
2155	sc->qenablemsk &= ~(1 << ring->qid);
2156	for (i = 0; i < nitems(sc->aggqid); i++) {
2157		if (sc->aggqid[i] == ring->qid) {
2158			sc->aggqid[i] = 0;
2159			break;
2160		}
2161	}
2162	ring->queued = 0;
2163	ring->cur = 0;
2164	ring->cur_hw = 0;
2165	ring->tail = 0;
2166	ring->tail_hw = 0;
2167	ring->tid = 0;
2168}
2169
2170void
2171iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
2172{
2173	int i;
2174
2175	iwx_dma_contig_free(&ring->desc_dma);
2176	iwx_dma_contig_free(&ring->cmd_dma);
2177	iwx_dma_contig_free(&ring->bc_tbl);
2178
2179	for (i = 0; i < IWX_TX_RING_COUNT; i++) {
2180		struct iwx_tx_data *data = &ring->data[i];
2181
2182		if (data->m != NULL) {
2183			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2184			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2185			bus_dmamap_unload(sc->sc_dmat, data->map);
2186			m_freem(data->m);
2187			data->m = NULL;
2188		}
2189		if (data->map != NULL)
2190			bus_dmamap_destroy(sc->sc_dmat, data->map);
2191	}
2192}
2193
2194void
2195iwx_enable_rfkill_int(struct iwx_softc *sc)
2196{
2197	if (!sc->sc_msix) {
2198		sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
2199		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2200	} else {
2201		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2202		    sc->sc_fh_init_mask);
2203		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2204		    ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
2205		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
2206	}
2207
2208	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2209	    IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
2210}
2211
2212int
2213iwx_check_rfkill(struct iwx_softc *sc)
2214{
2215	uint32_t v;
2216	int rv;
2217
2218	/*
2219	 * "documentation" is not really helpful here:
2220	 *  27:	HW_RF_KILL_SW
2221	 *	Indicates state of (platform's) hardware RF-Kill switch
2222	 *
2223	 * But apparently when it's off, it's on ...
2224	 */
2225	v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
2226	rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
2227	if (rv) {
2228		sc->sc_flags |= IWX_FLAG_RFKILL;
2229	} else {
2230		sc->sc_flags &= ~IWX_FLAG_RFKILL;
2231	}
2232
2233	return rv;
2234}
2235
2236void
2237iwx_enable_interrupts(struct iwx_softc *sc)
2238{
2239	if (!sc->sc_msix) {
2240		sc->sc_intmask = IWX_CSR_INI_SET_MASK;
2241		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2242	} else {
2243		/*
2244		 * fh/hw_mask keeps all the unmasked causes.
2245		 * Unlike msi, in msix cause is enabled when it is unset.
2246		 */
2247		sc->sc_hw_mask = sc->sc_hw_init_mask;
2248		sc->sc_fh_mask = sc->sc_fh_init_mask;
2249		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2250		    ~sc->sc_fh_mask);
2251		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2252		    ~sc->sc_hw_mask);
2253	}
2254}
2255
2256void
2257iwx_enable_fwload_interrupt(struct iwx_softc *sc)
2258{
2259	if (!sc->sc_msix) {
2260		sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
2261		IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2262	} else {
2263		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2264		    ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
2265		sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
2266		/*
2267		 * Leave all the FH causes enabled to get the ALIVE
2268		 * notification.
2269		 */
2270		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2271		    ~sc->sc_fh_init_mask);
2272		sc->sc_fh_mask = sc->sc_fh_init_mask;
2273	}
2274}
2275
2276void
2277iwx_restore_interrupts(struct iwx_softc *sc)
2278{
2279	IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
2280}
2281
2282void
2283iwx_disable_interrupts(struct iwx_softc *sc)
2284{
2285	if (!sc->sc_msix) {
2286		IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
2287
2288		/* acknowledge all interrupts */
2289		IWX_WRITE(sc, IWX_CSR_INT, ~0);
2290		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
2291	} else {
2292		IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2293		    sc->sc_fh_init_mask);
2294		IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2295		    sc->sc_hw_init_mask);
2296	}
2297}
2298
2299void
2300iwx_ict_reset(struct iwx_softc *sc)
2301{
2302	iwx_disable_interrupts(sc);
2303
2304	memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
2305	sc->ict_cur = 0;
2306
2307	/* Set physical address of ICT (4KB aligned). */
2308	IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
2309	    IWX_CSR_DRAM_INT_TBL_ENABLE
2310	    | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
2311	    | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
2312	    | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
2313
2314	/* Switch to ICT interrupt mode in driver. */
2315	sc->sc_flags |= IWX_FLAG_USE_ICT;
2316
2317	IWX_WRITE(sc, IWX_CSR_INT, ~0);
2318	iwx_enable_interrupts(sc);
2319}
2320
2321#define IWX_HW_READY_TIMEOUT 50
2322int
2323iwx_set_hw_ready(struct iwx_softc *sc)
2324{
2325	int ready;
2326
2327	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2328	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2329
2330	ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
2331	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2332	    IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2333	    IWX_HW_READY_TIMEOUT);
2334	if (ready)
2335		IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
2336		    IWX_CSR_MBOX_SET_REG_OS_ALIVE);
2337
2338	return ready;
2339}
2340#undef IWX_HW_READY_TIMEOUT
2341
2342int
2343iwx_prepare_card_hw(struct iwx_softc *sc)
2344{
2345	int t = 0;
2346	int ntries;
2347
2348	if (iwx_set_hw_ready(sc))
2349		return 0;
2350
2351	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2352	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2353	DELAY(1000);
2354
2355	for (ntries = 0; ntries < 10; ntries++) {
2356		/* If HW is not ready, prepare the conditions to check again */
2357		IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2358		    IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
2359
2360		do {
2361			if (iwx_set_hw_ready(sc))
2362				return 0;
2363			DELAY(200);
2364			t += 200;
2365		} while (t < 150000);
2366		DELAY(25000);
2367	}
2368
2369	return ETIMEDOUT;
2370}
2371
2372int
2373iwx_force_power_gating(struct iwx_softc *sc)
2374{
2375	int err;
2376
2377	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2378	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2379	if (err)
2380		return err;
2381	DELAY(20);
2382	err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2383	    IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
2384	    IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
2385	if (err)
2386		return err;
2387	DELAY(20);
2388	err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
2389	    IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
2390	return err;
2391}
2392
2393void
2394iwx_apm_config(struct iwx_softc *sc)
2395{
2396	pcireg_t lctl, cap;
2397
2398	/*
2399	 * L0S states have been found to be unstable with our devices
2400	 * and in newer hardware they are not officially supported at
2401	 * all, so we must always set the L0S_DISABLED bit.
2402	 */
2403	IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
2404
2405	lctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2406	    sc->sc_cap_off + PCI_PCIE_LCSR);
2407	sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
2408	cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
2409	    sc->sc_cap_off + PCI_PCIE_DCSR2);
2410	sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
2411	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
2412	    DEVNAME(sc),
2413	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
2414	    sc->sc_ltr_enabled ? "En" : "Dis"));
2415}
2416
2417/*
2418 * Start up NIC's basic functionality after it has been reset
2419 * e.g. after platform boot or shutdown.
2420 * NOTE:  This does not load uCode nor start the embedded processor
2421 */
2422int
2423iwx_apm_init(struct iwx_softc *sc)
2424{
2425	int err = 0;
2426
2427	/*
2428	 * Disable L0s without affecting L1;
2429	 *  don't wait for ICH L0s (ICH bug W/A)
2430	 */
2431	IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
2432	    IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
2433
2434	/* Set FH wait threshold to maximum (HW error during stress W/A) */
2435	IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
2436
2437	/*
2438	 * Enable HAP INTA (interrupt from management bus) to
2439	 * wake device's PCI Express link L1a -> L0s
2440	 */
2441	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2442	    IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
2443
2444	iwx_apm_config(sc);
2445
2446	/*
2447	 * Set "initialization complete" bit to move adapter from
2448	 * D0U* --> D0A* (powered-up active) state.
2449	 */
2450	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2451
2452	/*
2453	 * Wait for clock stabilization; once stabilized, access to
2454	 * device-internal resources is supported, e.g. iwx_write_prph()
2455	 * and accesses to uCode SRAM.
2456	 */
2457	if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2458	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2459	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2460		printf("%s: timeout waiting for clock stabilization\n",
2461		    DEVNAME(sc));
2462		err = ETIMEDOUT;
2463		goto out;
2464	}
2465 out:
2466	if (err)
2467		printf("%s: apm init error %d\n", DEVNAME(sc), err);
2468	return err;
2469}
2470
2471void
2472iwx_apm_stop(struct iwx_softc *sc)
2473{
2474	IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2475	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2476	IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
2477	    IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
2478	    IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
2479	DELAY(1000);
2480	IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
2481	    IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
2482	DELAY(5000);
2483
2484	/* stop device's busmaster DMA activity */
2485	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
2486
2487	if (!iwx_poll_bit(sc, IWX_CSR_RESET,
2488	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
2489	    IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
2490		printf("%s: timeout waiting for master\n", DEVNAME(sc));
2491
2492	/*
2493	 * Clear "initialization complete" bit to move adapter from
2494	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
2495	 */
2496	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2497	    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2498}
2499
2500void
2501iwx_init_msix_hw(struct iwx_softc *sc)
2502{
2503	iwx_conf_msix_hw(sc, 0);
2504
2505	if (!sc->sc_msix)
2506		return;
2507
2508	sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
2509	sc->sc_fh_mask = sc->sc_fh_init_mask;
2510	sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
2511	sc->sc_hw_mask = sc->sc_hw_init_mask;
2512}
2513
2514void
2515iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
2516{
2517	int vector = 0;
2518
2519	if (!sc->sc_msix) {
2520		/* Newer chips default to MSIX. */
2521		if (!stopped && iwx_nic_lock(sc)) {
2522			iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2523			    IWX_UREG_CHICK_MSI_ENABLE);
2524			iwx_nic_unlock(sc);
2525		}
2526		return;
2527	}
2528
2529	if (!stopped && iwx_nic_lock(sc)) {
2530		iwx_write_umac_prph(sc, IWX_UREG_CHICK,
2531		    IWX_UREG_CHICK_MSIX_ENABLE);
2532		iwx_nic_unlock(sc);
2533	}
2534
2535	/* Disable all interrupts */
2536	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
2537	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
2538
2539	/* Map fallback-queue (command/mgmt) to a single vector */
2540	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
2541	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2542	/* Map RSS queue (data) to the same vector */
2543	IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
2544	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2545
2546	/* Enable the RX queues cause interrupts */
2547	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2548	    IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
2549
2550	/* Map non-RX causes to the same vector */
2551	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
2552	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2553	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
2554	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2555	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
2556	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2557	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
2558	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2559	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
2560	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2561	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
2562	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2563	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
2564	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2565	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
2566	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2567	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
2568	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2569	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
2570	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2571	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
2572	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2573	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
2574	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2575	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
2576	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2577	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
2578	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2579	IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
2580	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
2581
2582	/* Enable non-RX causes interrupts */
2583	IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
2584	    IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
2585	    IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
2586	    IWX_MSIX_FH_INT_CAUSES_S2D |
2587	    IWX_MSIX_FH_INT_CAUSES_FH_ERR);
2588	IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
2589	    IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
2590	    IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
2591	    IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
2592	    IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
2593	    IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
2594	    IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
2595	    IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
2596	    IWX_MSIX_HW_INT_CAUSES_REG_SCD |
2597	    IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
2598	    IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
2599	    IWX_MSIX_HW_INT_CAUSES_REG_HAP);
2600}
2601
2602int
2603iwx_clear_persistence_bit(struct iwx_softc *sc)
2604{
2605	uint32_t hpm, wprot;
2606
2607	hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
2608	if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
2609		wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
2610		if (wprot & IWX_PREG_WFPM_ACCESS) {
2611			printf("%s: cannot clear persistence bit\n",
2612			    DEVNAME(sc));
2613			return EPERM;
2614		}
2615		iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
2616		    hpm & ~IWX_PERSISTENCE_BIT);
2617	}
2618
2619	return 0;
2620}
2621
2622int
2623iwx_start_hw(struct iwx_softc *sc)
2624{
2625	int err;
2626
2627	err = iwx_prepare_card_hw(sc);
2628	if (err)
2629		return err;
2630
2631	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
2632		err = iwx_clear_persistence_bit(sc);
2633		if (err)
2634			return err;
2635	}
2636
2637	/* Reset the entire device */
2638	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2639	DELAY(5000);
2640
2641	if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
2642	    sc->sc_integrated) {
2643		IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
2644		    IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
2645		DELAY(20);
2646		if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
2647		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
2648		    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
2649			printf("%s: timeout waiting for clock stabilization\n",
2650			    DEVNAME(sc));
2651			return ETIMEDOUT;
2652		}
2653
2654		err = iwx_force_power_gating(sc);
2655		if (err)
2656			return err;
2657
2658		/* Reset the entire device */
2659		IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2660		DELAY(5000);
2661	}
2662
2663	err = iwx_apm_init(sc);
2664	if (err)
2665		return err;
2666
2667	iwx_init_msix_hw(sc);
2668
2669	iwx_enable_rfkill_int(sc);
2670	iwx_check_rfkill(sc);
2671
2672	return 0;
2673}
2674
2675void
2676iwx_stop_device(struct iwx_softc *sc)
2677{
2678	struct ieee80211com *ic = &sc->sc_ic;
2679	struct ieee80211_node *ni = ic->ic_bss;
2680	int i;
2681
2682	iwx_disable_interrupts(sc);
2683	sc->sc_flags &= ~IWX_FLAG_USE_ICT;
2684
2685	iwx_disable_rx_dma(sc);
2686	iwx_reset_rx_ring(sc, &sc->rxq);
2687	for (i = 0; i < nitems(sc->txq); i++)
2688		iwx_reset_tx_ring(sc, &sc->txq[i]);
2689	for (i = 0; i < IEEE80211_NUM_TID; i++) {
2690		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
2691		if (ba->ba_state != IEEE80211_BA_AGREED)
2692			continue;
2693		ieee80211_delba_request(ic, ni, 0, 1, i);
2694	}
2695
2696	/* Make sure (redundant) we've released our request to stay awake */
2697	IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
2698	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2699	if (sc->sc_nic_locks > 0)
2700		printf("%s: %d active NIC locks forcefully cleared\n",
2701		    DEVNAME(sc), sc->sc_nic_locks);
2702	sc->sc_nic_locks = 0;
2703
2704	/* Stop the device, and put it in low power state */
2705	iwx_apm_stop(sc);
2706
2707	/* Reset the on-board processor. */
2708	IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
2709	DELAY(5000);
2710
2711	/*
2712	 * Upon stop, the IVAR table gets erased, so msi-x won't
2713	 * work. This causes a bug in RF-KILL flows, since the interrupt
2714	 * that enables radio won't fire on the correct irq, and the
2715	 * driver won't be able to handle the interrupt.
2716	 * Configure the IVAR table again after reset.
2717	 */
2718	iwx_conf_msix_hw(sc, 1);
2719
2720	/*
2721	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
2722	 * Clear the interrupt again.
2723	 */
2724	iwx_disable_interrupts(sc);
2725
2726	/* Even though we stop the HW we still want the RF kill interrupt. */
2727	iwx_enable_rfkill_int(sc);
2728	iwx_check_rfkill(sc);
2729
2730	iwx_prepare_card_hw(sc);
2731
2732	iwx_ctxt_info_free_paging(sc);
2733	iwx_dma_contig_free(&sc->pnvm_dma);
2734}
2735
2736void
2737iwx_nic_config(struct iwx_softc *sc)
2738{
2739	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
2740	uint32_t mask, val, reg_val = 0;
2741
2742	radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
2743	    IWX_FW_PHY_CFG_RADIO_TYPE_POS;
2744	radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
2745	    IWX_FW_PHY_CFG_RADIO_STEP_POS;
2746	radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
2747	    IWX_FW_PHY_CFG_RADIO_DASH_POS;
2748
2749	reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
2750	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
2751	reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
2752	    IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
2753
2754	/* radio configuration */
2755	reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
2756	reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
2757	reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2758
2759	mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
2760	    IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
2761	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
2762	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
2763	    IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
2764	    IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
2765	    IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
2766
2767	val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
2768	val &= ~mask;
2769	val |= reg_val;
2770	IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
2771}
2772
2773int
2774iwx_nic_rx_init(struct iwx_softc *sc)
2775{
2776	IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
2777
2778	/*
2779	 * We don't configure the RFH; the firmware will do that.
2780	 * Rx descriptors are set when firmware sends an ALIVE interrupt.
2781	 */
2782	return 0;
2783}
2784
2785int
2786iwx_nic_init(struct iwx_softc *sc)
2787{
2788	int err;
2789
2790	iwx_apm_init(sc);
2791	if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
2792		iwx_nic_config(sc);
2793
2794	err = iwx_nic_rx_init(sc);
2795	if (err)
2796		return err;
2797
2798	IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2799
2800	return 0;
2801}
2802
2803/* Map a TID to an ieee80211_edca_ac category. */
2804const uint8_t iwx_tid_to_ac[IWX_MAX_TID_COUNT] = {
2805	EDCA_AC_BE,
2806	EDCA_AC_BK,
2807	EDCA_AC_BK,
2808	EDCA_AC_BE,
2809	EDCA_AC_VI,
2810	EDCA_AC_VI,
2811	EDCA_AC_VO,
2812	EDCA_AC_VO,
2813};
2814
2815/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
2816const uint8_t iwx_ac_to_tx_fifo[] = {
2817	IWX_GEN2_EDCA_TX_FIFO_BE,
2818	IWX_GEN2_EDCA_TX_FIFO_BK,
2819	IWX_GEN2_EDCA_TX_FIFO_VI,
2820	IWX_GEN2_EDCA_TX_FIFO_VO,
2821};
2822
2823int
2824iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
2825    int num_slots)
2826{
2827	struct iwx_rx_packet *pkt;
2828	struct iwx_tx_queue_cfg_rsp *resp;
2829	struct iwx_tx_queue_cfg_cmd cmd_v0;
2830	struct iwx_scd_queue_cfg_cmd cmd_v3;
2831	struct iwx_host_cmd hcmd = {
2832		.flags = IWX_CMD_WANT_RESP,
2833		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2834	};
2835	struct iwx_tx_ring *ring = &sc->txq[qid];
2836	int err, fwqid, cmd_ver;
2837	uint32_t wr_idx;
2838	size_t resp_len;
2839
2840	iwx_reset_tx_ring(sc, ring);
2841
2842	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2843	    IWX_SCD_QUEUE_CONFIG_CMD);
2844	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2845		memset(&cmd_v0, 0, sizeof(cmd_v0));
2846		cmd_v0.sta_id = sta_id;
2847		cmd_v0.tid = tid;
2848		cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
2849		cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2850		cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
2851		cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
2852		hcmd.id = IWX_SCD_QUEUE_CFG;
2853		hcmd.data[0] = &cmd_v0;
2854		hcmd.len[0] = sizeof(cmd_v0);
2855	} else if (cmd_ver == 3) {
2856		memset(&cmd_v3, 0, sizeof(cmd_v3));
2857		cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
2858		cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
2859		cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
2860		cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
2861		cmd_v3.u.add.flags = htole32(0);
2862		cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
2863		cmd_v3.u.add.tid = tid;
2864		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2865		    IWX_SCD_QUEUE_CONFIG_CMD);
2866		hcmd.data[0] = &cmd_v3;
2867		hcmd.len[0] = sizeof(cmd_v3);
2868	} else {
2869		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2870		    DEVNAME(sc), cmd_ver);
2871		return ENOTSUP;
2872	}
2873
2874	err = iwx_send_cmd(sc, &hcmd);
2875	if (err)
2876		return err;
2877
2878	pkt = hcmd.resp_pkt;
2879	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2880		err = EIO;
2881		goto out;
2882	}
2883
2884	resp_len = iwx_rx_packet_payload_len(pkt);
2885	if (resp_len != sizeof(*resp)) {
2886		err = EIO;
2887		goto out;
2888	}
2889
2890	resp = (void *)pkt->data;
2891	fwqid = le16toh(resp->queue_number);
2892	wr_idx = le16toh(resp->write_pointer);
2893
2894	/* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
2895	if (fwqid != qid) {
2896		err = EIO;
2897		goto out;
2898	}
2899
2900	if (wr_idx != ring->cur_hw) {
2901		err = EIO;
2902		goto out;
2903	}
2904
2905	sc->qenablemsk |= (1 << qid);
2906	ring->tid = tid;
2907out:
2908	iwx_free_resp(sc, &hcmd);
2909	return err;
2910}
2911
2912int
2913iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
2914{
2915	struct iwx_rx_packet *pkt;
2916	struct iwx_tx_queue_cfg_rsp *resp;
2917	struct iwx_tx_queue_cfg_cmd cmd_v0;
2918	struct iwx_scd_queue_cfg_cmd cmd_v3;
2919	struct iwx_host_cmd hcmd = {
2920		.flags = IWX_CMD_WANT_RESP,
2921		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
2922	};
2923	struct iwx_tx_ring *ring = &sc->txq[qid];
2924	int err, cmd_ver;
2925
2926	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
2927	    IWX_SCD_QUEUE_CONFIG_CMD);
2928	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
2929		memset(&cmd_v0, 0, sizeof(cmd_v0));
2930		cmd_v0.sta_id = sta_id;
2931		cmd_v0.tid = tid;
2932		cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
2933		cmd_v0.cb_size = htole32(0);
2934		cmd_v0.byte_cnt_addr = htole64(0);
2935		cmd_v0.tfdq_addr = htole64(0);
2936		hcmd.id = IWX_SCD_QUEUE_CFG;
2937		hcmd.data[0] = &cmd_v0;
2938		hcmd.len[0] = sizeof(cmd_v0);
2939	} else if (cmd_ver == 3) {
2940		memset(&cmd_v3, 0, sizeof(cmd_v3));
2941		cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
2942		cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
2943		cmd_v3.u.remove.tid = tid;
2944		hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
2945		    IWX_SCD_QUEUE_CONFIG_CMD);
2946		hcmd.data[0] = &cmd_v3;
2947		hcmd.len[0] = sizeof(cmd_v3);
2948	} else {
2949		printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
2950		    DEVNAME(sc), cmd_ver);
2951		return ENOTSUP;
2952	}
2953
2954	err = iwx_send_cmd(sc, &hcmd);
2955	if (err)
2956		return err;
2957
2958	pkt = hcmd.resp_pkt;
2959	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
2960		err = EIO;
2961		goto out;
2962	}
2963
2964	sc->qenablemsk &= ~(1 << qid);
2965	iwx_reset_tx_ring(sc, ring);
2966out:
2967	iwx_free_resp(sc, &hcmd);
2968	return err;
2969}
2970
2971void
2972iwx_post_alive(struct iwx_softc *sc)
2973{
2974	int txcmd_ver;
2975
2976	iwx_ict_reset(sc);
2977
2978	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD) ;
2979	if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
2980		sc->sc_rate_n_flags_version = 2;
2981	else
2982		sc->sc_rate_n_flags_version = 1;
2983
2984	txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
2985}
2986
2987int
2988iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
2989    uint32_t duration_tu)
2990{
2991	struct iwx_session_prot_cmd cmd = {
2992		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
2993		    in->in_color)),
2994		.action = htole32(IWX_FW_CTXT_ACTION_ADD),
2995		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
2996		.duration_tu = htole32(duration_tu),
2997	};
2998	uint32_t cmd_id;
2999	int err;
3000
3001	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3002	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
3003	if (!err)
3004		sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
3005	return err;
3006}
3007
3008void
3009iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
3010{
3011	struct iwx_session_prot_cmd cmd = {
3012		.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
3013		    in->in_color)),
3014		.action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
3015		.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
3016		.duration_tu = 0,
3017	};
3018	uint32_t cmd_id;
3019
3020	/* Do nothing if the time event has already ended. */
3021	if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
3022		return;
3023
3024	cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
3025	if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
3026		sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
3027}
3028
3029/*
3030 * NVM read access and content parsing.  We do not support
3031 * external NVM or writing NVM.
3032 */
3033
3034uint8_t
3035iwx_fw_valid_tx_ant(struct iwx_softc *sc)
3036{
3037	uint8_t tx_ant;
3038
3039	tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
3040	    >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
3041
3042	if (sc->sc_nvm.valid_tx_ant)
3043		tx_ant &= sc->sc_nvm.valid_tx_ant;
3044
3045	return tx_ant;
3046}
3047
3048uint8_t
3049iwx_fw_valid_rx_ant(struct iwx_softc *sc)
3050{
3051	uint8_t rx_ant;
3052
3053	rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
3054	    >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
3055
3056	if (sc->sc_nvm.valid_rx_ant)
3057		rx_ant &= sc->sc_nvm.valid_rx_ant;
3058
3059	return rx_ant;
3060}
3061
3062void
3063iwx_init_channel_map(struct iwx_softc *sc, uint16_t *channel_profile_v3,
3064    uint32_t *channel_profile_v4, int nchan_profile)
3065{
3066	struct ieee80211com *ic = &sc->sc_ic;
3067	struct iwx_nvm_data *data = &sc->sc_nvm;
3068	int ch_idx;
3069	struct ieee80211_channel *channel;
3070	uint32_t ch_flags;
3071	int is_5ghz;
3072	int flags, hw_value;
3073	int nchan;
3074	const uint8_t *nvm_channels;
3075
3076	if (sc->sc_uhb_supported) {
3077		nchan = nitems(iwx_nvm_channels_uhb);
3078		nvm_channels = iwx_nvm_channels_uhb;
3079	} else {
3080		nchan = nitems(iwx_nvm_channels_8000);
3081		nvm_channels = iwx_nvm_channels_8000;
3082	}
3083
3084	for (ch_idx = 0; ch_idx < nchan && ch_idx < nchan_profile; ch_idx++) {
3085		if (channel_profile_v4)
3086			ch_flags = le32_to_cpup(channel_profile_v4 + ch_idx);
3087		else
3088			ch_flags = le16_to_cpup(channel_profile_v3 + ch_idx);
3089
3090		/* net80211 cannot handle 6 GHz channel numbers yet */
3091		if (ch_idx >= IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS)
3092			break;
3093
3094		is_5ghz = ch_idx >= IWX_NUM_2GHZ_CHANNELS;
3095		if (is_5ghz && !data->sku_cap_band_52GHz_enable)
3096			ch_flags &= ~IWX_NVM_CHANNEL_VALID;
3097
3098		hw_value = nvm_channels[ch_idx];
3099		channel = &ic->ic_channels[hw_value];
3100
3101		if (!(ch_flags & IWX_NVM_CHANNEL_VALID)) {
3102			channel->ic_freq = 0;
3103			channel->ic_flags = 0;
3104			continue;
3105		}
3106
3107		if (!is_5ghz) {
3108			flags = IEEE80211_CHAN_2GHZ;
3109			channel->ic_flags
3110			    = IEEE80211_CHAN_CCK
3111			    | IEEE80211_CHAN_OFDM
3112			    | IEEE80211_CHAN_DYN
3113			    | IEEE80211_CHAN_2GHZ;
3114		} else {
3115			flags = IEEE80211_CHAN_5GHZ;
3116			channel->ic_flags =
3117			    IEEE80211_CHAN_A;
3118		}
3119		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
3120
3121		if (!(ch_flags & IWX_NVM_CHANNEL_ACTIVE))
3122			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
3123
3124		if (data->sku_cap_11n_enable) {
3125			channel->ic_flags |= IEEE80211_CHAN_HT;
3126			if (ch_flags & IWX_NVM_CHANNEL_40MHZ)
3127				channel->ic_flags |= IEEE80211_CHAN_40MHZ;
3128		}
3129
3130		if (is_5ghz && data->sku_cap_11ac_enable) {
3131			channel->ic_flags |= IEEE80211_CHAN_VHT;
3132			if (ch_flags & IWX_NVM_CHANNEL_80MHZ)
3133				channel->ic_xflags |= IEEE80211_CHANX_80MHZ;
3134		}
3135	}
3136}
3137
3138int
3139iwx_mimo_enabled(struct iwx_softc *sc)
3140{
3141	struct ieee80211com *ic = &sc->sc_ic;
3142
3143	return !sc->sc_nvm.sku_cap_mimo_disable &&
3144	    (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
3145}
3146
3147void
3148iwx_setup_ht_rates(struct iwx_softc *sc)
3149{
3150	struct ieee80211com *ic = &sc->sc_ic;
3151	uint8_t rx_ant;
3152
3153	/* TX is supported with the same MCS as RX. */
3154	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
3155
3156	memset(ic->ic_sup_mcs, 0, sizeof(ic->ic_sup_mcs));
3157	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */
3158
3159	if (!iwx_mimo_enabled(sc))
3160		return;
3161
3162	rx_ant = iwx_fw_valid_rx_ant(sc);
3163	if ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3164	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)
3165		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
3166}
3167
3168void
3169iwx_setup_vht_rates(struct iwx_softc *sc)
3170{
3171	struct ieee80211com *ic = &sc->sc_ic;
3172	uint8_t rx_ant = iwx_fw_valid_rx_ant(sc);
3173	int n;
3174
3175	ic->ic_vht_rxmcs = (IEEE80211_VHT_MCS_0_9 <<
3176	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(1));
3177
3178	if (iwx_mimo_enabled(sc) &&
3179	    ((rx_ant & IWX_ANT_AB) == IWX_ANT_AB ||
3180	    (rx_ant & IWX_ANT_BC) == IWX_ANT_BC)) {
3181		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_0_9 <<
3182		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3183	} else {
3184		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3185		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2));
3186	}
3187
3188	for (n = 3; n <= IEEE80211_VHT_NUM_SS; n++) {
3189		ic->ic_vht_rxmcs |= (IEEE80211_VHT_MCS_SS_NOT_SUPP <<
3190		    IEEE80211_VHT_MCS_FOR_SS_SHIFT(n));
3191	}
3192
3193	ic->ic_vht_txmcs = ic->ic_vht_rxmcs;
3194}
3195
3196void
3197iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
3198    uint16_t ssn, uint16_t buf_size)
3199{
3200	reorder_buf->head_sn = ssn;
3201	reorder_buf->num_stored = 0;
3202	reorder_buf->buf_size = buf_size;
3203	reorder_buf->last_amsdu = 0;
3204	reorder_buf->last_sub_index = 0;
3205	reorder_buf->removed = 0;
3206	reorder_buf->valid = 0;
3207	reorder_buf->consec_oldsn_drops = 0;
3208	reorder_buf->consec_oldsn_ampdu_gp2 = 0;
3209	reorder_buf->consec_oldsn_prev_drop = 0;
3210}
3211
3212void
3213iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
3214{
3215	int i;
3216	struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
3217	struct iwx_reorder_buf_entry *entry;
3218
3219	for (i = 0; i < reorder_buf->buf_size; i++) {
3220		entry = &rxba->entries[i];
3221		ml_purge(&entry->frames);
3222		timerclear(&entry->reorder_time);
3223	}
3224
3225	reorder_buf->removed = 1;
3226	timeout_del(&reorder_buf->reorder_timer);
3227	timerclear(&rxba->last_rx);
3228	timeout_del(&rxba->session_timer);
3229	rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
3230}
3231
3232#define RX_REORDER_BUF_TIMEOUT_MQ_USEC (100000ULL)
3233
3234void
3235iwx_rx_ba_session_expired(void *arg)
3236{
3237	struct iwx_rxba_data *rxba = arg;
3238	struct iwx_softc *sc = rxba->sc;
3239	struct ieee80211com *ic = &sc->sc_ic;
3240	struct ieee80211_node *ni = ic->ic_bss;
3241	struct timeval now, timeout, expiry;
3242	int s;
3243
3244	s = splnet();
3245	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0 &&
3246	    ic->ic_state == IEEE80211_S_RUN &&
3247	    rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3248		getmicrouptime(&now);
3249		USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3250		timeradd(&rxba->last_rx, &timeout, &expiry);
3251		if (timercmp(&now, &expiry, <)) {
3252			timeout_add_usec(&rxba->session_timer, rxba->timeout);
3253		} else {
3254			ic->ic_stats.is_ht_rx_ba_timeout++;
3255			ieee80211_delba_request(ic, ni,
3256			    IEEE80211_REASON_TIMEOUT, 0, rxba->tid);
3257		}
3258	}
3259	splx(s);
3260}
3261
3262void
3263iwx_rx_bar_frame_release(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
3264    struct mbuf_list *ml)
3265{
3266	struct ieee80211com *ic = &sc->sc_ic;
3267	struct ieee80211_node *ni = ic->ic_bss;
3268	struct iwx_bar_frame_release *release = (void *)pkt->data;
3269	struct iwx_reorder_buffer *buf;
3270	struct iwx_rxba_data *rxba;
3271	unsigned int baid, nssn, sta_id, tid;
3272
3273	if (iwx_rx_packet_payload_len(pkt) < sizeof(*release))
3274		return;
3275
3276	baid = (le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_BAID_MASK) >>
3277	    IWX_BAR_FRAME_RELEASE_BAID_SHIFT;
3278	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3279	    baid >= nitems(sc->sc_rxba_data))
3280		return;
3281
3282	rxba = &sc->sc_rxba_data[baid];
3283	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
3284		return;
3285
3286	tid = le32toh(release->sta_tid) & IWX_BAR_FRAME_RELEASE_TID_MASK;
3287	sta_id = (le32toh(release->sta_tid) &
3288	    IWX_BAR_FRAME_RELEASE_STA_MASK) >> IWX_BAR_FRAME_RELEASE_STA_SHIFT;
3289	if (tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
3290		return;
3291
3292	nssn = le32toh(release->ba_info) & IWX_BAR_FRAME_RELEASE_NSSN_MASK;
3293	buf = &rxba->reorder_buf;
3294	iwx_release_frames(sc, ni, rxba, buf, nssn, ml);
3295}
3296
3297void
3298iwx_reorder_timer_expired(void *arg)
3299{
3300	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3301	struct iwx_reorder_buffer *buf = arg;
3302	struct iwx_rxba_data *rxba = iwx_rxba_data_from_reorder_buf(buf);
3303	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
3304	struct iwx_softc *sc = rxba->sc;
3305	struct ieee80211com *ic = &sc->sc_ic;
3306	struct ieee80211_node *ni = ic->ic_bss;
3307	int i, s;
3308	uint16_t sn = 0, index = 0;
3309	int expired = 0;
3310	int cont = 0;
3311	struct timeval now, timeout, expiry;
3312
3313	if (!buf->num_stored || buf->removed)
3314		return;
3315
3316	s = splnet();
3317	getmicrouptime(&now);
3318	USEC_TO_TIMEVAL(RX_REORDER_BUF_TIMEOUT_MQ_USEC, &timeout);
3319
3320	for (i = 0; i < buf->buf_size ; i++) {
3321		index = (buf->head_sn + i) % buf->buf_size;
3322
3323		if (ml_empty(&entries[index].frames)) {
3324			/*
3325			 * If there is a hole and the next frame didn't expire
3326			 * we want to break and not advance SN.
3327			 */
3328			cont = 0;
3329			continue;
3330		}
3331		timeradd(&entries[index].reorder_time, &timeout, &expiry);
3332		if (!cont && timercmp(&now, &expiry, <))
3333			break;
3334
3335		expired = 1;
3336		/* continue until next hole after this expired frame */
3337		cont = 1;
3338		sn = (buf->head_sn + (i + 1)) & 0xfff;
3339	}
3340
3341	if (expired) {
3342		/* SN is set to the last expired frame + 1 */
3343		iwx_release_frames(sc, ni, rxba, buf, sn, &ml);
3344		if_input(&sc->sc_ic.ic_if, &ml);
3345		ic->ic_stats.is_ht_rx_ba_window_gap_timeout++;
3346	} else {
3347		/*
3348		 * If no frame expired and there are stored frames, index is now
3349		 * pointing to the first unexpired frame - modify reorder timeout
3350		 * accordingly.
3351		 */
3352		timeout_add_usec(&buf->reorder_timer,
3353		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
3354	}
3355
3356	splx(s);
3357}
3358
3359#define IWX_MAX_RX_BA_SESSIONS 16
3360
3361struct iwx_rxba_data *
3362iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
3363{
3364	int i;
3365
3366	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
3367		if (sc->sc_rxba_data[i].baid ==
3368		    IWX_RX_REORDER_DATA_INVALID_BAID)
3369			continue;
3370		if (sc->sc_rxba_data[i].tid == tid)
3371			return &sc->sc_rxba_data[i];
3372	}
3373
3374	return NULL;
3375}
3376
3377int
3378iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3379    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3380    uint8_t *baid)
3381{
3382	struct iwx_rx_baid_cfg_cmd cmd;
3383	uint32_t new_baid = 0;
3384	int err;
3385
3386	splassert(IPL_NET);
3387
3388	memset(&cmd, 0, sizeof(cmd));
3389
3390	if (start) {
3391		cmd.action = IWX_RX_BAID_ACTION_ADD;
3392		cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
3393		cmd.alloc.tid = tid;
3394		cmd.alloc.ssn = htole16(ssn);
3395		cmd.alloc.win_size = htole16(winsize);
3396	} else {
3397		struct iwx_rxba_data *rxba;
3398
3399		rxba = iwx_find_rxba_data(sc, tid);
3400		if (rxba == NULL)
3401			return ENOENT;
3402		*baid = rxba->baid;
3403
3404		cmd.action = IWX_RX_BAID_ACTION_REMOVE;
3405		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
3406		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
3407			cmd.remove_v1.baid = rxba->baid;
3408		} else {
3409			cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
3410			cmd.remove.tid = tid;
3411		}
3412	}
3413
3414	err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
3415	    IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
3416	if (err)
3417		return err;
3418
3419	if (start) {
3420		if (new_baid >= nitems(sc->sc_rxba_data))
3421			return ERANGE;
3422		*baid = new_baid;
3423	}
3424
3425	return 0;
3426}
3427
3428int
3429iwx_sta_rx_agg_sta_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
3430    uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
3431    uint8_t *baid)
3432{
3433	struct iwx_add_sta_cmd cmd;
3434	struct iwx_node *in = (void *)ni;
3435	int err;
3436	uint32_t status;
3437
3438	splassert(IPL_NET);
3439
3440	memset(&cmd, 0, sizeof(cmd));
3441
3442	cmd.sta_id = IWX_STATION_ID;
3443	cmd.mac_id_n_color
3444	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3445	cmd.add_modify = IWX_STA_MODE_MODIFY;
3446
3447	if (start) {
3448		cmd.add_immediate_ba_tid = (uint8_t)tid;
3449		cmd.add_immediate_ba_ssn = htole16(ssn);
3450		cmd.rx_ba_window = htole16(winsize);
3451	} else {
3452		struct iwx_rxba_data *rxba;
3453
3454		rxba = iwx_find_rxba_data(sc, tid);
3455		if (rxba == NULL)
3456			return ENOENT;
3457		*baid = rxba->baid;
3458
3459		cmd.remove_immediate_ba_tid = (uint8_t)tid;
3460	}
3461	cmd.modify_mask = start ? IWX_STA_MODIFY_ADD_BA_TID :
3462	    IWX_STA_MODIFY_REMOVE_BA_TID;
3463
3464	status = IWX_ADD_STA_SUCCESS;
3465	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(cmd), &cmd,
3466	    &status);
3467	if (err)
3468		return err;
3469
3470	if ((status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
3471		return EIO;
3472
3473	if (!(status & IWX_ADD_STA_BAID_VALID_MASK))
3474		return EINVAL;
3475
3476	if (start) {
3477		*baid = (status & IWX_ADD_STA_BAID_MASK) >>
3478		    IWX_ADD_STA_BAID_SHIFT;
3479		if (*baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
3480		    *baid >= nitems(sc->sc_rxba_data))
3481			return ERANGE;
3482	}
3483
3484	return 0;
3485}
3486
3487void
3488iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
3489    uint16_t ssn, uint16_t winsize, int timeout_val, int start)
3490{
3491	struct ieee80211com *ic = &sc->sc_ic;
3492	int err, s;
3493	struct iwx_rxba_data *rxba = NULL;
3494	uint8_t baid = 0;
3495
3496	s = splnet();
3497
3498	if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
3499		ieee80211_addba_req_refuse(ic, ni, tid);
3500		splx(s);
3501		return;
3502	}
3503
3504	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
3505		err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
3506		    timeout_val, start, &baid);
3507	} else {
3508		err = iwx_sta_rx_agg_sta_cmd(sc, ni, tid, ssn, winsize,
3509		    timeout_val, start, &baid);
3510	}
3511	if (err) {
3512		ieee80211_addba_req_refuse(ic, ni, tid);
3513		splx(s);
3514		return;
3515	}
3516
3517	rxba = &sc->sc_rxba_data[baid];
3518
3519	/* Deaggregation is done in hardware. */
3520	if (start) {
3521		if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
3522			ieee80211_addba_req_refuse(ic, ni, tid);
3523			splx(s);
3524			return;
3525		}
3526		rxba->sta_id = IWX_STATION_ID;
3527		rxba->tid = tid;
3528		rxba->baid = baid;
3529		rxba->timeout = timeout_val;
3530		getmicrouptime(&rxba->last_rx);
3531		iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
3532		    winsize);
3533		if (timeout_val != 0) {
3534			struct ieee80211_rx_ba *ba;
3535			timeout_add_usec(&rxba->session_timer,
3536			    timeout_val);
3537			/* XXX disable net80211's BA timeout handler */
3538			ba = &ni->ni_rx_ba[tid];
3539			ba->ba_timeout_val = 0;
3540		}
3541	} else
3542		iwx_clear_reorder_buffer(sc, rxba);
3543
3544	if (start) {
3545		sc->sc_rx_ba_sessions++;
3546		ieee80211_addba_req_accept(ic, ni, tid);
3547	} else if (sc->sc_rx_ba_sessions > 0)
3548		sc->sc_rx_ba_sessions--;
3549
3550	splx(s);
3551}
3552
3553void
3554iwx_mac_ctxt_task(void *arg)
3555{
3556	struct iwx_softc *sc = arg;
3557	struct ieee80211com *ic = &sc->sc_ic;
3558	struct iwx_node *in = (void *)ic->ic_bss;
3559	int err, s = splnet();
3560
3561	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3562	    ic->ic_state != IEEE80211_S_RUN) {
3563		refcnt_rele_wake(&sc->task_refs);
3564		splx(s);
3565		return;
3566	}
3567
3568	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
3569	if (err)
3570		printf("%s: failed to update MAC\n", DEVNAME(sc));
3571
3572	iwx_unprotect_session(sc, in);
3573
3574	refcnt_rele_wake(&sc->task_refs);
3575	splx(s);
3576}
3577
3578void
3579iwx_phy_ctxt_task(void *arg)
3580{
3581	struct iwx_softc *sc = arg;
3582	struct ieee80211com *ic = &sc->sc_ic;
3583	struct iwx_node *in = (void *)ic->ic_bss;
3584	struct ieee80211_node *ni = &in->in_ni;
3585	uint8_t chains, sco, vht_chan_width;
3586	int err, s = splnet();
3587
3588	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
3589	    ic->ic_state != IEEE80211_S_RUN ||
3590	    in->in_phyctxt == NULL) {
3591		refcnt_rele_wake(&sc->task_refs);
3592		splx(s);
3593		return;
3594	}
3595
3596	chains = iwx_mimo_enabled(sc) ? 2 : 1;
3597	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
3598	    IEEE80211_CHAN_40MHZ_ALLOWED(ni->ni_chan) &&
3599	    ieee80211_node_supports_ht_chan40(ni))
3600		sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
3601	else
3602		sco = IEEE80211_HTOP0_SCO_SCN;
3603	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
3604	    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
3605	    ieee80211_node_supports_vht_chan80(ni))
3606		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
3607	else
3608		vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
3609	if (in->in_phyctxt->sco != sco ||
3610	    in->in_phyctxt->vht_chan_width != vht_chan_width) {
3611		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
3612		    in->in_phyctxt->channel, chains, chains, 0, sco,
3613		    vht_chan_width);
3614		if (err)
3615			printf("%s: failed to update PHY\n", DEVNAME(sc));
3616	}
3617
3618	refcnt_rele_wake(&sc->task_refs);
3619	splx(s);
3620}
3621
3622void
3623iwx_updatechan(struct ieee80211com *ic)
3624{
3625	struct iwx_softc *sc = ic->ic_softc;
3626
3627	if (ic->ic_state == IEEE80211_S_RUN &&
3628	    !task_pending(&sc->newstate_task))
3629		iwx_add_task(sc, systq, &sc->phy_ctxt_task);
3630}
3631
3632void
3633iwx_updateprot(struct ieee80211com *ic)
3634{
3635	struct iwx_softc *sc = ic->ic_softc;
3636
3637	if (ic->ic_state == IEEE80211_S_RUN &&
3638	    !task_pending(&sc->newstate_task))
3639		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3640}
3641
3642void
3643iwx_updateslot(struct ieee80211com *ic)
3644{
3645	struct iwx_softc *sc = ic->ic_softc;
3646
3647	if (ic->ic_state == IEEE80211_S_RUN &&
3648	    !task_pending(&sc->newstate_task))
3649		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3650}
3651
3652void
3653iwx_updateedca(struct ieee80211com *ic)
3654{
3655	struct iwx_softc *sc = ic->ic_softc;
3656
3657	if (ic->ic_state == IEEE80211_S_RUN &&
3658	    !task_pending(&sc->newstate_task))
3659		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3660}
3661
3662void
3663iwx_updatedtim(struct ieee80211com *ic)
3664{
3665	struct iwx_softc *sc = ic->ic_softc;
3666
3667	if (ic->ic_state == IEEE80211_S_RUN &&
3668	    !task_pending(&sc->newstate_task))
3669		iwx_add_task(sc, systq, &sc->mac_ctxt_task);
3670}
3671
3672void
3673iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
3674    uint8_t tid)
3675{
3676	struct ieee80211com *ic = &sc->sc_ic;
3677	struct ieee80211_tx_ba *ba;
3678	int err, qid;
3679	struct iwx_tx_ring *ring;
3680
3681	/* Ensure we can map this TID to an aggregation queue. */
3682	if (tid >= IWX_MAX_TID_COUNT)
3683		return;
3684
3685	ba = &ni->ni_tx_ba[tid];
3686	if (ba->ba_state != IEEE80211_BA_REQUESTED)
3687		return;
3688
3689	qid = sc->aggqid[tid];
3690	if (qid == 0) {
3691		/* Firmware should pick the next unused Tx queue. */
3692		qid = fls(sc->qenablemsk);
3693	}
3694
3695	/*
3696	 * Simply enable the queue.
3697	 * Firmware handles Tx Ba session setup and teardown.
3698	 */
3699	if ((sc->qenablemsk & (1 << qid)) == 0) {
3700		if (!iwx_nic_lock(sc)) {
3701			ieee80211_addba_resp_refuse(ic, ni, tid,
3702			    IEEE80211_STATUS_UNSPECIFIED);
3703			return;
3704		}
3705		err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
3706		    IWX_TX_RING_COUNT);
3707		iwx_nic_unlock(sc);
3708		if (err) {
3709			printf("%s: could not enable Tx queue %d "
3710			    "(error %d)\n", DEVNAME(sc), qid, err);
3711			ieee80211_addba_resp_refuse(ic, ni, tid,
3712			    IEEE80211_STATUS_UNSPECIFIED);
3713			return;
3714		}
3715
3716		ba->ba_winstart = 0;
3717	} else
3718		ba->ba_winstart = ni->ni_qos_txseqs[tid];
3719
3720	ba->ba_winend = (ba->ba_winstart + ba->ba_winsize - 1) & 0xfff;
3721
3722	ring = &sc->txq[qid];
3723	ba->ba_timeout_val = 0;
3724	ieee80211_addba_resp_accept(ic, ni, tid);
3725	sc->aggqid[tid] = qid;
3726}
3727
3728void
3729iwx_ba_task(void *arg)
3730{
3731	struct iwx_softc *sc = arg;
3732	struct ieee80211com *ic = &sc->sc_ic;
3733	struct ieee80211_node *ni = ic->ic_bss;
3734	int s = splnet();
3735	int tid;
3736
3737	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3738		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3739			break;
3740		if (sc->ba_rx.start_tidmask & (1 << tid)) {
3741			struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
3742			iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
3743			    ba->ba_winsize, ba->ba_timeout_val, 1);
3744			sc->ba_rx.start_tidmask &= ~(1 << tid);
3745		} else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
3746			iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
3747			sc->ba_rx.stop_tidmask &= ~(1 << tid);
3748		}
3749	}
3750
3751	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
3752		if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
3753			break;
3754		if (sc->ba_tx.start_tidmask & (1 << tid)) {
3755			iwx_sta_tx_agg_start(sc, ni, tid);
3756			sc->ba_tx.start_tidmask &= ~(1 << tid);
3757		}
3758	}
3759
3760	refcnt_rele_wake(&sc->task_refs);
3761	splx(s);
3762}
3763
3764/*
3765 * This function is called by upper layer when an ADDBA request is received
3766 * from another STA and before the ADDBA response is sent.
3767 */
3768int
3769iwx_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3770    uint8_t tid)
3771{
3772	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3773
3774	if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
3775	    tid >= IWX_MAX_TID_COUNT)
3776		return ENOSPC;
3777
3778	if (sc->ba_rx.start_tidmask & (1 << tid))
3779		return EBUSY;
3780
3781	sc->ba_rx.start_tidmask |= (1 << tid);
3782	iwx_add_task(sc, systq, &sc->ba_task);
3783
3784	return EBUSY;
3785}
3786
3787/*
3788 * This function is called by upper layer on teardown of an HT-immediate
3789 * Block Ack agreement (eg. upon receipt of a DELBA frame).
3790 */
3791void
3792iwx_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
3793    uint8_t tid)
3794{
3795	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3796
3797	if (tid >= IWX_MAX_TID_COUNT || sc->ba_rx.stop_tidmask & (1 << tid))
3798		return;
3799
3800	sc->ba_rx.stop_tidmask |= (1 << tid);
3801	iwx_add_task(sc, systq, &sc->ba_task);
3802}
3803
3804int
3805iwx_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
3806    uint8_t tid)
3807{
3808	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
3809	struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
3810
3811	/*
3812	 * Require a firmware version which uses an internal AUX queue.
3813	 * The value of IWX_FIRST_AGG_TX_QUEUE would be incorrect otherwise.
3814	 */
3815	if (sc->first_data_qid != IWX_DQA_CMD_QUEUE + 1)
3816		return ENOTSUP;
3817
3818	/* Ensure we can map this TID to an aggregation queue. */
3819	if (tid >= IWX_MAX_TID_COUNT)
3820		return EINVAL;
3821
3822	/* We only support a fixed Tx aggregation window size, for now. */
3823	if (ba->ba_winsize != IWX_FRAME_LIMIT)
3824		return ENOTSUP;
3825
3826	/* Is firmware already using an agg queue with this TID? */
3827	if (sc->aggqid[tid] != 0)
3828		return ENOSPC;
3829
3830	/* Are we already processing an ADDBA request? */
3831	if (sc->ba_tx.start_tidmask & (1 << tid))
3832		return EBUSY;
3833
3834	sc->ba_tx.start_tidmask |= (1 << tid);
3835	iwx_add_task(sc, systq, &sc->ba_task);
3836
3837	return EBUSY;
3838}
3839
3840void
3841iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
3842{
3843	uint32_t mac_addr0, mac_addr1;
3844
3845	memset(data->hw_addr, 0, sizeof(data->hw_addr));
3846
3847	if (!iwx_nic_lock(sc))
3848		return;
3849
3850	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
3851	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
3852
3853	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3854
3855	/* If OEM fused a valid address, use it instead of the one in OTP. */
3856	if (iwx_is_valid_mac_addr(data->hw_addr)) {
3857		iwx_nic_unlock(sc);
3858		return;
3859	}
3860
3861	mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
3862	mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
3863
3864	iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
3865
3866	iwx_nic_unlock(sc);
3867}
3868
3869int
3870iwx_is_valid_mac_addr(const uint8_t *addr)
3871{
3872	static const uint8_t reserved_mac[] = {
3873		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3874	};
3875
3876	return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
3877	    memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
3878	    memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
3879	    !ETHER_IS_MULTICAST(addr));
3880}
3881
3882void
3883iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
3884{
3885	const uint8_t *hw_addr;
3886
3887	hw_addr = (const uint8_t *)&mac_addr0;
3888	dest[0] = hw_addr[3];
3889	dest[1] = hw_addr[2];
3890	dest[2] = hw_addr[1];
3891	dest[3] = hw_addr[0];
3892
3893	hw_addr = (const uint8_t *)&mac_addr1;
3894	dest[4] = hw_addr[1];
3895	dest[5] = hw_addr[0];
3896}
3897
3898int
3899iwx_nvm_get(struct iwx_softc *sc)
3900{
3901	struct iwx_nvm_get_info cmd = {};
3902	struct iwx_nvm_data *nvm = &sc->sc_nvm;
3903	struct iwx_host_cmd hcmd = {
3904		.flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
3905		.data = { &cmd, },
3906		.len = { sizeof(cmd) },
3907		.id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
3908		    IWX_NVM_GET_INFO)
3909	};
3910	int err;
3911	uint32_t mac_flags;
3912	/*
3913	 * All the values in iwx_nvm_get_info_rsp v4 are the same as
3914	 * in v3, except for the channel profile part of the
3915	 * regulatory.  So we can just access the new struct, with the
3916	 * exception of the latter.
3917	 */
3918	struct iwx_nvm_get_info_rsp *rsp;
3919	struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
3920	int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
3921	size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
3922
3923	hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
3924	err = iwx_send_cmd(sc, &hcmd);
3925	if (err)
3926		return err;
3927
3928	if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
3929		err = EIO;
3930		goto out;
3931	}
3932
3933	memset(nvm, 0, sizeof(*nvm));
3934
3935	iwx_set_mac_addr_from_csr(sc, nvm);
3936	if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
3937		printf("%s: no valid mac address was found\n", DEVNAME(sc));
3938		err = EINVAL;
3939		goto out;
3940	}
3941
3942	rsp = (void *)hcmd.resp_pkt->data;
3943
3944	/* Initialize general data */
3945	nvm->nvm_version = le16toh(rsp->general.nvm_version);
3946	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
3947
3948	/* Initialize MAC sku data */
3949	mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
3950	nvm->sku_cap_11ac_enable =
3951		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
3952	nvm->sku_cap_11n_enable =
3953		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
3954	nvm->sku_cap_11ax_enable =
3955		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
3956	nvm->sku_cap_band_24GHz_enable =
3957		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
3958	nvm->sku_cap_band_52GHz_enable =
3959		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
3960	nvm->sku_cap_mimo_disable =
3961		!!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
3962
3963	/* Initialize PHY sku data */
3964	nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
3965	nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
3966
3967	if (le32toh(rsp->regulatory.lar_enabled) &&
3968	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
3969		nvm->lar_enabled = 1;
3970	}
3971
3972	if (v4) {
3973		iwx_init_channel_map(sc, NULL,
3974		    rsp->regulatory.channel_profile, IWX_NUM_CHANNELS);
3975	} else {
3976		rsp_v3 = (void *)rsp;
3977		iwx_init_channel_map(sc, rsp_v3->regulatory.channel_profile,
3978		    NULL, IWX_NUM_CHANNELS_V1);
3979	}
3980out:
3981	iwx_free_resp(sc, &hcmd);
3982	return err;
3983}
3984
3985int
3986iwx_load_firmware(struct iwx_softc *sc)
3987{
3988	struct iwx_fw_sects *fws;
3989	int err;
3990
3991	splassert(IPL_NET);
3992
3993	sc->sc_uc.uc_intr = 0;
3994	sc->sc_uc.uc_ok = 0;
3995
3996	fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
3997	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
3998		err = iwx_ctxt_info_gen3_init(sc, fws);
3999	else
4000		err = iwx_ctxt_info_init(sc, fws);
4001	if (err) {
4002		printf("%s: could not init context info\n", DEVNAME(sc));
4003		return err;
4004	}
4005
4006	/* wait for the firmware to load */
4007	err = tsleep_nsec(&sc->sc_uc, 0, "iwxuc", SEC_TO_NSEC(1));
4008	if (err || !sc->sc_uc.uc_ok) {
4009		printf("%s: could not load firmware, %d\n", DEVNAME(sc), err);
4010		iwx_ctxt_info_free_paging(sc);
4011	}
4012
4013	iwx_dma_contig_free(&sc->iml_dma);
4014	iwx_ctxt_info_free_fw_img(sc);
4015
4016	if (!sc->sc_uc.uc_ok)
4017		return EINVAL;
4018
4019	return err;
4020}
4021
4022int
4023iwx_start_fw(struct iwx_softc *sc)
4024{
4025	int err;
4026
4027	IWX_WRITE(sc, IWX_CSR_INT, ~0);
4028
4029	iwx_disable_interrupts(sc);
4030
4031	/* make sure rfkill handshake bits are cleared */
4032	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
4033	IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
4034	    IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4035
4036	/* clear (again), then enable firmware load interrupt */
4037	IWX_WRITE(sc, IWX_CSR_INT, ~0);
4038
4039	err = iwx_nic_init(sc);
4040	if (err) {
4041		printf("%s: unable to init nic\n", DEVNAME(sc));
4042		return err;
4043	}
4044
4045	iwx_enable_fwload_interrupt(sc);
4046
4047	return iwx_load_firmware(sc);
4048}
4049
4050int
4051iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
4052    size_t len)
4053{
4054	const struct iwx_ucode_tlv *tlv;
4055	uint32_t sha1 = 0;
4056	uint16_t mac_type = 0, rf_id = 0;
4057	uint8_t *pnvm_data = NULL, *tmp;
4058	int hw_match = 0;
4059	uint32_t size = 0;
4060	int err;
4061
4062	while (len >= sizeof(*tlv)) {
4063		uint32_t tlv_len, tlv_type;
4064
4065		len -= sizeof(*tlv);
4066		tlv = (const void *)data;
4067
4068		tlv_len = le32toh(tlv->length);
4069		tlv_type = le32toh(tlv->type);
4070
4071		if (len < tlv_len) {
4072			printf("%s: invalid TLV len: %zd/%u\n",
4073			    DEVNAME(sc), len, tlv_len);
4074			err = EINVAL;
4075			goto out;
4076		}
4077
4078		data += sizeof(*tlv);
4079
4080		switch (tlv_type) {
4081		case IWX_UCODE_TLV_PNVM_VERSION:
4082			if (tlv_len < sizeof(uint32_t))
4083				break;
4084
4085			sha1 = le32_to_cpup((const uint32_t *)data);
4086			break;
4087		case IWX_UCODE_TLV_HW_TYPE:
4088			if (tlv_len < 2 * sizeof(uint16_t))
4089				break;
4090
4091			if (hw_match)
4092				break;
4093
4094			mac_type = le16_to_cpup((const uint16_t *)data);
4095			rf_id = le16_to_cpup((const uint16_t *)(data +
4096			    sizeof(uint16_t)));
4097
4098			if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
4099			    rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
4100				hw_match = 1;
4101			break;
4102		case IWX_UCODE_TLV_SEC_RT: {
4103			const struct iwx_pnvm_section *section;
4104			uint32_t data_len;
4105
4106			section = (const void *)data;
4107			data_len = tlv_len - sizeof(*section);
4108
4109			/* TODO: remove, this is a deprecated separator */
4110			if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
4111				break;
4112
4113			tmp = malloc(size + data_len, M_DEVBUF,
4114			    M_WAITOK | M_CANFAIL | M_ZERO);
4115			if (tmp == NULL) {
4116				err = ENOMEM;
4117				goto out;
4118			}
4119			memcpy(tmp, pnvm_data, size);
4120			memcpy(tmp + size, section->data, data_len);
4121			free(pnvm_data, M_DEVBUF, size);
4122			pnvm_data = tmp;
4123			size += data_len;
4124			break;
4125		}
4126		case IWX_UCODE_TLV_PNVM_SKU:
4127			/* New PNVM section started, stop parsing. */
4128			goto done;
4129		default:
4130			break;
4131		}
4132
4133		if (roundup(tlv_len, 4) > len)
4134			break;
4135		len -= roundup(tlv_len, 4);
4136		data += roundup(tlv_len, 4);
4137	}
4138done:
4139	if (!hw_match || size == 0) {
4140		err = ENOENT;
4141		goto out;
4142	}
4143
4144	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 0);
4145	if (err) {
4146		printf("%s: could not allocate DMA memory for PNVM\n",
4147		    DEVNAME(sc));
4148		err = ENOMEM;
4149		goto out;
4150	}
4151	memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
4152	iwx_ctxt_info_gen3_set_pnvm(sc);
4153	sc->sc_pnvm_ver = sha1;
4154out:
4155	free(pnvm_data, M_DEVBUF, size);
4156	return err;
4157}
4158
4159int
4160iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
4161{
4162	const struct iwx_ucode_tlv *tlv;
4163
4164	while (len >= sizeof(*tlv)) {
4165		uint32_t tlv_len, tlv_type;
4166
4167		len -= sizeof(*tlv);
4168		tlv = (const void *)data;
4169
4170		tlv_len = le32toh(tlv->length);
4171		tlv_type = le32toh(tlv->type);
4172
4173		if (len < tlv_len || roundup(tlv_len, 4) > len)
4174			return EINVAL;
4175
4176		if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
4177			const struct iwx_sku_id *sku_id =
4178				(const void *)(data + sizeof(*tlv));
4179
4180			data += sizeof(*tlv) + roundup(tlv_len, 4);
4181			len -= roundup(tlv_len, 4);
4182
4183			if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
4184			    sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
4185			    sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
4186			    iwx_pnvm_handle_section(sc, data, len) == 0)
4187				return 0;
4188		} else {
4189			data += sizeof(*tlv) + roundup(tlv_len, 4);
4190			len -= roundup(tlv_len, 4);
4191		}
4192	}
4193
4194	return ENOENT;
4195}
4196
4197/* Make AX210 firmware loading context point at PNVM image in DMA memory. */
4198void
4199iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
4200{
4201	struct iwx_prph_scratch *prph_scratch;
4202	struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
4203
4204	prph_scratch = sc->prph_scratch_dma.vaddr;
4205	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
4206
4207	prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
4208	prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
4209
4210	bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, 0, sc->pnvm_dma.size,
4211	    BUS_DMASYNC_PREWRITE);
4212}
4213
4214/*
4215 * Load platform-NVM (non-volatile-memory) data from the filesystem.
4216 * This data apparently contains regulatory information and affects device
4217 * channel configuration.
4218 * The SKU of AX210 devices tells us which PNVM file section is needed.
4219 * Pre-AX210 devices store NVM data onboard.
4220 */
4221int
4222iwx_load_pnvm(struct iwx_softc *sc)
4223{
4224	const int wait_flags = IWX_PNVM_COMPLETE;
4225	int s, err = 0;
4226	u_char *pnvm_data = NULL;
4227	size_t pnvm_size = 0;
4228
4229	if (sc->sc_sku_id[0] == 0 &&
4230	    sc->sc_sku_id[1] == 0 &&
4231	    sc->sc_sku_id[2] == 0)
4232		return 0;
4233
4234	if (sc->sc_pnvm_name) {
4235		if (sc->pnvm_dma.vaddr == NULL) {
4236			err = loadfirmware(sc->sc_pnvm_name,
4237			    &pnvm_data, &pnvm_size);
4238			if (err) {
4239				printf("%s: could not read %s (error %d)\n",
4240				    DEVNAME(sc), sc->sc_pnvm_name, err);
4241				return err;
4242			}
4243
4244			err = iwx_pnvm_parse(sc, pnvm_data, pnvm_size);
4245			if (err && err != ENOENT) {
4246				free(pnvm_data, M_DEVBUF, pnvm_size);
4247				return err;
4248			}
4249		} else
4250			iwx_ctxt_info_gen3_set_pnvm(sc);
4251	}
4252
4253	s = splnet();
4254
4255	if (!iwx_nic_lock(sc)) {
4256		splx(s);
4257		free(pnvm_data, M_DEVBUF, pnvm_size);
4258		return EBUSY;
4259	}
4260
4261	/*
4262	 * If we don't have a platform NVM file simply ask firmware
4263	 * to proceed without it.
4264	 */
4265
4266	iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
4267	    IWX_UREG_DOORBELL_TO_ISR6_PNVM);
4268
4269	/* Wait for the pnvm complete notification from firmware. */
4270	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4271		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4272		    SEC_TO_NSEC(2));
4273		if (err)
4274			break;
4275	}
4276
4277	splx(s);
4278	iwx_nic_unlock(sc);
4279	free(pnvm_data, M_DEVBUF, pnvm_size);
4280	return err;
4281}
4282
4283int
4284iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
4285{
4286	struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
4287		.valid = htole32(valid_tx_ant),
4288	};
4289
4290	return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
4291	    0, sizeof(tx_ant_cmd), &tx_ant_cmd);
4292}
4293
4294int
4295iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
4296{
4297	struct iwx_phy_cfg_cmd phy_cfg_cmd;
4298
4299	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
4300	phy_cfg_cmd.calib_control.event_trigger =
4301	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
4302	phy_cfg_cmd.calib_control.flow_trigger =
4303	    sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
4304
4305	return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
4306	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
4307}
4308
4309int
4310iwx_send_dqa_cmd(struct iwx_softc *sc)
4311{
4312	struct iwx_dqa_enable_cmd dqa_cmd = {
4313		.cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
4314	};
4315	uint32_t cmd_id;
4316
4317	cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
4318	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
4319}
4320
4321int
4322iwx_load_ucode_wait_alive(struct iwx_softc *sc)
4323{
4324	int err;
4325
4326	err = iwx_read_firmware(sc);
4327	if (err)
4328		return err;
4329
4330	err = iwx_start_fw(sc);
4331	if (err)
4332		return err;
4333
4334	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4335		err = iwx_load_pnvm(sc);
4336		if (err)
4337			return err;
4338	}
4339
4340	iwx_post_alive(sc);
4341
4342	return 0;
4343}
4344
4345int
4346iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
4347{
4348	const int wait_flags = IWX_INIT_COMPLETE;
4349	struct iwx_nvm_access_complete_cmd nvm_complete = {};
4350	struct iwx_init_extended_cfg_cmd init_cfg = {
4351		.init_flags = htole32(IWX_INIT_NVM),
4352	};
4353	int err, s;
4354
4355	if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
4356		printf("%s: radio is disabled by hardware switch\n",
4357		    DEVNAME(sc));
4358		return EPERM;
4359	}
4360
4361	s = splnet();
4362	sc->sc_init_complete = 0;
4363	err = iwx_load_ucode_wait_alive(sc);
4364	if (err) {
4365		printf("%s: failed to load init firmware\n", DEVNAME(sc));
4366		splx(s);
4367		return err;
4368	}
4369
4370	/*
4371	 * Send init config command to mark that we are sending NVM
4372	 * access commands
4373	 */
4374	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
4375	    IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
4376	if (err) {
4377		splx(s);
4378		return err;
4379	}
4380
4381	err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
4382	    IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
4383	if (err) {
4384		splx(s);
4385		return err;
4386	}
4387
4388	/* Wait for the init complete notification from the firmware. */
4389	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
4390		err = tsleep_nsec(&sc->sc_init_complete, 0, "iwxinit",
4391		    SEC_TO_NSEC(2));
4392		if (err) {
4393			splx(s);
4394			return err;
4395		}
4396	}
4397	splx(s);
4398	if (readnvm) {
4399		err = iwx_nvm_get(sc);
4400		if (err) {
4401			printf("%s: failed to read nvm\n", DEVNAME(sc));
4402			return err;
4403		}
4404		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
4405			IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
4406			    sc->sc_nvm.hw_addr);
4407
4408	}
4409	return 0;
4410}
4411
4412int
4413iwx_config_ltr(struct iwx_softc *sc)
4414{
4415	struct iwx_ltr_config_cmd cmd = {
4416		.flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
4417	};
4418
4419	if (!sc->sc_ltr_enabled)
4420		return 0;
4421
4422	return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
4423}
4424
4425void
4426iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx)
4427{
4428	struct iwx_rx_data *data = &ring->data[idx];
4429
4430	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4431		struct iwx_rx_transfer_desc *desc = ring->desc;
4432		desc[idx].rbid = htole16(idx & 0xffff);
4433		desc[idx].addr = htole64(data->map->dm_segs[0].ds_addr);
4434		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4435		    idx * sizeof(*desc), sizeof(*desc),
4436		    BUS_DMASYNC_PREWRITE);
4437	} else {
4438		((uint64_t *)ring->desc)[idx] =
4439		    htole64(data->map->dm_segs[0].ds_addr | (idx & 0x0fff));
4440		bus_dmamap_sync(sc->sc_dmat, ring->free_desc_dma.map,
4441		    idx * sizeof(uint64_t), sizeof(uint64_t),
4442		    BUS_DMASYNC_PREWRITE);
4443	}
4444}
4445
4446int
4447iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
4448{
4449	struct iwx_rx_ring *ring = &sc->rxq;
4450	struct iwx_rx_data *data = &ring->data[idx];
4451	struct mbuf *m;
4452	int err;
4453	int fatal = 0;
4454
4455	m = m_gethdr(M_DONTWAIT, MT_DATA);
4456	if (m == NULL)
4457		return ENOBUFS;
4458
4459	if (size <= MCLBYTES) {
4460		MCLGET(m, M_DONTWAIT);
4461	} else {
4462		MCLGETL(m, M_DONTWAIT, IWX_RBUF_SIZE);
4463	}
4464	if ((m->m_flags & M_EXT) == 0) {
4465		m_freem(m);
4466		return ENOBUFS;
4467	}
4468
4469	if (data->m != NULL) {
4470		bus_dmamap_unload(sc->sc_dmat, data->map);
4471		fatal = 1;
4472	}
4473
4474	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4475	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4476	    BUS_DMA_READ|BUS_DMA_NOWAIT);
4477	if (err) {
4478		/* XXX */
4479		if (fatal)
4480			panic("%s: could not load RX mbuf", DEVNAME(sc));
4481		m_freem(m);
4482		return err;
4483	}
4484	data->m = m;
4485	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
4486
4487	/* Update RX descriptor. */
4488	iwx_update_rx_desc(sc, ring, idx);
4489
4490	return 0;
4491}
4492
4493int
4494iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
4495    struct iwx_rx_mpdu_desc *desc)
4496{
4497	int energy_a, energy_b;
4498
4499	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
4500		energy_a = desc->v3.energy_a;
4501		energy_b = desc->v3.energy_b;
4502	} else {
4503		energy_a = desc->v1.energy_a;
4504		energy_b = desc->v1.energy_b;
4505	}
4506	energy_a = energy_a ? -energy_a : -256;
4507	energy_b = energy_b ? -energy_b : -256;
4508	return MAX(energy_a, energy_b);
4509}
4510
4511void
4512iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
4513    struct iwx_rx_data *data)
4514{
4515	struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
4516
4517	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
4518	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
4519
4520	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
4521}
4522
4523/*
4524 * Retrieve the average noise (in dBm) among receivers.
4525 */
4526int
4527iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
4528{
4529	int i, total, nbant, noise;
4530
4531	total = nbant = noise = 0;
4532	for (i = 0; i < 3; i++) {
4533		noise = letoh32(stats->beacon_silence_rssi[i]) & 0xff;
4534		if (noise) {
4535			total += noise;
4536			nbant++;
4537		}
4538	}
4539
4540	/* There should be at least one antenna but check anyway. */
4541	return (nbant == 0) ? -127 : (total / nbant) - 107;
4542}
4543
4544int
4545iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
4546    struct ieee80211_rxinfo *rxi)
4547{
4548	struct ieee80211com *ic = &sc->sc_ic;
4549	struct ieee80211_key *k;
4550	struct ieee80211_frame *wh;
4551	uint64_t pn, *prsc;
4552	uint8_t *ivp;
4553	uint8_t tid;
4554	int hdrlen, hasqos;
4555
4556	wh = mtod(m, struct ieee80211_frame *);
4557	hdrlen = ieee80211_get_hdrlen(wh);
4558	ivp = (uint8_t *)wh + hdrlen;
4559
4560	/* find key for decryption */
4561	k = ieee80211_get_rxkey(ic, m, ni);
4562	if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
4563		return 1;
4564
4565	/* Check that ExtIV bit is be set. */
4566	if (!(ivp[3] & IEEE80211_WEP_EXTIV))
4567		return 1;
4568
4569	hasqos = ieee80211_has_qos(wh);
4570	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4571	prsc = &k->k_rsc[tid];
4572
4573	/* Extract the 48-bit PN from the CCMP header. */
4574	pn = (uint64_t)ivp[0]       |
4575	     (uint64_t)ivp[1] <<  8 |
4576	     (uint64_t)ivp[4] << 16 |
4577	     (uint64_t)ivp[5] << 24 |
4578	     (uint64_t)ivp[6] << 32 |
4579	     (uint64_t)ivp[7] << 40;
4580	if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
4581		if (pn < *prsc) {
4582			ic->ic_stats.is_ccmp_replays++;
4583			return 1;
4584		}
4585	} else if (pn <= *prsc) {
4586		ic->ic_stats.is_ccmp_replays++;
4587		return 1;
4588	}
4589	/* Last seen packet number is updated in ieee80211_inputm(). */
4590
4591	/*
4592	 * Some firmware versions strip the MIC, and some don't. It is not
4593	 * clear which of the capability flags could tell us what to expect.
4594	 * For now, keep things simple and just leave the MIC in place if
4595	 * it is present.
4596	 *
4597	 * The IV will be stripped by ieee80211_inputm().
4598	 */
4599	return 0;
4600}
4601
4602int
4603iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status,
4604    struct ieee80211_rxinfo *rxi)
4605{
4606	struct ieee80211com *ic = &sc->sc_ic;
4607	struct ifnet *ifp = IC2IFP(ic);
4608	struct ieee80211_frame *wh;
4609	struct ieee80211_node *ni;
4610	int ret = 0;
4611	uint8_t type, subtype;
4612
4613	wh = mtod(m, struct ieee80211_frame *);
4614
4615	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4616	if (type == IEEE80211_FC0_TYPE_CTL)
4617		return 0;
4618
4619	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4620	if (ieee80211_has_qos(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA))
4621		return 0;
4622
4623	ni = ieee80211_find_rxnode(ic, wh);
4624	/* Handle hardware decryption. */
4625	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL)
4626	    && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
4627	    (ni->ni_flags & IEEE80211_NODE_RXPROT) &&
4628	    ((!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4629	    ni->ni_rsncipher == IEEE80211_CIPHER_CCMP) ||
4630	    (IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4631	    ni->ni_rsngroupcipher == IEEE80211_CIPHER_CCMP))) {
4632		if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
4633		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
4634			ic->ic_stats.is_ccmp_dec_errs++;
4635			ret = 1;
4636			goto out;
4637		}
4638		/* Check whether decryption was successful or not. */
4639		if ((rx_pkt_status &
4640		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4641		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
4642		    (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
4643		    IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
4644			ic->ic_stats.is_ccmp_dec_errs++;
4645			ret = 1;
4646			goto out;
4647		}
4648		rxi->rxi_flags |= IEEE80211_RXI_HWDEC;
4649	}
4650out:
4651	if (ret)
4652		ifp->if_ierrors++;
4653	ieee80211_release_node(ic, ni);
4654	return ret;
4655}
4656
4657void
4658iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4659    uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
4660    uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4661    struct mbuf_list *ml)
4662{
4663	struct ieee80211com *ic = &sc->sc_ic;
4664	struct ifnet *ifp = IC2IFP(ic);
4665	struct ieee80211_frame *wh;
4666	struct ieee80211_node *ni;
4667
4668	if (chanidx < 0 || chanidx >= nitems(ic->ic_channels))
4669		chanidx = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4670
4671	wh = mtod(m, struct ieee80211_frame *);
4672	ni = ieee80211_find_rxnode(ic, wh);
4673	if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
4674	    iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
4675		ifp->if_ierrors++;
4676		m_freem(m);
4677		ieee80211_release_node(ic, ni);
4678		return;
4679	}
4680
4681#if NBPFILTER > 0
4682	if (sc->sc_drvbpf != NULL) {
4683		struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
4684		uint16_t chan_flags;
4685		int have_legacy_rate = 1;
4686		uint8_t mcs, rate;
4687
4688		tap->wr_flags = 0;
4689		if (is_shortpre)
4690			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4691		tap->wr_chan_freq =
4692		    htole16(ic->ic_channels[chanidx].ic_freq);
4693		chan_flags = ic->ic_channels[chanidx].ic_flags;
4694		if (ic->ic_curmode != IEEE80211_MODE_11N &&
4695		    ic->ic_curmode != IEEE80211_MODE_11AC) {
4696			chan_flags &= ~IEEE80211_CHAN_HT;
4697			chan_flags &= ~IEEE80211_CHAN_40MHZ;
4698		}
4699		if (ic->ic_curmode != IEEE80211_MODE_11AC)
4700			chan_flags &= ~IEEE80211_CHAN_VHT;
4701		tap->wr_chan_flags = htole16(chan_flags);
4702		tap->wr_dbm_antsignal = (int8_t)rxi->rxi_rssi;
4703		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4704		tap->wr_tsft = device_timestamp;
4705		if (sc->sc_rate_n_flags_version >= 2) {
4706			uint32_t mod_type = (rate_n_flags &
4707			    IWX_RATE_MCS_MOD_TYPE_MSK);
4708			const struct ieee80211_rateset *rs = NULL;
4709			uint32_t ridx;
4710			have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
4711			    mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
4712			mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
4713			ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
4714			if (mod_type == IWX_RATE_MCS_CCK_MSK)
4715				rs = &ieee80211_std_rateset_11b;
4716			else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
4717				rs = &ieee80211_std_rateset_11a;
4718			if (rs && ridx < rs->rs_nrates) {
4719				rate = (rs->rs_rates[ridx] &
4720				    IEEE80211_RATE_VAL);
4721			} else
4722				rate = 0;
4723		} else {
4724			have_legacy_rate = ((rate_n_flags &
4725			    (IWX_RATE_MCS_HT_MSK_V1 |
4726			    IWX_RATE_MCS_VHT_MSK_V1)) == 0);
4727			mcs = (rate_n_flags &
4728			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
4729			    IWX_RATE_HT_MCS_NSS_MSK_V1));
4730			rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
4731		}
4732		if (!have_legacy_rate) {
4733			tap->wr_rate = (0x80 | mcs);
4734		} else {
4735			switch (rate) {
4736			/* CCK rates. */
4737			case  10: tap->wr_rate =   2; break;
4738			case  20: tap->wr_rate =   4; break;
4739			case  55: tap->wr_rate =  11; break;
4740			case 110: tap->wr_rate =  22; break;
4741			/* OFDM rates. */
4742			case 0xd: tap->wr_rate =  12; break;
4743			case 0xf: tap->wr_rate =  18; break;
4744			case 0x5: tap->wr_rate =  24; break;
4745			case 0x7: tap->wr_rate =  36; break;
4746			case 0x9: tap->wr_rate =  48; break;
4747			case 0xb: tap->wr_rate =  72; break;
4748			case 0x1: tap->wr_rate =  96; break;
4749			case 0x3: tap->wr_rate = 108; break;
4750			/* Unknown rate: should not happen. */
4751			default:  tap->wr_rate =   0;
4752			}
4753		}
4754
4755		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_rxtap_len,
4756		    m, BPF_DIRECTION_IN);
4757	}
4758#endif
4759	ieee80211_inputm(IC2IFP(ic), m, ni, rxi, ml);
4760	ieee80211_release_node(ic, ni);
4761}
4762
4763/*
4764 * Drop duplicate 802.11 retransmissions
4765 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
4766 * and handle pseudo-duplicate frames which result from deaggregation
4767 * of A-MSDU frames in hardware.
4768 */
4769int
4770iwx_detect_duplicate(struct iwx_softc *sc, struct mbuf *m,
4771    struct iwx_rx_mpdu_desc *desc, struct ieee80211_rxinfo *rxi)
4772{
4773	struct ieee80211com *ic = &sc->sc_ic;
4774	struct iwx_node *in = (void *)ic->ic_bss;
4775	struct iwx_rxq_dup_data *dup_data = &in->dup_data;
4776	uint8_t tid = IWX_MAX_TID_COUNT, subframe_idx;
4777	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
4778	uint8_t type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4779	uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4780	int hasqos = ieee80211_has_qos(wh);
4781	uint16_t seq;
4782
4783	if (type == IEEE80211_FC0_TYPE_CTL ||
4784	    (hasqos && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) ||
4785	    IEEE80211_IS_MULTICAST(wh->i_addr1))
4786		return 0;
4787
4788	if (hasqos) {
4789		tid = (ieee80211_get_qos(wh) & IEEE80211_QOS_TID);
4790		if (tid > IWX_MAX_TID_COUNT)
4791			tid = IWX_MAX_TID_COUNT;
4792	}
4793
4794	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
4795	subframe_idx = desc->amsdu_info &
4796		IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
4797
4798	seq = letoh16(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
4799	if ((wh->i_fc[1] & IEEE80211_FC1_RETRY) &&
4800	    dup_data->last_seq[tid] == seq &&
4801	    dup_data->last_sub_frame[tid] >= subframe_idx)
4802		return 1;
4803
4804	/*
4805	 * Allow the same frame sequence number for all A-MSDU subframes
4806	 * following the first subframe.
4807	 * Otherwise these subframes would be discarded as replays.
4808	 */
4809	if (dup_data->last_seq[tid] == seq &&
4810	    subframe_idx > dup_data->last_sub_frame[tid] &&
4811	    (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU)) {
4812		rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4813	}
4814
4815	dup_data->last_seq[tid] = seq;
4816	dup_data->last_sub_frame[tid] = subframe_idx;
4817
4818	return 0;
4819}
4820
4821/*
4822 * Returns true if sn2 - buffer_size < sn1 < sn2.
4823 * To be used only in order to compare reorder buffer head with NSSN.
4824 * We fully trust NSSN unless it is behind us due to reorder timeout.
4825 * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
4826 */
4827int
4828iwx_is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
4829{
4830	return SEQ_LT(sn1, sn2) && !SEQ_LT(sn1, sn2 - buffer_size);
4831}
4832
4833void
4834iwx_release_frames(struct iwx_softc *sc, struct ieee80211_node *ni,
4835    struct iwx_rxba_data *rxba, struct iwx_reorder_buffer *reorder_buf,
4836    uint16_t nssn, struct mbuf_list *ml)
4837{
4838	struct iwx_reorder_buf_entry *entries = &rxba->entries[0];
4839	uint16_t ssn = reorder_buf->head_sn;
4840
4841	/* ignore nssn smaller than head sn - this can happen due to timeout */
4842	if (iwx_is_sn_less(nssn, ssn, reorder_buf->buf_size))
4843		goto set_timer;
4844
4845	while (iwx_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
4846		int index = ssn % reorder_buf->buf_size;
4847		struct mbuf *m;
4848		int chanidx, is_shortpre;
4849		uint32_t rx_pkt_status, rate_n_flags, device_timestamp;
4850		struct ieee80211_rxinfo *rxi;
4851
4852		/* This data is the same for all A-MSDU subframes. */
4853		chanidx = entries[index].chanidx;
4854		rx_pkt_status = entries[index].rx_pkt_status;
4855		is_shortpre = entries[index].is_shortpre;
4856		rate_n_flags = entries[index].rate_n_flags;
4857		device_timestamp = entries[index].device_timestamp;
4858		rxi = &entries[index].rxi;
4859
4860		/*
4861		 * Empty the list. Will have more than one frame for A-MSDU.
4862		 * Empty list is valid as well since nssn indicates frames were
4863		 * received.
4864		 */
4865		while ((m = ml_dequeue(&entries[index].frames)) != NULL) {
4866			iwx_rx_frame(sc, m, chanidx, rx_pkt_status, is_shortpre,
4867			    rate_n_flags, device_timestamp, rxi, ml);
4868			reorder_buf->num_stored--;
4869
4870			/*
4871			 * Allow the same frame sequence number and CCMP PN for
4872			 * all A-MSDU subframes following the first subframe.
4873			 * Otherwise they would be discarded as replays.
4874			 */
4875			rxi->rxi_flags |= IEEE80211_RXI_SAME_SEQ;
4876			rxi->rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
4877		}
4878
4879		ssn = (ssn + 1) & 0xfff;
4880	}
4881	reorder_buf->head_sn = nssn;
4882
4883set_timer:
4884	if (reorder_buf->num_stored && !reorder_buf->removed) {
4885		timeout_add_usec(&reorder_buf->reorder_timer,
4886		    RX_REORDER_BUF_TIMEOUT_MQ_USEC);
4887	} else
4888		timeout_del(&reorder_buf->reorder_timer);
4889}
4890
4891int
4892iwx_oldsn_workaround(struct iwx_softc *sc, struct ieee80211_node *ni, int tid,
4893    struct iwx_reorder_buffer *buffer, uint32_t reorder_data, uint32_t gp2)
4894{
4895	struct ieee80211com *ic = &sc->sc_ic;
4896
4897	if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
4898		/* we have a new (A-)MPDU ... */
4899
4900		/*
4901		 * reset counter to 0 if we didn't have any oldsn in
4902		 * the last A-MPDU (as detected by GP2 being identical)
4903		 */
4904		if (!buffer->consec_oldsn_prev_drop)
4905			buffer->consec_oldsn_drops = 0;
4906
4907		/* either way, update our tracking state */
4908		buffer->consec_oldsn_ampdu_gp2 = gp2;
4909	} else if (buffer->consec_oldsn_prev_drop) {
4910		/*
4911		 * tracking state didn't change, and we had an old SN
4912		 * indication before - do nothing in this case, we
4913		 * already noted this one down and are waiting for the
4914		 * next A-MPDU (by GP2)
4915		 */
4916		return 0;
4917	}
4918
4919	/* return unless this MPDU has old SN */
4920	if (!(reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN))
4921		return 0;
4922
4923	/* update state */
4924	buffer->consec_oldsn_prev_drop = 1;
4925	buffer->consec_oldsn_drops++;
4926
4927	/* if limit is reached, send del BA and reset state */
4928	if (buffer->consec_oldsn_drops == IWX_AMPDU_CONSEC_DROPS_DELBA) {
4929		ieee80211_delba_request(ic, ni, IEEE80211_REASON_UNSPECIFIED,
4930		    0, tid);
4931		buffer->consec_oldsn_prev_drop = 0;
4932		buffer->consec_oldsn_drops = 0;
4933		return 1;
4934	}
4935
4936	return 0;
4937}
4938
4939/*
4940 * Handle re-ordering of frames which were de-aggregated in hardware.
4941 * Returns 1 if the MPDU was consumed (buffered or dropped).
4942 * Returns 0 if the MPDU should be passed to upper layer.
4943 */
4944int
4945iwx_rx_reorder(struct iwx_softc *sc, struct mbuf *m, int chanidx,
4946    struct iwx_rx_mpdu_desc *desc, int is_shortpre, int rate_n_flags,
4947    uint32_t device_timestamp, struct ieee80211_rxinfo *rxi,
4948    struct mbuf_list *ml)
4949{
4950	struct ieee80211com *ic = &sc->sc_ic;
4951	struct ieee80211_frame *wh;
4952	struct ieee80211_node *ni;
4953	struct iwx_rxba_data *rxba;
4954	struct iwx_reorder_buffer *buffer;
4955	uint32_t reorder_data = le32toh(desc->reorder_data);
4956	int is_amsdu = (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU);
4957	int last_subframe =
4958		(desc->amsdu_info & IWX_RX_MPDU_AMSDU_LAST_SUBFRAME);
4959	uint8_t tid;
4960	uint8_t subframe_idx = (desc->amsdu_info &
4961	    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
4962	struct iwx_reorder_buf_entry *entries;
4963	int index;
4964	uint16_t nssn, sn;
4965	uint8_t baid, type, subtype;
4966	int hasqos;
4967
4968	wh = mtod(m, struct ieee80211_frame *);
4969	hasqos = ieee80211_has_qos(wh);
4970	tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
4971
4972	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4973	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4974
4975	/*
4976	 * We are only interested in Block Ack requests and unicast QoS data.
4977	 */
4978	if (IEEE80211_IS_MULTICAST(wh->i_addr1))
4979		return 0;
4980	if (hasqos) {
4981		if (subtype & IEEE80211_FC0_SUBTYPE_NODATA)
4982			return 0;
4983	} else {
4984		if (type != IEEE80211_FC0_TYPE_CTL ||
4985		    subtype != IEEE80211_FC0_SUBTYPE_BAR)
4986			return 0;
4987	}
4988
4989	baid = (reorder_data & IWX_RX_MPDU_REORDER_BAID_MASK) >>
4990		IWX_RX_MPDU_REORDER_BAID_SHIFT;
4991	if (baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4992	    baid >= nitems(sc->sc_rxba_data))
4993		return 0;
4994
4995	rxba = &sc->sc_rxba_data[baid];
4996	if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID ||
4997	    tid != rxba->tid || rxba->sta_id != IWX_STATION_ID)
4998		return 0;
4999
5000	if (rxba->timeout != 0)
5001		getmicrouptime(&rxba->last_rx);
5002
5003	/* Bypass A-MPDU re-ordering in net80211. */
5004	rxi->rxi_flags |= IEEE80211_RXI_AMPDU_DONE;
5005
5006	nssn = reorder_data & IWX_RX_MPDU_REORDER_NSSN_MASK;
5007	sn = (reorder_data & IWX_RX_MPDU_REORDER_SN_MASK) >>
5008		IWX_RX_MPDU_REORDER_SN_SHIFT;
5009
5010	buffer = &rxba->reorder_buf;
5011	entries = &rxba->entries[0];
5012
5013	if (!buffer->valid) {
5014		if (reorder_data & IWX_RX_MPDU_REORDER_BA_OLD_SN)
5015			return 0;
5016		buffer->valid = 1;
5017	}
5018
5019	ni = ieee80211_find_rxnode(ic, wh);
5020	if (type == IEEE80211_FC0_TYPE_CTL &&
5021	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
5022		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5023		goto drop;
5024	}
5025
5026	/*
5027	 * If there was a significant jump in the nssn - adjust.
5028	 * If the SN is smaller than the NSSN it might need to first go into
5029	 * the reorder buffer, in which case we just release up to it and the
5030	 * rest of the function will take care of storing it and releasing up to
5031	 * the nssn.
5032	 */
5033	if (!iwx_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
5034	    buffer->buf_size) ||
5035	    !SEQ_LT(sn, buffer->head_sn + buffer->buf_size)) {
5036		uint16_t min_sn = SEQ_LT(sn, nssn) ? sn : nssn;
5037		ic->ic_stats.is_ht_rx_frame_above_ba_winend++;
5038		iwx_release_frames(sc, ni, rxba, buffer, min_sn, ml);
5039	}
5040
5041	if (iwx_oldsn_workaround(sc, ni, tid, buffer, reorder_data,
5042	    device_timestamp)) {
5043		 /* BA session will be torn down. */
5044		ic->ic_stats.is_ht_rx_ba_window_jump++;
5045		goto drop;
5046
5047	}
5048
5049	/* drop any outdated packets */
5050	if (SEQ_LT(sn, buffer->head_sn)) {
5051		ic->ic_stats.is_ht_rx_frame_below_ba_winstart++;
5052		goto drop;
5053	}
5054
5055	/* release immediately if allowed by nssn and no stored frames */
5056	if (!buffer->num_stored && SEQ_LT(sn, nssn)) {
5057		if (iwx_is_sn_less(buffer->head_sn, nssn, buffer->buf_size) &&
5058		   (!is_amsdu || last_subframe))
5059			buffer->head_sn = nssn;
5060		ieee80211_release_node(ic, ni);
5061		return 0;
5062	}
5063
5064	/*
5065	 * release immediately if there are no stored frames, and the sn is
5066	 * equal to the head.
5067	 * This can happen due to reorder timer, where NSSN is behind head_sn.
5068	 * When we released everything, and we got the next frame in the
5069	 * sequence, according to the NSSN we can't release immediately,
5070	 * while technically there is no hole and we can move forward.
5071	 */
5072	if (!buffer->num_stored && sn == buffer->head_sn) {
5073		if (!is_amsdu || last_subframe)
5074			buffer->head_sn = (buffer->head_sn + 1) & 0xfff;
5075		ieee80211_release_node(ic, ni);
5076		return 0;
5077	}
5078
5079	index = sn % buffer->buf_size;
5080
5081	/*
5082	 * Check if we already stored this frame
5083	 * As AMSDU is either received or not as whole, logic is simple:
5084	 * If we have frames in that position in the buffer and the last frame
5085	 * originated from AMSDU had a different SN then it is a retransmission.
5086	 * If it is the same SN then if the subframe index is incrementing it
5087	 * is the same AMSDU - otherwise it is a retransmission.
5088	 */
5089	if (!ml_empty(&entries[index].frames)) {
5090		if (!is_amsdu) {
5091			ic->ic_stats.is_ht_rx_ba_no_buf++;
5092			goto drop;
5093		} else if (sn != buffer->last_amsdu ||
5094		    buffer->last_sub_index >= subframe_idx) {
5095			ic->ic_stats.is_ht_rx_ba_no_buf++;
5096			goto drop;
5097		}
5098	} else {
5099		/* This data is the same for all A-MSDU subframes. */
5100		entries[index].chanidx = chanidx;
5101		entries[index].is_shortpre = is_shortpre;
5102		entries[index].rate_n_flags = rate_n_flags;
5103		entries[index].device_timestamp = device_timestamp;
5104		memcpy(&entries[index].rxi, rxi, sizeof(entries[index].rxi));
5105	}
5106
5107	/* put in reorder buffer */
5108	ml_enqueue(&entries[index].frames, m);
5109	buffer->num_stored++;
5110	getmicrouptime(&entries[index].reorder_time);
5111
5112	if (is_amsdu) {
5113		buffer->last_amsdu = sn;
5114		buffer->last_sub_index = subframe_idx;
5115	}
5116
5117	/*
5118	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
5119	 * The reason is that NSSN advances on the first sub-frame, and may
5120	 * cause the reorder buffer to advance before all the sub-frames arrive.
5121	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
5122	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
5123	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
5124	 * already ahead and it will be dropped.
5125	 * If the last sub-frame is not on this queue - we will get frame
5126	 * release notification with up to date NSSN.
5127	 */
5128	if (!is_amsdu || last_subframe)
5129		iwx_release_frames(sc, ni, rxba, buffer, nssn, ml);
5130
5131	ieee80211_release_node(ic, ni);
5132	return 1;
5133
5134drop:
5135	m_freem(m);
5136	ieee80211_release_node(ic, ni);
5137	return 1;
5138}
5139
5140void
5141iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
5142    size_t maxlen, struct mbuf_list *ml)
5143{
5144	struct ieee80211com *ic = &sc->sc_ic;
5145	struct ieee80211_rxinfo rxi;
5146	struct iwx_rx_mpdu_desc *desc;
5147	uint32_t len, hdrlen, rate_n_flags, device_timestamp;
5148	int rssi;
5149	uint8_t chanidx;
5150	uint16_t phy_info;
5151	size_t desc_size;
5152
5153	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
5154		desc_size = sizeof(*desc);
5155	else
5156		desc_size = IWX_RX_DESC_SIZE_V1;
5157
5158	if (maxlen < desc_size) {
5159		m_freem(m);
5160		return; /* drop */
5161	}
5162
5163	desc = (struct iwx_rx_mpdu_desc *)pktdata;
5164
5165	if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
5166	    !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
5167		m_freem(m);
5168		return; /* drop */
5169	}
5170
5171	len = le16toh(desc->mpdu_len);
5172	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
5173		/* Allow control frames in monitor mode. */
5174		if (len < sizeof(struct ieee80211_frame_cts)) {
5175			ic->ic_stats.is_rx_tooshort++;
5176			IC2IFP(ic)->if_ierrors++;
5177			m_freem(m);
5178			return;
5179		}
5180	} else if (len < sizeof(struct ieee80211_frame)) {
5181		ic->ic_stats.is_rx_tooshort++;
5182		IC2IFP(ic)->if_ierrors++;
5183		m_freem(m);
5184		return;
5185	}
5186	if (len > maxlen - desc_size) {
5187		IC2IFP(ic)->if_ierrors++;
5188		m_freem(m);
5189		return;
5190	}
5191
5192	m->m_data = pktdata + desc_size;
5193	m->m_pkthdr.len = m->m_len = len;
5194
5195	/* Account for padding following the frame header. */
5196	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
5197		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5198		int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
5199		if (type == IEEE80211_FC0_TYPE_CTL) {
5200			switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
5201			case IEEE80211_FC0_SUBTYPE_CTS:
5202				hdrlen = sizeof(struct ieee80211_frame_cts);
5203				break;
5204			case IEEE80211_FC0_SUBTYPE_ACK:
5205				hdrlen = sizeof(struct ieee80211_frame_ack);
5206				break;
5207			default:
5208				hdrlen = sizeof(struct ieee80211_frame_min);
5209				break;
5210			}
5211		} else
5212			hdrlen = ieee80211_get_hdrlen(wh);
5213
5214		if ((le16toh(desc->status) &
5215		    IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
5216		    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
5217			/* Padding is inserted after the IV. */
5218			hdrlen += IEEE80211_CCMP_HDRLEN;
5219		}
5220
5221		memmove(m->m_data + 2, m->m_data, hdrlen);
5222		m_adj(m, 2);
5223	}
5224
5225	memset(&rxi, 0, sizeof(rxi));
5226
5227	/*
5228	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
5229	 * in place for each subframe. But it leaves the 'A-MSDU present'
5230	 * bit set in the frame header. We need to clear this bit ourselves.
5231	 * (XXX This workaround is not required on AX200/AX201 devices that
5232	 * have been tested by me, but it's unclear when this problem was
5233	 * fixed in the hardware. It definitely affects the 9k generation.
5234	 * Leaving this in place for now since some 9k/AX200 hybrids seem
5235	 * to exist that we may eventually add support for.)
5236	 *
5237	 * And we must allow the same CCMP PN for subframes following the
5238	 * first subframe. Otherwise they would be discarded as replays.
5239	 */
5240	if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
5241		struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
5242		uint8_t subframe_idx = (desc->amsdu_info &
5243		    IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
5244		if (subframe_idx > 0)
5245			rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
5246		if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
5247		    m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
5248			struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
5249			    struct ieee80211_qosframe_addr4 *);
5250			qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5251		} else if (ieee80211_has_qos(wh) &&
5252		    m->m_len >= sizeof(struct ieee80211_qosframe)) {
5253			struct ieee80211_qosframe *qwh = mtod(m,
5254			    struct ieee80211_qosframe *);
5255			qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
5256		}
5257	}
5258
5259	/*
5260	 * Verify decryption before duplicate detection. The latter uses
5261	 * the TID supplied in QoS frame headers and this TID is implicitly
5262	 * verified as part of the CCMP nonce.
5263	 */
5264	if (iwx_rx_hwdecrypt(sc, m, le16toh(desc->status), &rxi)) {
5265		m_freem(m);
5266		return;
5267	}
5268
5269	if (iwx_detect_duplicate(sc, m, desc, &rxi)) {
5270		m_freem(m);
5271		return;
5272	}
5273
5274	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
5275		rate_n_flags = le32toh(desc->v3.rate_n_flags);
5276		chanidx = desc->v3.channel;
5277		device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
5278	} else {
5279		rate_n_flags = le32toh(desc->v1.rate_n_flags);
5280		chanidx = desc->v1.channel;
5281		device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
5282	}
5283
5284	phy_info = le16toh(desc->phy_info);
5285
5286	rssi = iwx_rxmq_get_signal_strength(sc, desc);
5287	rssi = (0 - IWX_MIN_DBM) + rssi;	/* normalize */
5288	rssi = MIN(rssi, ic->ic_max_rssi);	/* clip to max. 100% */
5289
5290	rxi.rxi_rssi = rssi;
5291	rxi.rxi_tstamp = device_timestamp;
5292	rxi.rxi_chan = chanidx;
5293
5294	if (iwx_rx_reorder(sc, m, chanidx, desc,
5295	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5296	    rate_n_flags, device_timestamp, &rxi, ml))
5297		return;
5298
5299	iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
5300	    (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
5301	    rate_n_flags, device_timestamp, &rxi, ml);
5302}
5303
5304void
5305iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
5306{
5307	struct iwx_tfh_tfd *desc = &ring->desc[idx];
5308	uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
5309	int i;
5310
5311	/* First TB is never cleared - it is bidirectional DMA data. */
5312	for (i = 1; i < num_tbs; i++) {
5313		struct iwx_tfh_tb *tb = &desc->tbs[i];
5314		memset(tb, 0, sizeof(*tb));
5315	}
5316	desc->num_tbs = htole16(1);
5317
5318	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5319	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5320	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
5321}
5322
5323void
5324iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_data *txd)
5325{
5326	struct ieee80211com *ic = &sc->sc_ic;
5327
5328	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
5329	    BUS_DMASYNC_POSTWRITE);
5330	bus_dmamap_unload(sc->sc_dmat, txd->map);
5331	m_freem(txd->m);
5332	txd->m = NULL;
5333
5334	KASSERT(txd->in);
5335	ieee80211_release_node(ic, &txd->in->in_ni);
5336	txd->in = NULL;
5337}
5338
5339void
5340iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
5341{
5342 	struct iwx_tx_data *txd;
5343
5344	while (ring->tail_hw != idx) {
5345		txd = &ring->data[ring->tail];
5346		if (txd->m != NULL) {
5347			iwx_clear_tx_desc(sc, ring, ring->tail);
5348			iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
5349			iwx_txd_done(sc, txd);
5350			ring->queued--;
5351		}
5352		ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
5353		ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
5354	}
5355}
5356
5357void
5358iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5359    struct iwx_rx_data *data)
5360{
5361	struct ieee80211com *ic = &sc->sc_ic;
5362	struct ifnet *ifp = IC2IFP(ic);
5363	struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
5364	int qid = cmd_hdr->qid, status, txfail;
5365	struct iwx_tx_ring *ring = &sc->txq[qid];
5366	struct iwx_tx_resp *tx_resp = (void *)pkt->data;
5367	uint32_t ssn;
5368	uint32_t len = iwx_rx_packet_len(pkt);
5369
5370	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
5371	    BUS_DMASYNC_POSTREAD);
5372
5373	/* Sanity checks. */
5374	if (sizeof(*tx_resp) > len)
5375		return;
5376	if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
5377		return;
5378	if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
5379	    tx_resp->frame_count * sizeof(tx_resp->status) > len)
5380		return;
5381
5382	sc->sc_tx_timer[qid] = 0;
5383
5384	if (tx_resp->frame_count > 1) /* A-MPDU */
5385		return;
5386
5387	status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
5388	txfail = (status != IWX_TX_STATUS_SUCCESS &&
5389	    status != IWX_TX_STATUS_DIRECT_DONE);
5390
5391	if (txfail)
5392		ifp->if_oerrors++;
5393
5394	/*
5395	 * On hardware supported by iwx(4) the SSN counter corresponds
5396	 * to a Tx ring index rather than a sequence number.
5397	 * Frames up to this index (non-inclusive) can now be freed.
5398	 */
5399	memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
5400	ssn = le32toh(ssn);
5401	if (ssn < sc->max_tfd_queue_size) {
5402		iwx_txq_advance(sc, ring, ssn);
5403		iwx_clear_oactive(sc, ring);
5404	}
5405}
5406
5407void
5408iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
5409{
5410	struct ieee80211com *ic = &sc->sc_ic;
5411	struct ifnet *ifp = IC2IFP(ic);
5412
5413	if (ring->queued < IWX_TX_RING_LOMARK) {
5414		sc->qfullmsk &= ~(1 << ring->qid);
5415		if (sc->qfullmsk == 0 && ifq_is_oactive(&ifp->if_snd)) {
5416			ifq_clr_oactive(&ifp->if_snd);
5417			/*
5418			 * Well, we're in interrupt context, but then again
5419			 * I guess net80211 does all sorts of stunts in
5420			 * interrupt context, so maybe this is no biggie.
5421			 */
5422			(*ifp->if_start)(ifp);
5423		}
5424	}
5425}
5426
5427void
5428iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
5429{
5430	struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
5431	struct ieee80211com *ic = &sc->sc_ic;
5432	struct ieee80211_node *ni;
5433	struct ieee80211_tx_ba *ba;
5434	struct iwx_node *in;
5435	struct iwx_tx_ring *ring;
5436	uint16_t i, tfd_cnt, ra_tid_cnt, idx;
5437	int qid;
5438
5439	if (ic->ic_state != IEEE80211_S_RUN)
5440		return;
5441
5442	if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
5443		return;
5444
5445	if (ba_res->sta_id != IWX_STATION_ID)
5446		return;
5447
5448	ni = ic->ic_bss;
5449	in = (void *)ni;
5450
5451	tfd_cnt = le16toh(ba_res->tfd_cnt);
5452	ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
5453	if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
5454	    sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
5455	    sizeof(ba_res->tfd[0]) * tfd_cnt))
5456		return;
5457
5458	for (i = 0; i < tfd_cnt; i++) {
5459		struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
5460		uint8_t tid;
5461
5462		tid = ba_tfd->tid;
5463		if (tid >= nitems(sc->aggqid))
5464			continue;
5465
5466		qid = sc->aggqid[tid];
5467		if (qid != htole16(ba_tfd->q_num))
5468			continue;
5469
5470		ring = &sc->txq[qid];
5471
5472		ba = &ni->ni_tx_ba[tid];
5473		if (ba->ba_state != IEEE80211_BA_AGREED)
5474			continue;
5475
5476		idx = le16toh(ba_tfd->tfd_index);
5477		sc->sc_tx_timer[qid] = 0;
5478		iwx_txq_advance(sc, ring, idx);
5479		iwx_clear_oactive(sc, ring);
5480	}
5481}
5482
5483void
5484iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
5485    struct iwx_rx_data *data)
5486{
5487	struct ieee80211com *ic = &sc->sc_ic;
5488	struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
5489	uint32_t missed;
5490
5491	if ((ic->ic_opmode != IEEE80211_M_STA) ||
5492	    (ic->ic_state != IEEE80211_S_RUN))
5493		return;
5494
5495	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
5496	    sizeof(*mbn), BUS_DMASYNC_POSTREAD);
5497
5498	missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
5499	if (missed > ic->ic_bmissthres && ic->ic_mgt_timer == 0) {
5500		if (ic->ic_if.if_flags & IFF_DEBUG)
5501			printf("%s: receiving no beacons from %s; checking if "
5502			    "this AP is still responding to probe requests\n",
5503			    DEVNAME(sc), ether_sprintf(ic->ic_bss->ni_macaddr));
5504		/*
5505		 * Rather than go directly to scan state, try to send a
5506		 * directed probe request first. If that fails then the
5507		 * state machine will drop us into scanning after timing
5508		 * out waiting for a probe response.
5509		 */
5510		IEEE80211_SEND_MGMT(ic, ic->ic_bss,
5511		    IEEE80211_FC0_SUBTYPE_PROBE_REQ, 0);
5512	}
5513
5514}
5515
5516int
5517iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
5518{
5519	struct iwx_binding_cmd cmd;
5520	struct iwx_phy_ctxt *phyctxt = in->in_phyctxt;
5521	uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
5522	int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
5523	uint32_t status;
5524
5525	if (action == IWX_FW_CTXT_ACTION_ADD && active)
5526		panic("binding already added");
5527	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
5528		panic("binding already removed");
5529
5530	if (phyctxt == NULL) /* XXX race with iwx_stop() */
5531		return EINVAL;
5532
5533	memset(&cmd, 0, sizeof(cmd));
5534
5535	cmd.id_and_color
5536	    = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5537	cmd.action = htole32(action);
5538	cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
5539
5540	cmd.macs[0] = htole32(mac_id);
5541	for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
5542		cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
5543
5544	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
5545	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5546		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5547	else
5548		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5549
5550	status = 0;
5551	err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
5552	    &cmd, &status);
5553	if (err == 0 && status != 0)
5554		err = EIO;
5555
5556	return err;
5557}
5558
5559uint8_t
5560iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
5561{
5562	int center_idx = ic->ic_bss->ni_vht_chan_center_freq_idx0;
5563	int primary_idx = ic->ic_bss->ni_primary_chan;
5564	/*
5565	 * The FW is expected to check the control channel position only
5566	 * when in HT/VHT and the channel width is not 20MHz. Return
5567	 * this value as the default one:
5568	 */
5569	uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5570
5571	switch (primary_idx - center_idx) {
5572	case -6:
5573		pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
5574		break;
5575	case -2:
5576		pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5577		break;
5578	case 2:
5579		pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5580		break;
5581	case 6:
5582		pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
5583		break;
5584	default:
5585		break;
5586	}
5587
5588	return pos;
5589}
5590
5591int
5592iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5593    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5594    uint8_t vht_chan_width, int cmdver)
5595{
5596	struct ieee80211com *ic = &sc->sc_ic;
5597	struct iwx_phy_context_cmd_uhb cmd;
5598	uint8_t active_cnt, idle_cnt;
5599	struct ieee80211_channel *chan = ctxt->channel;
5600
5601	memset(&cmd, 0, sizeof(cmd));
5602	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5603	    ctxt->color));
5604	cmd.action = htole32(action);
5605
5606	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5607	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5608		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5609	else
5610		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5611
5612	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5613	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5614	cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
5615	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5616		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5617		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5618	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5619		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5620			/* secondary chan above -> control chan below */
5621			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5622			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5623		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5624			/* secondary chan below -> control chan above */
5625			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5626			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5627		} else {
5628			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5629			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5630		}
5631	} else {
5632		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5633		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5634	}
5635
5636	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5637	    IWX_RLC_CONFIG_CMD) != 2) {
5638		idle_cnt = chains_static;
5639		active_cnt = chains_dynamic;
5640		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5641		    IWX_PHY_RX_CHAIN_VALID_POS);
5642		cmd.rxchain_info |= htole32(idle_cnt <<
5643		    IWX_PHY_RX_CHAIN_CNT_POS);
5644		cmd.rxchain_info |= htole32(active_cnt <<
5645		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5646	}
5647
5648	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5649}
5650
5651int
5652iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5653    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
5654    uint8_t vht_chan_width, int cmdver)
5655{
5656	struct ieee80211com *ic = &sc->sc_ic;
5657	struct iwx_phy_context_cmd cmd;
5658	uint8_t active_cnt, idle_cnt;
5659	struct ieee80211_channel *chan = ctxt->channel;
5660
5661	memset(&cmd, 0, sizeof(cmd));
5662	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
5663	    ctxt->color));
5664	cmd.action = htole32(action);
5665
5666	if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
5667	    !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
5668		cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
5669	else
5670		cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
5671
5672	cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
5673	    IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
5674	cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
5675	if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
5676		cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
5677		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
5678	} else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
5679		if (sco == IEEE80211_HTOP0_SCO_SCA) {
5680			/* secondary chan above -> control chan below */
5681			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5682			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5683		} else if (sco == IEEE80211_HTOP0_SCO_SCB) {
5684			/* secondary chan below -> control chan above */
5685			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
5686			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
5687		} else {
5688			cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5689			cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5690		}
5691	} else {
5692		cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
5693		cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
5694	}
5695
5696	if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
5697	    IWX_RLC_CONFIG_CMD) != 2) {
5698		idle_cnt = chains_static;
5699		active_cnt = chains_dynamic;
5700		cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
5701		    IWX_PHY_RX_CHAIN_VALID_POS);
5702		cmd.rxchain_info |= htole32(idle_cnt <<
5703		    IWX_PHY_RX_CHAIN_CNT_POS);
5704		cmd.rxchain_info |= htole32(active_cnt <<
5705		    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
5706	}
5707
5708	return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5709}
5710
5711int
5712iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
5713    uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
5714    uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
5715{
5716	int cmdver;
5717
5718	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
5719	if (cmdver != 3 && cmdver != 4) {
5720		printf("%s: firmware does not support phy-context-cmd v3/v4\n",
5721		    DEVNAME(sc));
5722		return ENOTSUP;
5723	}
5724
5725	/*
5726	 * Intel increased the size of the fw_channel_info struct and neglected
5727	 * to bump the phy_context_cmd struct, which contains an fw_channel_info
5728	 * member in the middle.
5729	 * To keep things simple we use a separate function to handle the larger
5730	 * variant of the phy context command.
5731	 */
5732	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
5733		return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
5734		    chains_dynamic, action, sco, vht_chan_width, cmdver);
5735	}
5736
5737	return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
5738	    action, sco, vht_chan_width, cmdver);
5739}
5740
5741int
5742iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5743{
5744	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5745	struct iwx_tfh_tfd *desc;
5746	struct iwx_tx_data *txdata;
5747	struct iwx_device_cmd *cmd;
5748	struct mbuf *m;
5749	bus_addr_t paddr;
5750	uint64_t addr;
5751	int err = 0, i, paylen, off, s;
5752	int idx, code, async, group_id;
5753	size_t hdrlen, datasz;
5754	uint8_t *data;
5755	int generation = sc->sc_generation;
5756
5757	code = hcmd->id;
5758	async = hcmd->flags & IWX_CMD_ASYNC;
5759	idx = ring->cur;
5760
5761	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
5762		paylen += hcmd->len[i];
5763	}
5764
5765	/* If this command waits for a response, allocate response buffer. */
5766	hcmd->resp_pkt = NULL;
5767	if (hcmd->flags & IWX_CMD_WANT_RESP) {
5768		uint8_t *resp_buf;
5769		KASSERT(!async);
5770		KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet));
5771		KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX);
5772		if (sc->sc_cmd_resp_pkt[idx] != NULL)
5773			return ENOSPC;
5774		resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
5775		    M_NOWAIT | M_ZERO);
5776		if (resp_buf == NULL)
5777			return ENOMEM;
5778		sc->sc_cmd_resp_pkt[idx] = resp_buf;
5779		sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
5780	} else {
5781		sc->sc_cmd_resp_pkt[idx] = NULL;
5782	}
5783
5784	s = splnet();
5785
5786	desc = &ring->desc[idx];
5787	txdata = &ring->data[idx];
5788
5789	/*
5790	 * XXX Intel inside (tm)
5791	 * Firmware API versions >= 50 reject old-style commands in
5792	 * group 0 with a "BAD_COMMAND" firmware error. We must pretend
5793	 * that such commands were in the LONG_GROUP instead in order
5794	 * for firmware to accept them.
5795	 */
5796	if (iwx_cmd_groupid(code) == 0) {
5797		code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
5798		txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
5799	} else
5800		txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
5801
5802	group_id = iwx_cmd_groupid(code);
5803
5804	hdrlen = sizeof(cmd->hdr_wide);
5805	datasz = sizeof(cmd->data_wide);
5806
5807	if (paylen > datasz) {
5808		/* Command is too large to fit in pre-allocated space. */
5809		size_t totlen = hdrlen + paylen;
5810		if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
5811			printf("%s: firmware command too long (%zd bytes)\n",
5812			    DEVNAME(sc), totlen);
5813			err = EINVAL;
5814			goto out;
5815		}
5816		m = MCLGETL(NULL, M_DONTWAIT, totlen);
5817		if (m == NULL) {
5818			printf("%s: could not get fw cmd mbuf (%zd bytes)\n",
5819			    DEVNAME(sc), totlen);
5820			err = ENOMEM;
5821			goto out;
5822		}
5823		cmd = mtod(m, struct iwx_device_cmd *);
5824		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
5825		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
5826		if (err) {
5827			printf("%s: could not load fw cmd mbuf (%zd bytes)\n",
5828			    DEVNAME(sc), totlen);
5829			m_freem(m);
5830			goto out;
5831		}
5832		txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
5833		paddr = txdata->map->dm_segs[0].ds_addr;
5834	} else {
5835		cmd = &ring->cmd[idx];
5836		paddr = txdata->cmd_paddr;
5837	}
5838
5839	memset(cmd, 0, sizeof(*cmd));
5840	cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
5841	cmd->hdr_wide.group_id = group_id;
5842	cmd->hdr_wide.qid = ring->qid;
5843	cmd->hdr_wide.idx = idx;
5844	cmd->hdr_wide.length = htole16(paylen);
5845	cmd->hdr_wide.version = iwx_cmd_version(code);
5846	data = cmd->data_wide;
5847
5848	for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
5849		if (hcmd->len[i] == 0)
5850			continue;
5851		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
5852		off += hcmd->len[i];
5853	}
5854	KASSERT(off == paylen);
5855
5856	desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
5857	addr = htole64(paddr);
5858	memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
5859	if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
5860		desc->tbs[1].tb_len = htole16(hdrlen + paylen -
5861		    IWX_FIRST_TB_SIZE);
5862		addr = htole64(paddr + IWX_FIRST_TB_SIZE);
5863		memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
5864		desc->num_tbs = htole16(2);
5865	} else
5866		desc->num_tbs = htole16(1);
5867
5868	if (paylen > datasz) {
5869		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
5870		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5871	} else {
5872		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
5873		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
5874		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
5875	}
5876	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
5877	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
5878	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
5879	/* Kick command ring. */
5880	DPRINTF(("%s: sending command 0x%x\n", __func__, code));
5881	ring->queued++;
5882	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
5883	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
5884	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
5885
5886	if (!async) {
5887		err = tsleep_nsec(desc, PCATCH, "iwxcmd", SEC_TO_NSEC(1));
5888		if (err == 0) {
5889			/* if hardware is no longer up, return error */
5890			if (generation != sc->sc_generation) {
5891				err = ENXIO;
5892				goto out;
5893			}
5894
5895			/* Response buffer will be freed in iwx_free_resp(). */
5896			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
5897			sc->sc_cmd_resp_pkt[idx] = NULL;
5898		} else if (generation == sc->sc_generation) {
5899			free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
5900			    sc->sc_cmd_resp_len[idx]);
5901			sc->sc_cmd_resp_pkt[idx] = NULL;
5902		}
5903	}
5904 out:
5905	splx(s);
5906
5907	return err;
5908}
5909
5910int
5911iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
5912    uint16_t len, const void *data)
5913{
5914	struct iwx_host_cmd cmd = {
5915		.id = id,
5916		.len = { len, },
5917		.data = { data, },
5918		.flags = flags,
5919	};
5920
5921	return iwx_send_cmd(sc, &cmd);
5922}
5923
5924int
5925iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
5926    uint32_t *status)
5927{
5928	struct iwx_rx_packet *pkt;
5929	struct iwx_cmd_response *resp;
5930	int err, resp_len;
5931
5932	KASSERT((cmd->flags & IWX_CMD_WANT_RESP) == 0);
5933	cmd->flags |= IWX_CMD_WANT_RESP;
5934	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
5935
5936	err = iwx_send_cmd(sc, cmd);
5937	if (err)
5938		return err;
5939
5940	pkt = cmd->resp_pkt;
5941	if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
5942		return EIO;
5943
5944	resp_len = iwx_rx_packet_payload_len(pkt);
5945	if (resp_len != sizeof(*resp)) {
5946		iwx_free_resp(sc, cmd);
5947		return EIO;
5948	}
5949
5950	resp = (void *)pkt->data;
5951	*status = le32toh(resp->status);
5952	iwx_free_resp(sc, cmd);
5953	return err;
5954}
5955
5956int
5957iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
5958    const void *data, uint32_t *status)
5959{
5960	struct iwx_host_cmd cmd = {
5961		.id = id,
5962		.len = { len, },
5963		.data = { data, },
5964	};
5965
5966	return iwx_send_cmd_status(sc, &cmd, status);
5967}
5968
5969void
5970iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
5971{
5972	KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP);
5973	free(hcmd->resp_pkt, M_DEVBUF, hcmd->resp_pkt_len);
5974	hcmd->resp_pkt = NULL;
5975}
5976
5977void
5978iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
5979{
5980	struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
5981	struct iwx_tx_data *data;
5982
5983	if (qid != IWX_DQA_CMD_QUEUE) {
5984		return;	/* Not a command ack. */
5985	}
5986
5987	data = &ring->data[idx];
5988
5989	if (data->m != NULL) {
5990		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
5991		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
5992		bus_dmamap_unload(sc->sc_dmat, data->map);
5993		m_freem(data->m);
5994		data->m = NULL;
5995	}
5996	wakeup(&ring->desc[idx]);
5997
5998	DPRINTF(("%s: command 0x%x done\n", __func__, code));
5999	if (ring->queued == 0) {
6000		DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
6001			DEVNAME(sc), code));
6002	} else if (ring->queued > 0)
6003		ring->queued--;
6004}
6005
6006uint32_t
6007iwx_fw_rateidx_ofdm(uint8_t rval)
6008{
6009	/* Firmware expects indices which match our 11a rate set. */
6010	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
6011	int i;
6012
6013	for (i = 0; i < rs->rs_nrates; i++) {
6014		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6015			return i;
6016	}
6017
6018	return 0;
6019}
6020
6021uint32_t
6022iwx_fw_rateidx_cck(uint8_t rval)
6023{
6024	/* Firmware expects indices which match our 11b rate set. */
6025	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
6026	int i;
6027
6028	for (i = 0; i < rs->rs_nrates; i++) {
6029		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
6030			return i;
6031	}
6032
6033	return 0;
6034}
6035
6036/*
6037 * Determine the Tx command flags and Tx rate+flags to use.
6038 * Return the selected Tx rate.
6039 */
6040const struct iwx_rate *
6041iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
6042    struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags)
6043{
6044	struct ieee80211com *ic = &sc->sc_ic;
6045	struct ieee80211_node *ni = &in->in_ni;
6046	struct ieee80211_rateset *rs = &ni->ni_rates;
6047	const struct iwx_rate *rinfo;
6048	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6049	int min_ridx = iwx_rval2ridx(ieee80211_min_basic_rate(ic));
6050	int ridx, rate_flags;
6051	uint8_t rval;
6052
6053	*flags = 0;
6054
6055	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
6056	    type != IEEE80211_FC0_TYPE_DATA) {
6057		/* for non-data, use the lowest supported rate */
6058		ridx = min_ridx;
6059		*flags |= IWX_TX_FLAGS_CMD_RATE;
6060	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
6061		ridx = iwx_mcs2ridx[ni->ni_txmcs];
6062	} else {
6063		rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
6064		ridx = iwx_rval2ridx(rval);
6065		if (ridx < min_ridx)
6066			ridx = min_ridx;
6067	}
6068
6069	if ((ic->ic_flags & IEEE80211_F_RSNON) &&
6070	    ni->ni_rsn_supp_state == RSNA_SUPP_PTKNEGOTIATING)
6071		*flags |= IWX_TX_FLAGS_HIGH_PRI;
6072
6073	rinfo = &iwx_rates[ridx];
6074
6075	/*
6076	 * Do not fill rate_n_flags if firmware controls the Tx rate.
6077	 * For data frames we rely on Tx rate scaling in firmware by default.
6078	 */
6079	if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
6080		*rate_n_flags = 0;
6081		return rinfo;
6082	}
6083
6084	/*
6085	 * Forcing a CCK/OFDM legacy rate is important for management frames.
6086	 * Association will only succeed if we do this correctly.
6087	 */
6088	rate_flags = IWX_RATE_MCS_ANT_A_MSK;
6089	if (IWX_RIDX_IS_CCK(ridx)) {
6090		if (sc->sc_rate_n_flags_version >= 2)
6091			rate_flags |= IWX_RATE_MCS_CCK_MSK;
6092		else
6093			rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
6094	} else if (sc->sc_rate_n_flags_version >= 2)
6095		rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
6096
6097	rval = (rs->rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL);
6098	if (sc->sc_rate_n_flags_version >= 2) {
6099		if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
6100			rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
6101			    IWX_RATE_LEGACY_RATE_MSK);
6102		} else {
6103			rate_flags |= (iwx_fw_rateidx_cck(rval) &
6104			    IWX_RATE_LEGACY_RATE_MSK);
6105		}
6106	} else
6107		rate_flags |= rinfo->plcp;
6108
6109	*rate_n_flags = rate_flags;
6110
6111	return rinfo;
6112}
6113
6114void
6115iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
6116    int idx, uint16_t byte_cnt, uint16_t num_tbs)
6117{
6118	uint8_t filled_tfd_size, num_fetch_chunks;
6119	uint16_t len = byte_cnt;
6120	uint16_t bc_ent;
6121
6122	filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
6123			  num_tbs * sizeof(struct iwx_tfh_tb);
6124	/*
6125	 * filled_tfd_size contains the number of filled bytes in the TFD.
6126	 * Dividing it by 64 will give the number of chunks to fetch
6127	 * to SRAM- 0 for one chunk, 1 for 2 and so on.
6128	 * If, for example, TFD contains only 3 TBs then 32 bytes
6129	 * of the TFD are used, and only one chunk of 64 bytes should
6130	 * be fetched
6131	 */
6132	num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
6133
6134	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6135		struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
6136		/* Starting from AX210, the HW expects bytes */
6137		bc_ent = htole16(len | (num_fetch_chunks << 14));
6138		scd_bc_tbl[idx].tfd_offset = bc_ent;
6139	} else {
6140		struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
6141		/* Before AX210, the HW expects DW */
6142		len = howmany(len, 4);
6143		bc_ent = htole16(len | (num_fetch_chunks << 12));
6144		scd_bc_tbl->tfd_offset[idx] = bc_ent;
6145	}
6146
6147	bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, 0,
6148	    txq->bc_tbl.map->dm_mapsize, BUS_DMASYNC_PREWRITE);
6149}
6150
6151int
6152iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
6153{
6154	struct ieee80211com *ic = &sc->sc_ic;
6155	struct iwx_node *in = (void *)ni;
6156	struct iwx_tx_ring *ring;
6157	struct iwx_tx_data *data;
6158	struct iwx_tfh_tfd *desc;
6159	struct iwx_device_cmd *cmd;
6160	struct ieee80211_frame *wh;
6161	struct ieee80211_key *k = NULL;
6162	const struct iwx_rate *rinfo;
6163	uint64_t paddr;
6164	u_int hdrlen;
6165	bus_dma_segment_t *seg;
6166	uint32_t rate_n_flags;
6167	uint16_t num_tbs, flags, offload_assist = 0;
6168	uint8_t type, subtype;
6169	int i, totlen, err, pad, qid;
6170	size_t txcmd_size;
6171
6172	wh = mtod(m, struct ieee80211_frame *);
6173	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
6174	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6175	if (type == IEEE80211_FC0_TYPE_CTL)
6176		hdrlen = sizeof(struct ieee80211_frame_min);
6177	else
6178		hdrlen = ieee80211_get_hdrlen(wh);
6179
6180	qid = sc->first_data_qid;
6181
6182	/* Put QoS frames on the data queue which maps to their TID. */
6183	if (ieee80211_has_qos(wh)) {
6184		struct ieee80211_tx_ba *ba;
6185		uint16_t qos = ieee80211_get_qos(wh);
6186		uint8_t tid = qos & IEEE80211_QOS_TID;
6187
6188		ba = &ni->ni_tx_ba[tid];
6189		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6190		    type == IEEE80211_FC0_TYPE_DATA &&
6191		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
6192		    sc->aggqid[tid] != 0 &&
6193		    ba->ba_state == IEEE80211_BA_AGREED) {
6194			qid = sc->aggqid[tid];
6195		}
6196	}
6197
6198	ring = &sc->txq[qid];
6199	desc = &ring->desc[ring->cur];
6200	memset(desc, 0, sizeof(*desc));
6201	data = &ring->data[ring->cur];
6202
6203	cmd = &ring->cmd[ring->cur];
6204	cmd->hdr.code = IWX_TX_CMD;
6205	cmd->hdr.flags = 0;
6206	cmd->hdr.qid = ring->qid;
6207	cmd->hdr.idx = ring->cur;
6208
6209	rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags);
6210
6211#if NBPFILTER > 0
6212	if (sc->sc_drvbpf != NULL) {
6213		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
6214		uint16_t chan_flags;
6215
6216		tap->wt_flags = 0;
6217		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
6218		chan_flags = ni->ni_chan->ic_flags;
6219		if (ic->ic_curmode != IEEE80211_MODE_11N &&
6220		    ic->ic_curmode != IEEE80211_MODE_11AC) {
6221			chan_flags &= ~IEEE80211_CHAN_HT;
6222			chan_flags &= ~IEEE80211_CHAN_40MHZ;
6223		}
6224		if (ic->ic_curmode != IEEE80211_MODE_11AC)
6225			chan_flags &= ~IEEE80211_CHAN_VHT;
6226		tap->wt_chan_flags = htole16(chan_flags);
6227		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6228		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
6229		    type == IEEE80211_FC0_TYPE_DATA &&
6230		    rinfo->ht_plcp != IWX_RATE_HT_SISO_MCS_INV_PLCP) {
6231			tap->wt_rate = (0x80 | rinfo->ht_plcp);
6232		} else
6233			tap->wt_rate = rinfo->rate;
6234		if ((ic->ic_flags & IEEE80211_F_WEPON) &&
6235		    (wh->i_fc[1] & IEEE80211_FC1_PROTECTED))
6236			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
6237
6238		bpf_mtap_hdr(sc->sc_drvbpf, tap, sc->sc_txtap_len,
6239		    m, BPF_DIRECTION_OUT);
6240	}
6241#endif
6242
6243	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
6244                k = ieee80211_get_txkey(ic, wh, ni);
6245		if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
6246			if ((m = ieee80211_encrypt(ic, m, k)) == NULL)
6247				return ENOBUFS;
6248			/* 802.11 header may have moved. */
6249			wh = mtod(m, struct ieee80211_frame *);
6250			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6251		} else {
6252			k->k_tsc++;
6253			/* Hardware increments PN internally and adds IV. */
6254		}
6255	} else
6256		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
6257
6258	totlen = m->m_pkthdr.len;
6259
6260	if (hdrlen & 3) {
6261		/* First segment length must be a multiple of 4. */
6262		pad = 4 - (hdrlen & 3);
6263		offload_assist |= IWX_TX_CMD_OFFLD_PAD;
6264	} else
6265		pad = 0;
6266
6267	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
6268		struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
6269		memset(tx, 0, sizeof(*tx));
6270		tx->len = htole16(totlen);
6271		tx->offload_assist = htole32(offload_assist);
6272		tx->flags = htole16(flags);
6273		tx->rate_n_flags = htole32(rate_n_flags);
6274		memcpy(tx->hdr, wh, hdrlen);
6275		txcmd_size = sizeof(*tx);
6276	} else {
6277		struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
6278		memset(tx, 0, sizeof(*tx));
6279		tx->len = htole16(totlen);
6280		tx->offload_assist = htole16(offload_assist);
6281		tx->flags = htole32(flags);
6282		tx->rate_n_flags = htole32(rate_n_flags);
6283		memcpy(tx->hdr, wh, hdrlen);
6284		txcmd_size = sizeof(*tx);
6285	}
6286
6287	/* Trim 802.11 header. */
6288	m_adj(m, hdrlen);
6289
6290	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6291	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6292	if (err && err != EFBIG) {
6293		printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
6294		m_freem(m);
6295		return err;
6296	}
6297	if (err) {
6298#ifdef __FreeBSD_version
6299		/* Too many DMA segments, linearize mbuf. */
6300		struct mbuf* m1 = m_collapse(m, M_NOWAIT, IWX_TFH_NUM_TBS - 2);
6301		if (m1 == NULL) {
6302			device_printf(sc->sc_dev,
6303				"%s: could not defrag mbuf\n", __func__);
6304			m_freem(m);
6305			return (ENOBUFS);
6306		}
6307		m = m1;
6308#else
6309		/* Too many DMA segments, linearize mbuf. */
6310		if (m_defrag(m, M_DONTWAIT)) {
6311			m_freem(m);
6312			return ENOBUFS;
6313		}
6314#endif
6315		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
6316		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
6317		if (err) {
6318			printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc),
6319			    err);
6320			m_freem(m);
6321			return err;
6322		}
6323	}
6324	data->m = m;
6325	data->in = in;
6326
6327	/* Fill TX descriptor. */
6328	num_tbs = 2 + data->map->dm_nsegs;
6329	desc->num_tbs = htole16(num_tbs);
6330
6331	desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
6332	paddr = htole64(data->cmd_paddr);
6333	memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
6334	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[0].tb_len)) >> 32)
6335		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
6336	desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
6337	    txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
6338	paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
6339	memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
6340
6341	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[1].tb_len)) >> 32)
6342		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
6343
6344	/* Other DMA segments are for data payload. */
6345	seg = data->map->dm_segs;
6346	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
6347		desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
6348		paddr = htole64(seg->ds_addr);
6349		memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
6350		if (data->cmd_paddr >> 32 != (data->cmd_paddr + le32toh(desc->tbs[i + 2].tb_len)) >> 32)
6351			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
6352	}
6353
6354	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
6355	    BUS_DMASYNC_PREWRITE);
6356	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
6357	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
6358	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
6359	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
6360	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
6361	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
6362
6363	iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
6364
6365	/* Kick TX ring. */
6366	ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
6367	ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
6368	IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
6369
6370	/* Mark TX ring as full if we reach a certain threshold. */
6371	if (++ring->queued > IWX_TX_RING_HIMARK) {
6372		sc->qfullmsk |= 1 << ring->qid;
6373	}
6374
6375	if (ic->ic_if.if_flags & IFF_UP)
6376		sc->sc_tx_timer[ring->qid] = 15;
6377
6378	return 0;
6379}
6380
6381int
6382iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
6383{
6384	struct iwx_rx_packet *pkt;
6385	struct iwx_tx_path_flush_cmd_rsp *resp;
6386	struct iwx_tx_path_flush_cmd flush_cmd = {
6387		.sta_id = htole32(sta_id),
6388		.tid_mask = htole16(tids),
6389	};
6390	struct iwx_host_cmd hcmd = {
6391		.id = IWX_TXPATH_FLUSH,
6392		.len = { sizeof(flush_cmd), },
6393		.data = { &flush_cmd, },
6394		.flags = IWX_CMD_WANT_RESP,
6395		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
6396	};
6397	int err, resp_len, i, num_flushed_queues;
6398
6399	err = iwx_send_cmd(sc, &hcmd);
6400	if (err)
6401		return err;
6402
6403	pkt = hcmd.resp_pkt;
6404	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
6405		err = EIO;
6406		goto out;
6407	}
6408
6409	resp_len = iwx_rx_packet_payload_len(pkt);
6410	if (resp_len != sizeof(*resp)) {
6411		err = EIO;
6412		goto out;
6413	}
6414
6415	resp = (void *)pkt->data;
6416
6417	if (le16toh(resp->sta_id) != sta_id) {
6418		err = EIO;
6419		goto out;
6420	}
6421
6422	num_flushed_queues = le16toh(resp->num_flushed_queues);
6423	if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
6424		err = EIO;
6425		goto out;
6426	}
6427
6428	for (i = 0; i < num_flushed_queues; i++) {
6429		struct iwx_flush_queue_info *queue_info = &resp->queues[i];
6430		uint16_t tid = le16toh(queue_info->tid);
6431		uint16_t read_after = le16toh(queue_info->read_after_flush);
6432		uint16_t qid = le16toh(queue_info->queue_num);
6433		struct iwx_tx_ring *txq;
6434
6435		if (qid >= nitems(sc->txq))
6436			continue;
6437
6438		txq = &sc->txq[qid];
6439		if (tid != txq->tid)
6440			continue;
6441
6442		iwx_txq_advance(sc, txq, read_after);
6443	}
6444out:
6445	iwx_free_resp(sc, &hcmd);
6446	return err;
6447}
6448
6449#define IWX_FLUSH_WAIT_MS	2000
6450
6451int
6452iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
6453{
6454	struct iwx_add_sta_cmd cmd;
6455	int err;
6456	uint32_t status;
6457
6458	memset(&cmd, 0, sizeof(cmd));
6459	cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6460	    in->in_color));
6461	cmd.sta_id = IWX_STATION_ID;
6462	cmd.add_modify = IWX_STA_MODE_MODIFY;
6463	cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
6464	cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
6465
6466	status = IWX_ADD_STA_SUCCESS;
6467	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
6468	    sizeof(cmd), &cmd, &status);
6469	if (err) {
6470		printf("%s: could not update sta (error %d)\n",
6471		    DEVNAME(sc), err);
6472		return err;
6473	}
6474
6475	switch (status & IWX_ADD_STA_STATUS_MASK) {
6476	case IWX_ADD_STA_SUCCESS:
6477		break;
6478	default:
6479		err = EIO;
6480		printf("%s: Couldn't %s draining for station\n",
6481		    DEVNAME(sc), drain ? "enable" : "disable");
6482		break;
6483	}
6484
6485	return err;
6486}
6487
6488int
6489iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
6490{
6491	int err;
6492
6493	splassert(IPL_NET);
6494
6495	sc->sc_flags |= IWX_FLAG_TXFLUSH;
6496
6497	err = iwx_drain_sta(sc, in, 1);
6498	if (err)
6499		goto done;
6500
6501	err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
6502	if (err) {
6503		printf("%s: could not flush Tx path (error %d)\n",
6504		    DEVNAME(sc), err);
6505		goto done;
6506	}
6507
6508	err = iwx_drain_sta(sc, in, 0);
6509done:
6510	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
6511	return err;
6512}
6513
6514#define IWX_POWER_KEEP_ALIVE_PERIOD_SEC    25
6515
6516int
6517iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
6518    struct iwx_beacon_filter_cmd *cmd)
6519{
6520	return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
6521	    0, sizeof(struct iwx_beacon_filter_cmd), cmd);
6522}
6523
6524int
6525iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
6526{
6527	struct iwx_beacon_filter_cmd cmd = {
6528		IWX_BF_CMD_CONFIG_DEFAULTS,
6529		.bf_enable_beacon_filter = htole32(1),
6530		.ba_enable_beacon_abort = htole32(enable),
6531	};
6532
6533	if (!sc->sc_bf.bf_enabled)
6534		return 0;
6535
6536	sc->sc_bf.ba_enabled = enable;
6537	return iwx_beacon_filter_send_cmd(sc, &cmd);
6538}
6539
6540void
6541iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
6542    struct iwx_mac_power_cmd *cmd)
6543{
6544	struct ieee80211com *ic = &sc->sc_ic;
6545	struct ieee80211_node *ni = &in->in_ni;
6546	int dtim_period, dtim_msec, keep_alive;
6547
6548	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
6549	    in->in_color));
6550	if (ni->ni_dtimperiod)
6551		dtim_period = ni->ni_dtimperiod;
6552	else
6553		dtim_period = 1;
6554
6555	/*
6556	 * Regardless of power management state the driver must set
6557	 * keep alive period. FW will use it for sending keep alive NDPs
6558	 * immediately after association. Check that keep alive period
6559	 * is at least 3 * DTIM.
6560	 */
6561	dtim_msec = dtim_period * ni->ni_intval;
6562	keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
6563	keep_alive = roundup(keep_alive, 1000) / 1000;
6564	cmd->keep_alive_seconds = htole16(keep_alive);
6565
6566	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6567		cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6568}
6569
6570int
6571iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
6572{
6573	int err;
6574	int ba_enable;
6575	struct iwx_mac_power_cmd cmd;
6576
6577	memset(&cmd, 0, sizeof(cmd));
6578
6579	iwx_power_build_cmd(sc, in, &cmd);
6580
6581	err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
6582	    sizeof(cmd), &cmd);
6583	if (err != 0)
6584		return err;
6585
6586	ba_enable = !!(cmd.flags &
6587	    htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
6588	return iwx_update_beacon_abort(sc, in, ba_enable);
6589}
6590
6591int
6592iwx_power_update_device(struct iwx_softc *sc)
6593{
6594	struct iwx_device_power_cmd cmd = { };
6595	struct ieee80211com *ic = &sc->sc_ic;
6596
6597	if (ic->ic_opmode != IEEE80211_M_MONITOR)
6598		cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
6599
6600	return iwx_send_cmd_pdu(sc,
6601	    IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
6602}
6603
6604int
6605iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
6606{
6607	struct iwx_beacon_filter_cmd cmd = {
6608		IWX_BF_CMD_CONFIG_DEFAULTS,
6609		.bf_enable_beacon_filter = htole32(1),
6610		.ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
6611	};
6612	int err;
6613
6614	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6615	if (err == 0)
6616		sc->sc_bf.bf_enabled = 1;
6617
6618	return err;
6619}
6620
6621int
6622iwx_disable_beacon_filter(struct iwx_softc *sc)
6623{
6624	struct iwx_beacon_filter_cmd cmd;
6625	int err;
6626
6627	memset(&cmd, 0, sizeof(cmd));
6628
6629	err = iwx_beacon_filter_send_cmd(sc, &cmd);
6630	if (err == 0)
6631		sc->sc_bf.bf_enabled = 0;
6632
6633	return err;
6634}
6635
6636int
6637iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
6638{
6639	struct iwx_add_sta_cmd add_sta_cmd;
6640	int err;
6641	uint32_t status, aggsize;
6642	const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
6643		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
6644	struct ieee80211com *ic = &sc->sc_ic;
6645
6646	if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
6647		panic("STA already added");
6648
6649	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
6650
6651	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
6652		add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6653		add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
6654	} else {
6655		add_sta_cmd.sta_id = IWX_STATION_ID;
6656		add_sta_cmd.station_type = IWX_STA_LINK;
6657	}
6658	add_sta_cmd.mac_id_n_color
6659	    = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
6660	if (!update) {
6661		if (ic->ic_opmode == IEEE80211_M_MONITOR)
6662			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6663			    etheranyaddr);
6664		else
6665			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
6666			    in->in_macaddr);
6667	}
6668	add_sta_cmd.add_modify = update ? 1 : 0;
6669	add_sta_cmd.station_flags_msk
6670	    |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
6671
6672	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
6673		add_sta_cmd.station_flags_msk
6674		    |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
6675		    IWX_STA_FLG_AGG_MPDU_DENS_MSK);
6676
6677		if (iwx_mimo_enabled(sc)) {
6678			if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6679				uint16_t rx_mcs = (in->in_ni.ni_vht_rxmcs &
6680				    IEEE80211_VHT_MCS_FOR_SS_MASK(2)) >>
6681				    IEEE80211_VHT_MCS_FOR_SS_SHIFT(2);
6682				if (rx_mcs != IEEE80211_VHT_MCS_SS_NOT_SUPP) {
6683					add_sta_cmd.station_flags |=
6684					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6685				}
6686			} else {
6687				if (in->in_ni.ni_rxmcs[1] != 0) {
6688					add_sta_cmd.station_flags |=
6689					    htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
6690				}
6691				if (in->in_ni.ni_rxmcs[2] != 0) {
6692					add_sta_cmd.station_flags |=
6693					    htole32(IWX_STA_FLG_MIMO_EN_MIMO3);
6694				}
6695			}
6696		}
6697
6698		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
6699		    ieee80211_node_supports_ht_chan40(&in->in_ni)) {
6700			add_sta_cmd.station_flags |= htole32(
6701			    IWX_STA_FLG_FAT_EN_40MHZ);
6702		}
6703
6704		if (in->in_ni.ni_flags & IEEE80211_NODE_VHT) {
6705			if (IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
6706			    ieee80211_node_supports_vht_chan80(&in->in_ni)) {
6707				add_sta_cmd.station_flags |= htole32(
6708				    IWX_STA_FLG_FAT_EN_80MHZ);
6709			}
6710			aggsize = (in->in_ni.ni_vhtcaps &
6711			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_MASK) >>
6712			    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT;
6713		} else {
6714			aggsize = (in->in_ni.ni_ampdu_param &
6715			    IEEE80211_AMPDU_PARAM_LE);
6716		}
6717		if (aggsize > max_aggsize)
6718			aggsize = max_aggsize;
6719		add_sta_cmd.station_flags |= htole32((aggsize <<
6720		    IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
6721		    IWX_STA_FLG_MAX_AGG_SIZE_MSK);
6722
6723		switch (in->in_ni.ni_ampdu_param & IEEE80211_AMPDU_PARAM_SS) {
6724		case IEEE80211_AMPDU_PARAM_SS_2:
6725			add_sta_cmd.station_flags
6726			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
6727			break;
6728		case IEEE80211_AMPDU_PARAM_SS_4:
6729			add_sta_cmd.station_flags
6730			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
6731			break;
6732		case IEEE80211_AMPDU_PARAM_SS_8:
6733			add_sta_cmd.station_flags
6734			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
6735			break;
6736		case IEEE80211_AMPDU_PARAM_SS_16:
6737			add_sta_cmd.station_flags
6738			    |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
6739			break;
6740		default:
6741			break;
6742		}
6743	}
6744
6745	status = IWX_ADD_STA_SUCCESS;
6746	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
6747	    &add_sta_cmd, &status);
6748	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
6749		err = EIO;
6750
6751	return err;
6752}
6753
6754int
6755iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
6756{
6757	struct ieee80211com *ic = &sc->sc_ic;
6758	struct iwx_rm_sta_cmd rm_sta_cmd;
6759	int err;
6760
6761	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
6762		panic("sta already removed");
6763
6764	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
6765	if (ic->ic_opmode == IEEE80211_M_MONITOR)
6766		rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
6767	else
6768		rm_sta_cmd.sta_id = IWX_STATION_ID;
6769
6770	err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
6771	    &rm_sta_cmd);
6772
6773	return err;
6774}
6775
6776int
6777iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
6778{
6779	struct ieee80211com *ic = &sc->sc_ic;
6780	struct ieee80211_node *ni = &in->in_ni;
6781	int err, i, cmd_ver;
6782
6783	err = iwx_flush_sta(sc, in);
6784	if (err) {
6785		printf("%s: could not flush Tx path (error %d)\n",
6786		    DEVNAME(sc), err);
6787		return err;
6788	}
6789
6790	/*
6791	 * New SCD_QUEUE_CONFIG API requires explicit queue removal
6792	 * before a station gets removed.
6793	 */
6794	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
6795	    IWX_SCD_QUEUE_CONFIG_CMD);
6796	if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
6797		err = iwx_disable_mgmt_queue(sc);
6798		if (err)
6799			return err;
6800		for (i = IWX_FIRST_AGG_TX_QUEUE;
6801		    i < IWX_LAST_AGG_TX_QUEUE; i++) {
6802			struct iwx_tx_ring *ring = &sc->txq[i];
6803			if ((sc->qenablemsk & (1 << i)) == 0)
6804				continue;
6805			err = iwx_disable_txq(sc, IWX_STATION_ID,
6806			    ring->qid, ring->tid);
6807			if (err) {
6808				printf("%s: could not disable Tx queue %d "
6809				    "(error %d)\n", DEVNAME(sc), ring->qid,
6810				    err);
6811				return err;
6812			}
6813		}
6814	}
6815
6816	err = iwx_rm_sta_cmd(sc, in);
6817	if (err) {
6818		printf("%s: could not remove STA (error %d)\n",
6819		    DEVNAME(sc), err);
6820		return err;
6821	}
6822
6823	in->in_flags = 0;
6824
6825	sc->sc_rx_ba_sessions = 0;
6826	sc->ba_rx.start_tidmask = 0;
6827	sc->ba_rx.stop_tidmask = 0;
6828	memset(sc->aggqid, 0, sizeof(sc->aggqid));
6829	sc->ba_tx.start_tidmask = 0;
6830	sc->ba_tx.stop_tidmask = 0;
6831	for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
6832		sc->qenablemsk &= ~(1 << i);
6833	for (i = 0; i < IEEE80211_NUM_TID; i++) {
6834		struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
6835		if (ba->ba_state != IEEE80211_BA_AGREED)
6836			continue;
6837		ieee80211_delba_request(ic, ni, 0, 1, i);
6838	}
6839
6840	return 0;
6841}
6842
6843uint8_t
6844iwx_umac_scan_fill_channels(struct iwx_softc *sc,
6845    struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
6846    int n_ssids, uint32_t channel_cfg_flags)
6847{
6848	struct ieee80211com *ic = &sc->sc_ic;
6849	struct ieee80211_channel *c;
6850	uint8_t nchan;
6851
6852	for (nchan = 0, c = &ic->ic_channels[1];
6853	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
6854	    nchan < chan_nitems &&
6855	    nchan < sc->sc_capa_n_scan_channels;
6856	    c++) {
6857		uint8_t channel_num;
6858
6859		if (c->ic_flags == 0)
6860			continue;
6861
6862		channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
6863		if (isset(sc->sc_ucode_api,
6864		    IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
6865			chan->v2.channel_num = channel_num;
6866			if (IEEE80211_IS_CHAN_2GHZ(c))
6867				chan->v2.band = IWX_PHY_BAND_24;
6868			else
6869				chan->v2.band = IWX_PHY_BAND_5;
6870			chan->v2.iter_count = 1;
6871			chan->v2.iter_interval = 0;
6872		} else {
6873			chan->v1.channel_num = channel_num;
6874			chan->v1.iter_count = 1;
6875			chan->v1.iter_interval = htole16(0);
6876		}
6877
6878		chan->flags = htole32(channel_cfg_flags);
6879		chan++;
6880		nchan++;
6881	}
6882
6883	return nchan;
6884}
6885
6886int
6887iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
6888{
6889	struct ieee80211com *ic = &sc->sc_ic;
6890	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
6891	struct ieee80211_rateset *rs;
6892	size_t remain = sizeof(preq->buf);
6893	uint8_t *frm, *pos;
6894
6895	memset(preq, 0, sizeof(*preq));
6896
6897	if (remain < sizeof(*wh) + 2)
6898		return ENOBUFS;
6899
6900	/*
6901	 * Build a probe request frame.  Most of the following code is a
6902	 * copy & paste of what is done in net80211.
6903	 */
6904	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
6905	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
6906	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
6907	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
6908	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
6909	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
6910	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
6911	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
6912
6913	frm = (uint8_t *)(wh + 1);
6914	*frm++ = IEEE80211_ELEMID_SSID;
6915	*frm++ = 0;
6916	/* hardware inserts SSID */
6917
6918	/* Tell the firmware where the MAC header is. */
6919	preq->mac_header.offset = 0;
6920	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
6921	remain -= frm - (uint8_t *)wh;
6922
6923	/* Fill in 2GHz IEs and tell firmware where they are. */
6924	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
6925	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6926		if (remain < 4 + rs->rs_nrates)
6927			return ENOBUFS;
6928	} else if (remain < 2 + rs->rs_nrates)
6929		return ENOBUFS;
6930	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
6931	pos = frm;
6932	frm = ieee80211_add_rates(frm, rs);
6933	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6934		frm = ieee80211_add_xrates(frm, rs);
6935	remain -= frm - pos;
6936
6937	if (isset(sc->sc_enabled_capa,
6938	    IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
6939		if (remain < 3)
6940			return ENOBUFS;
6941		*frm++ = IEEE80211_ELEMID_DSPARMS;
6942		*frm++ = 1;
6943		*frm++ = 0;
6944		remain -= 3;
6945	}
6946	preq->band_data[0].len = htole16(frm - pos);
6947
6948	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
6949		/* Fill in 5GHz IEs. */
6950		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
6951		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
6952			if (remain < 4 + rs->rs_nrates)
6953				return ENOBUFS;
6954		} else if (remain < 2 + rs->rs_nrates)
6955			return ENOBUFS;
6956		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
6957		pos = frm;
6958		frm = ieee80211_add_rates(frm, rs);
6959		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
6960			frm = ieee80211_add_xrates(frm, rs);
6961		preq->band_data[1].len = htole16(frm - pos);
6962		remain -= frm - pos;
6963		if (ic->ic_flags & IEEE80211_F_VHTON) {
6964			if (remain < 14)
6965				return ENOBUFS;
6966			frm = ieee80211_add_vhtcaps(frm, ic);
6967			remain -= frm - pos;
6968			preq->band_data[1].len = htole16(frm - pos);
6969		}
6970	}
6971
6972	/* Send 11n IEs on both 2GHz and 5GHz bands. */
6973	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
6974	pos = frm;
6975	if (ic->ic_flags & IEEE80211_F_HTON) {
6976		if (remain < 28)
6977			return ENOBUFS;
6978		frm = ieee80211_add_htcaps(frm, ic);
6979		/* XXX add WME info? */
6980		remain -= frm - pos;
6981	}
6982
6983	preq->common_data.len = htole16(frm - pos);
6984
6985	return 0;
6986}
6987
6988int
6989iwx_config_umac_scan_reduced(struct iwx_softc *sc)
6990{
6991	struct iwx_scan_config scan_cfg;
6992	struct iwx_host_cmd hcmd = {
6993		.id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
6994		.len[0] = sizeof(scan_cfg),
6995		.data[0] = &scan_cfg,
6996		.flags = 0,
6997	};
6998	int cmdver;
6999
7000	if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
7001		printf("%s: firmware does not support reduced scan config\n",
7002		    DEVNAME(sc));
7003		return ENOTSUP;
7004	}
7005
7006	memset(&scan_cfg, 0, sizeof(scan_cfg));
7007
7008	/*
7009	 * SCAN_CFG version >= 5 implies that the broadcast
7010	 * STA ID field is deprecated.
7011	 */
7012	cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
7013	if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
7014		scan_cfg.bcast_sta_id = 0xff;
7015
7016	scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
7017	scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
7018
7019	return iwx_send_cmd(sc, &hcmd);
7020}
7021
7022uint16_t
7023iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
7024{
7025	struct ieee80211com *ic = &sc->sc_ic;
7026	uint16_t flags = 0;
7027
7028	if (ic->ic_des_esslen == 0)
7029		flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
7030
7031	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
7032	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
7033	flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
7034
7035	return flags;
7036}
7037
7038#define IWX_SCAN_DWELL_ACTIVE		10
7039#define IWX_SCAN_DWELL_PASSIVE		110
7040
7041/* adaptive dwell max budget time [TU] for full scan */
7042#define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
7043/* adaptive dwell max budget time [TU] for directed scan */
7044#define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
7045/* adaptive dwell default high band APs number */
7046#define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
7047/* adaptive dwell default low band APs number */
7048#define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
7049/* adaptive dwell default APs number in social channels (1, 6, 11) */
7050#define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
7051/* adaptive dwell number of APs override for p2p friendly GO channels */
7052#define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
7053/* adaptive dwell number of APs override for social channels */
7054#define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
7055
7056void
7057iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
7058    struct iwx_scan_general_params_v10 *general_params, int bgscan)
7059{
7060	uint32_t suspend_time, max_out_time;
7061	uint8_t active_dwell, passive_dwell;
7062
7063	active_dwell = IWX_SCAN_DWELL_ACTIVE;
7064	passive_dwell = IWX_SCAN_DWELL_PASSIVE;
7065
7066	general_params->adwell_default_social_chn =
7067		IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
7068	general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
7069	general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
7070
7071	if (bgscan)
7072		general_params->adwell_max_budget =
7073			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
7074	else
7075		general_params->adwell_max_budget =
7076			htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
7077
7078	general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
7079	if (bgscan) {
7080		max_out_time = htole32(120);
7081		suspend_time = htole32(120);
7082	} else {
7083		max_out_time = htole32(0);
7084		suspend_time = htole32(0);
7085	}
7086	general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
7087		htole32(max_out_time);
7088	general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
7089		htole32(suspend_time);
7090	general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
7091		htole32(max_out_time);
7092	general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
7093		htole32(suspend_time);
7094
7095	general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
7096	general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
7097	general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
7098	general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
7099}
7100
7101void
7102iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
7103    struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
7104{
7105	iwx_scan_umac_dwell_v10(sc, gp, bgscan);
7106
7107	gp->flags = htole16(gen_flags);
7108
7109	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
7110		gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
7111	if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
7112		gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
7113
7114	gp->scan_start_mac_id = 0;
7115}
7116
7117void
7118iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
7119    struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
7120    int n_ssid)
7121{
7122	cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
7123
7124	cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
7125	    nitems(cp->channel_config), n_ssid, channel_cfg_flags);
7126
7127	cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
7128	cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
7129}
7130
7131int
7132iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
7133{
7134	struct ieee80211com *ic = &sc->sc_ic;
7135	struct iwx_host_cmd hcmd = {
7136		.id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
7137		.len = { 0, },
7138		.data = { NULL, },
7139		.flags = 0,
7140	};
7141	struct iwx_scan_req_umac_v14 *cmd;
7142	struct iwx_scan_req_params_v14 *scan_p;
7143	int err, async = bgscan, n_ssid = 0;
7144	uint16_t gen_flags;
7145	uint32_t bitmap_ssid = 0;
7146
7147	cmd = malloc(sizeof(*cmd), M_DEVBUF,
7148	    (async ? M_NOWAIT : M_WAIT) | M_CANFAIL | M_ZERO);
7149	if (cmd == NULL)
7150		return ENOMEM;
7151
7152	scan_p = &cmd->scan_params;
7153
7154	cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
7155	cmd->uid = htole32(0);
7156
7157	gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
7158	iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
7159	    gen_flags, bgscan);
7160
7161	scan_p->periodic_params.schedule[0].interval = htole16(0);
7162	scan_p->periodic_params.schedule[0].iter_count = 1;
7163
7164	err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
7165	if (err) {
7166		free(cmd, M_DEVBUF, sizeof(*cmd));
7167		return err;
7168	}
7169
7170	if (ic->ic_des_esslen != 0) {
7171		scan_p->probe_params.direct_scan[0].id = IEEE80211_ELEMID_SSID;
7172		scan_p->probe_params.direct_scan[0].len = ic->ic_des_esslen;
7173		memcpy(scan_p->probe_params.direct_scan[0].ssid,
7174		    ic->ic_des_essid, ic->ic_des_esslen);
7175		bitmap_ssid |= (1 << 0);
7176		n_ssid = 1;
7177	}
7178
7179	iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
7180	    n_ssid);
7181
7182	hcmd.len[0] = sizeof(*cmd);
7183	hcmd.data[0] = (void *)cmd;
7184	hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
7185
7186	err = iwx_send_cmd(sc, &hcmd);
7187	free(cmd, M_DEVBUF, sizeof(*cmd));
7188	return err;
7189}
7190
7191void
7192iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
7193{
7194	struct ieee80211com *ic = &sc->sc_ic;
7195	struct ifnet *ifp = IC2IFP(ic);
7196	char alpha2[3];
7197
7198	snprintf(alpha2, sizeof(alpha2), "%c%c",
7199	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
7200
7201	if (ifp->if_flags & IFF_DEBUG) {
7202		printf("%s: firmware has detected regulatory domain '%s' "
7203		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
7204	}
7205
7206	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
7207}
7208
7209uint8_t
7210iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
7211{
7212	int i;
7213	uint8_t rval;
7214
7215	for (i = 0; i < rs->rs_nrates; i++) {
7216		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
7217		if (rval == iwx_rates[ridx].rate)
7218			return rs->rs_rates[i];
7219	}
7220
7221	return 0;
7222}
7223
7224int
7225iwx_rval2ridx(int rval)
7226{
7227	int ridx;
7228
7229	for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
7230		if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
7231			continue;
7232		if (rval == iwx_rates[ridx].rate)
7233			break;
7234	}
7235
7236       return ridx;
7237}
7238
7239void
7240iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
7241    int *ofdm_rates)
7242{
7243	struct ieee80211_node *ni = &in->in_ni;
7244	struct ieee80211_rateset *rs = &ni->ni_rates;
7245	int lowest_present_ofdm = -1;
7246	int lowest_present_cck = -1;
7247	uint8_t cck = 0;
7248	uint8_t ofdm = 0;
7249	int i;
7250
7251	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
7252	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
7253		for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
7254			if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7255				continue;
7256			cck |= (1 << i);
7257			if (lowest_present_cck == -1 || lowest_present_cck > i)
7258				lowest_present_cck = i;
7259		}
7260	}
7261	for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
7262		if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
7263			continue;
7264		ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
7265		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
7266			lowest_present_ofdm = i;
7267	}
7268
7269	/*
7270	 * Now we've got the basic rates as bitmaps in the ofdm and cck
7271	 * variables. This isn't sufficient though, as there might not
7272	 * be all the right rates in the bitmap. E.g. if the only basic
7273	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
7274	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
7275	 *
7276	 *    [...] a STA responding to a received frame shall transmit
7277	 *    its Control Response frame [...] at the highest rate in the
7278	 *    BSSBasicRateSet parameter that is less than or equal to the
7279	 *    rate of the immediately previous frame in the frame exchange
7280	 *    sequence ([...]) and that is of the same modulation class
7281	 *    ([...]) as the received frame. If no rate contained in the
7282	 *    BSSBasicRateSet parameter meets these conditions, then the
7283	 *    control frame sent in response to a received frame shall be
7284	 *    transmitted at the highest mandatory rate of the PHY that is
7285	 *    less than or equal to the rate of the received frame, and
7286	 *    that is of the same modulation class as the received frame.
7287	 *
7288	 * As a consequence, we need to add all mandatory rates that are
7289	 * lower than all of the basic rates to these bitmaps.
7290	 */
7291
7292	if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
7293		ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
7294	if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
7295		ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
7296	/* 6M already there or needed so always add */
7297	ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
7298
7299	/*
7300	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
7301	 * Note, however:
7302	 *  - if no CCK rates are basic, it must be ERP since there must
7303	 *    be some basic rates at all, so they're OFDM => ERP PHY
7304	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
7305	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
7306	 *  - if 5.5M is basic, 1M and 2M are mandatory
7307	 *  - if 2M is basic, 1M is mandatory
7308	 *  - if 1M is basic, that's the only valid ACK rate.
7309	 * As a consequence, it's not as complicated as it sounds, just add
7310	 * any lower rates to the ACK rate bitmap.
7311	 */
7312	if (IWX_RATE_11M_INDEX < lowest_present_cck)
7313		cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
7314	if (IWX_RATE_5M_INDEX < lowest_present_cck)
7315		cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
7316	if (IWX_RATE_2M_INDEX < lowest_present_cck)
7317		cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
7318	/* 1M already there or needed so always add */
7319	cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
7320
7321	*cck_rates = cck;
7322	*ofdm_rates = ofdm;
7323}
7324
7325void
7326iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
7327    struct iwx_mac_ctx_cmd *cmd, uint32_t action)
7328{
7329#define IWX_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
7330	struct ieee80211com *ic = &sc->sc_ic;
7331	struct ieee80211_node *ni = ic->ic_bss;
7332	int cck_ack_rates, ofdm_ack_rates;
7333	int i;
7334
7335	cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
7336	    in->in_color));
7337	cmd->action = htole32(action);
7338
7339	if (action == IWX_FW_CTXT_ACTION_REMOVE)
7340		return;
7341
7342	if (ic->ic_opmode == IEEE80211_M_MONITOR)
7343		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
7344	else if (ic->ic_opmode == IEEE80211_M_STA)
7345		cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
7346	else
7347		panic("unsupported operating mode %d", ic->ic_opmode);
7348	cmd->tsf_id = htole32(IWX_TSF_ID_A);
7349
7350	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
7351	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7352		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
7353		return;
7354	}
7355
7356	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
7357	iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
7358	cmd->cck_rates = htole32(cck_ack_rates);
7359	cmd->ofdm_rates = htole32(ofdm_ack_rates);
7360
7361	cmd->cck_short_preamble
7362	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
7363	      ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
7364	cmd->short_slot
7365	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
7366	      ? IWX_MAC_FLG_SHORT_SLOT : 0);
7367
7368	for (i = 0; i < EDCA_NUM_AC; i++) {
7369		struct ieee80211_edca_ac_params *ac = &ic->ic_edca_ac[i];
7370		int txf = iwx_ac_to_tx_fifo[i];
7371
7372		cmd->ac[txf].cw_min = htole16(IWX_EXP2(ac->ac_ecwmin));
7373		cmd->ac[txf].cw_max = htole16(IWX_EXP2(ac->ac_ecwmax));
7374		cmd->ac[txf].aifsn = ac->ac_aifsn;
7375		cmd->ac[txf].fifos_mask = (1 << txf);
7376		cmd->ac[txf].edca_txop = htole16(ac->ac_txoplimit * 32);
7377	}
7378	if (ni->ni_flags & IEEE80211_NODE_QOS)
7379		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
7380
7381	if (ni->ni_flags & IEEE80211_NODE_HT) {
7382		enum ieee80211_htprot htprot =
7383		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
7384		switch (htprot) {
7385		case IEEE80211_HTPROT_NONE:
7386			break;
7387		case IEEE80211_HTPROT_NONMEMBER:
7388		case IEEE80211_HTPROT_NONHT_MIXED:
7389			cmd->protection_flags |=
7390			    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7391			    IWX_MAC_PROT_FLG_FAT_PROT);
7392			break;
7393		case IEEE80211_HTPROT_20MHZ:
7394			if (in->in_phyctxt &&
7395			    (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7396			    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)) {
7397				cmd->protection_flags |=
7398				    htole32(IWX_MAC_PROT_FLG_HT_PROT |
7399				    IWX_MAC_PROT_FLG_FAT_PROT);
7400			}
7401			break;
7402		default:
7403			break;
7404		}
7405
7406		cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
7407	}
7408	if (ic->ic_flags & IEEE80211_F_USEPROT)
7409		cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
7410
7411	cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
7412#undef IWX_EXP2
7413}
7414
7415void
7416iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
7417    struct iwx_mac_data_sta *sta, int assoc)
7418{
7419	struct ieee80211_node *ni = &in->in_ni;
7420	uint32_t dtim_off;
7421	uint64_t tsf;
7422
7423	dtim_off = ni->ni_dtimcount * ni->ni_intval * IEEE80211_DUR_TU;
7424	memcpy(&tsf, ni->ni_tstamp, sizeof(tsf));
7425	tsf = letoh64(tsf);
7426
7427	sta->is_assoc = htole32(assoc);
7428	if (assoc) {
7429		sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
7430		sta->dtim_tsf = htole64(tsf + dtim_off);
7431		sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
7432	}
7433	sta->bi = htole32(ni->ni_intval);
7434	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtimperiod);
7435	sta->data_policy = htole32(0);
7436	sta->listen_interval = htole32(10);
7437	sta->assoc_id = htole32(ni->ni_associd);
7438}
7439
7440int
7441iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
7442    int assoc)
7443{
7444	struct ieee80211com *ic = &sc->sc_ic;
7445	struct ieee80211_node *ni = &in->in_ni;
7446	struct iwx_mac_ctx_cmd cmd;
7447	int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
7448
7449	if (action == IWX_FW_CTXT_ACTION_ADD && active)
7450		panic("MAC already added");
7451	if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
7452		panic("MAC already removed");
7453
7454	memset(&cmd, 0, sizeof(cmd));
7455
7456	iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
7457
7458	if (action == IWX_FW_CTXT_ACTION_REMOVE) {
7459		return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
7460		    sizeof(cmd), &cmd);
7461	}
7462
7463	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
7464		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
7465		    IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
7466		    IWX_MAC_FILTER_ACCEPT_GRP |
7467		    IWX_MAC_FILTER_IN_BEACON |
7468		    IWX_MAC_FILTER_IN_PROBE_REQUEST |
7469		    IWX_MAC_FILTER_IN_CRC32);
7470	} else if (!assoc || !ni->ni_associd || !ni->ni_dtimperiod) {
7471		/*
7472		 * Allow beacons to pass through as long as we are not
7473		 * associated or we do not have dtim period information.
7474		 */
7475		cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
7476	}
7477	iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
7478	return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
7479}
7480
7481int
7482iwx_clear_statistics(struct iwx_softc *sc)
7483{
7484	struct iwx_statistics_cmd scmd = {
7485		.flags = htole32(IWX_STATISTICS_FLG_CLEAR)
7486	};
7487	struct iwx_host_cmd cmd = {
7488		.id = IWX_STATISTICS_CMD,
7489		.len[0] = sizeof(scmd),
7490		.data[0] = &scmd,
7491		.flags = IWX_CMD_WANT_RESP,
7492		.resp_pkt_len = sizeof(struct iwx_notif_statistics),
7493	};
7494	int err;
7495
7496	err = iwx_send_cmd(sc, &cmd);
7497	if (err)
7498		return err;
7499
7500	iwx_free_resp(sc, &cmd);
7501	return 0;
7502}
7503
7504void
7505iwx_add_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7506{
7507	int s = splnet();
7508
7509	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
7510		splx(s);
7511		return;
7512	}
7513
7514	refcnt_take(&sc->task_refs);
7515	if (!task_add(taskq, task))
7516		refcnt_rele_wake(&sc->task_refs);
7517	splx(s);
7518}
7519
7520void
7521iwx_del_task(struct iwx_softc *sc, struct taskq *taskq, struct task *task)
7522{
7523	if (task_del(taskq, task))
7524		refcnt_rele(&sc->task_refs);
7525}
7526
7527int
7528iwx_scan(struct iwx_softc *sc)
7529{
7530	struct ieee80211com *ic = &sc->sc_ic;
7531	struct ifnet *ifp = IC2IFP(ic);
7532	int err;
7533
7534	if (sc->sc_flags & IWX_FLAG_BGSCAN) {
7535		err = iwx_scan_abort(sc);
7536		if (err) {
7537			printf("%s: could not abort background scan\n",
7538			    DEVNAME(sc));
7539			return err;
7540		}
7541	}
7542
7543	err = iwx_umac_scan_v14(sc, 0);
7544	if (err) {
7545		printf("%s: could not initiate scan\n", DEVNAME(sc));
7546		return err;
7547	}
7548
7549	/*
7550	 * The current mode might have been fixed during association.
7551	 * Ensure all channels get scanned.
7552	 */
7553	if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
7554		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
7555
7556	sc->sc_flags |= IWX_FLAG_SCANNING;
7557	if (ifp->if_flags & IFF_DEBUG)
7558		printf("%s: %s -> %s\n", ifp->if_xname,
7559		    ieee80211_state_name[ic->ic_state],
7560		    ieee80211_state_name[IEEE80211_S_SCAN]);
7561	if ((sc->sc_flags & IWX_FLAG_BGSCAN) == 0) {
7562		ieee80211_set_link_state(ic, LINK_STATE_DOWN);
7563		ieee80211_node_cleanup(ic, ic->ic_bss);
7564	}
7565	ic->ic_state = IEEE80211_S_SCAN;
7566	wakeup(&ic->ic_state); /* wake iwx_init() */
7567
7568	return 0;
7569}
7570
7571int
7572iwx_bgscan(struct ieee80211com *ic)
7573{
7574	struct iwx_softc *sc = IC2IFP(ic)->if_softc;
7575	int err;
7576
7577	if (sc->sc_flags & IWX_FLAG_SCANNING)
7578		return 0;
7579
7580	err = iwx_umac_scan_v14(sc, 1);
7581	if (err) {
7582		printf("%s: could not initiate scan\n", DEVNAME(sc));
7583		return err;
7584	}
7585
7586	sc->sc_flags |= IWX_FLAG_BGSCAN;
7587	return 0;
7588}
7589
7590void
7591iwx_bgscan_done(struct ieee80211com *ic,
7592    struct ieee80211_node_switch_bss_arg *arg, size_t arg_size)
7593{
7594	struct iwx_softc *sc = ic->ic_softc;
7595
7596	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7597	sc->bgscan_unref_arg = arg;
7598	sc->bgscan_unref_arg_size = arg_size;
7599	iwx_add_task(sc, systq, &sc->bgscan_done_task);
7600}
7601
7602void
7603iwx_bgscan_done_task(void *arg)
7604{
7605	struct iwx_softc *sc = arg;
7606	struct ieee80211com *ic = &sc->sc_ic;
7607	struct iwx_node *in = (void *)ic->ic_bss;
7608	struct ieee80211_node *ni = &in->in_ni;
7609	int tid, err = 0, s = splnet();
7610
7611	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) ||
7612	    (ic->ic_flags & IEEE80211_F_BGSCAN) == 0 ||
7613	    ic->ic_state != IEEE80211_S_RUN) {
7614		err = ENXIO;
7615		goto done;
7616	}
7617
7618	err = iwx_flush_sta(sc, in);
7619	if (err)
7620		goto done;
7621
7622	for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
7623		int qid = IWX_FIRST_AGG_TX_QUEUE + tid;
7624
7625		if (sc->aggqid[tid] == 0)
7626			continue;
7627
7628		err = iwx_disable_txq(sc, IWX_STATION_ID, qid, tid);
7629		if (err)
7630			goto done;
7631#if 0 /* disabled for now; we are going to DEAUTH soon anyway */
7632		IEEE80211_SEND_ACTION(ic, ni, IEEE80211_CATEG_BA,
7633		    IEEE80211_ACTION_DELBA,
7634		    IEEE80211_REASON_AUTH_LEAVE << 16 |
7635		    IEEE80211_FC1_DIR_TODS << 8 | tid);
7636#endif
7637		ieee80211_node_tx_ba_clear(ni, tid);
7638		sc->aggqid[tid] = 0;
7639	}
7640
7641	/*
7642	 * Tx queues have been flushed and Tx agg has been stopped.
7643	 * Allow roaming to proceed.
7644	 */
7645	ni->ni_unref_arg = sc->bgscan_unref_arg;
7646	ni->ni_unref_arg_size = sc->bgscan_unref_arg_size;
7647	sc->bgscan_unref_arg = NULL;
7648	sc->bgscan_unref_arg_size = 0;
7649	ieee80211_node_tx_stopped(ic, &in->in_ni);
7650done:
7651	if (err) {
7652		free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
7653		sc->bgscan_unref_arg = NULL;
7654		sc->bgscan_unref_arg_size = 0;
7655		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
7656			task_add(systq, &sc->init_task);
7657	}
7658	refcnt_rele_wake(&sc->task_refs);
7659	splx(s);
7660}
7661
7662int
7663iwx_umac_scan_abort(struct iwx_softc *sc)
7664{
7665	struct iwx_umac_scan_abort cmd = { 0 };
7666
7667	return iwx_send_cmd_pdu(sc,
7668	    IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC),
7669	    0, sizeof(cmd), &cmd);
7670}
7671
7672int
7673iwx_scan_abort(struct iwx_softc *sc)
7674{
7675	int err;
7676
7677	err = iwx_umac_scan_abort(sc);
7678	if (err == 0)
7679		sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
7680	return err;
7681}
7682
7683int
7684iwx_enable_mgmt_queue(struct iwx_softc *sc)
7685{
7686	int err;
7687
7688	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7689
7690	/*
7691	 * Non-QoS frames use the "MGMT" TID and queue.
7692	 * Other TIDs and data queues are reserved for QoS data frames.
7693	 */
7694	err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7695	    IWX_MGMT_TID, IWX_TX_RING_COUNT);
7696	if (err) {
7697		printf("%s: could not enable Tx queue %d (error %d)\n",
7698		    DEVNAME(sc), sc->first_data_qid, err);
7699		return err;
7700	}
7701
7702	return 0;
7703}
7704
7705int
7706iwx_disable_mgmt_queue(struct iwx_softc *sc)
7707{
7708	int err, cmd_ver;
7709
7710	/* Explicit removal is only required with old SCD_QUEUE_CFG command. */
7711	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7712	    IWX_SCD_QUEUE_CONFIG_CMD);
7713	if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
7714		return 0;
7715
7716	sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
7717
7718	err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
7719	    IWX_MGMT_TID);
7720	if (err) {
7721		printf("%s: could not disable Tx queue %d (error %d)\n",
7722		    DEVNAME(sc), sc->first_data_qid, err);
7723		return err;
7724	}
7725
7726	return 0;
7727}
7728
7729int
7730iwx_rs_rval2idx(uint8_t rval)
7731{
7732	/* Firmware expects indices which match our 11g rate set. */
7733	const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
7734	int i;
7735
7736	for (i = 0; i < rs->rs_nrates; i++) {
7737		if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
7738			return i;
7739	}
7740
7741	return -1;
7742}
7743
7744uint16_t
7745iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
7746{
7747	struct ieee80211com *ic = &sc->sc_ic;
7748	const struct ieee80211_ht_rateset *rs;
7749	uint16_t htrates = 0;
7750	int mcs;
7751
7752	rs = &ieee80211_std_ratesets_11n[rsidx];
7753	for (mcs = rs->min_mcs; mcs <= rs->max_mcs; mcs++) {
7754		if (!isset(ni->ni_rxmcs, mcs) ||
7755		    !isset(ic->ic_sup_mcs, mcs))
7756			continue;
7757		htrates |= (1 << (mcs - rs->min_mcs));
7758	}
7759
7760	return htrates;
7761}
7762
7763uint16_t
7764iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
7765{
7766	uint16_t rx_mcs;
7767	int max_mcs = -1;
7768
7769	rx_mcs = (ni->ni_vht_rxmcs & IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
7770	    IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
7771	switch (rx_mcs) {
7772	case IEEE80211_VHT_MCS_SS_NOT_SUPP:
7773		break;
7774	case IEEE80211_VHT_MCS_0_7:
7775		max_mcs = 7;
7776		break;
7777	case IEEE80211_VHT_MCS_0_8:
7778		max_mcs = 8;
7779		break;
7780	case IEEE80211_VHT_MCS_0_9:
7781		/* Disable VHT MCS 9 for 20MHz-only stations. */
7782		if (!ieee80211_node_supports_ht_chan40(ni))
7783			max_mcs = 8;
7784		else
7785			max_mcs = 9;
7786		break;
7787	default:
7788		/* Should not happen; Values above cover the possible range. */
7789		panic("invalid VHT Rx MCS value %u", rx_mcs);
7790	}
7791
7792	return ((1 << (max_mcs + 1)) - 1);
7793}
7794
7795int
7796iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
7797{
7798	struct ieee80211_node *ni = &in->in_ni;
7799	struct ieee80211_rateset *rs = &ni->ni_rates;
7800	struct iwx_tlc_config_cmd_v3 cfg_cmd;
7801	uint32_t cmd_id;
7802	int i;
7803	size_t cmd_size = sizeof(cfg_cmd);
7804
7805	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
7806
7807	for (i = 0; i < rs->rs_nrates; i++) {
7808		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7809		int idx = iwx_rs_rval2idx(rval);
7810		if (idx == -1)
7811			return EINVAL;
7812		cfg_cmd.non_ht_rates |= (1 << idx);
7813	}
7814
7815	if (ni->ni_flags & IEEE80211_NODE_VHT) {
7816		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
7817		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7818		    htole16(iwx_rs_vht_rates(sc, ni, 1));
7819		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7820		    htole16(iwx_rs_vht_rates(sc, ni, 2));
7821	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7822		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
7823		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7824		    htole16(iwx_rs_ht_rates(sc, ni,
7825		    IEEE80211_HT_RATESET_SISO));
7826		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7827		    htole16(iwx_rs_ht_rates(sc, ni,
7828		    IEEE80211_HT_RATESET_MIMO2));
7829	} else
7830		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
7831
7832	cfg_cmd.sta_id = IWX_STATION_ID;
7833	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
7834		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
7835	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7836	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
7837		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
7838	else
7839		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
7840	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
7841	if (ni->ni_flags & IEEE80211_NODE_VHT)
7842		cfg_cmd.max_mpdu_len = htole16(3895);
7843	else
7844		cfg_cmd.max_mpdu_len = htole16(3839);
7845	if (ni->ni_flags & IEEE80211_NODE_HT) {
7846		if (ieee80211_node_supports_ht_sgi20(ni)) {
7847			cfg_cmd.sgi_ch_width_supp |= (1 <<
7848			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
7849		}
7850		if (ieee80211_node_supports_ht_sgi40(ni)) {
7851			cfg_cmd.sgi_ch_width_supp |= (1 <<
7852			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
7853		}
7854	}
7855	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7856	    ieee80211_node_supports_vht_sgi80(ni))
7857		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
7858
7859	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
7860	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
7861}
7862
7863int
7864iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
7865{
7866	struct ieee80211_node *ni = &in->in_ni;
7867	struct ieee80211_rateset *rs = &ni->ni_rates;
7868	struct iwx_tlc_config_cmd_v4 cfg_cmd;
7869	uint32_t cmd_id;
7870	int i;
7871	size_t cmd_size = sizeof(cfg_cmd);
7872
7873	memset(&cfg_cmd, 0, sizeof(cfg_cmd));
7874
7875	for (i = 0; i < rs->rs_nrates; i++) {
7876		uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
7877		int idx = iwx_rs_rval2idx(rval);
7878		if (idx == -1)
7879			return EINVAL;
7880		cfg_cmd.non_ht_rates |= (1 << idx);
7881	}
7882
7883	if (ni->ni_flags & IEEE80211_NODE_VHT) {
7884		cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
7885		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7886		    htole16(iwx_rs_vht_rates(sc, ni, 1));
7887		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7888		    htole16(iwx_rs_vht_rates(sc, ni, 2));
7889	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
7890		cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
7891		cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
7892		    htole16(iwx_rs_ht_rates(sc, ni,
7893		    IEEE80211_HT_RATESET_SISO));
7894		cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
7895		    htole16(iwx_rs_ht_rates(sc, ni,
7896		    IEEE80211_HT_RATESET_MIMO2));
7897	} else
7898		cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
7899
7900	cfg_cmd.sta_id = IWX_STATION_ID;
7901	if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
7902		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
7903	else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
7904	    in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
7905		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
7906	else
7907		cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
7908	cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
7909	if (ni->ni_flags & IEEE80211_NODE_VHT)
7910		cfg_cmd.max_mpdu_len = htole16(3895);
7911	else
7912		cfg_cmd.max_mpdu_len = htole16(3839);
7913	if (ni->ni_flags & IEEE80211_NODE_HT) {
7914		if (ieee80211_node_supports_ht_sgi20(ni)) {
7915			cfg_cmd.sgi_ch_width_supp |= (1 <<
7916			    IWX_TLC_MNG_CH_WIDTH_20MHZ);
7917		}
7918		if (ieee80211_node_supports_ht_sgi40(ni)) {
7919			cfg_cmd.sgi_ch_width_supp |= (1 <<
7920			    IWX_TLC_MNG_CH_WIDTH_40MHZ);
7921		}
7922	}
7923	if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
7924	    ieee80211_node_supports_vht_sgi80(ni))
7925		cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
7926
7927	cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
7928	return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
7929}
7930
7931int
7932iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
7933{
7934	int cmd_ver;
7935
7936	cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
7937	    IWX_TLC_MNG_CONFIG_CMD);
7938	if (cmd_ver == 4)
7939		return iwx_rs_init_v4(sc, in);
7940	return iwx_rs_init_v3(sc, in);
7941}
7942
7943void
7944iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
7945{
7946	struct ieee80211com *ic = &sc->sc_ic;
7947	struct ieee80211_node *ni = ic->ic_bss;
7948	struct ieee80211_rateset *rs = &ni->ni_rates;
7949	uint32_t rate_n_flags;
7950	uint8_t plcp, rval;
7951	int i, cmd_ver, rate_n_flags_ver2 = 0;
7952
7953	if (notif->sta_id != IWX_STATION_ID ||
7954	    (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
7955		return;
7956
7957	rate_n_flags = le32toh(notif->rate);
7958
7959	cmd_ver = iwx_lookup_notif_ver(sc, IWX_DATA_PATH_GROUP,
7960	    IWX_TLC_MNG_UPDATE_NOTIF);
7961	if (cmd_ver != IWX_FW_CMD_VER_UNKNOWN && cmd_ver >= 3)
7962		rate_n_flags_ver2 = 1;
7963	if (rate_n_flags_ver2) {
7964		uint32_t mod_type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
7965		if (mod_type == IWX_RATE_MCS_VHT_MSK) {
7966			ni->ni_txmcs = (rate_n_flags &
7967			    IWX_RATE_HT_MCS_CODE_MSK);
7968			ni->ni_vht_ss = ((rate_n_flags &
7969			    IWX_RATE_MCS_NSS_MSK) >>
7970			    IWX_RATE_MCS_NSS_POS) + 1;
7971			return;
7972		} else if (mod_type == IWX_RATE_MCS_HT_MSK) {
7973			ni->ni_txmcs = IWX_RATE_HT_MCS_INDEX(rate_n_flags);
7974			return;
7975		}
7976	} else {
7977		if (rate_n_flags & IWX_RATE_MCS_VHT_MSK_V1) {
7978			ni->ni_txmcs = (rate_n_flags &
7979			    IWX_RATE_VHT_MCS_RATE_CODE_MSK);
7980			ni->ni_vht_ss = ((rate_n_flags &
7981			    IWX_RATE_VHT_MCS_NSS_MSK) >>
7982			    IWX_RATE_VHT_MCS_NSS_POS) + 1;
7983			return;
7984		} else if (rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) {
7985			ni->ni_txmcs = (rate_n_flags &
7986			    (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
7987			    IWX_RATE_HT_MCS_NSS_MSK_V1));
7988			return;
7989		}
7990	}
7991
7992	if (rate_n_flags_ver2) {
7993		const struct ieee80211_rateset *rs;
7994		uint32_t ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
7995		if (rate_n_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK)
7996			rs = &ieee80211_std_rateset_11a;
7997		else
7998			rs = &ieee80211_std_rateset_11b;
7999		if (ridx < rs->rs_nrates)
8000			rval = (rs->rs_rates[ridx] & IEEE80211_RATE_VAL);
8001		else
8002			rval = 0;
8003	} else {
8004		plcp = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
8005
8006		rval = 0;
8007		for (i = IWX_RATE_1M_INDEX; i < nitems(iwx_rates); i++) {
8008			if (iwx_rates[i].plcp == plcp) {
8009				rval = iwx_rates[i].rate;
8010				break;
8011			}
8012		}
8013	}
8014
8015	if (rval) {
8016		uint8_t rv;
8017		for (i = 0; i < rs->rs_nrates; i++) {
8018			rv = rs->rs_rates[i] & IEEE80211_RATE_VAL;
8019			if (rv == rval) {
8020				ni->ni_txrate = i;
8021				break;
8022			}
8023		}
8024	}
8025}
8026
8027int
8028iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
8029    uint8_t chains_static, uint8_t chains_dynamic)
8030{
8031	struct iwx_rlc_config_cmd cmd;
8032	uint32_t cmd_id;
8033	uint8_t active_cnt, idle_cnt;
8034
8035	memset(&cmd, 0, sizeof(cmd));
8036
8037	idle_cnt = chains_static;
8038	active_cnt = chains_dynamic;
8039
8040	cmd.phy_id = htole32(phyctxt->id);
8041	cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
8042	    IWX_PHY_RX_CHAIN_VALID_POS);
8043	cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
8044	cmd.rlc.rx_chain_info |= htole32(active_cnt <<
8045	    IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
8046
8047	cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
8048	return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8049}
8050
8051int
8052iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
8053    struct ieee80211_channel *chan, uint8_t chains_static,
8054    uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
8055    uint8_t vht_chan_width)
8056{
8057	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
8058	int err;
8059
8060	if (isset(sc->sc_enabled_capa,
8061	    IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
8062	    (phyctxt->channel->ic_flags & band_flags) !=
8063	    (chan->ic_flags & band_flags)) {
8064		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8065		    chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
8066		    vht_chan_width);
8067		if (err) {
8068			printf("%s: could not remove PHY context "
8069			    "(error %d)\n", DEVNAME(sc), err);
8070			return err;
8071		}
8072		phyctxt->channel = chan;
8073		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8074		    chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
8075		    vht_chan_width);
8076		if (err) {
8077			printf("%s: could not add PHY context "
8078			    "(error %d)\n", DEVNAME(sc), err);
8079			return err;
8080		}
8081	} else {
8082		phyctxt->channel = chan;
8083		err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
8084		    chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
8085		    vht_chan_width);
8086		if (err) {
8087			printf("%s: could not update PHY context (error %d)\n",
8088			    DEVNAME(sc), err);
8089			return err;
8090		}
8091	}
8092
8093	phyctxt->sco = sco;
8094	phyctxt->vht_chan_width = vht_chan_width;
8095
8096	if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
8097	    IWX_RLC_CONFIG_CMD) == 2)
8098		return iwx_phy_send_rlc(sc, phyctxt,
8099		    chains_static, chains_dynamic);
8100
8101	return 0;
8102}
8103
8104int
8105iwx_auth(struct iwx_softc *sc)
8106{
8107	struct ieee80211com *ic = &sc->sc_ic;
8108	struct iwx_node *in = (void *)ic->ic_bss;
8109	uint32_t duration;
8110	int generation = sc->sc_generation, err;
8111
8112	splassert(IPL_NET);
8113
8114	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8115		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8116		    ic->ic_ibss_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8117		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8118		if (err)
8119			return err;
8120	} else {
8121		err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8122		    in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8123		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8124		if (err)
8125			return err;
8126	}
8127	in->in_phyctxt = &sc->sc_phyctxt[0];
8128	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
8129
8130	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
8131	if (err) {
8132		printf("%s: could not add MAC context (error %d)\n",
8133		    DEVNAME(sc), err);
8134		return err;
8135 	}
8136	sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
8137
8138	err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
8139	if (err) {
8140		printf("%s: could not add binding (error %d)\n",
8141		    DEVNAME(sc), err);
8142		goto rm_mac_ctxt;
8143	}
8144	sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
8145
8146	err = iwx_add_sta_cmd(sc, in, 0);
8147	if (err) {
8148		printf("%s: could not add sta (error %d)\n",
8149		    DEVNAME(sc), err);
8150		goto rm_binding;
8151	}
8152	sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
8153
8154	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8155		err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
8156		    IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
8157		    IWX_TX_RING_COUNT);
8158		if (err)
8159			goto rm_sta;
8160		return 0;
8161	}
8162
8163	err = iwx_enable_mgmt_queue(sc);
8164	if (err)
8165		goto rm_sta;
8166
8167	err = iwx_clear_statistics(sc);
8168	if (err)
8169		goto rm_mgmt_queue;
8170
8171	/*
8172	 * Prevent the FW from wandering off channel during association
8173	 * by "protecting" the session with a time event.
8174	 */
8175	if (in->in_ni.ni_intval)
8176		duration = in->in_ni.ni_intval * 9;
8177	else
8178		duration = 900;
8179	return iwx_schedule_session_protection(sc, in, duration);
8180rm_mgmt_queue:
8181	if (generation == sc->sc_generation)
8182		iwx_disable_mgmt_queue(sc);
8183rm_sta:
8184	if (generation == sc->sc_generation) {
8185		iwx_rm_sta_cmd(sc, in);
8186		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8187	}
8188rm_binding:
8189	if (generation == sc->sc_generation) {
8190		iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
8191		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8192	}
8193rm_mac_ctxt:
8194	if (generation == sc->sc_generation) {
8195		iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
8196		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8197	}
8198	return err;
8199}
8200
8201int
8202iwx_deauth(struct iwx_softc *sc)
8203{
8204	struct ieee80211com *ic = &sc->sc_ic;
8205	struct iwx_node *in = (void *)ic->ic_bss;
8206	int err;
8207
8208	splassert(IPL_NET);
8209
8210	iwx_unprotect_session(sc, in);
8211
8212	if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
8213		err = iwx_rm_sta(sc, in);
8214		if (err)
8215			return err;
8216		sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
8217	}
8218
8219	if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
8220		err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
8221		if (err) {
8222			printf("%s: could not remove binding (error %d)\n",
8223			    DEVNAME(sc), err);
8224			return err;
8225		}
8226		sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
8227	}
8228
8229	if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
8230		err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
8231		if (err) {
8232			printf("%s: could not remove MAC context (error %d)\n",
8233			    DEVNAME(sc), err);
8234			return err;
8235		}
8236		sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
8237	}
8238
8239	/* Move unused PHY context to a default channel. */
8240	err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
8241	    &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
8242	    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8243	if (err)
8244		return err;
8245
8246	return 0;
8247}
8248
8249int
8250iwx_run(struct iwx_softc *sc)
8251{
8252	struct ieee80211com *ic = &sc->sc_ic;
8253	struct iwx_node *in = (void *)ic->ic_bss;
8254	struct ieee80211_node *ni = &in->in_ni;
8255	int err;
8256
8257	splassert(IPL_NET);
8258
8259	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8260		/* Add a MAC context and a sniffing STA. */
8261		err = iwx_auth(sc);
8262		if (err)
8263			return err;
8264	}
8265
8266	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
8267	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
8268		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
8269		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8270		    in->in_phyctxt->channel, chains, chains,
8271		    0, IEEE80211_HTOP0_SCO_SCN,
8272		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
8273		if (err) {
8274			printf("%s: failed to update PHY\n", DEVNAME(sc));
8275			return err;
8276		}
8277	} else if (ni->ni_flags & IEEE80211_NODE_HT) {
8278		uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
8279		uint8_t sco, vht_chan_width;
8280		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
8281		    ieee80211_node_supports_ht_chan40(ni))
8282			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
8283		else
8284			sco = IEEE80211_HTOP0_SCO_SCN;
8285		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
8286		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
8287		    ieee80211_node_supports_vht_chan80(ni))
8288			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
8289		else
8290			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
8291		err = iwx_phy_ctxt_update(sc, in->in_phyctxt,
8292		    in->in_phyctxt->channel, chains, chains,
8293		    0, sco, vht_chan_width);
8294		if (err) {
8295			printf("%s: failed to update PHY\n", DEVNAME(sc));
8296			return err;
8297		}
8298	}
8299
8300	/* Update STA again to apply HT and VHT settings. */
8301	err = iwx_add_sta_cmd(sc, in, 1);
8302	if (err) {
8303		printf("%s: could not update STA (error %d)\n",
8304		    DEVNAME(sc), err);
8305		return err;
8306	}
8307
8308	/* We have now been assigned an associd by the AP. */
8309	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
8310	if (err) {
8311		printf("%s: failed to update MAC\n", DEVNAME(sc));
8312		return err;
8313	}
8314
8315	err = iwx_sf_config(sc, IWX_SF_FULL_ON);
8316	if (err) {
8317		printf("%s: could not set sf full on (error %d)\n",
8318		    DEVNAME(sc), err);
8319		return err;
8320	}
8321
8322	err = iwx_allow_mcast(sc);
8323	if (err) {
8324		printf("%s: could not allow mcast (error %d)\n",
8325		    DEVNAME(sc), err);
8326		return err;
8327	}
8328
8329	err = iwx_power_update_device(sc);
8330	if (err) {
8331		printf("%s: could not send power command (error %d)\n",
8332		    DEVNAME(sc), err);
8333		return err;
8334	}
8335#ifdef notyet
8336	/*
8337	 * Disabled for now. Default beacon filter settings
8338	 * prevent net80211 from getting ERP and HT protection
8339	 * updates from beacons.
8340	 */
8341	err = iwx_enable_beacon_filter(sc, in);
8342	if (err) {
8343		printf("%s: could not enable beacon filter\n",
8344		    DEVNAME(sc));
8345		return err;
8346	}
8347#endif
8348	err = iwx_power_mac_update_mode(sc, in);
8349	if (err) {
8350		printf("%s: could not update MAC power (error %d)\n",
8351		    DEVNAME(sc), err);
8352		return err;
8353	}
8354
8355	if (ic->ic_opmode == IEEE80211_M_MONITOR)
8356		return 0;
8357
8358	/* Start at lowest available bit-rate. Firmware will raise. */
8359	in->in_ni.ni_txrate = 0;
8360	in->in_ni.ni_txmcs = 0;
8361
8362	err = iwx_rs_init(sc, in);
8363	if (err) {
8364		printf("%s: could not init rate scaling (error %d)\n",
8365		    DEVNAME(sc), err);
8366		return err;
8367	}
8368
8369	return 0;
8370}
8371
8372int
8373iwx_run_stop(struct iwx_softc *sc)
8374{
8375	struct ieee80211com *ic = &sc->sc_ic;
8376	struct iwx_node *in = (void *)ic->ic_bss;
8377	struct ieee80211_node *ni = &in->in_ni;
8378	int err, i;
8379
8380	splassert(IPL_NET);
8381
8382	err = iwx_flush_sta(sc, in);
8383	if (err) {
8384		printf("%s: could not flush Tx path (error %d)\n",
8385		    DEVNAME(sc), err);
8386		return err;
8387	}
8388
8389	/*
8390	 * Stop Rx BA sessions now. We cannot rely on the BA task
8391	 * for this when moving out of RUN state since it runs in a
8392	 * separate thread.
8393	 * Note that in->in_ni (struct ieee80211_node) already represents
8394	 * our new access point in case we are roaming between APs.
8395	 * This means we cannot rely on struct ieee802111_node to tell
8396	 * us which BA sessions exist.
8397	 */
8398	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
8399		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
8400		if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
8401			continue;
8402		iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
8403	}
8404
8405	err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
8406	if (err)
8407		return err;
8408
8409	err = iwx_disable_beacon_filter(sc);
8410	if (err) {
8411		printf("%s: could not disable beacon filter (error %d)\n",
8412		    DEVNAME(sc), err);
8413		return err;
8414	}
8415
8416	/* Mark station as disassociated. */
8417	err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
8418	if (err) {
8419		printf("%s: failed to update MAC\n", DEVNAME(sc));
8420		return err;
8421	}
8422
8423	return 0;
8424}
8425
8426struct ieee80211_node *
8427iwx_node_alloc(struct ieee80211com *ic)
8428{
8429	return malloc(sizeof (struct iwx_node), M_DEVBUF, M_NOWAIT | M_ZERO);
8430}
8431
8432int
8433iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8434    struct ieee80211_key *k)
8435{
8436	struct iwx_softc *sc = ic->ic_softc;
8437	struct iwx_node *in = (void *)ni;
8438	struct iwx_setkey_task_arg *a;
8439	int err;
8440
8441	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8442		/* Fallback to software crypto for other ciphers. */
8443		err = ieee80211_set_key(ic, ni, k);
8444		if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
8445			in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8446		return err;
8447	}
8448
8449	if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
8450		return ENOSPC;
8451
8452	a = &sc->setkey_arg[sc->setkey_cur];
8453	a->sta_id = IWX_STATION_ID;
8454	a->ni = ni;
8455	a->k = k;
8456	sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
8457	sc->setkey_nkeys++;
8458	iwx_add_task(sc, systq, &sc->setkey_task);
8459	return EBUSY;
8460}
8461
8462int
8463iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
8464    struct ieee80211_key *k)
8465{
8466	struct ieee80211com *ic = &sc->sc_ic;
8467	struct iwx_node *in = (void *)ni;
8468	struct iwx_add_sta_key_cmd cmd;
8469	uint32_t status;
8470	const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
8471	    IWX_NODE_FLAG_HAVE_GROUP_KEY);
8472	int err;
8473
8474	/*
8475	 * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
8476	 * Currently we only implement station mode where 'ni' is always
8477	 * ic->ic_bss so there is no need to validate arguments beyond this:
8478	 */
8479	KASSERT(ni == ic->ic_bss);
8480
8481	memset(&cmd, 0, sizeof(cmd));
8482
8483	cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
8484	    IWX_STA_KEY_FLG_WEP_KEY_MAP |
8485	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8486	    IWX_STA_KEY_FLG_KEYID_MSK));
8487	if (k->k_flags & IEEE80211_KEY_GROUP) {
8488		cmd.common.key_offset = 1;
8489		cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
8490	} else
8491		cmd.common.key_offset = 0;
8492
8493	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8494	cmd.common.sta_id = sta_id;
8495
8496	cmd.transmit_seq_cnt = htole64(k->k_tsc);
8497
8498	status = IWX_ADD_STA_SUCCESS;
8499	err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
8500	    &status);
8501	if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
8502		return ECANCELED;
8503	if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
8504		err = EIO;
8505	if (err) {
8506		IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
8507		    IEEE80211_REASON_AUTH_LEAVE);
8508		ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
8509		return err;
8510	}
8511
8512	if (k->k_flags & IEEE80211_KEY_GROUP)
8513		in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
8514	else
8515		in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
8516
8517	if ((in->in_flags & want_keymask) == want_keymask) {
8518		DPRINTF(("marking port %s valid\n",
8519		    ether_sprintf(ni->ni_macaddr)));
8520		ni->ni_port_valid = 1;
8521		ieee80211_set_link_state(ic, LINK_STATE_UP);
8522	}
8523
8524	return 0;
8525}
8526
8527void
8528iwx_setkey_task(void *arg)
8529{
8530	struct iwx_softc *sc = arg;
8531	struct iwx_setkey_task_arg *a;
8532	int err = 0, s = splnet();
8533
8534	while (sc->setkey_nkeys > 0) {
8535		if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
8536			break;
8537		a = &sc->setkey_arg[sc->setkey_tail];
8538		err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
8539		a->sta_id = 0;
8540		a->ni = NULL;
8541		a->k = NULL;
8542		sc->setkey_tail = (sc->setkey_tail + 1) %
8543		    nitems(sc->setkey_arg);
8544		sc->setkey_nkeys--;
8545	}
8546
8547	refcnt_rele_wake(&sc->task_refs);
8548	splx(s);
8549}
8550
8551void
8552iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
8553    struct ieee80211_key *k)
8554{
8555	struct iwx_softc *sc = ic->ic_softc;
8556	struct iwx_add_sta_key_cmd cmd;
8557
8558	if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
8559		/* Fallback to software crypto for other ciphers. */
8560                ieee80211_delete_key(ic, ni, k);
8561		return;
8562	}
8563
8564	if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
8565		return;
8566
8567	memset(&cmd, 0, sizeof(cmd));
8568
8569	cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
8570	    IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
8571	    ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
8572	    IWX_STA_KEY_FLG_KEYID_MSK));
8573	memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
8574	if (k->k_flags & IEEE80211_KEY_GROUP)
8575		cmd.common.key_offset = 1;
8576	else
8577		cmd.common.key_offset = 0;
8578	cmd.common.sta_id = IWX_STATION_ID;
8579
8580	iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
8581}
8582
8583int
8584iwx_media_change(struct ifnet *ifp)
8585{
8586	int err;
8587
8588	err = ieee80211_media_change(ifp);
8589	if (err != ENETRESET)
8590		return err;
8591
8592	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
8593	    (IFF_UP | IFF_RUNNING)) {
8594		iwx_stop(ifp);
8595		err = iwx_init(ifp);
8596	}
8597	return err;
8598}
8599
8600void
8601iwx_newstate_task(void *psc)
8602{
8603	struct iwx_softc *sc = (struct iwx_softc *)psc;
8604	struct ieee80211com *ic = &sc->sc_ic;
8605	enum ieee80211_state nstate = sc->ns_nstate;
8606	enum ieee80211_state ostate = ic->ic_state;
8607	int arg = sc->ns_arg;
8608	int err = 0, s = splnet();
8609
8610	if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8611		/* iwx_stop() is waiting for us. */
8612		refcnt_rele_wake(&sc->task_refs);
8613		splx(s);
8614		return;
8615	}
8616
8617	if (ostate == IEEE80211_S_SCAN) {
8618		if (nstate == ostate) {
8619			if (sc->sc_flags & IWX_FLAG_SCANNING) {
8620				refcnt_rele_wake(&sc->task_refs);
8621				splx(s);
8622				return;
8623			}
8624			/* Firmware is no longer scanning. Do another scan. */
8625			goto next_scan;
8626		}
8627	}
8628
8629	if (nstate <= ostate) {
8630		switch (ostate) {
8631		case IEEE80211_S_RUN:
8632			err = iwx_run_stop(sc);
8633			if (err)
8634				goto out;
8635			/* FALLTHROUGH */
8636		case IEEE80211_S_ASSOC:
8637		case IEEE80211_S_AUTH:
8638			if (nstate <= IEEE80211_S_AUTH) {
8639				err = iwx_deauth(sc);
8640				if (err)
8641					goto out;
8642			}
8643			/* FALLTHROUGH */
8644		case IEEE80211_S_SCAN:
8645		case IEEE80211_S_INIT:
8646			break;
8647		}
8648
8649		/* Die now if iwx_stop() was called while we were sleeping. */
8650		if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
8651			refcnt_rele_wake(&sc->task_refs);
8652			splx(s);
8653			return;
8654		}
8655	}
8656
8657	switch (nstate) {
8658	case IEEE80211_S_INIT:
8659		break;
8660
8661	case IEEE80211_S_SCAN:
8662next_scan:
8663		err = iwx_scan(sc);
8664		if (err)
8665			break;
8666		refcnt_rele_wake(&sc->task_refs);
8667		splx(s);
8668		return;
8669
8670	case IEEE80211_S_AUTH:
8671		err = iwx_auth(sc);
8672		break;
8673
8674	case IEEE80211_S_ASSOC:
8675		break;
8676
8677	case IEEE80211_S_RUN:
8678		err = iwx_run(sc);
8679		break;
8680	}
8681
8682out:
8683	if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
8684		if (err)
8685			task_add(systq, &sc->init_task);
8686		else
8687			sc->sc_newstate(ic, nstate, arg);
8688	}
8689	refcnt_rele_wake(&sc->task_refs);
8690	splx(s);
8691}
8692
8693int
8694iwx_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
8695{
8696	struct ifnet *ifp = IC2IFP(ic);
8697	struct iwx_softc *sc = ifp->if_softc;
8698
8699	/*
8700	 * Prevent attempts to transition towards the same state, unless
8701	 * we are scanning in which case a SCAN -> SCAN transition
8702	 * triggers another scan iteration. And AUTH -> AUTH is needed
8703	 * to support band-steering.
8704	 */
8705	if (sc->ns_nstate == nstate && nstate != IEEE80211_S_SCAN &&
8706	    nstate != IEEE80211_S_AUTH)
8707		return 0;
8708
8709	if (ic->ic_state == IEEE80211_S_RUN) {
8710		iwx_del_task(sc, systq, &sc->ba_task);
8711		iwx_del_task(sc, systq, &sc->setkey_task);
8712		memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
8713		sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
8714		iwx_del_task(sc, systq, &sc->mac_ctxt_task);
8715		iwx_del_task(sc, systq, &sc->phy_ctxt_task);
8716		iwx_del_task(sc, systq, &sc->bgscan_done_task);
8717	}
8718
8719	sc->ns_nstate = nstate;
8720	sc->ns_arg = arg;
8721
8722	iwx_add_task(sc, sc->sc_nswq, &sc->newstate_task);
8723
8724	return 0;
8725}
8726
8727void
8728iwx_endscan(struct iwx_softc *sc)
8729{
8730	struct ieee80211com *ic = &sc->sc_ic;
8731
8732	if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
8733		return;
8734
8735	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
8736	ieee80211_end_scan(&ic->ic_if);
8737}
8738
8739/*
8740 * Aging and idle timeouts for the different possible scenarios
8741 * in default configuration
8742 */
8743static const uint32_t
8744iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8745	{
8746		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
8747		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
8748	},
8749	{
8750		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
8751		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
8752	},
8753	{
8754		htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
8755		htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
8756	},
8757	{
8758		htole32(IWX_SF_BA_AGING_TIMER_DEF),
8759		htole32(IWX_SF_BA_IDLE_TIMER_DEF)
8760	},
8761	{
8762		htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
8763		htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
8764	},
8765};
8766
8767/*
8768 * Aging and idle timeouts for the different possible scenarios
8769 * in single BSS MAC configuration.
8770 */
8771static const uint32_t
8772iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
8773	{
8774		htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
8775		htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
8776	},
8777	{
8778		htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
8779		htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
8780	},
8781	{
8782		htole32(IWX_SF_MCAST_AGING_TIMER),
8783		htole32(IWX_SF_MCAST_IDLE_TIMER)
8784	},
8785	{
8786		htole32(IWX_SF_BA_AGING_TIMER),
8787		htole32(IWX_SF_BA_IDLE_TIMER)
8788	},
8789	{
8790		htole32(IWX_SF_TX_RE_AGING_TIMER),
8791		htole32(IWX_SF_TX_RE_IDLE_TIMER)
8792	},
8793};
8794
8795void
8796iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
8797    struct ieee80211_node *ni)
8798{
8799	int i, j, watermark;
8800
8801	sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
8802
8803	/*
8804	 * If we are in association flow - check antenna configuration
8805	 * capabilities of the AP station, and choose the watermark accordingly.
8806	 */
8807	if (ni) {
8808		if (ni->ni_flags & IEEE80211_NODE_HT) {
8809			if (ni->ni_rxmcs[1] != 0)
8810				watermark = IWX_SF_W_MARK_MIMO2;
8811			else
8812				watermark = IWX_SF_W_MARK_SISO;
8813		} else {
8814			watermark = IWX_SF_W_MARK_LEGACY;
8815		}
8816	/* default watermark value for unassociated mode. */
8817	} else {
8818		watermark = IWX_SF_W_MARK_MIMO2;
8819	}
8820	sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
8821
8822	for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
8823		for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
8824			sf_cmd->long_delay_timeouts[i][j] =
8825					htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
8826		}
8827	}
8828
8829	if (ni) {
8830		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
8831		       sizeof(iwx_sf_full_timeout));
8832	} else {
8833		memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
8834		       sizeof(iwx_sf_full_timeout_def));
8835	}
8836
8837}
8838
8839int
8840iwx_sf_config(struct iwx_softc *sc, int new_state)
8841{
8842	struct ieee80211com *ic = &sc->sc_ic;
8843	struct iwx_sf_cfg_cmd sf_cmd = {
8844		.state = htole32(new_state),
8845	};
8846	int err = 0;
8847
8848	switch (new_state) {
8849	case IWX_SF_UNINIT:
8850	case IWX_SF_INIT_OFF:
8851		iwx_fill_sf_command(sc, &sf_cmd, NULL);
8852		break;
8853	case IWX_SF_FULL_ON:
8854		iwx_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
8855		break;
8856	default:
8857		return EINVAL;
8858	}
8859
8860	err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
8861				   sizeof(sf_cmd), &sf_cmd);
8862	return err;
8863}
8864
8865int
8866iwx_send_bt_init_conf(struct iwx_softc *sc)
8867{
8868	struct iwx_bt_coex_cmd bt_cmd;
8869
8870	bt_cmd.mode = htole32(IWX_BT_COEX_WIFI);
8871	bt_cmd.enabled_modules = 0;
8872
8873	return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
8874	    &bt_cmd);
8875}
8876
8877int
8878iwx_send_soc_conf(struct iwx_softc *sc)
8879{
8880	struct iwx_soc_configuration_cmd cmd;
8881	int err;
8882	uint32_t cmd_id, flags = 0;
8883
8884	memset(&cmd, 0, sizeof(cmd));
8885
8886	/*
8887	 * In VER_1 of this command, the discrete value is considered
8888	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
8889	 * values in VER_1, this is backwards-compatible with VER_2,
8890	 * as long as we don't set any other flag bits.
8891	 */
8892	if (!sc->sc_integrated) { /* VER_1 */
8893		flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
8894	} else { /* VER_2 */
8895		uint8_t scan_cmd_ver;
8896		if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
8897			flags |= (sc->sc_ltr_delay &
8898			    IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
8899		scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
8900		    IWX_SCAN_REQ_UMAC);
8901		if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
8902		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
8903			flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
8904	}
8905	cmd.flags = htole32(flags);
8906
8907	cmd.latency = htole32(sc->sc_xtal_latency);
8908
8909	cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
8910	err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
8911	if (err)
8912		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
8913	return err;
8914}
8915
8916int
8917iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
8918{
8919	struct iwx_mcc_update_cmd mcc_cmd;
8920	struct iwx_host_cmd hcmd = {
8921		.id = IWX_MCC_UPDATE_CMD,
8922		.flags = IWX_CMD_WANT_RESP,
8923		.data = { &mcc_cmd },
8924	};
8925	struct iwx_rx_packet *pkt;
8926	struct iwx_mcc_update_resp *resp;
8927	size_t resp_len;
8928	int err;
8929
8930	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
8931	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
8932	if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
8933	    isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
8934		mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
8935	else
8936		mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
8937
8938	hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
8939	hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
8940
8941	err = iwx_send_cmd(sc, &hcmd);
8942	if (err)
8943		return err;
8944
8945	pkt = hcmd.resp_pkt;
8946	if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
8947		err = EIO;
8948		goto out;
8949	}
8950
8951	resp_len = iwx_rx_packet_payload_len(pkt);
8952	if (resp_len < sizeof(*resp)) {
8953		err = EIO;
8954		goto out;
8955	}
8956
8957	resp = (void *)pkt->data;
8958	if (resp_len != sizeof(*resp) +
8959	    resp->n_channels * sizeof(resp->channels[0])) {
8960		err = EIO;
8961		goto out;
8962	}
8963
8964	DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
8965	    resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
8966
8967	/* Update channel map for net80211 and our scan configuration. */
8968	iwx_init_channel_map(sc, NULL, resp->channels, resp->n_channels);
8969
8970out:
8971	iwx_free_resp(sc, &hcmd);
8972
8973	return err;
8974}
8975
8976int
8977iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
8978{
8979	struct iwx_temp_report_ths_cmd cmd;
8980	int err;
8981
8982	/*
8983	 * In order to give responsibility for critical-temperature-kill
8984	 * and TX backoff to FW we need to send an empty temperature
8985	 * reporting command at init time.
8986	 */
8987	memset(&cmd, 0, sizeof(cmd));
8988
8989	err = iwx_send_cmd_pdu(sc,
8990	    IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
8991	    0, sizeof(cmd), &cmd);
8992	if (err)
8993		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
8994		    DEVNAME(sc), err);
8995
8996	return err;
8997}
8998
8999int
9000iwx_init_hw(struct iwx_softc *sc)
9001{
9002	struct ieee80211com *ic = &sc->sc_ic;
9003	int err, i;
9004
9005	err = iwx_run_init_mvm_ucode(sc, 0);
9006	if (err)
9007		return err;
9008
9009	if (!iwx_nic_lock(sc))
9010		return EBUSY;
9011
9012	err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
9013	if (err) {
9014		printf("%s: could not init tx ant config (error %d)\n",
9015		    DEVNAME(sc), err);
9016		goto err;
9017	}
9018
9019	if (sc->sc_tx_with_siso_diversity) {
9020		err = iwx_send_phy_cfg_cmd(sc);
9021		if (err) {
9022			printf("%s: could not send phy config (error %d)\n",
9023			    DEVNAME(sc), err);
9024			goto err;
9025		}
9026	}
9027
9028	err = iwx_send_bt_init_conf(sc);
9029	if (err) {
9030		printf("%s: could not init bt coex (error %d)\n",
9031		    DEVNAME(sc), err);
9032		return err;
9033	}
9034
9035	err = iwx_send_soc_conf(sc);
9036	if (err)
9037		return err;
9038
9039	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
9040		err = iwx_send_dqa_cmd(sc);
9041		if (err)
9042			return err;
9043	}
9044
9045	for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
9046		/*
9047		 * The channel used here isn't relevant as it's
9048		 * going to be overwritten in the other flows.
9049		 * For now use the first channel we have.
9050		 */
9051		sc->sc_phyctxt[i].id = i;
9052		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
9053		err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
9054		    IWX_FW_CTXT_ACTION_ADD, 0, IEEE80211_HTOP0_SCO_SCN,
9055		    IEEE80211_VHTOP0_CHAN_WIDTH_HT);
9056		if (err) {
9057			printf("%s: could not add phy context %d (error %d)\n",
9058			    DEVNAME(sc), i, err);
9059			goto err;
9060		}
9061		if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
9062		    IWX_RLC_CONFIG_CMD) == 2) {
9063			err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
9064			if (err) {
9065				printf("%s: could not configure RLC for PHY "
9066				    "%d (error %d)\n", DEVNAME(sc), i, err);
9067				goto err;
9068			}
9069		}
9070	}
9071
9072	err = iwx_config_ltr(sc);
9073	if (err) {
9074		printf("%s: PCIe LTR configuration failed (error %d)\n",
9075		    DEVNAME(sc), err);
9076	}
9077
9078	if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
9079		err = iwx_send_temp_report_ths_cmd(sc);
9080		if (err)
9081			goto err;
9082	}
9083
9084	err = iwx_power_update_device(sc);
9085	if (err) {
9086		printf("%s: could not send power command (error %d)\n",
9087		    DEVNAME(sc), err);
9088		goto err;
9089	}
9090
9091	if (sc->sc_nvm.lar_enabled) {
9092		err = iwx_send_update_mcc_cmd(sc, "ZZ");
9093		if (err) {
9094			printf("%s: could not init LAR (error %d)\n",
9095			    DEVNAME(sc), err);
9096			goto err;
9097		}
9098	}
9099
9100	err = iwx_config_umac_scan_reduced(sc);
9101	if (err) {
9102		printf("%s: could not configure scan (error %d)\n",
9103		    DEVNAME(sc), err);
9104		goto err;
9105	}
9106
9107	err = iwx_disable_beacon_filter(sc);
9108	if (err) {
9109		printf("%s: could not disable beacon filter (error %d)\n",
9110		    DEVNAME(sc), err);
9111		goto err;
9112	}
9113
9114err:
9115	iwx_nic_unlock(sc);
9116	return err;
9117}
9118
9119/* Allow multicast from our BSSID. */
9120int
9121iwx_allow_mcast(struct iwx_softc *sc)
9122{
9123	struct ieee80211com *ic = &sc->sc_ic;
9124	struct iwx_node *in = (void *)ic->ic_bss;
9125	struct iwx_mcast_filter_cmd *cmd;
9126	size_t size;
9127	int err;
9128
9129	size = roundup(sizeof(*cmd), 4);
9130	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
9131	if (cmd == NULL)
9132		return ENOMEM;
9133	cmd->filter_own = 1;
9134	cmd->port_id = 0;
9135	cmd->count = 0;
9136	cmd->pass_all = 1;
9137	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
9138
9139	err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
9140	    0, size, cmd);
9141	free(cmd, M_DEVBUF, size);
9142	return err;
9143}
9144
9145int
9146iwx_init(struct ifnet *ifp)
9147{
9148	struct iwx_softc *sc = ifp->if_softc;
9149	struct ieee80211com *ic = &sc->sc_ic;
9150	int err, generation;
9151
9152	rw_assert_wrlock(&sc->ioctl_rwl);
9153
9154	generation = ++sc->sc_generation;
9155
9156	err = iwx_preinit(sc);
9157	if (err)
9158		return err;
9159
9160	err = iwx_start_hw(sc);
9161	if (err) {
9162		printf("%s: could not initialize hardware\n", DEVNAME(sc));
9163		return err;
9164	}
9165
9166	err = iwx_init_hw(sc);
9167	if (err) {
9168		if (generation == sc->sc_generation)
9169			iwx_stop_device(sc);
9170		return err;
9171	}
9172
9173	if (sc->sc_nvm.sku_cap_11n_enable)
9174		iwx_setup_ht_rates(sc);
9175	if (sc->sc_nvm.sku_cap_11ac_enable)
9176		iwx_setup_vht_rates(sc);
9177
9178	KASSERT(sc->task_refs.r_refs == 0);
9179	refcnt_init(&sc->task_refs);
9180	ifq_clr_oactive(&ifp->if_snd);
9181	ifp->if_flags |= IFF_RUNNING;
9182
9183	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
9184		ic->ic_bss->ni_chan = ic->ic_ibss_chan;
9185		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
9186		return 0;
9187	}
9188
9189	ieee80211_begin_scan(ifp);
9190
9191	/*
9192	 * ieee80211_begin_scan() ends up scheduling iwx_newstate_task().
9193	 * Wait until the transition to SCAN state has completed.
9194	 */
9195	do {
9196		err = tsleep_nsec(&ic->ic_state, PCATCH, "iwxinit",
9197		    SEC_TO_NSEC(1));
9198		if (generation != sc->sc_generation)
9199			return ENXIO;
9200		if (err) {
9201			iwx_stop(ifp);
9202			return err;
9203		}
9204	} while (ic->ic_state != IEEE80211_S_SCAN);
9205
9206	return 0;
9207}
9208
9209void
9210iwx_start(struct ifnet *ifp)
9211{
9212	struct iwx_softc *sc = ifp->if_softc;
9213	struct ieee80211com *ic = &sc->sc_ic;
9214	struct ieee80211_node *ni;
9215	struct ether_header *eh;
9216	struct mbuf *m;
9217
9218	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
9219		return;
9220
9221	for (;;) {
9222		/* why isn't this done per-queue? */
9223		if (sc->qfullmsk != 0) {
9224			ifq_set_oactive(&ifp->if_snd);
9225			break;
9226		}
9227
9228		/* Don't queue additional frames while flushing Tx queues. */
9229		if (sc->sc_flags & IWX_FLAG_TXFLUSH)
9230			break;
9231
9232		/* need to send management frames even if we're not RUNning */
9233		m = mq_dequeue(&ic->ic_mgtq);
9234		if (m) {
9235			ni = m->m_pkthdr.ph_cookie;
9236			goto sendit;
9237		}
9238
9239		if (ic->ic_state != IEEE80211_S_RUN ||
9240		    (ic->ic_xflags & IEEE80211_F_TX_MGMT_ONLY))
9241			break;
9242
9243		m = ifq_dequeue(&ifp->if_snd);
9244		if (!m)
9245			break;
9246		if (m->m_len < sizeof (*eh) &&
9247		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
9248			ifp->if_oerrors++;
9249			continue;
9250		}
9251#if NBPFILTER > 0
9252		if (ifp->if_bpf != NULL)
9253			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
9254#endif
9255		if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) {
9256			ifp->if_oerrors++;
9257			continue;
9258		}
9259
9260 sendit:
9261#if NBPFILTER > 0
9262		if (ic->ic_rawbpf != NULL)
9263			bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT);
9264#endif
9265		if (iwx_tx(sc, m, ni) != 0) {
9266			ieee80211_release_node(ic, ni);
9267			ifp->if_oerrors++;
9268			continue;
9269		}
9270
9271		if (ifp->if_flags & IFF_UP)
9272			ifp->if_timer = 1;
9273	}
9274
9275	return;
9276}
9277
9278void
9279iwx_stop(struct ifnet *ifp)
9280{
9281	struct iwx_softc *sc = ifp->if_softc;
9282	struct ieee80211com *ic = &sc->sc_ic;
9283	struct iwx_node *in = (void *)ic->ic_bss;
9284	int i, s = splnet();
9285
9286	rw_assert_wrlock(&sc->ioctl_rwl);
9287
9288	sc->sc_flags |= IWX_FLAG_SHUTDOWN; /* Disallow new tasks. */
9289
9290	/* Cancel scheduled tasks and let any stale tasks finish up. */
9291	task_del(systq, &sc->init_task);
9292	iwx_del_task(sc, sc->sc_nswq, &sc->newstate_task);
9293	iwx_del_task(sc, systq, &sc->ba_task);
9294	iwx_del_task(sc, systq, &sc->setkey_task);
9295	memset(sc->setkey_arg, 0, sizeof(sc->setkey_arg));
9296	sc->setkey_cur = sc->setkey_tail = sc->setkey_nkeys = 0;
9297	iwx_del_task(sc, systq, &sc->mac_ctxt_task);
9298	iwx_del_task(sc, systq, &sc->phy_ctxt_task);
9299	iwx_del_task(sc, systq, &sc->bgscan_done_task);
9300	KASSERT(sc->task_refs.r_refs >= 1);
9301	refcnt_finalize(&sc->task_refs, "iwxstop");
9302
9303	iwx_stop_device(sc);
9304
9305	free(sc->bgscan_unref_arg, M_DEVBUF, sc->bgscan_unref_arg_size);
9306	sc->bgscan_unref_arg = NULL;
9307	sc->bgscan_unref_arg_size = 0;
9308
9309	/* Reset soft state. */
9310
9311	sc->sc_generation++;
9312	for (i = 0; i < nitems(sc->sc_cmd_resp_pkt); i++) {
9313		free(sc->sc_cmd_resp_pkt[i], M_DEVBUF, sc->sc_cmd_resp_len[i]);
9314		sc->sc_cmd_resp_pkt[i] = NULL;
9315		sc->sc_cmd_resp_len[i] = 0;
9316	}
9317	ifp->if_flags &= ~IFF_RUNNING;
9318	ifq_clr_oactive(&ifp->if_snd);
9319
9320	in->in_phyctxt = NULL;
9321	in->in_flags = 0;
9322	IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
9323
9324	sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
9325	sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
9326	sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
9327	sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
9328	sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
9329	sc->sc_flags &= ~IWX_FLAG_HW_ERR;
9330	sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
9331	sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
9332
9333	sc->sc_rx_ba_sessions = 0;
9334	sc->ba_rx.start_tidmask = 0;
9335	sc->ba_rx.stop_tidmask = 0;
9336	memset(sc->aggqid, 0, sizeof(sc->aggqid));
9337	sc->ba_tx.start_tidmask = 0;
9338	sc->ba_tx.stop_tidmask = 0;
9339
9340	sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
9341	sc->ns_nstate = IEEE80211_S_INIT;
9342
9343	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
9344		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
9345		iwx_clear_reorder_buffer(sc, rxba);
9346	}
9347	memset(sc->sc_tx_timer, 0, sizeof(sc->sc_tx_timer));
9348	ifp->if_timer = 0;
9349
9350	splx(s);
9351}
9352
9353void
9354iwx_watchdog(struct ifnet *ifp)
9355{
9356	struct iwx_softc *sc = ifp->if_softc;
9357	int i;
9358
9359	ifp->if_timer = 0;
9360
9361	/*
9362	 * We maintain a separate timer for each Tx queue because
9363	 * Tx aggregation queues can get "stuck" while other queues
9364	 * keep working. The Linux driver uses a similar workaround.
9365	 */
9366	for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
9367		if (sc->sc_tx_timer[i] > 0) {
9368			if (--sc->sc_tx_timer[i] == 0) {
9369				printf("%s: device timeout\n", DEVNAME(sc));
9370				if (ifp->if_flags & IFF_DEBUG) {
9371					iwx_nic_error(sc);
9372					iwx_dump_driver_status(sc);
9373				}
9374				if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
9375					task_add(systq, &sc->init_task);
9376				ifp->if_oerrors++;
9377				return;
9378			}
9379			ifp->if_timer = 1;
9380		}
9381	}
9382
9383	ieee80211_watchdog(ifp);
9384}
9385
9386int
9387iwx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
9388{
9389	struct iwx_softc *sc = ifp->if_softc;
9390	int s, err = 0, generation = sc->sc_generation;
9391
9392	/*
9393	 * Prevent processes from entering this function while another
9394	 * process is tsleep'ing in it.
9395	 */
9396	err = rw_enter(&sc->ioctl_rwl, RW_WRITE | RW_INTR);
9397	if (err == 0 && generation != sc->sc_generation) {
9398		rw_exit(&sc->ioctl_rwl);
9399		return ENXIO;
9400	}
9401	if (err)
9402		return err;
9403	s = splnet();
9404
9405	switch (cmd) {
9406	case SIOCSIFADDR:
9407		ifp->if_flags |= IFF_UP;
9408		/* FALLTHROUGH */
9409	case SIOCSIFFLAGS:
9410		if (ifp->if_flags & IFF_UP) {
9411			if (!(ifp->if_flags & IFF_RUNNING)) {
9412				/* Force reload of firmware image from disk. */
9413				sc->sc_fw.fw_status = IWX_FW_STATUS_NONE;
9414				err = iwx_init(ifp);
9415			}
9416		} else {
9417			if (ifp->if_flags & IFF_RUNNING)
9418				iwx_stop(ifp);
9419		}
9420		break;
9421
9422	default:
9423		err = ieee80211_ioctl(ifp, cmd, data);
9424	}
9425
9426	if (err == ENETRESET) {
9427		err = 0;
9428		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
9429		    (IFF_UP | IFF_RUNNING)) {
9430			iwx_stop(ifp);
9431			err = iwx_init(ifp);
9432		}
9433	}
9434
9435	splx(s);
9436	rw_exit(&sc->ioctl_rwl);
9437
9438	return err;
9439}
9440
9441/*
9442 * Note: This structure is read from the device with IO accesses,
9443 * and the reading already does the endian conversion. As it is
9444 * read with uint32_t-sized accesses, any members with a different size
9445 * need to be ordered correctly though!
9446 */
9447struct iwx_error_event_table {
9448	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9449	uint32_t error_id;		/* type of error */
9450	uint32_t trm_hw_status0;	/* TRM HW status */
9451	uint32_t trm_hw_status1;	/* TRM HW status */
9452	uint32_t blink2;		/* branch link */
9453	uint32_t ilink1;		/* interrupt link */
9454	uint32_t ilink2;		/* interrupt link */
9455	uint32_t data1;		/* error-specific data */
9456	uint32_t data2;		/* error-specific data */
9457	uint32_t data3;		/* error-specific data */
9458	uint32_t bcon_time;		/* beacon timer */
9459	uint32_t tsf_low;		/* network timestamp function timer */
9460	uint32_t tsf_hi;		/* network timestamp function timer */
9461	uint32_t gp1;		/* GP1 timer register */
9462	uint32_t gp2;		/* GP2 timer register */
9463	uint32_t fw_rev_type;	/* firmware revision type */
9464	uint32_t major;		/* uCode version major */
9465	uint32_t minor;		/* uCode version minor */
9466	uint32_t hw_ver;		/* HW Silicon version */
9467	uint32_t brd_ver;		/* HW board version */
9468	uint32_t log_pc;		/* log program counter */
9469	uint32_t frame_ptr;		/* frame pointer */
9470	uint32_t stack_ptr;		/* stack pointer */
9471	uint32_t hcmd;		/* last host command header */
9472	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
9473				 * rxtx_flag */
9474	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
9475				 * host_flag */
9476	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
9477				 * enc_flag */
9478	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
9479				 * time_flag */
9480	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
9481				 * wico interrupt */
9482	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
9483	uint32_t wait_event;		/* wait event() caller address */
9484	uint32_t l2p_control;	/* L2pControlField */
9485	uint32_t l2p_duration;	/* L2pDurationField */
9486	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
9487	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
9488	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
9489				 * (LMPM_PMG_SEL) */
9490	uint32_t u_timestamp;	/* indicate when the date and time of the
9491				 * compilation */
9492	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
9493} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
9494
9495/*
9496 * UMAC error struct - relevant starting from family 8000 chip.
9497 * Note: This structure is read from the device with IO accesses,
9498 * and the reading already does the endian conversion. As it is
9499 * read with u32-sized accesses, any members with a different size
9500 * need to be ordered correctly though!
9501 */
9502struct iwx_umac_error_event_table {
9503	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
9504	uint32_t error_id;	/* type of error */
9505	uint32_t blink1;	/* branch link */
9506	uint32_t blink2;	/* branch link */
9507	uint32_t ilink1;	/* interrupt link */
9508	uint32_t ilink2;	/* interrupt link */
9509	uint32_t data1;		/* error-specific data */
9510	uint32_t data2;		/* error-specific data */
9511	uint32_t data3;		/* error-specific data */
9512	uint32_t umac_major;
9513	uint32_t umac_minor;
9514	uint32_t frame_pointer;	/* core register 27*/
9515	uint32_t stack_pointer;	/* core register 28 */
9516	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
9517	uint32_t nic_isr_pref;	/* ISR status register */
9518} __packed;
9519
9520#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
9521#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
9522
9523void
9524iwx_nic_umac_error(struct iwx_softc *sc)
9525{
9526	struct iwx_umac_error_event_table table;
9527	uint32_t base;
9528
9529	base = sc->sc_uc.uc_umac_error_event_table;
9530
9531	if (base < 0x400000) {
9532		printf("%s: Invalid error log pointer 0x%08x\n",
9533		    DEVNAME(sc), base);
9534		return;
9535	}
9536
9537	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9538		printf("%s: reading errlog failed\n", DEVNAME(sc));
9539		return;
9540	}
9541
9542	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9543		printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
9544		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9545			sc->sc_flags, table.valid);
9546	}
9547
9548	printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
9549		iwx_desc_lookup(table.error_id));
9550	printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
9551	printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
9552	printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
9553	printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
9554	printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
9555	printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
9556	printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
9557	printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
9558	printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
9559	printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
9560	    table.frame_pointer);
9561	printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
9562	    table.stack_pointer);
9563	printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
9564	printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
9565	    table.nic_isr_pref);
9566}
9567
9568#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
9569static struct {
9570	const char *name;
9571	uint8_t num;
9572} advanced_lookup[] = {
9573	{ "NMI_INTERRUPT_WDG", 0x34 },
9574	{ "SYSASSERT", 0x35 },
9575	{ "UCODE_VERSION_MISMATCH", 0x37 },
9576	{ "BAD_COMMAND", 0x38 },
9577	{ "BAD_COMMAND", 0x39 },
9578	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
9579	{ "FATAL_ERROR", 0x3D },
9580	{ "NMI_TRM_HW_ERR", 0x46 },
9581	{ "NMI_INTERRUPT_TRM", 0x4C },
9582	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
9583	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
9584	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
9585	{ "NMI_INTERRUPT_HOST", 0x66 },
9586	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
9587	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
9588	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
9589	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
9590	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
9591	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
9592	{ "ADVANCED_SYSASSERT", 0 },
9593};
9594
9595const char *
9596iwx_desc_lookup(uint32_t num)
9597{
9598	int i;
9599
9600	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
9601		if (advanced_lookup[i].num ==
9602		    (num & ~IWX_FW_SYSASSERT_CPU_MASK))
9603			return advanced_lookup[i].name;
9604
9605	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
9606	return advanced_lookup[i].name;
9607}
9608
9609/*
9610 * Support for dumping the error log seemed like a good idea ...
9611 * but it's mostly hex junk and the only sensible thing is the
9612 * hw/ucode revision (which we know anyway).  Since it's here,
9613 * I'll just leave it in, just in case e.g. the Intel guys want to
9614 * help us decipher some "ADVANCED_SYSASSERT" later.
9615 */
9616void
9617iwx_nic_error(struct iwx_softc *sc)
9618{
9619	struct iwx_error_event_table table;
9620	uint32_t base;
9621
9622	printf("%s: dumping device error log\n", DEVNAME(sc));
9623	base = sc->sc_uc.uc_lmac_error_event_table[0];
9624	if (base < 0x400000) {
9625		printf("%s: Invalid error log pointer 0x%08x\n",
9626		    DEVNAME(sc), base);
9627		return;
9628	}
9629
9630	if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
9631		printf("%s: reading errlog failed\n", DEVNAME(sc));
9632		return;
9633	}
9634
9635	if (!table.valid) {
9636		printf("%s: errlog not found, skipping\n", DEVNAME(sc));
9637		return;
9638	}
9639
9640	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
9641		printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
9642		printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
9643		    sc->sc_flags, table.valid);
9644	}
9645
9646	printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
9647	    iwx_desc_lookup(table.error_id));
9648	printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
9649	    table.trm_hw_status0);
9650	printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
9651	    table.trm_hw_status1);
9652	printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
9653	printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
9654	printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
9655	printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
9656	printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
9657	printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
9658	printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
9659	printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
9660	printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
9661	printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
9662	printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
9663	printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
9664	    table.fw_rev_type);
9665	printf("%s: %08X | uCode version major\n", DEVNAME(sc),
9666	    table.major);
9667	printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
9668	    table.minor);
9669	printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
9670	printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
9671	printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
9672	printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
9673	printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
9674	printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
9675	printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
9676	printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
9677	printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
9678	printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
9679	printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
9680	printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
9681	printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
9682	printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
9683	printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
9684	printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
9685	printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
9686
9687	if (sc->sc_uc.uc_umac_error_event_table)
9688		iwx_nic_umac_error(sc);
9689}
9690
9691void
9692iwx_dump_driver_status(struct iwx_softc *sc)
9693{
9694	int i;
9695
9696	printf("driver status:\n");
9697	for (i = 0; i < nitems(sc->txq); i++) {
9698		struct iwx_tx_ring *ring = &sc->txq[i];
9699		printf("  tx ring %2d: qid=%-2d cur=%-3d "
9700		    "cur_hw=%-3d queued=%-3d\n",
9701		    i, ring->qid, ring->cur, ring->cur_hw,
9702		    ring->queued);
9703	}
9704	printf("  rx ring: cur=%d\n", sc->rxq.cur);
9705	printf("  802.11 state %s\n",
9706	    ieee80211_state_name[sc->sc_ic.ic_state]);
9707}
9708
9709#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
9710do {									\
9711	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9712	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
9713	_var_ = (void *)((_pkt_)+1);					\
9714} while (/*CONSTCOND*/0)
9715
9716#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
9717do {									\
9718	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
9719	    sizeof(len), BUS_DMASYNC_POSTREAD);				\
9720	_ptr_ = (void *)((_pkt_)+1);					\
9721} while (/*CONSTCOND*/0)
9722
9723int
9724iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
9725{
9726	int qid, idx, code;
9727
9728	qid = pkt->hdr.qid & ~0x80;
9729	idx = pkt->hdr.idx;
9730	code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9731
9732	return (!(qid == 0 && idx == 0 && code == 0) &&
9733	    pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
9734}
9735
9736void
9737iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf_list *ml)
9738{
9739	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
9740	struct iwx_rx_packet *pkt, *nextpkt;
9741	uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
9742	struct mbuf *m0, *m;
9743	const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
9744	int qid, idx, code, handled = 1;
9745
9746	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWX_RBUF_SIZE,
9747	    BUS_DMASYNC_POSTREAD);
9748
9749	m0 = data->m;
9750	while (m0 && offset + minsz < IWX_RBUF_SIZE) {
9751		pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
9752		qid = pkt->hdr.qid;
9753		idx = pkt->hdr.idx;
9754
9755		code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
9756
9757		if (!iwx_rx_pkt_valid(pkt))
9758			break;
9759
9760		/*
9761		 * XXX Intel inside (tm)
9762		 * Any commands in the LONG_GROUP could actually be in the
9763		 * LEGACY group. Firmware API versions >= 50 reject commands
9764		 * in group 0, forcing us to use this hack.
9765		 */
9766		if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
9767			struct iwx_tx_ring *ring = &sc->txq[qid];
9768			struct iwx_tx_data *txdata = &ring->data[idx];
9769			if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
9770				code = iwx_cmd_opcode(code);
9771		}
9772
9773		len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
9774		if (len < minsz || len > (IWX_RBUF_SIZE - offset))
9775			break;
9776
9777		if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
9778			/* Take mbuf m0 off the RX ring. */
9779			if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
9780				ifp->if_ierrors++;
9781				break;
9782			}
9783			KASSERT(data->m != m0);
9784		}
9785
9786		switch (code) {
9787		case IWX_REPLY_RX_PHY_CMD:
9788			iwx_rx_rx_phy_cmd(sc, pkt, data);
9789			break;
9790
9791		case IWX_REPLY_RX_MPDU_CMD: {
9792			size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
9793			nextoff = offset +
9794			    roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
9795			nextpkt = (struct iwx_rx_packet *)
9796			    (m0->m_data + nextoff);
9797			/* AX210 devices ship only one packet per Rx buffer. */
9798			if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
9799			    nextoff + minsz >= IWX_RBUF_SIZE ||
9800			    !iwx_rx_pkt_valid(nextpkt)) {
9801				/* No need to copy last frame in buffer. */
9802				if (offset > 0)
9803					m_adj(m0, offset);
9804				iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen, ml);
9805				m0 = NULL; /* stack owns m0 now; abort loop */
9806			} else {
9807				/*
9808				 * Create an mbuf which points to the current
9809				 * packet. Always copy from offset zero to
9810				 * preserve m_pkthdr.
9811				 */
9812				m = m_copym(m0, 0, M_COPYALL, M_DONTWAIT);
9813				if (m == NULL) {
9814					ifp->if_ierrors++;
9815					m_freem(m0);
9816					m0 = NULL;
9817					break;
9818				}
9819				m_adj(m, offset);
9820				iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen, ml);
9821			}
9822 			break;
9823		}
9824
9825		case IWX_BAR_FRAME_RELEASE:
9826			iwx_rx_bar_frame_release(sc, pkt, ml);
9827			break;
9828
9829		case IWX_TX_CMD:
9830			iwx_rx_tx_cmd(sc, pkt, data);
9831			break;
9832
9833		case IWX_BA_NOTIF:
9834			iwx_rx_compressed_ba(sc, pkt);
9835			break;
9836
9837		case IWX_MISSED_BEACONS_NOTIFICATION:
9838			iwx_rx_bmiss(sc, pkt, data);
9839			break;
9840
9841		case IWX_MFUART_LOAD_NOTIFICATION:
9842			break;
9843
9844		case IWX_ALIVE: {
9845			struct iwx_alive_resp_v4 *resp4;
9846			struct iwx_alive_resp_v5 *resp5;
9847			struct iwx_alive_resp_v6 *resp6;
9848
9849			DPRINTF(("%s: firmware alive\n", __func__));
9850			sc->sc_uc.uc_ok = 0;
9851
9852			/*
9853			 * For v5 and above, we can check the version, for older
9854			 * versions we need to check the size.
9855			 */
9856			 if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9857			    IWX_ALIVE) == 6) {
9858				SYNC_RESP_STRUCT(resp6, pkt);
9859				if (iwx_rx_packet_payload_len(pkt) !=
9860				    sizeof(*resp6)) {
9861					sc->sc_uc.uc_intr = 1;
9862					wakeup(&sc->sc_uc);
9863					break;
9864				}
9865				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9866				    resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9867				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9868				    resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9869				sc->sc_uc.uc_log_event_table = le32toh(
9870				    resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9871				sc->sc_uc.uc_umac_error_event_table = le32toh(
9872				    resp6->umac_data.dbg_ptrs.error_info_addr);
9873				sc->sc_sku_id[0] =
9874				    le32toh(resp6->sku_id.data[0]);
9875				sc->sc_sku_id[1] =
9876				    le32toh(resp6->sku_id.data[1]);
9877				sc->sc_sku_id[2] =
9878				    le32toh(resp6->sku_id.data[2]);
9879				if (resp6->status == IWX_ALIVE_STATUS_OK)
9880					sc->sc_uc.uc_ok = 1;
9881			 } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
9882			    IWX_ALIVE) == 5) {
9883				SYNC_RESP_STRUCT(resp5, pkt);
9884				if (iwx_rx_packet_payload_len(pkt) !=
9885				    sizeof(*resp5)) {
9886					sc->sc_uc.uc_intr = 1;
9887					wakeup(&sc->sc_uc);
9888					break;
9889				}
9890				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9891				    resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9892				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9893				    resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9894				sc->sc_uc.uc_log_event_table = le32toh(
9895				    resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9896				sc->sc_uc.uc_umac_error_event_table = le32toh(
9897				    resp5->umac_data.dbg_ptrs.error_info_addr);
9898				sc->sc_sku_id[0] =
9899				    le32toh(resp5->sku_id.data[0]);
9900				sc->sc_sku_id[1] =
9901				    le32toh(resp5->sku_id.data[1]);
9902				sc->sc_sku_id[2] =
9903				    le32toh(resp5->sku_id.data[2]);
9904				if (resp5->status == IWX_ALIVE_STATUS_OK)
9905					sc->sc_uc.uc_ok = 1;
9906			} else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
9907				SYNC_RESP_STRUCT(resp4, pkt);
9908				sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
9909				    resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
9910				sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
9911				    resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
9912				sc->sc_uc.uc_log_event_table = le32toh(
9913				    resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
9914				sc->sc_uc.uc_umac_error_event_table = le32toh(
9915				    resp4->umac_data.dbg_ptrs.error_info_addr);
9916				if (resp4->status == IWX_ALIVE_STATUS_OK)
9917					sc->sc_uc.uc_ok = 1;
9918			}
9919
9920			sc->sc_uc.uc_intr = 1;
9921			wakeup(&sc->sc_uc);
9922			break;
9923		}
9924
9925		case IWX_STATISTICS_NOTIFICATION: {
9926			struct iwx_notif_statistics *stats;
9927			SYNC_RESP_STRUCT(stats, pkt);
9928			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
9929			sc->sc_noise = iwx_get_noise(&stats->rx.general);
9930			break;
9931		}
9932
9933		case IWX_DTS_MEASUREMENT_NOTIFICATION:
9934		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9935				 IWX_DTS_MEASUREMENT_NOTIF_WIDE):
9936		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9937				 IWX_TEMP_REPORTING_THRESHOLDS_CMD):
9938			break;
9939
9940		case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
9941		    IWX_CT_KILL_NOTIFICATION): {
9942			struct iwx_ct_kill_notif *notif;
9943			SYNC_RESP_STRUCT(notif, pkt);
9944			printf("%s: device at critical temperature (%u degC), "
9945			    "stopping device\n",
9946			    DEVNAME(sc), le16toh(notif->temperature));
9947			sc->sc_flags |= IWX_FLAG_HW_ERR;
9948			task_add(systq, &sc->init_task);
9949			break;
9950		}
9951
9952		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9953		    IWX_SCD_QUEUE_CONFIG_CMD):
9954		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
9955		    IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
9956		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
9957		    IWX_SESSION_PROTECTION_CMD):
9958		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
9959		    IWX_NVM_GET_INFO):
9960		case IWX_ADD_STA_KEY:
9961		case IWX_PHY_CONFIGURATION_CMD:
9962		case IWX_TX_ANT_CONFIGURATION_CMD:
9963		case IWX_ADD_STA:
9964		case IWX_MAC_CONTEXT_CMD:
9965		case IWX_REPLY_SF_CFG_CMD:
9966		case IWX_POWER_TABLE_CMD:
9967		case IWX_LTR_CONFIG:
9968		case IWX_PHY_CONTEXT_CMD:
9969		case IWX_BINDING_CONTEXT_CMD:
9970		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
9971		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
9972		case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
9973		case IWX_REPLY_BEACON_FILTERING_CMD:
9974		case IWX_MAC_PM_POWER_TABLE:
9975		case IWX_TIME_QUOTA_CMD:
9976		case IWX_REMOVE_STA:
9977		case IWX_TXPATH_FLUSH:
9978		case IWX_BT_CONFIG:
9979		case IWX_MCC_UPDATE_CMD:
9980		case IWX_TIME_EVENT_CMD:
9981		case IWX_STATISTICS_CMD:
9982		case IWX_SCD_QUEUE_CFG: {
9983			size_t pkt_len;
9984
9985			if (sc->sc_cmd_resp_pkt[idx] == NULL)
9986				break;
9987
9988			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
9989			    sizeof(*pkt), BUS_DMASYNC_POSTREAD);
9990
9991			pkt_len = sizeof(pkt->len_n_flags) +
9992			    iwx_rx_packet_len(pkt);
9993
9994			if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
9995			    pkt_len < sizeof(*pkt) ||
9996			    pkt_len > sc->sc_cmd_resp_len[idx]) {
9997				free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
9998				    sc->sc_cmd_resp_len[idx]);
9999				sc->sc_cmd_resp_pkt[idx] = NULL;
10000				break;
10001			}
10002
10003			bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
10004			    pkt_len - sizeof(*pkt), BUS_DMASYNC_POSTREAD);
10005			memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
10006			break;
10007		}
10008
10009		case IWX_INIT_COMPLETE_NOTIF:
10010			sc->sc_init_complete |= IWX_INIT_COMPLETE;
10011			wakeup(&sc->sc_init_complete);
10012			break;
10013
10014		case IWX_SCAN_COMPLETE_UMAC: {
10015			struct iwx_umac_scan_complete *notif;
10016			SYNC_RESP_STRUCT(notif, pkt);
10017			iwx_endscan(sc);
10018			break;
10019		}
10020
10021		case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
10022			struct iwx_umac_scan_iter_complete_notif *notif;
10023			SYNC_RESP_STRUCT(notif, pkt);
10024			iwx_endscan(sc);
10025			break;
10026		}
10027
10028		case IWX_MCC_CHUB_UPDATE_CMD: {
10029			struct iwx_mcc_chub_notif *notif;
10030			SYNC_RESP_STRUCT(notif, pkt);
10031			iwx_mcc_update(sc, notif);
10032			break;
10033		}
10034
10035		case IWX_REPLY_ERROR: {
10036			struct iwx_error_resp *resp;
10037			SYNC_RESP_STRUCT(resp, pkt);
10038			printf("%s: firmware error 0x%x, cmd 0x%x\n",
10039				DEVNAME(sc), le32toh(resp->error_type),
10040				resp->cmd_id);
10041			break;
10042		}
10043
10044		case IWX_TIME_EVENT_NOTIFICATION: {
10045			struct iwx_time_event_notif *notif;
10046			uint32_t action;
10047			SYNC_RESP_STRUCT(notif, pkt);
10048
10049			if (sc->sc_time_event_uid != le32toh(notif->unique_id))
10050				break;
10051			action = le32toh(notif->action);
10052			if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
10053				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
10054			break;
10055		}
10056
10057		case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
10058		    IWX_SESSION_PROTECTION_NOTIF): {
10059			struct iwx_session_prot_notif *notif;
10060			uint32_t status, start, conf_id;
10061
10062			SYNC_RESP_STRUCT(notif, pkt);
10063
10064			status = le32toh(notif->status);
10065			start = le32toh(notif->start);
10066			conf_id = le32toh(notif->conf_id);
10067			/* Check for end of successful PROTECT_CONF_ASSOC. */
10068			if (status == 1 && start == 0 &&
10069			    conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
10070				sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
10071			break;
10072		}
10073
10074		case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
10075		    IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
10076		    break;
10077
10078		/*
10079		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
10080		 * messages. Just ignore them for now.
10081		 */
10082		case IWX_DEBUG_LOG_MSG:
10083			break;
10084
10085		case IWX_MCAST_FILTER_CMD:
10086			break;
10087
10088		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
10089			break;
10090
10091		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
10092			break;
10093
10094		case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
10095			break;
10096
10097		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10098		    IWX_NVM_ACCESS_COMPLETE):
10099			break;
10100
10101		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
10102			break; /* happens in monitor mode; ignore for now */
10103
10104		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
10105			break;
10106
10107		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10108		    IWX_TLC_MNG_UPDATE_NOTIF): {
10109			struct iwx_tlc_update_notif *notif;
10110			SYNC_RESP_STRUCT(notif, pkt);
10111			if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
10112				iwx_rs_update(sc, notif);
10113			break;
10114		}
10115
10116		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
10117			break;
10118
10119		/*
10120		 * Ignore for now. The Linux driver only acts on this request
10121		 * with 160Mhz channels in 11ax mode.
10122		 */
10123		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
10124		    IWX_THERMAL_DUAL_CHAIN_REQUEST):
10125			DPRINTF(("%s: thermal dual-chain request received\n",
10126			    DEVNAME(sc)));
10127			break;
10128
10129		/* undocumented notification from iwx-ty-a0-gf-a0-77 image */
10130		case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
10131			break;
10132
10133		case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
10134		    IWX_PNVM_INIT_COMPLETE):
10135			sc->sc_init_complete |= IWX_PNVM_COMPLETE;
10136			wakeup(&sc->sc_init_complete);
10137			break;
10138
10139		default:
10140			handled = 0;
10141			printf("%s: unhandled firmware response 0x%x/0x%x "
10142			    "rx ring %d[%d]\n",
10143			    DEVNAME(sc), code, pkt->len_n_flags,
10144			    (qid & ~0x80), idx);
10145			break;
10146		}
10147
10148		/*
10149		 * uCode sets bit 0x80 when it originates the notification,
10150		 * i.e. when the notification is not a direct response to a
10151		 * command sent by the driver.
10152		 * For example, uCode issues IWX_REPLY_RX when it sends a
10153		 * received frame to the driver.
10154		 */
10155		if (handled && !(qid & (1 << 7))) {
10156			iwx_cmd_done(sc, qid, idx, code);
10157		}
10158
10159		offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
10160
10161		/* AX210 devices ship only one packet per Rx buffer. */
10162		if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
10163			break;
10164	}
10165
10166	if (m0 && m0 != data->m)
10167		m_freem(m0);
10168}
10169
10170void
10171iwx_notif_intr(struct iwx_softc *sc)
10172{
10173	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
10174	uint16_t hw;
10175
10176	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
10177	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
10178
10179	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
10180		uint16_t *status = sc->rxq.stat_dma.vaddr;
10181		hw = le16toh(*status) & 0xfff;
10182	} else
10183		hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
10184	hw &= (IWX_RX_MQ_RING_COUNT - 1);
10185	while (sc->rxq.cur != hw) {
10186		struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
10187		iwx_rx_pkt(sc, data, &ml);
10188		sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
10189	}
10190	if_input(&sc->sc_ic.ic_if, &ml);
10191
10192	/*
10193	 * Tell the firmware what we have processed.
10194	 * Seems like the hardware gets upset unless we align the write by 8??
10195	 */
10196	hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
10197	IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
10198}
10199
10200int
10201iwx_intr(void *arg)
10202{
10203	struct iwx_softc *sc = arg;
10204	struct ieee80211com *ic = &sc->sc_ic;
10205	struct ifnet *ifp = IC2IFP(ic);
10206	int handled = 0;
10207	int r1, r2, rv = 0;
10208
10209	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
10210
10211	if (sc->sc_flags & IWX_FLAG_USE_ICT) {
10212		uint32_t *ict = sc->ict_dma.vaddr;
10213		int tmp;
10214
10215		tmp = htole32(ict[sc->ict_cur]);
10216		if (!tmp)
10217			goto out_ena;
10218
10219		/*
10220		 * ok, there was something.  keep plowing until we have all.
10221		 */
10222		r1 = r2 = 0;
10223		while (tmp) {
10224			r1 |= tmp;
10225			ict[sc->ict_cur] = 0;
10226			sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
10227			tmp = htole32(ict[sc->ict_cur]);
10228		}
10229
10230		/* this is where the fun begins.  don't ask */
10231		if (r1 == 0xffffffff)
10232			r1 = 0;
10233
10234		/* i am not expected to understand this */
10235		if (r1 & 0xc0000)
10236			r1 |= 0x8000;
10237		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
10238	} else {
10239		r1 = IWX_READ(sc, IWX_CSR_INT);
10240		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
10241			goto out;
10242		r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
10243	}
10244	if (r1 == 0 && r2 == 0) {
10245		goto out_ena;
10246	}
10247
10248	IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
10249
10250	if (r1 & IWX_CSR_INT_BIT_ALIVE) {
10251		int i;
10252
10253		/* Firmware has now configured the RFH. */
10254		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
10255			iwx_update_rx_desc(sc, &sc->rxq, i);
10256		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
10257	}
10258
10259	handled |= (r1 & (IWX_CSR_INT_BIT_ALIVE /*| IWX_CSR_INT_BIT_SCD*/));
10260
10261	if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
10262		handled |= IWX_CSR_INT_BIT_RF_KILL;
10263		iwx_check_rfkill(sc);
10264		task_add(systq, &sc->init_task);
10265		rv = 1;
10266		goto out_ena;
10267	}
10268
10269	if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
10270		if (ifp->if_flags & IFF_DEBUG) {
10271			iwx_nic_error(sc);
10272			iwx_dump_driver_status(sc);
10273		}
10274		printf("%s: fatal firmware error\n", DEVNAME(sc));
10275		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
10276			task_add(systq, &sc->init_task);
10277		rv = 1;
10278		goto out;
10279
10280	}
10281
10282	if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
10283		handled |= IWX_CSR_INT_BIT_HW_ERR;
10284		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
10285		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
10286			sc->sc_flags |= IWX_FLAG_HW_ERR;
10287			task_add(systq, &sc->init_task);
10288		}
10289		rv = 1;
10290		goto out;
10291	}
10292
10293	/* firmware chunk loaded */
10294	if (r1 & IWX_CSR_INT_BIT_FH_TX) {
10295		IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
10296		handled |= IWX_CSR_INT_BIT_FH_TX;
10297
10298		sc->sc_fw_chunk_done = 1;
10299		wakeup(&sc->sc_fw);
10300	}
10301
10302	if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
10303	    IWX_CSR_INT_BIT_RX_PERIODIC)) {
10304		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
10305			handled |= (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX);
10306			IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
10307		}
10308		if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
10309			handled |= IWX_CSR_INT_BIT_RX_PERIODIC;
10310			IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
10311		}
10312
10313		/* Disable periodic interrupt; we use it as just a one-shot. */
10314		IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
10315
10316		/*
10317		 * Enable periodic interrupt in 8 msec only if we received
10318		 * real RX interrupt (instead of just periodic int), to catch
10319		 * any dangling Rx interrupt.  If it was just the periodic
10320		 * interrupt, there was no dangling Rx activity, and no need
10321		 * to extend the periodic interrupt; one-shot is enough.
10322		 */
10323		if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
10324			IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
10325			    IWX_CSR_INT_PERIODIC_ENA);
10326
10327		iwx_notif_intr(sc);
10328	}
10329
10330	rv = 1;
10331
10332 out_ena:
10333	iwx_restore_interrupts(sc);
10334 out:
10335	return rv;
10336}
10337
10338int
10339iwx_intr_msix(void *arg)
10340{
10341	struct iwx_softc *sc = arg;
10342	struct ieee80211com *ic = &sc->sc_ic;
10343	struct ifnet *ifp = IC2IFP(ic);
10344	uint32_t inta_fh, inta_hw;
10345	int vector = 0;
10346
10347	inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
10348	inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
10349	IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
10350	IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
10351	inta_fh &= sc->sc_fh_mask;
10352	inta_hw &= sc->sc_hw_mask;
10353
10354	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
10355	    inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
10356		iwx_notif_intr(sc);
10357	}
10358
10359	/* firmware chunk loaded */
10360	if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
10361		sc->sc_fw_chunk_done = 1;
10362		wakeup(&sc->sc_fw);
10363	}
10364
10365	if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
10366	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
10367	    (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
10368		if (ifp->if_flags & IFF_DEBUG) {
10369			iwx_nic_error(sc);
10370			iwx_dump_driver_status(sc);
10371		}
10372		printf("%s: fatal firmware error\n", DEVNAME(sc));
10373		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0)
10374			task_add(systq, &sc->init_task);
10375		return 1;
10376	}
10377
10378	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
10379		iwx_check_rfkill(sc);
10380		task_add(systq, &sc->init_task);
10381	}
10382
10383	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
10384		printf("%s: hardware error, stopping device \n", DEVNAME(sc));
10385		if ((sc->sc_flags & IWX_FLAG_SHUTDOWN) == 0) {
10386			sc->sc_flags |= IWX_FLAG_HW_ERR;
10387			task_add(systq, &sc->init_task);
10388		}
10389		return 1;
10390	}
10391
10392	if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
10393		int i;
10394
10395		/* Firmware has now configured the RFH. */
10396		for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
10397			iwx_update_rx_desc(sc, &sc->rxq, i);
10398		IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
10399	}
10400
10401	/*
10402	 * Before sending the interrupt the HW disables it to prevent
10403	 * a nested interrupt. This is done by writing 1 to the corresponding
10404	 * bit in the mask register. After handling the interrupt, it should be
10405	 * re-enabled by clearing this bit. This register is defined as
10406	 * write 1 clear (W1C) register, meaning that it's being clear
10407	 * by writing 1 to the bit.
10408	 */
10409	IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
10410	return 1;
10411}
10412
10413typedef void *iwx_match_t;
10414
10415static const struct pci_matchid iwx_devices[] = {
10416#ifdef __FreeBSD_version
10417#define	PCI_VENDOR_INTEL			0x8086
10418#define	PCI_PRODUCT_INTEL_WL_22500_1	0x2723		/* Wi-Fi 6 AX200 */
10419#define	PCI_PRODUCT_INTEL_WL_22500_2	0x02f0		/* Wi-Fi 6 AX201 */
10420#define	PCI_PRODUCT_INTEL_WL_22500_3	0xa0f0		/* Wi-Fi 6 AX201 */
10421#define	PCI_PRODUCT_INTEL_WL_22500_4	0x34f0		/* Wi-Fi 6 AX201 */
10422#define	PCI_PRODUCT_INTEL_WL_22500_5	0x06f0		/* Wi-Fi 6 AX201 */
10423#define	PCI_PRODUCT_INTEL_WL_22500_6	0x43f0		/* Wi-Fi 6 AX201 */
10424#define	PCI_PRODUCT_INTEL_WL_22500_7	0x3df0		/* Wi-Fi 6 AX201 */
10425#define	PCI_PRODUCT_INTEL_WL_22500_8	0x4df0		/* Wi-Fi 6 AX201 */
10426#define	PCI_PRODUCT_INTEL_WL_22500_9	0x2725		/* Wi-Fi 6 AX210 */
10427#define	PCI_PRODUCT_INTEL_WL_22500_10	0x2726		/* Wi-Fi 6 AX211 */
10428#define	PCI_PRODUCT_INTEL_WL_22500_11	0x51f0		/* Wi-Fi 6 AX211 */
10429#define	PCI_PRODUCT_INTEL_WL_22500_12	0x7a70		/* Wi-Fi 6 AX211 */
10430#define	PCI_PRODUCT_INTEL_WL_22500_13	0x7af0		/* Wi-Fi 6 AX211 */
10431#define	PCI_PRODUCT_INTEL_WL_22500_14	0x7e40		/* Wi-Fi 6 AX210 */
10432#define	PCI_PRODUCT_INTEL_WL_22500_15	0x7f70		/* Wi-Fi 6 AX211 */
10433#define	PCI_PRODUCT_INTEL_WL_22500_16	0x54f0		/* Wi-Fi 6 AX211 */
10434#define	PCI_PRODUCT_INTEL_WL_22500_17	0x51f1		/* Wi-Fi 6 AX211 */
10435#endif
10436	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_1 },
10437	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_2 },
10438	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_3 },
10439	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_4,},
10440	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_5,},
10441	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_6,},
10442	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_7,},
10443	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_8,},
10444	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_9,},
10445	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_10,},
10446	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_11,},
10447	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_12,},
10448	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_13,},
10449	/* _14 is an MA device, not yet supported */
10450	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_15,},
10451	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_16,},
10452	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_WL_22500_17,},
10453};
10454
10455#ifdef __FreeBSD_version
10456static int
10457iwx_probe(device_t dev)
10458{
10459	int i;
10460
10461	for (i = 0; i < nitems(iwx_devices); i++) {
10462		if (pci_get_vendor(dev) == iwx_devices[i].pm_vid &&
10463			pci_get_device(dev) == iwx_devices[i].pm_pid) {
10464			return (BUS_PROBE_DEFAULT);
10465		}
10466	}
10467
10468	return (ENXIO);
10469}
10470#else
10471int
10472iwx_match(struct device *parent, iwx_match_t match __unused, void *aux)
10473{
10474	struct pci_attach_args *pa = aux;
10475	return pci_matchbyid(pa, iwx_devices, nitems(iwx_devices));
10476}
10477#endif
10478
10479/*
10480 * The device info table below contains device-specific config overrides.
10481 * The most important parameter derived from this table is the name of the
10482 * firmware image to load.
10483 *
10484 * The Linux iwlwifi driver uses an "old" and a "new" device info table.
10485 * The "old" table matches devices based on PCI vendor/product IDs only.
10486 * The "new" table extends this with various device parameters derived
10487 * from MAC type, and RF type.
10488 *
10489 * In iwlwifi "old" and "new" tables share the same array, where "old"
10490 * entries contain dummy values for data defined only for "new" entries.
10491 * As of 2022, Linux developers are still in the process of moving entries
10492 * from "old" to "new" style and it looks like this effort has stalled in
10493 * in some work-in-progress state for quite a while. Linux commits moving
10494 * entries from "old" to "new" have at times been reverted due to regressions.
10495 * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
10496 * devices in the same driver.
10497 *
10498 * Our table below contains mostly "new" entries declared in iwlwifi
10499 * with the _IWL_DEV_INFO() macro (with a leading underscore).
10500 * Other devices are matched based on PCI vendor/product ID as usual,
10501 * unless matching specific PCI subsystem vendor/product IDs is required.
10502 *
10503 * Some "old"-style entries are required to identify the firmware image to use.
10504 * Others might be used to print a specific marketing name into Linux dmesg,
10505 * but we can't be sure whether the corresponding devices would be matched
10506 * correctly in the absence of their entries. So we include them just in case.
10507 */
10508
10509struct iwx_dev_info {
10510	uint16_t device;
10511	uint16_t subdevice;
10512	uint16_t mac_type;
10513	uint16_t rf_type;
10514	uint8_t mac_step;
10515	uint8_t rf_id;
10516	uint8_t no_160;
10517	uint8_t cores;
10518	uint8_t cdb;
10519	uint8_t jacket;
10520	const struct iwx_device_cfg *cfg;
10521};
10522
10523#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
10524		      _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
10525	{ .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg),  \
10526	  .mac_type = _mac_type, .rf_type = _rf_type,	   \
10527	  .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id,		   \
10528	  .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
10529
10530#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
10531	_IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY,	   \
10532		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY,  \
10533		      IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
10534
10535/*
10536 * When adding entries to this table keep in mind that entries must
10537 * be listed in the same order as in the Linux driver. Code walks this
10538 * table backwards and uses the first matching entry it finds.
10539 * Device firmware must be available in fw_update(8).
10540 */
10541static const struct iwx_dev_info iwx_dev_info_table[] = {
10542	/* So with HR */
10543	IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
10544	IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
10545	IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
10546	IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
10547	IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
10548	IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
10549	IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
10550	IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
10551	IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
10552	IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
10553	IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
10554	IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
10555	IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
10556	IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
10557	IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10558	IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10559	IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
10560	IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
10561	IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10562	IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10563	IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
10564	IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
10565	IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
10566	IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
10567	IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
10568	IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
10569	IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
10570	IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
10571	IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
10572	IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10573	IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10574	IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
10575	IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
10576	IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
10577	IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
10578	IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
10579
10580	/* So with GF2 */
10581	IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10582	IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10583	IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10584	IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10585	IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10586	IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10587	IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10588	IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10589	IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10590	IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10591	IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
10592	IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
10593
10594	/* Qu with Jf, C step */
10595	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10596		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10597		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10598		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10599		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
10600	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10601		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10602		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10603		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10604		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
10605	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10606		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10607		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10608		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10609		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
10610	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10611		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10612		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10613		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10614		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
10615	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10616		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10617		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10618		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10619		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
10620	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10621		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10622		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10623		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10624		      IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
10625	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10626		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10627		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10628		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10629		      IWX_CFG_ANY,
10630		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
10631	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10632		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10633		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10634		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10635		      IWX_CFG_ANY,
10636		      iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
10637
10638	/* QuZ with Jf */
10639	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10640		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10641		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10642		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10643		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
10644	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10645		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10646		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10647		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10648		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
10649	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10650		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10651		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10652		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10653		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
10654	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10655		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10656		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10657		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10658		      IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
10659	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
10660		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10661		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10662		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10663		      IWX_CFG_ANY,
10664		      iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
10665	_IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
10666		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10667		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10668		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10669		      IWX_CFG_ANY,
10670		      iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
10671
10672	/* Qu with Hr, B step */
10673	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10674		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
10675		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10676		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10677		      iwx_qu_b0_hr1_b0), /* AX101 */
10678	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10679		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
10680		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10681		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10682		      iwx_qu_b0_hr_b0), /* AX203 */
10683
10684	/* Qu with Hr, C step */
10685	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10686		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10687		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10688		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10689		      iwx_qu_c0_hr1_b0), /* AX101 */
10690	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10691		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10692		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10693		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10694		      iwx_qu_c0_hr_b0), /* AX203 */
10695	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10696		      IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
10697		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10698		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10699		      iwx_qu_c0_hr_b0), /* AX201 */
10700
10701	/* QuZ with Hr */
10702	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10703		      IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
10704		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10705		      IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10706		      iwx_quz_a0_hr1_b0), /* AX101 */
10707	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10708		      IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
10709		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10710		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10711		      iwx_cfg_quz_a0_hr_b0), /* AX203 */
10712
10713	/* SoF with JF2 */
10714	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10715		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10716		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10717		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10718		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
10719	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10720		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10721		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10722		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10723		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
10724
10725	/* SoF with JF */
10726	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10727		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10728		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10729		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10730		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
10731	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10732		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10733		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10734		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10735		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
10736	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10737		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10738		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10739		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10740		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
10741	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10742		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10743		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10744		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10745		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
10746
10747	/* So with Hr */
10748	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10749		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10750		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10751		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10752		      iwx_cfg_so_a0_hr_b0), /* AX203 */
10753	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10754		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10755		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10756		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10757		      iwx_cfg_so_a0_hr_b0), /* ax101 */
10758	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10759		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10760		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10761		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10762		      iwx_cfg_so_a0_hr_b0), /* ax201 */
10763
10764	/* So-F with Hr */
10765	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10766		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10767		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10768		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10769		      iwx_cfg_so_a0_hr_b0), /* AX203 */
10770	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10771		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10772		      IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
10773		      IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10774		      iwx_cfg_so_a0_hr_b0), /* AX101 */
10775	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10776		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10777		      IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
10778		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10779		      iwx_cfg_so_a0_hr_b0), /* AX201 */
10780
10781	/* So-F with GF */
10782	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10783		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10784		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10785		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10786		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
10787	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10788		      IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
10789		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10790		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
10791		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
10792
10793	/* So with GF */
10794	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10795		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10796		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10797		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
10798		      iwx_2ax_cfg_so_gf_a0), /* AX211 */
10799	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10800		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10801		      IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
10802		      IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
10803		      iwx_2ax_cfg_so_gf4_a0), /* AX411 */
10804
10805	/* So with JF2 */
10806	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10807		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10808		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10809		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10810		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
10811	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10812		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10813		      IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
10814		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10815		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
10816
10817	/* So with JF */
10818	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10819		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10820		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10821		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10822		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
10823	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10824		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10825		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10826		      IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10827		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
10828	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10829		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10830		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
10831		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10832		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
10833	_IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
10834		      IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
10835		      IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
10836		      IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
10837		      IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
10838};
10839
10840int
10841iwx_preinit(struct iwx_softc *sc)
10842{
10843	struct ieee80211com *ic = &sc->sc_ic;
10844	struct ifnet *ifp = IC2IFP(ic);
10845	int err;
10846
10847	err = iwx_prepare_card_hw(sc);
10848	if (err) {
10849		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10850		return err;
10851	}
10852
10853	if (sc->attached) {
10854#ifndef __FreeBSD_version
10855		/* Update MAC in case the upper layers changed it. */
10856		IEEE80211_ADDR_COPY(sc->sc_ic.ic_myaddr,
10857		    ((struct arpcom *)ifp)->ac_enaddr);
10858#endif
10859		return 0;
10860	}
10861
10862	err = iwx_start_hw(sc);
10863	if (err) {
10864		printf("%s: could not initialize hardware\n", DEVNAME(sc));
10865		return err;
10866	}
10867
10868	err = iwx_run_init_mvm_ucode(sc, 1);
10869	iwx_stop_device(sc);
10870	if (err)
10871		return err;
10872
10873	/* Print version info and MAC address on first successful fw load. */
10874	sc->attached = 1;
10875	if (sc->sc_pnvm_ver) {
10876		printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
10877		    "address %s\n",
10878		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10879		    sc->sc_fwver, sc->sc_pnvm_ver,
10880		    ether_sprintf(sc->sc_nvm.hw_addr));
10881	} else {
10882		printf("%s: hw rev 0x%x, fw %s, address %s\n",
10883		    DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
10884		    sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
10885	}
10886
10887	if (sc->sc_nvm.sku_cap_11n_enable)
10888		iwx_setup_ht_rates(sc);
10889	if (sc->sc_nvm.sku_cap_11ac_enable)
10890		iwx_setup_vht_rates(sc);
10891
10892	/* not all hardware can do 5GHz band */
10893	if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
10894		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
10895		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
10896
10897	/* Configure channel information obtained from firmware. */
10898	ieee80211_channel_init(ifp);
10899
10900#ifdef __HAIKU__
10901	IEEE80211_ADDR_COPY(IF_LLADDR(ifp), ic->ic_myaddr);
10902#else
10903	/* Configure MAC address. */
10904	err = if_setlladdr(ifp, ic->ic_myaddr);
10905	if (err)
10906		printf("%s: could not set MAC address (error %d)\n",
10907		    DEVNAME(sc), err);
10908#endif
10909
10910	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
10911
10912	return 0;
10913}
10914
10915void
10916iwx_attach_hook(struct device *self)
10917{
10918	struct iwx_softc *sc = (void *)self;
10919
10920	KASSERT(!cold);
10921
10922	iwx_preinit(sc);
10923}
10924
10925const struct iwx_device_cfg *
10926iwx_find_device_cfg(struct iwx_softc *sc)
10927{
10928	pcireg_t sreg;
10929	pci_product_id_t sdev_id;
10930	uint16_t mac_type, rf_type;
10931	uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
10932	int i;
10933
10934	sreg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG);
10935	sdev_id = PCI_PRODUCT(sreg);
10936	mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
10937	mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
10938	rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
10939	cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
10940	jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
10941
10942	rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
10943	no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
10944	cores = IWX_SUBDEVICE_CORES(sdev_id);
10945
10946	for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
10947		 const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
10948
10949		if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
10950		    dev_info->device != sc->sc_pid)
10951			continue;
10952
10953		if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
10954		    dev_info->subdevice != sdev_id)
10955			continue;
10956
10957		if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
10958		    dev_info->mac_type != mac_type)
10959			continue;
10960
10961		if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
10962		    dev_info->mac_step != mac_step)
10963			continue;
10964
10965		if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
10966		    dev_info->rf_type != rf_type)
10967			continue;
10968
10969		if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
10970		    dev_info->cdb != cdb)
10971			continue;
10972
10973		if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
10974		    dev_info->jacket != jacket)
10975			continue;
10976
10977		if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
10978		    dev_info->rf_id != rf_id)
10979			continue;
10980
10981		if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
10982		    dev_info->no_160 != no_160)
10983			continue;
10984
10985		if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
10986		    dev_info->cores != cores)
10987			continue;
10988
10989		return dev_info->cfg;
10990	}
10991
10992	return NULL;
10993}
10994
10995
10996#ifdef __FreeBSD_version
10997static int
10998iwx_attach(device_t dev)
10999#else
11000void
11001iwx_attach(struct device *parent, struct device *self, void *aux)
11002#endif
11003{
11004#ifdef __FreeBSD_version
11005#define pa dev
11006	struct iwx_softc *sc = device_get_softc(dev);
11007#else
11008	struct iwx_softc *sc = (void *)self;
11009	struct pci_attach_args *pa = aux;
11010#endif
11011	pci_intr_handle_t ih;
11012	pcireg_t reg, memtype;
11013	struct ieee80211com *ic = &sc->sc_ic;
11014	struct ifnet *ifp = &ic->ic_if;
11015	const char *intrstr;
11016	const struct iwx_device_cfg *cfg;
11017	int err;
11018	int txq_i, i, j;
11019	size_t ctxt_info_size;
11020
11021#ifdef __FreeBSD_version
11022	sc->sc_dev = dev;
11023	sc->sc_pid = pci_get_device(dev);
11024	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
11025	bus_dma_tag_create(sc->sc_dmat, 1, 0,
11026		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
11027		BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
11028		&sc->sc_dmat);
11029	pci_enable_busmaster(sc->sc_dev);
11030
11031	if_alloc_inplace(ifp, IFT_ETHER);
11032#else
11033	sc->sc_pid = PCI_PRODUCT(pa->pa_id);
11034	sc->sc_pct = pa->pa_pc;
11035	sc->sc_pcitag = pa->pa_tag;
11036	sc->sc_dmat = pa->pa_dmat;
11037#endif
11038
11039	rw_init(&sc->ioctl_rwl, "iwxioctl");
11040
11041	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
11042	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
11043	if (err == 0) {
11044		printf("%s: PCIe capability structure not found!\n",
11045		    DEVNAME(sc));
11046		goto fail;
11047	}
11048
11049	/*
11050	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11051	 * PCI Tx retries from interfering with C3 CPU state.
11052	 */
11053	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11054	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11055
11056	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
11057	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
11058	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz, 0);
11059	if (err) {
11060		printf("%s: can't map mem space\n", DEVNAME(sc));
11061		goto fail;
11062	}
11063
11064	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
11065		sc->sc_msix = 1;
11066	} else if (pci_intr_map_msi(pa, &ih)) {
11067#ifndef __HAIKU__
11068		if (pci_intr_map(pa, &ih)) {
11069#else
11070		{
11071#endif
11072			printf("%s: can't map interrupt\n", DEVNAME(sc));
11073			goto fail;
11074		}
11075		/* Hardware bug workaround. */
11076		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11077		    PCI_COMMAND_STATUS_REG);
11078		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11079			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11080		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11081		    PCI_COMMAND_STATUS_REG, reg);
11082	}
11083
11084	intrstr = pci_intr_string(sc->sc_pct, ih);
11085	if (sc->sc_msix)
11086		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11087		    iwx_intr_msix, sc, DEVNAME(sc));
11088	else
11089		sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
11090		    iwx_intr, sc, DEVNAME(sc));
11091
11092	if (sc->sc_ih == NULL) {
11093		printf("\n");
11094		printf("%s: can't establish interrupt", DEVNAME(sc));
11095		if (intrstr != NULL)
11096			printf(" at %s", intrstr);
11097		printf("\n");
11098		goto fail;
11099	}
11100	printf(", %s\n", intrstr);
11101
11102	/* Clear pending interrupts. */
11103	IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
11104	IWX_WRITE(sc, IWX_CSR_INT, ~0);
11105	IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
11106
11107	sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
11108	sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
11109
11110	/*
11111	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
11112	 * changed, and now the revision step also includes bit 0-1 (no more
11113	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
11114	 * in the old format.
11115	 */
11116	sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
11117			(IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
11118
11119#ifdef __FreeBSD_version
11120#undef PCI_PRODUCT
11121#define PCI_PRODUCT(pa) pci_get_device(dev)
11122#endif
11123	switch (PCI_PRODUCT(pa->pa_id)) {
11124	case PCI_PRODUCT_INTEL_WL_22500_1:
11125		sc->sc_fwname = IWX_CC_A_FW;
11126		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11127		sc->sc_integrated = 0;
11128		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
11129		sc->sc_low_latency_xtal = 0;
11130		sc->sc_xtal_latency = 0;
11131		sc->sc_tx_with_siso_diversity = 0;
11132		sc->sc_uhb_supported = 0;
11133		break;
11134	case PCI_PRODUCT_INTEL_WL_22500_2:
11135	case PCI_PRODUCT_INTEL_WL_22500_5:
11136		/* These devices should be QuZ only. */
11137		if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
11138			printf("%s: unsupported AX201 adapter\n", DEVNAME(sc));
11139			goto fail;
11140		}
11141		sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11142		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11143		sc->sc_integrated = 1;
11144		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
11145		sc->sc_low_latency_xtal = 0;
11146		sc->sc_xtal_latency = 500;
11147		sc->sc_tx_with_siso_diversity = 0;
11148		sc->sc_uhb_supported = 0;
11149		break;
11150	case PCI_PRODUCT_INTEL_WL_22500_3:
11151		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11152			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11153		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11154			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11155		else
11156			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11157		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11158		sc->sc_integrated = 1;
11159		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
11160		sc->sc_low_latency_xtal = 0;
11161		sc->sc_xtal_latency = 500;
11162		sc->sc_tx_with_siso_diversity = 0;
11163		sc->sc_uhb_supported = 0;
11164		break;
11165	case PCI_PRODUCT_INTEL_WL_22500_4:
11166	case PCI_PRODUCT_INTEL_WL_22500_7:
11167	case PCI_PRODUCT_INTEL_WL_22500_8:
11168		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11169			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11170		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11171			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11172		else
11173			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11174		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11175		sc->sc_integrated = 1;
11176		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
11177		sc->sc_low_latency_xtal = 0;
11178		sc->sc_xtal_latency = 1820;
11179		sc->sc_tx_with_siso_diversity = 0;
11180		sc->sc_uhb_supported = 0;
11181		break;
11182	case PCI_PRODUCT_INTEL_WL_22500_6:
11183		if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
11184			sc->sc_fwname = IWX_QU_C_HR_B_FW;
11185		else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
11186			sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
11187		else
11188			sc->sc_fwname = IWX_QU_B_HR_B_FW;
11189		sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
11190		sc->sc_integrated = 1;
11191		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
11192		sc->sc_low_latency_xtal = 1;
11193		sc->sc_xtal_latency = 12000;
11194		sc->sc_tx_with_siso_diversity = 0;
11195		sc->sc_uhb_supported = 0;
11196		break;
11197	case PCI_PRODUCT_INTEL_WL_22500_9:
11198	case PCI_PRODUCT_INTEL_WL_22500_10:
11199	case PCI_PRODUCT_INTEL_WL_22500_11:
11200	case PCI_PRODUCT_INTEL_WL_22500_13:
11201	/* _14 is an MA device, not yet supported */
11202	case PCI_PRODUCT_INTEL_WL_22500_15:
11203	case PCI_PRODUCT_INTEL_WL_22500_16:
11204		sc->sc_fwname = IWX_SO_A_GF_A_FW;
11205		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
11206		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
11207		sc->sc_integrated = 0;
11208		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
11209		sc->sc_low_latency_xtal = 0;
11210		sc->sc_xtal_latency = 0;
11211		sc->sc_tx_with_siso_diversity = 0;
11212		sc->sc_uhb_supported = 1;
11213		break;
11214	case PCI_PRODUCT_INTEL_WL_22500_12:
11215	case PCI_PRODUCT_INTEL_WL_22500_17:
11216		sc->sc_fwname = IWX_SO_A_GF_A_FW;
11217		sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
11218		sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
11219		sc->sc_integrated = 1;
11220		sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
11221		sc->sc_low_latency_xtal = 1;
11222		sc->sc_xtal_latency = 12000;
11223		sc->sc_tx_with_siso_diversity = 0;
11224		sc->sc_uhb_supported = 0;
11225		sc->sc_imr_enabled = 1;
11226		break;
11227	default:
11228		printf("%s: unknown adapter type\n", DEVNAME(sc));
11229		goto fail;
11230	}
11231#ifdef __FreeBSD_version
11232#undef PCI_PRODUCT
11233#endif
11234
11235	cfg = iwx_find_device_cfg(sc);
11236	if (cfg) {
11237		sc->sc_fwname = cfg->fw_name;
11238		sc->sc_pnvm_name = cfg->pnvm_name;
11239		sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
11240		sc->sc_uhb_supported = cfg->uhb_supported;
11241		if (cfg->xtal_latency) {
11242			sc->sc_xtal_latency = cfg->xtal_latency;
11243			sc->sc_low_latency_xtal = cfg->low_latency_xtal;
11244		}
11245	}
11246
11247	sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
11248
11249	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
11250		sc->sc_umac_prph_offset = 0x300000;
11251		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
11252	} else
11253		sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
11254
11255	/* Allocate DMA memory for loading firmware. */
11256	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
11257		ctxt_info_size = sizeof(struct iwx_context_info_gen3);
11258	else
11259		ctxt_info_size = sizeof(struct iwx_context_info);
11260	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
11261	    ctxt_info_size, 0);
11262	if (err) {
11263		printf("%s: could not allocate memory for loading firmware\n",
11264		    DEVNAME(sc));
11265		goto fail;
11266	}
11267
11268	if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
11269		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
11270		    sizeof(struct iwx_prph_scratch), 0);
11271		if (err) {
11272			printf("%s: could not allocate prph scratch memory\n",
11273			    DEVNAME(sc));
11274			goto fail1;
11275		}
11276
11277		/*
11278		 * Allocate prph information. The driver doesn't use this.
11279		 * We use the second half of this page to give the device
11280		 * some dummy TR/CR tail pointers - which shouldn't be
11281		 * necessary as we don't use this, but the hardware still
11282		 * reads/writes there and we can't let it go do that with
11283		 * a NULL pointer.
11284		 */
11285		KASSERT(sizeof(struct iwx_prph_info) < PAGE_SIZE / 2);
11286		err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
11287		    PAGE_SIZE, 0);
11288		if (err) {
11289			printf("%s: could not allocate prph info memory\n",
11290			    DEVNAME(sc));
11291			goto fail1;
11292		}
11293	}
11294
11295	/* Allocate interrupt cause table (ICT).*/
11296	err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
11297	    IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
11298	if (err) {
11299		printf("%s: could not allocate ICT table\n", DEVNAME(sc));
11300		goto fail1;
11301	}
11302
11303	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
11304		err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
11305		if (err) {
11306			printf("%s: could not allocate TX ring %d\n",
11307			    DEVNAME(sc), txq_i);
11308			goto fail4;
11309		}
11310	}
11311
11312	err = iwx_alloc_rx_ring(sc, &sc->rxq);
11313	if (err) {
11314		printf("%s: could not allocate RX ring\n", DEVNAME(sc));
11315		goto fail4;
11316	}
11317
11318	sc->sc_nswq = taskq_create("iwxns", 1, IPL_NET, 0);
11319	if (sc->sc_nswq == NULL)
11320		goto fail4;
11321
11322	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
11323	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
11324	ic->ic_state = IEEE80211_S_INIT;
11325
11326	/* Set device capabilities. */
11327	ic->ic_caps =
11328	    IEEE80211_C_QOS | IEEE80211_C_TX_AMPDU | /* A-MPDU */
11329	    IEEE80211_C_ADDBA_OFFLOAD | /* device sends ADDBA/DELBA frames */
11330	    IEEE80211_C_WEP |		/* WEP */
11331	    IEEE80211_C_RSN |		/* WPA/RSN */
11332	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
11333	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
11334	    IEEE80211_C_MONITOR |	/* monitor mode supported */
11335	    IEEE80211_C_SHSLOT |	/* short slot time supported */
11336	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
11337
11338	ic->ic_htcaps = IEEE80211_HTCAP_SGI20 | IEEE80211_HTCAP_SGI40;
11339	ic->ic_htcaps |= IEEE80211_HTCAP_CBW20_40;
11340	ic->ic_htcaps |=
11341	    (IEEE80211_HTCAP_SMPS_DIS << IEEE80211_HTCAP_SMPS_SHIFT);
11342	ic->ic_htxcaps = 0;
11343	ic->ic_txbfcaps = 0;
11344	ic->ic_aselcaps = 0;
11345	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
11346
11347	ic->ic_vhtcaps = IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895 |
11348	    (IEEE80211_VHTCAP_MAX_AMPDU_LEN_64K <<
11349	    IEEE80211_VHTCAP_MAX_AMPDU_LEN_SHIFT) |
11350	    (IEEE80211_VHTCAP_CHAN_WIDTH_80 <<
11351	     IEEE80211_VHTCAP_CHAN_WIDTH_SHIFT) | IEEE80211_VHTCAP_SGI80 |
11352	    IEEE80211_VHTCAP_RX_ANT_PATTERN | IEEE80211_VHTCAP_TX_ANT_PATTERN;
11353
11354	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
11355	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
11356	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
11357
11358	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
11359		sc->sc_phyctxt[i].id = i;
11360		sc->sc_phyctxt[i].sco = IEEE80211_HTOP0_SCO_SCN;
11361		sc->sc_phyctxt[i].vht_chan_width =
11362		    IEEE80211_VHTOP0_CHAN_WIDTH_HT;
11363	}
11364
11365	/* IBSS channel undefined for now. */
11366	ic->ic_ibss_chan = &ic->ic_channels[1];
11367
11368	ic->ic_max_rssi = IWX_MAX_DBM - IWX_MIN_DBM;
11369
11370	ifp->if_softc = sc;
11371	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
11372	ifp->if_ioctl = iwx_ioctl;
11373	ifp->if_start = iwx_start;
11374	ifp->if_watchdog = iwx_watchdog;
11375	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
11376
11377	if_attach(ifp);
11378	ieee80211_ifattach(ifp);
11379	ieee80211_media_init(ifp, iwx_media_change, ieee80211_media_status);
11380
11381#if NBPFILTER > 0
11382	iwx_radiotap_attach(sc);
11383#endif
11384	for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
11385		struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
11386		rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
11387		rxba->sc = sc;
11388		timeout_set(&rxba->session_timer, iwx_rx_ba_session_expired,
11389		    rxba);
11390		timeout_set(&rxba->reorder_buf.reorder_timer,
11391		    iwx_reorder_timer_expired, &rxba->reorder_buf);
11392		for (j = 0; j < nitems(rxba->entries); j++)
11393			ml_init(&rxba->entries[j].frames);
11394	}
11395	task_set(&sc->init_task, iwx_init_task, sc);
11396	task_set(&sc->newstate_task, iwx_newstate_task, sc);
11397	task_set(&sc->ba_task, iwx_ba_task, sc);
11398	task_set(&sc->setkey_task, iwx_setkey_task, sc);
11399	task_set(&sc->mac_ctxt_task, iwx_mac_ctxt_task, sc);
11400	task_set(&sc->phy_ctxt_task, iwx_phy_ctxt_task, sc);
11401	task_set(&sc->bgscan_done_task, iwx_bgscan_done_task, sc);
11402
11403	ic->ic_node_alloc = iwx_node_alloc;
11404	ic->ic_bgscan_start = iwx_bgscan;
11405	ic->ic_bgscan_done = iwx_bgscan_done;
11406	ic->ic_set_key = iwx_set_key;
11407	ic->ic_delete_key = iwx_delete_key;
11408
11409	/* Override 802.11 state transition machine. */
11410	sc->sc_newstate = ic->ic_newstate;
11411	ic->ic_newstate = iwx_newstate;
11412	ic->ic_updatechan = iwx_updatechan;
11413	ic->ic_updateprot = iwx_updateprot;
11414	ic->ic_updateslot = iwx_updateslot;
11415	ic->ic_updateedca = iwx_updateedca;
11416	ic->ic_updatedtim = iwx_updatedtim;
11417	ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
11418	ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
11419	ic->ic_ampdu_tx_start = iwx_ampdu_tx_start;
11420	ic->ic_ampdu_tx_stop = NULL;
11421
11422#ifdef __HAIKU__
11423	iwx_preinit(sc);
11424#else
11425	/*
11426	 * We cannot read the MAC address without loading the
11427	 * firmware from disk. Postpone until mountroot is done.
11428	 */
11429	config_mountroot(self, iwx_attach_hook);
11430#endif
11431
11432	return 0;
11433
11434fail4:	while (--txq_i >= 0)
11435		iwx_free_tx_ring(sc, &sc->txq[txq_i]);
11436	iwx_free_rx_ring(sc, &sc->rxq);
11437	if (sc->ict_dma.vaddr != NULL)
11438		iwx_dma_contig_free(&sc->ict_dma);
11439
11440fail1:	iwx_dma_contig_free(&sc->ctxt_info_dma);
11441	iwx_dma_contig_free(&sc->prph_scratch_dma);
11442	iwx_dma_contig_free(&sc->prph_info_dma);
11443#ifdef __HAIKU__
11444fail:
11445	if_free_inplace(ifp);
11446#endif
11447	return -1;
11448}
11449
11450#if NBPFILTER > 0
11451void
11452iwx_radiotap_attach(struct iwx_softc *sc)
11453{
11454	bpfattach(&sc->sc_drvbpf, &sc->sc_ic.ic_if, DLT_IEEE802_11_RADIO,
11455	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN);
11456
11457	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
11458	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
11459	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWX_RX_RADIOTAP_PRESENT);
11460
11461	sc->sc_txtap_len = sizeof sc->sc_txtapu;
11462	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
11463	sc->sc_txtap.wt_ihdr.it_present = htole32(IWX_TX_RADIOTAP_PRESENT);
11464}
11465#endif
11466
11467void
11468iwx_init_task(void *arg1)
11469{
11470	struct iwx_softc *sc = arg1;
11471	struct ifnet *ifp = &sc->sc_ic.ic_if;
11472	int s = splnet();
11473	int generation = sc->sc_generation;
11474	int fatal = (sc->sc_flags & (IWX_FLAG_HW_ERR | IWX_FLAG_RFKILL));
11475
11476	rw_enter_write(&sc->ioctl_rwl);
11477	if (generation != sc->sc_generation) {
11478		rw_exit(&sc->ioctl_rwl);
11479		splx(s);
11480		return;
11481	}
11482
11483	if (ifp->if_flags & IFF_RUNNING)
11484		iwx_stop(ifp);
11485	else
11486		sc->sc_flags &= ~IWX_FLAG_HW_ERR;
11487
11488	if (!fatal && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
11489		iwx_init(ifp);
11490
11491	rw_exit(&sc->ioctl_rwl);
11492	splx(s);
11493}
11494
11495void
11496iwx_resume(struct iwx_softc *sc)
11497{
11498	pcireg_t reg;
11499
11500	/*
11501	 * We disable the RETRY_TIMEOUT register (0x41) to keep
11502	 * PCI Tx retries from interfering with C3 CPU state.
11503	 */
11504	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
11505	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
11506
11507	if (!sc->sc_msix) {
11508		/* Hardware bug workaround. */
11509		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
11510		    PCI_COMMAND_STATUS_REG);
11511		if (reg & PCI_COMMAND_INTERRUPT_DISABLE)
11512			reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
11513		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
11514		    PCI_COMMAND_STATUS_REG, reg);
11515	}
11516
11517	iwx_disable_interrupts(sc);
11518}
11519
11520int
11521iwx_wakeup(struct iwx_softc *sc)
11522{
11523	struct ieee80211com *ic = &sc->sc_ic;
11524	struct ifnet *ifp = &sc->sc_ic.ic_if;
11525	int err;
11526
11527	rw_enter_write(&sc->ioctl_rwl);
11528
11529	err = iwx_start_hw(sc);
11530	if (err) {
11531		rw_exit(&sc->ioctl_rwl);
11532		return err;
11533	}
11534
11535	err = iwx_init_hw(sc);
11536	if (err) {
11537		iwx_stop_device(sc);
11538		rw_exit(&sc->ioctl_rwl);
11539		return err;
11540	}
11541
11542	refcnt_init(&sc->task_refs);
11543	ifq_clr_oactive(&ifp->if_snd);
11544	ifp->if_flags |= IFF_RUNNING;
11545
11546	if (ic->ic_opmode == IEEE80211_M_MONITOR)
11547		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
11548	else
11549		ieee80211_begin_scan(ifp);
11550
11551	rw_exit(&sc->ioctl_rwl);
11552	return 0;
11553}
11554
11555#ifdef __FreeBSD_version
11556static device_method_t iwx_pci_methods[] = {
11557	/* Device interface */
11558	DEVMETHOD(device_probe,         iwx_probe),
11559	DEVMETHOD(device_attach,        iwx_attach),
11560#if 0
11561	DEVMETHOD(device_detach,        iwx_detach),
11562	DEVMETHOD(device_suspend,       iwx_suspend),
11563	DEVMETHOD(device_resume,        iwx_resume),
11564#endif
11565
11566	DEVMETHOD_END
11567};
11568
11569static driver_t iwx_pci_driver = {
11570	"iwx",
11571	iwx_pci_methods,
11572	sizeof (struct iwx_softc)
11573};
11574
11575static devclass_t iwx_devclass;
11576
11577DRIVER_MODULE(iwx, pci, iwx_pci_driver, iwx_devclass, NULL, NULL);
11578#else
11579int
11580iwx_activate(struct device *self, int act)
11581{
11582	struct iwx_softc *sc = (struct iwx_softc *)self;
11583	struct ifnet *ifp = &sc->sc_ic.ic_if;
11584	int err = 0;
11585
11586	switch (act) {
11587	case DVACT_QUIESCE:
11588		if (ifp->if_flags & IFF_RUNNING) {
11589			rw_enter_write(&sc->ioctl_rwl);
11590			iwx_stop(ifp);
11591			rw_exit(&sc->ioctl_rwl);
11592		}
11593		break;
11594	case DVACT_RESUME:
11595		iwx_resume(sc);
11596		break;
11597	case DVACT_WAKEUP:
11598		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP) {
11599			err = iwx_wakeup(sc);
11600			if (err)
11601				printf("%s: could not initialize hardware\n",
11602				    DEVNAME(sc));
11603		}
11604		break;
11605	}
11606
11607	return 0;
11608}
11609
11610struct cfdriver iwx_cd = {
11611	NULL, "iwx", DV_IFNET
11612};
11613
11614const struct cfattach iwx_ca = {
11615	sizeof(struct iwx_softc), iwx_match, iwx_attach,
11616	NULL, iwx_activate
11617};
11618#endif
11619