/* if_iwm.c revision 330208 */
1/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2
3/*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
23 *
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26 *
27 ***********************************************************************
28 *
29 * This file is provided under a dual BSD/GPLv2 license.  When using or
30 * redistributing this file, you may do so under either license.
31 *
32 * GPL LICENSE SUMMARY
33 *
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43 * General Public License for more details.
44 *
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
49 *
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
52 *
53 * Contact Information:
54 *  Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56 *
57 *
58 * BSD LICENSE
59 *
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 *
67 *  * Redistributions of source code must retain the above copyright
68 *    notice, this list of conditions and the following disclaimer.
69 *  * Redistributions in binary form must reproduce the above copyright
70 *    notice, this list of conditions and the following disclaimer in
71 *    the documentation and/or other materials provided with the
72 *    distribution.
73 *  * Neither the name Intel Corporation nor the names of its
74 *    contributors may be used to endorse or promote products derived
75 *    from this software without specific prior written permission.
76 *
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88 */
89
90/*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92 *
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
96 *
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104 */
105#include <sys/cdefs.h>
106__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 330208 2018-03-01 06:39:44Z eadler $");
107
108#include "opt_wlan.h"
109
110#include <sys/param.h>
111#include <sys/bus.h>
112#include <sys/conf.h>
113#include <sys/endian.h>
114#include <sys/firmware.h>
115#include <sys/kernel.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/module.h>
120#include <sys/proc.h>
121#include <sys/rman.h>
122#include <sys/socket.h>
123#include <sys/sockio.h>
124#include <sys/sysctl.h>
125#include <sys/linker.h>
126
127#include <machine/bus.h>
128#include <machine/endian.h>
129#include <machine/resource.h>
130
131#include <dev/pci/pcivar.h>
132#include <dev/pci/pcireg.h>
133
134#include <net/bpf.h>
135
136#include <net/if.h>
137#include <net/if_var.h>
138#include <net/if_arp.h>
139#include <net/if_dl.h>
140#include <net/if_media.h>
141#include <net/if_types.h>
142
143#include <netinet/in.h>
144#include <netinet/in_systm.h>
145#include <netinet/if_ether.h>
146#include <netinet/ip.h>
147
148#include <net80211/ieee80211_var.h>
149#include <net80211/ieee80211_regdomain.h>
150#include <net80211/ieee80211_ratectl.h>
151#include <net80211/ieee80211_radiotap.h>
152
153#include <dev/iwm/if_iwmreg.h>
154#include <dev/iwm/if_iwmvar.h>
155#include <dev/iwm/if_iwm_config.h>
156#include <dev/iwm/if_iwm_debug.h>
157#include <dev/iwm/if_iwm_notif_wait.h>
158#include <dev/iwm/if_iwm_util.h>
159#include <dev/iwm/if_iwm_binding.h>
160#include <dev/iwm/if_iwm_phy_db.h>
161#include <dev/iwm/if_iwm_mac_ctxt.h>
162#include <dev/iwm/if_iwm_phy_ctxt.h>
163#include <dev/iwm/if_iwm_time_event.h>
164#include <dev/iwm/if_iwm_power.h>
165#include <dev/iwm/if_iwm_scan.h>
166
167#include <dev/iwm/if_iwm_pcie_trans.h>
168#include <dev/iwm/if_iwm_led.h>
169#include <dev/iwm/if_iwm_fw.h>
170
171/* From DragonflyBSD */
172#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
173
/*
 * Channel numbers that can appear in the NVM of pre-8000-family devices.
 * Index into this table comes from the NVM channel section; the 2.4 GHz
 * entries come first (see IWM_NUM_2GHZ_CHANNELS below).
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The driver's per-device channel storage must fit every NVM channel. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
184
/*
 * Channel numbers that can appear in the NVM of 8000-family devices.
 * Same layout as iwm_nvm_channels, but with a wider 5 GHz range.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The driver's per-device channel storage must fit every NVM channel. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
195
196#define IWM_NUM_2GHZ_CHANNELS	14
197#define IWM_N_HW_ADDR_MASK	0xF
198
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in units of 500 kb/s (2 == 1 Mb/s) */
	uint8_t plcp;	/* matching PLCP rate code for the firmware */
} iwm_rates[] = {
	/* CCK rates (indices IWM_RIDX_CCK .. IWM_RIDX_OFDM-1) */
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	/* OFDM rates (indices IWM_RIDX_OFDM .. IWM_RIDX_MAX) */
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index boundaries into iwm_rates[] for the two modulation families. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
225
/* One NVM section as read from the device (see iwm_nvm_read_section()). */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	uint8_t *data;		/* raw section contents */
};
230
231#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
232#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)
233
/* Result of waiting for the firmware "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero if the alive status was OK */
	uint32_t scd_base_addr;	/* scheduler (SCD) base address reported
				 * by the firmware */
};
238
239static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
240static int	iwm_firmware_store_section(struct iwm_softc *,
241                                           enum iwm_ucode_type,
242                                           const uint8_t *, size_t);
243static int	iwm_set_default_calib(struct iwm_softc *, const void *);
244static void	iwm_fw_info_free(struct iwm_fw_info *);
245static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
246static int	iwm_alloc_fwmem(struct iwm_softc *);
247static int	iwm_alloc_sched(struct iwm_softc *);
248static int	iwm_alloc_kw(struct iwm_softc *);
249static int	iwm_alloc_ict(struct iwm_softc *);
250static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
251static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
254                                  int);
255static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
256static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
257static void	iwm_enable_interrupts(struct iwm_softc *);
258static void	iwm_restore_interrupts(struct iwm_softc *);
259static void	iwm_disable_interrupts(struct iwm_softc *);
260static void	iwm_ict_reset(struct iwm_softc *);
261static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
262static void	iwm_stop_device(struct iwm_softc *);
263static void	iwm_mvm_nic_config(struct iwm_softc *);
264static int	iwm_nic_rx_init(struct iwm_softc *);
265static int	iwm_nic_tx_init(struct iwm_softc *);
266static int	iwm_nic_init(struct iwm_softc *);
267static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
268static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
269static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
270                                   uint16_t, uint8_t *, uint16_t *);
271static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
272				     uint16_t *, uint32_t);
273static uint32_t	iwm_eeprom_channel_flags(uint16_t);
274static void	iwm_add_channel_band(struct iwm_softc *,
275		    struct ieee80211_channel[], int, int *, int, size_t,
276		    const uint8_t[]);
277static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
278		    struct ieee80211_channel[]);
279static struct iwm_nvm_data *
280	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281			   const uint16_t *, const uint16_t *,
282			   const uint16_t *, const uint16_t *,
283			   const uint16_t *);
284static void	iwm_free_nvm_data(struct iwm_nvm_data *);
285static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
286					       struct iwm_nvm_data *,
287					       const uint16_t *,
288					       const uint16_t *);
289static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
290			    const uint16_t *);
291static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
292static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
293				  const uint16_t *);
294static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
295				   const uint16_t *);
296static void	iwm_set_radio_cfg(const struct iwm_softc *,
297				  struct iwm_nvm_data *, uint32_t);
298static struct iwm_nvm_data *
299	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
300static int	iwm_nvm_init(struct iwm_softc *);
301static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
302				      const struct iwm_fw_desc *);
303static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
304					     bus_addr_t, uint32_t);
305static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
306						const struct iwm_fw_sects *,
307						int, int *);
308static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
309					   const struct iwm_fw_sects *,
310					   int, int *);
311static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
312					       const struct iwm_fw_sects *);
313static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
314					  const struct iwm_fw_sects *);
315static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
316static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
317static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
318static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
319                                              enum iwm_ucode_type);
320static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
321static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
322static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
323					    struct iwm_rx_phy_info *);
324static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
325                                      struct iwm_rx_packet *);
326static int	iwm_get_noise(struct iwm_softc *sc,
327		    const struct iwm_mvm_statistics_rx_non_phy *);
328static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
329				    uint32_t, boolean_t);
330static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
331                                         struct iwm_rx_packet *,
332				         struct iwm_node *);
333static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
334static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
335#if 0
336static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
337                                 uint16_t);
338#endif
339static const struct iwm_rate *
340	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
341			struct mbuf *, struct iwm_tx_cmd *);
342static int	iwm_tx(struct iwm_softc *, struct mbuf *,
343                       struct ieee80211_node *, int);
344static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
345			     const struct ieee80211_bpf_params *);
346static int	iwm_mvm_flush_tx_path(struct iwm_softc *sc,
347				      uint32_t tfd_msk, uint32_t flags);
348static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
349					        struct iwm_mvm_add_sta_cmd *,
350                                                int *);
351static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
352                                       int);
353static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
354static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
355static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
356                                           struct iwm_int_sta *,
357				           const uint8_t *, uint16_t, uint16_t);
358static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
359static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
360static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
361static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
362static int	iwm_release(struct iwm_softc *, struct iwm_node *);
363static struct ieee80211_node *
364		iwm_node_alloc(struct ieee80211vap *,
365		               const uint8_t[IEEE80211_ADDR_LEN]);
366static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
367static int	iwm_media_change(struct ifnet *);
368static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
369static void	iwm_endscan_cb(void *, int);
370static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
371					struct iwm_sf_cfg_cmd *,
372					struct ieee80211_node *);
373static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
374static int	iwm_send_bt_init_conf(struct iwm_softc *);
375static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
376static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
377static int	iwm_init_hw(struct iwm_softc *);
378static void	iwm_init(struct iwm_softc *);
379static void	iwm_start(struct iwm_softc *);
380static void	iwm_stop(struct iwm_softc *);
381static void	iwm_watchdog(void *);
382static void	iwm_parent(struct ieee80211com *);
383#ifdef IWM_DEBUG
384static const char *
385		iwm_desc_lookup(uint32_t);
386static void	iwm_nic_error(struct iwm_softc *);
387static void	iwm_nic_umac_error(struct iwm_softc *);
388#endif
389static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
390static void	iwm_notif_intr(struct iwm_softc *);
391static void	iwm_intr(void *);
392static int	iwm_attach(device_t);
393static int	iwm_is_valid_ether_addr(uint8_t *);
394static void	iwm_preinit(void *);
395static int	iwm_detach_local(struct iwm_softc *sc, int);
396static void	iwm_init_task(void *);
397static void	iwm_radiotap_attach(struct iwm_softc *);
398static struct ieee80211vap *
399		iwm_vap_create(struct ieee80211com *,
400		               const char [IFNAMSIZ], int,
401		               enum ieee80211_opmode, int,
402		               const uint8_t [IEEE80211_ADDR_LEN],
403		               const uint8_t [IEEE80211_ADDR_LEN]);
404static void	iwm_vap_delete(struct ieee80211vap *);
405static void	iwm_scan_start(struct ieee80211com *);
406static void	iwm_scan_end(struct ieee80211com *);
407static void	iwm_update_mcast(struct ieee80211com *);
408static void	iwm_set_channel(struct ieee80211com *);
409static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
410static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
411static int	iwm_detach(device_t);
412
413/*
414 * Firmware parser.
415 */
416
417static int
418iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
419{
420	const struct iwm_fw_cscheme_list *l = (const void *)data;
421
422	if (dlen < sizeof(*l) ||
423	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
424		return EINVAL;
425
426	/* we don't actually store anything for now, always use s/w crypto */
427
428	return 0;
429}
430
431static int
432iwm_firmware_store_section(struct iwm_softc *sc,
433    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
434{
435	struct iwm_fw_sects *fws;
436	struct iwm_fw_desc *fwone;
437
438	if (type >= IWM_UCODE_TYPE_MAX)
439		return EINVAL;
440	if (dlen < sizeof(uint32_t))
441		return EINVAL;
442
443	fws = &sc->sc_fw.fw_sects[type];
444	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
445		return EINVAL;
446
447	fwone = &fws->fw_sect[fws->fw_count];
448
449	/* first 32bit are device load offset */
450	memcpy(&fwone->offset, data, sizeof(uint32_t));
451
452	/* rest is data */
453	fwone->data = data + sizeof(uint32_t);
454	fwone->len = dlen - sizeof(uint32_t);
455
456	fws->fw_count++;
457
458	return 0;
459}
460
461#define IWM_DEFAULT_SCAN_CHANNELS 40
462
/* iwlwifi: iwl-drv.c */
/* Wire format of an IWM_UCODE_TLV_DEF_CALIB payload (little-endian). */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* which ucode image this applies to */
	struct iwm_tlv_calib_ctrl calib;	/* default calibration triggers */
} __packed;
468
469static int
470iwm_set_default_calib(struct iwm_softc *sc, const void *data)
471{
472	const struct iwm_tlv_calib_data *def_calib = data;
473	uint32_t ucode_type = le32toh(def_calib->ucode_type);
474
475	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
476		device_printf(sc->sc_dev,
477		    "Wrong ucode_type %u for default "
478		    "calibration.\n", ucode_type);
479		return EINVAL;
480	}
481
482	sc->sc_default_calib[ucode_type].flow_trigger =
483	    def_calib->calib.flow_trigger;
484	sc->sc_default_calib[ucode_type].event_trigger =
485	    def_calib->calib.event_trigger;
486
487	return 0;
488}
489
490static int
491iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
492			struct iwm_ucode_capabilities *capa)
493{
494	const struct iwm_ucode_api *ucode_api = (const void *)data;
495	uint32_t api_index = le32toh(ucode_api->api_index);
496	uint32_t api_flags = le32toh(ucode_api->api_flags);
497	int i;
498
499	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
500		device_printf(sc->sc_dev,
501		    "api flags index %d larger than supported by driver\n",
502		    api_index);
503		/* don't return an error so we can load FW that has more bits */
504		return 0;
505	}
506
507	for (i = 0; i < 32; i++) {
508		if (api_flags & (1U << i))
509			setbit(capa->enabled_api, i + 32 * api_index);
510	}
511
512	return 0;
513}
514
515static int
516iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
517			   struct iwm_ucode_capabilities *capa)
518{
519	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
520	uint32_t api_index = le32toh(ucode_capa->api_index);
521	uint32_t api_flags = le32toh(ucode_capa->api_capa);
522	int i;
523
524	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
525		device_printf(sc->sc_dev,
526		    "capa flags index %d larger than supported by driver\n",
527		    api_index);
528		/* don't return an error so we can load FW that has more bits */
529		return 0;
530	}
531
532	for (i = 0; i < 32; i++) {
533		if (api_flags & (1U << i))
534			setbit(capa->enabled_capa, i + 32 * api_index);
535	}
536
537	return 0;
538}
539
/*
 * Release a previously loaded firmware image and clear the parsed
 * section table.  fw->fw_status is deliberately left alone: the
 * status/wakeup handshake is managed by iwm_read_firmware() itself.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
548
549static int
550iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
551{
552	struct iwm_fw_info *fw = &sc->sc_fw;
553	const struct iwm_tlv_ucode_header *uhdr;
554	const struct iwm_ucode_tlv *tlv;
555	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
556	enum iwm_ucode_tlv_type tlv_type;
557	const struct firmware *fwp;
558	const uint8_t *data;
559	uint32_t tlv_len;
560	uint32_t usniffer_img;
561	const uint8_t *tlv_data;
562	uint32_t paging_mem_size;
563	int num_of_cpus;
564	int error = 0;
565	size_t len;
566
567	if (fw->fw_status == IWM_FW_STATUS_DONE &&
568	    ucode_type != IWM_UCODE_INIT)
569		return 0;
570
571	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
572		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
573	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
574
575	if (fw->fw_fp != NULL)
576		iwm_fw_info_free(fw);
577
578	/*
579	 * Load firmware into driver memory.
580	 * fw_fp will be set.
581	 */
582	IWM_UNLOCK(sc);
583	fwp = firmware_get(sc->cfg->fw_name);
584	IWM_LOCK(sc);
585	if (fwp == NULL) {
586		device_printf(sc->sc_dev,
587		    "could not read firmware %s (error %d)\n",
588		    sc->cfg->fw_name, error);
589		goto out;
590	}
591	fw->fw_fp = fwp;
592
593	/* (Re-)Initialize default values. */
594	capa->flags = 0;
595	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
596	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
597	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
598	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
599	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
600
601	/*
602	 * Parse firmware contents
603	 */
604
605	uhdr = (const void *)fw->fw_fp->data;
606	if (*(const uint32_t *)fw->fw_fp->data != 0
607	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
608		device_printf(sc->sc_dev, "invalid firmware %s\n",
609		    sc->cfg->fw_name);
610		error = EINVAL;
611		goto out;
612	}
613
614	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
615	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
616	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
617	    IWM_UCODE_API(le32toh(uhdr->ver)));
618	data = uhdr->data;
619	len = fw->fw_fp->datasize - sizeof(*uhdr);
620
621	while (len >= sizeof(*tlv)) {
622		len -= sizeof(*tlv);
623		tlv = (const void *)data;
624
625		tlv_len = le32toh(tlv->length);
626		tlv_type = le32toh(tlv->type);
627		tlv_data = tlv->data;
628
629		if (len < tlv_len) {
630			device_printf(sc->sc_dev,
631			    "firmware too short: %zu bytes\n",
632			    len);
633			error = EINVAL;
634			goto parse_out;
635		}
636		len -= roundup2(tlv_len, 4);
637		data += sizeof(tlv) + roundup2(tlv_len, 4);
638
639		switch ((int)tlv_type) {
640		case IWM_UCODE_TLV_PROBE_MAX_LEN:
641			if (tlv_len != sizeof(uint32_t)) {
642				device_printf(sc->sc_dev,
643				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
644				    __func__,
645				    (int) tlv_len);
646				error = EINVAL;
647				goto parse_out;
648			}
649			capa->max_probe_length =
650			    le32_to_cpup((const uint32_t *)tlv_data);
651			/* limit it to something sensible */
652			if (capa->max_probe_length >
653			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
654				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
655				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
656				    "ridiculous\n", __func__);
657				error = EINVAL;
658				goto parse_out;
659			}
660			break;
661		case IWM_UCODE_TLV_PAN:
662			if (tlv_len) {
663				device_printf(sc->sc_dev,
664				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
665				    __func__,
666				    (int) tlv_len);
667				error = EINVAL;
668				goto parse_out;
669			}
670			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
671			break;
672		case IWM_UCODE_TLV_FLAGS:
673			if (tlv_len < sizeof(uint32_t)) {
674				device_printf(sc->sc_dev,
675				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
676				    __func__,
677				    (int) tlv_len);
678				error = EINVAL;
679				goto parse_out;
680			}
681			if (tlv_len % sizeof(uint32_t)) {
682				device_printf(sc->sc_dev,
683				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
684				    __func__,
685				    (int) tlv_len);
686				error = EINVAL;
687				goto parse_out;
688			}
689			/*
690			 * Apparently there can be many flags, but Linux driver
691			 * parses only the first one, and so do we.
692			 *
693			 * XXX: why does this override IWM_UCODE_TLV_PAN?
694			 * Intentional or a bug?  Observations from
695			 * current firmware file:
696			 *  1) TLV_PAN is parsed first
697			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
698			 * ==> this resets TLV_PAN to itself... hnnnk
699			 */
700			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
701			break;
702		case IWM_UCODE_TLV_CSCHEME:
703			if ((error = iwm_store_cscheme(sc,
704			    tlv_data, tlv_len)) != 0) {
705				device_printf(sc->sc_dev,
706				    "%s: iwm_store_cscheme(): returned %d\n",
707				    __func__,
708				    error);
709				goto parse_out;
710			}
711			break;
712		case IWM_UCODE_TLV_NUM_OF_CPU:
713			if (tlv_len != sizeof(uint32_t)) {
714				device_printf(sc->sc_dev,
715				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
716				    __func__,
717				    (int) tlv_len);
718				error = EINVAL;
719				goto parse_out;
720			}
721			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
722			if (num_of_cpus == 2) {
723				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
724					TRUE;
725				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
726					TRUE;
727				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
728					TRUE;
729			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
730				device_printf(sc->sc_dev,
731				    "%s: Driver supports only 1 or 2 CPUs\n",
732				    __func__);
733				error = EINVAL;
734				goto parse_out;
735			}
736			break;
737		case IWM_UCODE_TLV_SEC_RT:
738			if ((error = iwm_firmware_store_section(sc,
739			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
740				device_printf(sc->sc_dev,
741				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
742				    __func__,
743				    error);
744				goto parse_out;
745			}
746			break;
747		case IWM_UCODE_TLV_SEC_INIT:
748			if ((error = iwm_firmware_store_section(sc,
749			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
750				device_printf(sc->sc_dev,
751				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
752				    __func__,
753				    error);
754				goto parse_out;
755			}
756			break;
757		case IWM_UCODE_TLV_SEC_WOWLAN:
758			if ((error = iwm_firmware_store_section(sc,
759			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
760				device_printf(sc->sc_dev,
761				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
762				    __func__,
763				    error);
764				goto parse_out;
765			}
766			break;
767		case IWM_UCODE_TLV_DEF_CALIB:
768			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
769				device_printf(sc->sc_dev,
770				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
771				    __func__,
772				    (int) tlv_len,
773				    (int) sizeof(struct iwm_tlv_calib_data));
774				error = EINVAL;
775				goto parse_out;
776			}
777			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
778				device_printf(sc->sc_dev,
779				    "%s: iwm_set_default_calib() failed: %d\n",
780				    __func__,
781				    error);
782				goto parse_out;
783			}
784			break;
785		case IWM_UCODE_TLV_PHY_SKU:
786			if (tlv_len != sizeof(uint32_t)) {
787				error = EINVAL;
788				device_printf(sc->sc_dev,
789				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
790				    __func__,
791				    (int) tlv_len);
792				goto parse_out;
793			}
794			sc->sc_fw.phy_config =
795			    le32_to_cpup((const uint32_t *)tlv_data);
796			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
797						  IWM_FW_PHY_CFG_TX_CHAIN) >>
798						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
799			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
800						  IWM_FW_PHY_CFG_RX_CHAIN) >>
801						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
802			break;
803
804		case IWM_UCODE_TLV_API_CHANGES_SET: {
805			if (tlv_len != sizeof(struct iwm_ucode_api)) {
806				error = EINVAL;
807				goto parse_out;
808			}
809			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
810				error = EINVAL;
811				goto parse_out;
812			}
813			break;
814		}
815
816		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
817			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
818				error = EINVAL;
819				goto parse_out;
820			}
821			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
822				error = EINVAL;
823				goto parse_out;
824			}
825			break;
826		}
827
828		case 48: /* undocumented TLV */
829		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
830		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
831			/* ignore, not used by current driver */
832			break;
833
834		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
835			if ((error = iwm_firmware_store_section(sc,
836			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
837			    tlv_len)) != 0)
838				goto parse_out;
839			break;
840
841		case IWM_UCODE_TLV_PAGING:
842			if (tlv_len != sizeof(uint32_t)) {
843				error = EINVAL;
844				goto parse_out;
845			}
846			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
847
848			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
849			    "%s: Paging: paging enabled (size = %u bytes)\n",
850			    __func__, paging_mem_size);
851			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
852				device_printf(sc->sc_dev,
853					"%s: Paging: driver supports up to %u bytes for paging image\n",
854					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
855				error = EINVAL;
856				goto out;
857			}
858			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
859				device_printf(sc->sc_dev,
860				    "%s: Paging: image isn't multiple %u\n",
861				    __func__, IWM_FW_PAGING_SIZE);
862				error = EINVAL;
863				goto out;
864			}
865
866			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
867			    paging_mem_size;
868			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
869			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
870			    paging_mem_size;
871			break;
872
873		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
874			if (tlv_len != sizeof(uint32_t)) {
875				error = EINVAL;
876				goto parse_out;
877			}
878			capa->n_scan_channels =
879			    le32_to_cpup((const uint32_t *)tlv_data);
880			break;
881
882		case IWM_UCODE_TLV_FW_VERSION:
883			if (tlv_len != sizeof(uint32_t) * 3) {
884				error = EINVAL;
885				goto parse_out;
886			}
887			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
888			    "%d.%d.%d",
889			    le32toh(((const uint32_t *)tlv_data)[0]),
890			    le32toh(((const uint32_t *)tlv_data)[1]),
891			    le32toh(((const uint32_t *)tlv_data)[2]));
892			break;
893
894		case IWM_UCODE_TLV_FW_MEM_SEG:
895			break;
896
897		default:
898			device_printf(sc->sc_dev,
899			    "%s: unknown firmware section %d, abort\n",
900			    __func__, tlv_type);
901			error = EINVAL;
902			goto parse_out;
903		}
904	}
905
906	KASSERT(error == 0, ("unhandled error"));
907
908 parse_out:
909	if (error) {
910		device_printf(sc->sc_dev, "firmware parse error %d, "
911		    "section type %d\n", error, tlv_type);
912	}
913
914 out:
915	if (error) {
916		fw->fw_status = IWM_FW_STATUS_NONE;
917		if (fw->fw_fp != NULL)
918			iwm_fw_info_free(fw);
919	} else
920		fw->fw_status = IWM_FW_STATUS_DONE;
921	wakeup(&sc->sc_fw);
922
923	return error;
924}
925
926/*
927 * DMA resource routines
928 */
929
930/* fwmem is used to load firmware onto the card */
931static int
932iwm_alloc_fwmem(struct iwm_softc *sc)
933{
934	/* Must be aligned on a 16-byte boundary. */
935	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
936	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
937}
938
939/* tx scheduler rings.  not used? */
940static int
941iwm_alloc_sched(struct iwm_softc *sc)
942{
943	/* TX scheduler rings must be aligned on a 1KB boundary. */
944	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
945	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
946}
947
948/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
949static int
950iwm_alloc_kw(struct iwm_softc *sc)
951{
952	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
953}
954
955/* interrupt cause table */
956static int
957iwm_alloc_ict(struct iwm_softc *sc)
958{
959	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
960	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
961}
962
/*
 * Allocate all DMA resources for one RX ring: the descriptor array,
 * the shared status area, a buffer DMA tag, a spare map for buffer
 * rotation, and one mapped mbuf per ring slot.
 *
 * Returns 0 on success or an errno; on failure any partially
 * allocated state is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/*
	 * Allocate RX descriptors (256-byte aligned).
	 * Each descriptor slot is a single 32-bit DMA address.
	 */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag: one segment, 32-bit addressable. */
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* Attach and map an mbuf to slot i. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1033
1034static void
1035iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1036{
1037	/* Reset the ring state */
1038	ring->cur = 0;
1039
1040	/*
1041	 * The hw rx ring index in shared memory must also be cleared,
1042	 * otherwise the discrepancy can cause reprocessing chaos.
1043	 */
1044	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1045}
1046
/*
 * Release all DMA resources of an RX ring: descriptor and status
 * areas, per-slot mbufs and maps, the spare map, and the buffer tag.
 * Safe to call on a partially-constructed ring (every teardown step
 * is guarded by a NULL check), which is how iwm_alloc_rx_ring()'s
 * failure path uses it.
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Complete any in-flight DMA before freeing. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1079
/*
 * Allocate DMA resources for one TX ring: the TFD descriptor array
 * and, for rings up to and including the command queue, the command
 * buffer area plus per-slot buffer maps.  Precomputes each slot's
 * command and scratch physical addresses.  Returns 0 or an errno;
 * partial allocations are released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	/* One tag covers all per-slot buffer maps on this ring. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/* Walk the command area, one iwm_device_cmd per ring slot. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: the walk must end exactly at the end of the area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1159
/*
 * Drop every queued frame on a TX ring and return the ring to its
 * initial empty state without freeing its DMA resources.  Also clears
 * this queue's bit in the queue-full mask and, for the command queue,
 * drops any outstanding "keep NIC awake" request.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Finish DMA before releasing the mbuf. */
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}
1187
1188static void
1189iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1190{
1191	int i;
1192
1193	iwm_dma_contig_free(&ring->desc_dma);
1194	iwm_dma_contig_free(&ring->cmd_dma);
1195
1196	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1197		struct iwm_tx_data *data = &ring->data[i];
1198
1199		if (data->m != NULL) {
1200			bus_dmamap_sync(ring->data_dmat, data->map,
1201			    BUS_DMASYNC_POSTWRITE);
1202			bus_dmamap_unload(ring->data_dmat, data->map);
1203			m_freem(data->m);
1204			data->m = NULL;
1205		}
1206		if (data->map != NULL) {
1207			bus_dmamap_destroy(ring->data_dmat, data->map);
1208			data->map = NULL;
1209		}
1210	}
1211	if (ring->data_dmat != NULL) {
1212		bus_dma_tag_destroy(ring->data_dmat);
1213		ring->data_dmat = NULL;
1214	}
1215}
1216
1217/*
1218 * High-level hardware frobbing routines
1219 */
1220
/*
 * Unmask the full interrupt set, remembering the mask in the softc so
 * iwm_restore_interrupts() can replay it later.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1227
/* Re-apply the last interrupt mask saved by iwm_enable_interrupts(). */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1233
/*
 * Mask all interrupts and acknowledge anything pending in both the
 * main and the flow-handler (FH) interrupt status registers, so no
 * stale cause survives a re-enable.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1244
/*
 * Clear and (re)program the interrupt cause table, then switch the
 * driver into ICT interrupt mode and re-enable interrupts.  Called
 * with interrupts quiesced first to avoid racing the table reset.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1268
1269/* iwlwifi pcie/trans.c */
1270
1271/*
1272 * Since this .. hard-resets things, it's time to actually
1273 * mark the first vap (if any) as having no mac context.
1274 * It's annoying, but since the driver is potentially being
1275 * stop/start'ed whilst active (thanks openbsd port!) we
1276 * have to correctly track this.
1277 */
/*
 * Bring the device to a full stop: quiesce interrupts, halt TX/RX
 * DMA, reset all rings, power down the APM, and leave only the RF
 * kill interrupt armed.  The statement order below mirrors the
 * iwlwifi shutdown sequence and should not be rearranged.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		/* Deactivate the TX scheduler first. */
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1364
1365/* iwlwifi: mvm/ops.c */
/* iwlwifi: mvm/ops.c */
/*
 * Program the hardware interface configuration register from the
 * firmware's PHY configuration (radio type/step/dash) and the MAC
 * revision, then apply the 7000-family early-power-off workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	/* Unpack the radio configuration fields from phy_config. */
	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1408
/*
 * Program the RX DMA engine: clear the shared status area, stop RX,
 * reset the channel-0 pointers, load the ring and status physical
 * addresses, and enable the channel with our ring geometry.  Returns
 * 0 on success or EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1472
/*
 * Program the TX side: deactivate the scheduler, load the keep-warm
 * page address, point the hardware at each TX ring's descriptor
 * array, and put the scheduler in auto-active mode.  Returns 0 on
 * success or EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1507
1508static int
1509iwm_nic_init(struct iwm_softc *sc)
1510{
1511	int error;
1512
1513	iwm_apm_init(sc);
1514	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1515		iwm_set_pwr(sc);
1516
1517	iwm_mvm_nic_config(sc);
1518
1519	if ((error = iwm_nic_rx_init(sc)) != 0)
1520		return error;
1521
1522	/*
1523	 * Ditto for TX, from iwn
1524	 */
1525	if ((error = iwm_nic_tx_init(sc)) != 0)
1526		return error;
1527
1528	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1529	    "%s: shadow registers enabled\n", __func__);
1530	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1531
1532	return 0;
1533}
1534
/*
 * Lookup table from access-category index to firmware TX FIFO.
 * NOTE(review): entry order (VO, VI, BE, BK) presumably follows the
 * firmware/iwlwifi AC numbering rather than net80211's WME_AC_* order
 * — confirm against the callers that index this table.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1541
1542static int
1543iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1544{
1545	if (!iwm_nic_lock(sc)) {
1546		device_printf(sc->sc_dev,
1547		    "%s: cannot enable txq %d\n",
1548		    __func__,
1549		    qid);
1550		return EBUSY;
1551	}
1552
1553	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1554
1555	if (qid == IWM_MVM_CMD_QUEUE) {
1556		/* unactivate before configuration */
1557		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1558		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1559		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1560
1561		iwm_nic_unlock(sc);
1562
1563		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1564
1565		if (!iwm_nic_lock(sc)) {
1566			device_printf(sc->sc_dev,
1567			    "%s: cannot enable txq %d\n", __func__, qid);
1568			return EBUSY;
1569		}
1570		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1571		iwm_nic_unlock(sc);
1572
1573		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1574		/* Set scheduler window size and frame limit. */
1575		iwm_write_mem32(sc,
1576		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1577		    sizeof(uint32_t),
1578		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1579		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1580		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1581		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1582
1583		if (!iwm_nic_lock(sc)) {
1584			device_printf(sc->sc_dev,
1585			    "%s: cannot enable txq %d\n", __func__, qid);
1586			return EBUSY;
1587		}
1588		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1589		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1590		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1591		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1592		    IWM_SCD_QUEUE_STTS_REG_MSK);
1593	} else {
1594		struct iwm_scd_txq_cfg_cmd cmd;
1595		int error;
1596
1597		iwm_nic_unlock(sc);
1598
1599		memset(&cmd, 0, sizeof(cmd));
1600		cmd.scd_queue = qid;
1601		cmd.enable = 1;
1602		cmd.sta_id = sta_id;
1603		cmd.tx_fifo = fifo;
1604		cmd.aggregate = 0;
1605		cmd.window = IWM_FRAME_LIMIT;
1606
1607		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1608		    sizeof(cmd), &cmd);
1609		if (error) {
1610			device_printf(sc->sc_dev,
1611			    "cannot enable txq %d\n", qid);
1612			return error;
1613		}
1614
1615		if (!iwm_nic_lock(sc))
1616			return EBUSY;
1617	}
1618
1619	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1620	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1621
1622	iwm_nic_unlock(sc);
1623
1624	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1625	    __func__, qid, fifo);
1626
1627	return 0;
1628}
1629
/*
 * Post-"alive" transport setup: reset the ICT table, validate the
 * scheduler SRAM base address reported by the firmware against the
 * PRPH value, clear the scheduler context area, program the scheduler
 * DRAM base, enable the command queue and all FH TX DMA channels, and
 * re-enable L1-Active where applicable.  Returns 0 or an errno.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	/* A mismatch is only logged; the PRPH value is kept. */
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	/* NOTE(review): a write failure is reported as EBUSY here,
	 * masking the underlying errno from iwm_write_mem(). */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate the TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}
1700
1701/*
1702 * NVM read access and content parsing.  We do not support
1703 * external NVM or writing NVM.
1704 * iwlwifi/mvm/nvm.c
1705 */
1706
1707/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1	/* read past end of section */
};
1718
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD
 * firmware command.  On success the chunk is copied to data + offset
 * and *len is set to the number of bytes read; *len is set to 0 when
 * the firmware reports NOT_VALID_ADDRESS for a non-zero offset, which
 * simply marks the end of the section.  Returns 0 or an errno.  The
 * command response buffer is released on every exit path.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		/* SEND_IN_RFKILL: NVM must be readable with the radio off. */
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Sanity-check the response before copying it out. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1805
1806/*
1807 * Reads an NVM section completely.
1808 * NICs prior to 7000 family don't have a real NVM, but just read
1809 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1810 * by uCode, we need to manually check in this case that we don't
1811 * overflow and try to read more than the EEPROM size.
1812 * For 7000 family NICs, we supply the maximal size we can read, and
1813 * the uCode fills the response with as much data as we can,
1814 * without overflowing, so no check is needed.
1815 */
1816static int
1817iwm_nvm_read_section(struct iwm_softc *sc,
1818	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1819{
1820	uint16_t seglen, length, offset = 0;
1821	int ret;
1822
1823	/* Set nvm section read length */
1824	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1825
1826	seglen = length;
1827
1828	/* Read the NVM until exhausted (reading less than requested) */
1829	while (seglen == length) {
1830		/* Check no memory assumptions fail and cause an overflow */
1831		if ((size_read + offset + length) >
1832		    sc->cfg->eeprom_size) {
1833			device_printf(sc->sc_dev,
1834			    "EEPROM size is too small for NVM\n");
1835			return ENOBUFS;
1836		}
1837
1838		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1839		if (ret) {
1840			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1841				    "Cannot read NVM from section %d offset %d, length %d\n",
1842				    section, offset, length);
1843			return ret;
1844		}
1845		offset += seglen;
1846	}
1847
1848	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1849		    "NVM section %d read completed\n", section);
1850	*len = offset;
1851	return 0;
1852}
1853
1854/*
1855 * BEGIN IWM_NVM_PARSE
1856 */
1857
1858/* iwlwifi/iwl-nvm-parse.c */
1859
1860/* NVM offsets (in words) definitions */
/* Word offsets into the pre-8000-family NVM sections. */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,	/* MAC address */

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	/* Channel list offset, relative to the SW section start. */
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1877
/* Word offsets into the 8000-family NVM sections. */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	/* LAR offset moved between firmware NVM layout revisions. */
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1903
1904/* SKU Capabilities (actual values from NVM definition) */
/* Bit flags within the NVM's SKU capability word (IWM_SKU offset). */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
1911
/*
 * radio config bits (actual values from NVM definition)
 *
 * Field extractors for the NVM radio-configuration word.  The macro
 * argument is now fully parenthesized (it was bare before), so
 * passing an expression such as `a | b` expands correctly instead of
 * binding to the surrounding operators.
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)          /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3)  /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3)  /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3)  /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF)  /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)  /* bits 12-15 */

/* 8000-family layout of the same word (wider fields). */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	((x) & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		(((x) >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		(((x) >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		(((x) >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	(((x) >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	(((x) >> 28) & 0xF)

/* Fallback TX power limit; presumably dBm — confirm against callers. */
#define DEFAULT_MAX_TX_POWER 16
1928
/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 *
 * Only VALID, IBSS, ACTIVE and RADAR are consumed here (see
 * iwm_eeprom_channel_flags()/iwm_add_channel_band()).
 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1953
1954/*
1955 * Translate EEPROM flags to net80211.
1956 */
1957static uint32_t
1958iwm_eeprom_channel_flags(uint16_t ch_flags)
1959{
1960	uint32_t nflags;
1961
1962	nflags = 0;
1963	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1964		nflags |= IEEE80211_CHAN_PASSIVE;
1965	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1966		nflags |= IEEE80211_CHAN_NOADHOC;
1967	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1968		nflags |= IEEE80211_CHAN_DFS;
1969		/* Just in case. */
1970		nflags |= IEEE80211_CHAN_NOADHOC;
1971	}
1972
1973	return (nflags);
1974}
1975
1976static void
1977iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1978    int maxchans, int *nchans, int ch_idx, size_t ch_num,
1979    const uint8_t bands[])
1980{
1981	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1982	uint32_t nflags;
1983	uint16_t ch_flags;
1984	uint8_t ieee;
1985	int error;
1986
1987	for (; ch_idx < ch_num; ch_idx++) {
1988		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1989		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1990			ieee = iwm_nvm_channels[ch_idx];
1991		else
1992			ieee = iwm_nvm_channels_8000[ch_idx];
1993
1994		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1995			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1996			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1997			    ieee, ch_flags,
1998			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1999			    "5.2" : "2.4");
2000			continue;
2001		}
2002
2003		nflags = iwm_eeprom_channel_flags(ch_flags);
2004		error = ieee80211_add_channel(chans, maxchans, nchans,
2005		    ieee, 0, 0, nflags, bands);
2006		if (error != 0)
2007			break;
2008
2009		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2010		    "Ch. %d Flags %x [%sGHz] - Added\n",
2011		    ieee, ch_flags,
2012		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2013		    "5.2" : "2.4");
2014	}
2015}
2016
2017static void
2018iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2019    struct ieee80211_channel chans[])
2020{
2021	struct iwm_softc *sc = ic->ic_softc;
2022	struct iwm_nvm_data *data = sc->nvm_data;
2023	uint8_t bands[IEEE80211_MODE_BYTES];
2024	size_t ch_num;
2025
2026	memset(bands, 0, sizeof(bands));
2027	/* 1-13: 11b/g channels. */
2028	setbit(bands, IEEE80211_MODE_11B);
2029	setbit(bands, IEEE80211_MODE_11G);
2030	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2031	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2032
2033	/* 14: 11b channel only. */
2034	clrbit(bands, IEEE80211_MODE_11G);
2035	iwm_add_channel_band(sc, chans, maxchans, nchans,
2036	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2037
2038	if (data->sku_cap_band_52GHz_enable) {
2039		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2040			ch_num = nitems(iwm_nvm_channels);
2041		else
2042			ch_num = nitems(iwm_nvm_channels_8000);
2043		memset(bands, 0, sizeof(bands));
2044		setbit(bands, IEEE80211_MODE_11A);
2045		iwm_add_channel_band(sc, chans, maxchans, nchans,
2046		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2047	}
2048}
2049
/*
 * Family-8000 MAC address resolution: prefer the NVM MAC-override
 * section; if that address is reserved/invalid, fall back to the OTP
 * address held in the WFMP PRPH registers; otherwise zero the address
 * and complain.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address some NVMs ship with; never use it. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/*
		 * The registers hold the address big-endian within each
		 * 32-bit word; reverse the bytes of each word to build
		 * the canonical 6-byte address.
		 */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	/* No source available; the caller will reject the zero address. */
	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2108
2109static int
2110iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2111	    const uint16_t *phy_sku)
2112{
2113	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2114		return le16_to_cpup(nvm_sw + IWM_SKU);
2115
2116	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2117}
2118
2119static int
2120iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2121{
2122	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2123		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2124	else
2125		return le32_to_cpup((const uint32_t *)(nvm_sw +
2126						IWM_NVM_VERSION_8000));
2127}
2128
2129static int
2130iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2131		  const uint16_t *phy_sku)
2132{
2133        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2134                return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2135
2136        return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2137}
2138
2139static int
2140iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2141{
2142	int n_hw_addr;
2143
2144	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2145		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2146
2147	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2148
2149        return n_hw_addr & IWM_N_HW_ADDR_MASK;
2150}
2151
2152static void
2153iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2154		  uint32_t radio_cfg)
2155{
2156	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2157		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2158		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2159		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2160		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2161		return;
2162	}
2163
2164	/* set the radio configuration for family 8000 */
2165	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2166	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2167	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2168	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2169	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2170	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2171}
2172
/*
 * Fill in data->hw_addr from the NVM: pre-8000 parts store it directly
 * in the HW section (16-bit-swapped), family 8000 goes through
 * iwm_set_hw_address_family_8000().  Returns 0 on success, EINVAL if
 * no valid address could be found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2203
2204static struct iwm_nvm_data *
2205iwm_parse_nvm_data(struct iwm_softc *sc,
2206		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2207		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2208		   const uint16_t *phy_sku, const uint16_t *regulatory)
2209{
2210	struct iwm_nvm_data *data;
2211	uint32_t sku, radio_cfg;
2212
2213	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2214		data = malloc(sizeof(*data) +
2215		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2216		    M_DEVBUF, M_NOWAIT | M_ZERO);
2217	} else {
2218		data = malloc(sizeof(*data) +
2219		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2220		    M_DEVBUF, M_NOWAIT | M_ZERO);
2221	}
2222	if (!data)
2223		return NULL;
2224
2225	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2226
2227	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2228	iwm_set_radio_cfg(sc, data, radio_cfg);
2229
2230	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2231	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2232	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2233	data->sku_cap_11n_enable = 0;
2234
2235	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2236
2237	/* If no valid mac address was found - bail out */
2238	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2239		free(data, M_DEVBUF);
2240		return NULL;
2241	}
2242
2243	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2244		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2245		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2246	} else {
2247		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2248		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2249	}
2250
2251	return data;
2252}
2253
2254static void
2255iwm_free_nvm_data(struct iwm_nvm_data *data)
2256{
2257	if (data != NULL)
2258		free(data, M_DEVBUF);
2259}
2260
2261static struct iwm_nvm_data *
2262iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2263{
2264	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2265
2266	/* Checking for required sections */
2267	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2268		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2269		    !sections[sc->cfg->nvm_hw_section_num].data) {
2270			device_printf(sc->sc_dev,
2271			    "Can't parse empty OTP/NVM sections\n");
2272			return NULL;
2273		}
2274	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2275		/* SW and REGULATORY sections are mandatory */
2276		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2277		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2278			device_printf(sc->sc_dev,
2279			    "Can't parse empty OTP/NVM sections\n");
2280			return NULL;
2281		}
2282		/* MAC_OVERRIDE or at least HW section must exist */
2283		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2284		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2285			device_printf(sc->sc_dev,
2286			    "Can't parse mac_address, empty sections\n");
2287			return NULL;
2288		}
2289
2290		/* PHY_SKU section is mandatory in B0 */
2291		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2292			device_printf(sc->sc_dev,
2293			    "Can't parse phy_sku in B0, empty sections\n");
2294			return NULL;
2295		}
2296	} else {
2297		panic("unknown device family %d\n", sc->cfg->device_family);
2298	}
2299
2300	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2301	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2302	calib = (const uint16_t *)
2303	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2304	regulatory = (const uint16_t *)
2305	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2306	mac_override = (const uint16_t *)
2307	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2308	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2309
2310	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2311	    phy_sku, regulatory);
2312}
2313
2314static int
2315iwm_nvm_init(struct iwm_softc *sc)
2316{
2317	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2318	int i, ret, section;
2319	uint32_t size_read = 0;
2320	uint8_t *nvm_buffer, *temp;
2321	uint16_t len;
2322
2323	memset(nvm_sections, 0, sizeof(nvm_sections));
2324
2325	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2326		return EINVAL;
2327
2328	/* load NVM values from nic */
2329	/* Read From FW NVM */
2330	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2331
2332	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2333	if (!nvm_buffer)
2334		return ENOMEM;
2335	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2336		/* we override the constness for initial read */
2337		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2338					   &len, size_read);
2339		if (ret)
2340			continue;
2341		size_read += len;
2342		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2343		if (!temp) {
2344			ret = ENOMEM;
2345			break;
2346		}
2347		memcpy(temp, nvm_buffer, len);
2348
2349		nvm_sections[section].data = temp;
2350		nvm_sections[section].length = len;
2351	}
2352	if (!size_read)
2353		device_printf(sc->sc_dev, "OTP is blank\n");
2354	free(nvm_buffer, M_DEVBUF);
2355
2356	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2357	if (!sc->nvm_data)
2358		return EINVAL;
2359	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2360		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2361
2362	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2363		if (nvm_sections[i].data != NULL)
2364			free(nvm_sections[i].data, M_DEVBUF);
2365	}
2366
2367	return 0;
2368}
2369
/*
 * Copy one firmware section into device memory, one DMA-sized chunk at
 * a time through the shared fw_dma bounce buffer.  Destinations inside
 * the extended address window require toggling the LMPM_CHICK extended
 * address-space bit around the transfer.  Returns 0 or the first
 * per-chunk error.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	/* Chunks are capped at the FH DMA transfer-buffer maximum. */
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		/* Last chunk may be shorter than chunk_sz. */
		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then DMA it. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Always restore the address-space bit, even on error. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2422
/*
 * ucode
 */

/*
 * Program the FH service DMA channel to copy one firmware chunk from
 * host memory (phy_addr) into device SRAM at dst_addr, then sleep on
 * sc_fw chunk completion (sc_fw_chunk_done; presumably set by the
 * FH_TX interrupt handler — see iwm_enable_fw_load_int()).
 * Returns 0 on success, EBUSY if the NIC cannot be locked, ETIMEDOUT
 * if the chunk does not complete in time.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
			     bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while the transfer is set up. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination SRAM address. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source DMA address, low bits then high bits + byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/*
	 * Wait for the chunk to complete.  Note: msleep() times out after
	 * hz ticks (1 second) and the loop exits on the first timeout, so
	 * the effective bound is 1 second, not the 5 seconds an earlier
	 * comment here claimed.
	 */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2479
/*
 * Family-8000 secure-boot loader: push the sections belonging to one
 * CPU into the device and report each loaded section to the ucode via
 * the FH_UCODE_LOAD_STATUS register (sec_num is a growing bitmask,
 * shifted into the upper half for CPU2).  *first_ucode_section carries
 * the resume index between the CPU1 and CPU2 calls.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		/* CPU2 status lives in the upper 16 bits of the register. */
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Mark this CPU's half of the load-status register as complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2541
/*
 * Non-secure loader: push the sections belonging to one CPU into the
 * device, stopping at a separator section or an empty slot.
 * *first_ucode_section carries the resume index between the CPU1 and
 * CPU2 calls.
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		/* CPU2 status is reported in the upper 16 bits. */
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;
	}

	/* On family 8000, report load completion for this CPU. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		iwm_set_bits_prph(sc,
				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;

}
2594
/*
 * Load a complete (non-secured) firmware image: CPU1 sections first,
 * then — for dual-CPU images — the CPU2 header address and sections,
 * and finally release the CPU reset to start the firmware.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc,
				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
			iwm_nic_unlock(sc);
		}

		/* load to FW the binary sections of CPU2 */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
2633
2634int
2635iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2636	const struct iwm_fw_sects *image)
2637{
2638	int ret = 0;
2639	int first_ucode_section;
2640
2641	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2642		    image->is_dual_cpus ? "Dual" : "Single");
2643
2644	/* configure the ucode to be ready to get the secured image */
2645	/* release CPU reset */
2646	if (iwm_nic_lock(sc)) {
2647		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2648		    IWM_RELEASE_CPU_RESET_BIT);
2649		iwm_nic_unlock(sc);
2650	}
2651
2652	/* load to FW the binary Secured sections of CPU1 */
2653	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2654	    &first_ucode_section);
2655	if (ret)
2656		return ret;
2657
2658	/* load to FW the binary sections of CPU2 */
2659	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2660	    &first_ucode_section);
2661}
2662
/* XXX Get rid of this definition */
/*
 * Restrict the interrupt mask to FH_TX only, which is the interrupt
 * needed while firmware chunks are being DMA'd (see iwm_start_fw()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2671
/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware, clear the rfkill handshake bits, init the NIC,
 * mask all interrupts except FH_TX, and load the firmware image.
 * Returns 0 or an errno from the failing step.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack any pending interrupts before reconfiguring the mask. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2730
2731static int
2732iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2733{
2734	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2735		.valid = htole32(valid_tx_ant),
2736	};
2737
2738	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2739	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2740}
2741
2742/* iwlwifi: mvm/fw.c */
2743static int
2744iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2745{
2746	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2747	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2748
2749	/* Set parameters */
2750	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2751	phy_cfg_cmd.calib_control.event_trigger =
2752	    sc->sc_default_calib[ucode_type].event_trigger;
2753	phy_cfg_cmd.calib_control.flow_trigger =
2754	    sc->sc_default_calib[ucode_type].flow_trigger;
2755
2756	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2757	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2758	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2759	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2760}
2761
/*
 * Notification-wait callback for the ALIVE response.  Distinguishes
 * the three known response layouts by payload size, records the error/
 * log event table pointers and SCD base address, and sets
 * alive_data->valid from the firmware status word.  Always returns
 * TRUE so the wait completes on the first ALIVE packet.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		/* VER1 has no UMAC, hence no UMAC error table. */
		sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero UMAC error table address implies UMAC logging. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2838
2839static int
2840iwm_wait_phy_db_entry(struct iwm_softc *sc,
2841	struct iwm_rx_packet *pkt, void *data)
2842{
2843	struct iwm_phy_db *phy_db = data;
2844
2845	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2846		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2847			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2848			    __func__, pkt->hdr.code);
2849		}
2850		return TRUE;
2851	}
2852
2853	if (iwm_phy_db_set_section(phy_db, pkt)) {
2854		device_printf(sc->sc_dev,
2855		    "%s: iwm_phy_db_set_section failed\n", __func__);
2856	}
2857
2858	return FALSE;
2859}
2860
/*
 * Load the requested ucode type into the device and block until the
 * firmware's ALIVE notification arrives (the softc lock is dropped
 * around the wait).  On success, configures the scheduler base address
 * and, if the image uses paging, saves and sends the paging setup.
 * On any failure sc->cur_ucode is restored to its previous value.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* Dump secure-boot status to help diagnose 8000 failures. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2954
2955/*
2956 * mvm misc bits
2957 */
2958
2959/*
2960 * follows iwlwifi/fw.c
2961 */
/*
 * Boot the INIT ucode and run its calibration phase.  With justnvm set,
 * only the NVM (and MAC address) is read and the function returns without
 * calibrating.  Returns 0 on success or an errno; on every exit path the
 * calibration notification waiter is removed (either explicitly via the
 * 'error' label or implicitly by iwm_wait_notification()).
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Collect PHY-DB calibration results via iwm_wait_phy_db_entry(). */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* NVM-only run: jump to 'error' merely to drop the waiter. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3054
3055/*
3056 * receive side
3057 */
3058
/*
 * (re)stock rx ring, called at init-time and at runtime.
 *
 * Allocates a fresh jumbo cluster for RX slot 'idx', loads it through the
 * ring's spare DMA map first (so the slot's current buffer stays intact if
 * the load fails), then swaps the maps and publishes the new buffer's
 * 256-byte-aligned bus address in the RX descriptor.
 *
 * NOTE(review): the 'size' parameter is unused here; the cluster size is
 * always IWM_RBUF_SIZE.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/* Load into the spare map; the old buffer is untouched on failure. */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* Hardware takes the address >> 8, hence the alignment requirement. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3103
3104/* iwlwifi: mvm/rx.c */
3105/*
3106 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3107 * values are reported by the fw as positive values - need to negate
3108 * to obtain their dBM.  Account for missing antennas by replacing 0
3109 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3110 */
3111static int
3112iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3113{
3114	int energy_a, energy_b, energy_c, max_energy;
3115	uint32_t val;
3116
3117	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3118	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3119	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3120	energy_a = energy_a ? -energy_a : -256;
3121	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3122	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3123	energy_b = energy_b ? -energy_b : -256;
3124	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3125	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3126	energy_c = energy_c ? -energy_c : -256;
3127	max_energy = MAX(energy_a, energy_b);
3128	max_energy = MAX(max_energy, energy_c);
3129
3130	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3131	    "energy In A %d B %d C %d , and max %d\n",
3132	    energy_a, energy_b, energy_c, max_energy);
3133
3134	return max_energy;
3135}
3136
3137static void
3138iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3139{
3140	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3141
3142	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3143
3144	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3145}
3146
3147/*
3148 * Retrieve the average noise (in dBm) among receivers.
3149 */
3150static int
3151iwm_get_noise(struct iwm_softc *sc,
3152    const struct iwm_mvm_statistics_rx_non_phy *stats)
3153{
3154	int i, total, nbant, noise;
3155
3156	total = nbant = noise = 0;
3157	for (i = 0; i < 3; i++) {
3158		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3159		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3160		    __func__,
3161		    i,
3162		    noise);
3163
3164		if (noise) {
3165			total += noise;
3166			nbant++;
3167		}
3168	}
3169
3170	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3171	    __func__, nbant, total);
3172#if 0
3173	/* There should be at least one antenna but check anyway. */
3174	return (nbant == 0) ? -127 : (total / nbant) - 107;
3175#else
3176	/* For now, just hard-code it to -96 to be safe */
3177	return (-96);
3178#endif
3179}
3180
3181/*
3182 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3183 *
3184 * Handles the actual data of the Rx packet from the fw
3185 */
3186static boolean_t
3187iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3188	boolean_t stolen)
3189{
3190	struct ieee80211com *ic = &sc->sc_ic;
3191	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3192	struct ieee80211_frame *wh;
3193	struct ieee80211_node *ni;
3194	struct ieee80211_rx_stats rxs;
3195	struct iwm_rx_phy_info *phy_info;
3196	struct iwm_rx_mpdu_res_start *rx_res;
3197	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3198	uint32_t len;
3199	uint32_t rx_pkt_status;
3200	int rssi;
3201
3202	phy_info = &sc->sc_last_phy_info;
3203	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3204	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3205	len = le16toh(rx_res->byte_count);
3206	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3207
3208	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3209		device_printf(sc->sc_dev,
3210		    "dsp size out of range [0,20]: %d\n",
3211		    phy_info->cfg_phy_cnt);
3212		goto fail;
3213	}
3214
3215	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3216	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3217		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3219		goto fail;
3220	}
3221
3222	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3223
3224	/* Map it to relative value */
3225	rssi = rssi - sc->sc_noise;
3226
3227	/* replenish ring for the buffer we're going to feed to the sharks */
3228	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3229		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3230		    __func__);
3231		goto fail;
3232	}
3233
3234	m->m_data = pkt->data + sizeof(*rx_res);
3235	m->m_pkthdr.len = m->m_len = len;
3236
3237	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3238	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3239
3240	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3241
3242	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3243	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3244	    __func__,
3245	    le16toh(phy_info->channel),
3246	    le16toh(phy_info->phy_flags));
3247
3248	/*
3249	 * Populate an RX state struct with the provided information.
3250	 */
3251	bzero(&rxs, sizeof(rxs));
3252	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3253	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3254	rxs.c_ieee = le16toh(phy_info->channel);
3255	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3256		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3257	} else {
3258		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3259	}
3260
3261	/* rssi is in 1/2db units */
3262	rxs.rssi = rssi * 2;
3263	rxs.nf = sc->sc_noise;
3264
3265	if (ieee80211_radiotap_active_vap(vap)) {
3266		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3267
3268		tap->wr_flags = 0;
3269		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3270			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3271		tap->wr_chan_freq = htole16(rxs.c_freq);
3272		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3273		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3274		tap->wr_dbm_antsignal = (int8_t)rssi;
3275		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3276		tap->wr_tsft = phy_info->system_timestamp;
3277		switch (phy_info->rate) {
3278		/* CCK rates. */
3279		case  10: tap->wr_rate =   2; break;
3280		case  20: tap->wr_rate =   4; break;
3281		case  55: tap->wr_rate =  11; break;
3282		case 110: tap->wr_rate =  22; break;
3283		/* OFDM rates. */
3284		case 0xd: tap->wr_rate =  12; break;
3285		case 0xf: tap->wr_rate =  18; break;
3286		case 0x5: tap->wr_rate =  24; break;
3287		case 0x7: tap->wr_rate =  36; break;
3288		case 0x9: tap->wr_rate =  48; break;
3289		case 0xb: tap->wr_rate =  72; break;
3290		case 0x1: tap->wr_rate =  96; break;
3291		case 0x3: tap->wr_rate = 108; break;
3292		/* Unknown rate: should not happen. */
3293		default:  tap->wr_rate =   0;
3294		}
3295	}
3296
3297	IWM_UNLOCK(sc);
3298	if (ni != NULL) {
3299		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3300		ieee80211_input_mimo(ni, m, &rxs);
3301		ieee80211_free_node(ni);
3302	} else {
3303		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3304		ieee80211_input_mimo_all(ic, m, &rxs);
3305	}
3306	IWM_LOCK(sc);
3307
3308	return TRUE;
3309
3310fail:	counter_u64_add(ic->ic_ierrors, 1);
3311	return FALSE;
3312}
3313
3314static int
3315iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3316	struct iwm_node *in)
3317{
3318	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3319	struct ieee80211_node *ni = &in->in_ni;
3320	struct ieee80211vap *vap = ni->ni_vap;
3321	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3322	int failack = tx_resp->failure_frame;
3323
3324	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3325
3326	/* Update rate control statistics. */
3327	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3328	    __func__,
3329	    (int) le16toh(tx_resp->status.status),
3330	    (int) le16toh(tx_resp->status.sequence),
3331	    tx_resp->frame_count,
3332	    tx_resp->bt_kill_count,
3333	    tx_resp->failure_rts,
3334	    tx_resp->failure_frame,
3335	    le32toh(tx_resp->initial_rate),
3336	    (int) le16toh(tx_resp->wireless_media_time));
3337
3338	if (status != IWM_TX_STATUS_SUCCESS &&
3339	    status != IWM_TX_STATUS_DIRECT_DONE) {
3340		ieee80211_ratectl_tx_complete(vap, ni,
3341		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3342		return (1);
3343	} else {
3344		ieee80211_ratectl_tx_complete(vap, ni,
3345		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3346		return (0);
3347	}
3348}
3349
/*
 * Handle a TX-command response from the firmware: complete the matching
 * TX descriptor, release its DMA mapping and mbuf, notify net80211, and
 * restart transmission if the ring drops below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A completion arrived, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Hands the mbuf (and node reference) back to net80211. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Unthrottle the queue once it drains below the low-water mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3389
3390/*
3391 * transmit side
3392 */
3393
3394/*
3395 * Process a "command done" firmware notification.  This is where we wakeup
3396 * processes waiting for a synchronous command completion.
3397 * from if_iwn
3398 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this command's completion. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/* Commands should complete in order; warn if one was skipped. */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: release the cmd-in-flight reference. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3438
3439#if 0
3440/*
3441 * necessary only for block ack mode
3442 */
/*
 * Publish the byte count of the frame at (qid, idx) in the TX scheduler's
 * byte-count table so the firmware scheduler can account for it.
 * Currently compiled out (#if 0) — only needed for block-ack mode.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry packs the station id into the top nibble. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* NOTE(review): presumably a duplicate entry the hardware reads
	 * when the queue wraps — confirm against iwlwifi. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
3470#endif
3471
3472/*
3473 * Take an 802.11 (non-n) rate, find the relevant rate
3474 * table entry.  return the index into in_ridx[].
3475 *
3476 * The caller then uses that index back into in_ridx
3477 * to figure out the rate index programmed /into/
3478 * the firmware for this given node.
3479 */
3480static int
3481iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3482    uint8_t rate)
3483{
3484	int i;
3485	uint8_t r;
3486
3487	for (i = 0; i < nitems(in->in_ridx); i++) {
3488		r = iwm_rates[in->in_ridx[i]].rate;
3489		if (rate == r)
3490			return (i);
3491	}
3492
3493	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3494	    "%s: couldn't find an entry for rate=%d\n",
3495	    __func__,
3496	    rate);
3497
3498	/* XXX Return the first */
3499	/* XXX TODO: have it return the /lowest/ */
3500	return (0);
3501}
3502
3503static int
3504iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3505{
3506	int i;
3507
3508	for (i = 0; i < nitems(iwm_rates); i++) {
3509		if (iwm_rates[i].rate == rate)
3510			return (i);
3511	}
3512	/* XXX error? */
3513	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3514	    "%s: couldn't find an entry for rate=%d\n",
3515	    __func__,
3516	    rate);
3517	return (0);
3518}
3519
3520/*
3521 * Fill in the rate related information for a transmit command.
3522 */
3523static const struct iwm_rate *
3524iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3525	struct mbuf *m, struct iwm_tx_cmd *tx)
3526{
3527	struct ieee80211_node *ni = &in->in_ni;
3528	struct ieee80211_frame *wh;
3529	const struct ieee80211_txparam *tp = ni->ni_txparms;
3530	const struct iwm_rate *rinfo;
3531	int type;
3532	int ridx, rate_flags;
3533
3534	wh = mtod(m, struct ieee80211_frame *);
3535	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3536
3537	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3538	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3539
3540	if (type == IEEE80211_FC0_TYPE_MGT) {
3541		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3542		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3543		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3544	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3545		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3546		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3547		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3548	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3549		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3550		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3551		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3552	} else if (m->m_flags & M_EAPOL) {
3553		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3554		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3555		    "%s: EAPOL\n", __func__);
3556	} else if (type == IEEE80211_FC0_TYPE_DATA) {
3557		int i;
3558
3559		/* for data frames, use RS table */
3560		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3561		/* XXX pass pktlen */
3562		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3563		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3564		ridx = in->in_ridx[i];
3565
3566		/* This is the index into the programmed table */
3567		tx->initial_rate_index = i;
3568		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3569
3570		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3571		    "%s: start with i=%d, txrate %d\n",
3572		    __func__, i, iwm_rates[ridx].rate);
3573	} else {
3574		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3575		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3576		    __func__, tp->mgmtrate);
3577	}
3578
3579	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3580	    "%s: frame type=%d txrate %d\n",
3581	        __func__, type, iwm_rates[ridx].rate);
3582
3583	rinfo = &iwm_rates[ridx];
3584
3585	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3586	    __func__, ridx,
3587	    rinfo->rate,
3588	    !! (IWM_RIDX_IS_CCK(ridx))
3589	    );
3590
3591	/* XXX TODO: hard-coded TX antenna? */
3592	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3593	if (IWM_RIDX_IS_CCK(ridx))
3594		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3595	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3596
3597	return rinfo;
3598}
3599
/* Number of bytes of the TX command carried in the first TFD buffer. */
#define TB0_SIZE 16
/*
 * Queue an 802.11 frame for transmission on AC 'ac'.
 *
 * Builds the firmware TX command (header copy, rate info, flags),
 * encrypts if required, DMA-maps the payload (linearizing the mbuf on
 * EFBIG), fills the TFD with command + payload segments, and kicks the
 * ring's write pointer.  On failure the mbuf is freed and an errno is
 * returned; on success the mbuf is owned by the ring until completion.
 * Called with the IWM lock held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast traffic goes via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0 + remainder of the TX command, plus the payload segments. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3817
3818static int
3819iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3820    const struct ieee80211_bpf_params *params)
3821{
3822	struct ieee80211com *ic = ni->ni_ic;
3823	struct iwm_softc *sc = ic->ic_softc;
3824	int error = 0;
3825
3826	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3827	    "->%s begin\n", __func__);
3828
3829	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3830		m_freem(m);
3831		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3832		    "<-%s not RUNNING\n", __func__);
3833		return (ENETDOWN);
3834        }
3835
3836	IWM_LOCK(sc);
3837	/* XXX fix this */
3838        if (params == NULL) {
3839		error = iwm_tx(sc, m, ni, 0);
3840	} else {
3841		error = iwm_tx(sc, m, ni, 0);
3842	}
3843	sc->sc_tx_timer = 5;
3844	IWM_UNLOCK(sc);
3845
3846        return (error);
3847}
3848
3849/*
3850 * mvm/tx.c
3851 */
3852
3853/*
3854 * Note that there are transports that buffer frames before they reach
3855 * the firmware. This means that after flush_tx_path is called, the
3856 * queue might not be empty. The race-free way to handle this is to:
3857 * 1) set the station as draining
3858 * 2) flush the Tx path
3859 * 3) wait for the transport queues to be empty
3860 */
3861int
3862iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3863{
3864	int ret;
3865	struct iwm_tx_path_flush_cmd flush_cmd = {
3866		.queues_ctl = htole32(tfd_msk),
3867		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3868	};
3869
3870	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3871	    sizeof(flush_cmd), &flush_cmd);
3872	if (ret)
3873                device_printf(sc->sc_dev,
3874		    "Flushing tx queue failed: %d\n", ret);
3875	return ret;
3876}
3877
3878/*
3879 * BEGIN mvm/sta.c
3880 */
3881
3882static int
3883iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3884	struct iwm_mvm_add_sta_cmd *cmd, int *status)
3885{
3886	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3887	    cmd, status);
3888}
3889
3890/* send station add/update command to firmware */
3891static int
3892iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3893{
3894	struct iwm_vap *ivp = IWM_VAP(in->in_ni.ni_vap);
3895	struct iwm_mvm_add_sta_cmd add_sta_cmd;
3896	int ret;
3897	uint32_t status;
3898
3899	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3900
3901	add_sta_cmd.sta_id = IWM_STATION_ID;
3902	add_sta_cmd.mac_id_n_color
3903	    = htole32(IWM_FW_CMD_ID_AND_COLOR(ivp->id, ivp->color));
3904	if (!update) {
3905		int ac;
3906		for (ac = 0; ac < WME_NUM_AC; ac++) {
3907			add_sta_cmd.tfd_queue_msk |=
3908			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3909		}
3910		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3911	}
3912	add_sta_cmd.add_modify = update ? 1 : 0;
3913	add_sta_cmd.station_flags_msk
3914	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3915	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3916	if (update)
3917		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3918
3919	status = IWM_ADD_STA_SUCCESS;
3920	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3921	if (ret)
3922		return ret;
3923
3924	switch (status & IWM_ADD_STA_STATUS_MASK) {
3925	case IWM_ADD_STA_SUCCESS:
3926		break;
3927	default:
3928		ret = EIO;
3929		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3930		break;
3931	}
3932
3933	return ret;
3934}
3935
/* Add the (single) client station to the firmware's station table. */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 0));
}
3941
/* Update the already-added client station in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return (iwm_mvm_sta_send_to_fw(sc, in, 1));
}
3947
3948static int
3949iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3950	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3951{
3952	struct iwm_mvm_add_sta_cmd cmd;
3953	int ret;
3954	uint32_t status;
3955
3956	memset(&cmd, 0, sizeof(cmd));
3957	cmd.sta_id = sta->sta_id;
3958	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3959
3960	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3961	cmd.tid_disable_tx = htole16(0xffff);
3962
3963	if (addr)
3964		IEEE80211_ADDR_COPY(cmd.addr, addr);
3965
3966	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3967	if (ret)
3968		return ret;
3969
3970	switch (status & IWM_ADD_STA_STATUS_MASK) {
3971	case IWM_ADD_STA_SUCCESS:
3972		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3973		    "%s: Internal station added.\n", __func__);
3974		return 0;
3975	default:
3976		device_printf(sc->sc_dev,
3977		    "%s: Add internal station failed, status=0x%x\n",
3978		    __func__, status);
3979		ret = EIO;
3980		break;
3981	}
3982	return ret;
3983}
3984
3985static int
3986iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3987{
3988	int ret;
3989
3990	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3991	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3992
3993	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3994	if (ret)
3995		return ret;
3996
3997	ret = iwm_mvm_add_int_sta_common(sc,
3998	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3999
4000	if (ret)
4001		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4002	return ret;
4003}
4004
4005/*
4006 * END mvm/sta.c
4007 */
4008
4009/*
4010 * BEGIN mvm/quota.c
4011 */
4012
4013static int
4014iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4015{
4016	struct iwm_time_quota_cmd cmd;
4017	int i, idx, ret, num_active_macs, quota, quota_rem;
4018	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4019	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4020	uint16_t id;
4021
4022	memset(&cmd, 0, sizeof(cmd));
4023
4024	/* currently, PHY ID == binding ID */
4025	if (ivp) {
4026		id = ivp->phy_ctxt->id;
4027		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4028		colors[id] = ivp->phy_ctxt->color;
4029
4030		if (1)
4031			n_ifs[id] = 1;
4032	}
4033
4034	/*
4035	 * The FW's scheduling session consists of
4036	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4037	 * equally between all the bindings that require quota
4038	 */
4039	num_active_macs = 0;
4040	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4041		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4042		num_active_macs += n_ifs[i];
4043	}
4044
4045	quota = 0;
4046	quota_rem = 0;
4047	if (num_active_macs) {
4048		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4049		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4050	}
4051
4052	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4053		if (colors[i] < 0)
4054			continue;
4055
4056		cmd.quotas[idx].id_and_color =
4057			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4058
4059		if (n_ifs[i] <= 0) {
4060			cmd.quotas[idx].quota = htole32(0);
4061			cmd.quotas[idx].max_duration = htole32(0);
4062		} else {
4063			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4064			cmd.quotas[idx].max_duration = htole32(0);
4065		}
4066		idx++;
4067	}
4068
4069	/* Give the remainder of the session to the first binding */
4070	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4071
4072	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4073	    sizeof(cmd), &cmd);
4074	if (ret)
4075		device_printf(sc->sc_dev,
4076		    "%s: Failed to send quota: %d\n", __func__, ret);
4077	return ret;
4078}
4079
4080/*
4081 * END mvm/quota.c
4082 */
4083
4084/*
4085 * ieee80211 routines
4086 */
4087
4088/*
4089 * Change to AUTH state in 80211 state machine.  Roughly matches what
4090 * Linux does in bss_info_changed().
4091 */
4092static int
4093iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4094{
4095	struct ieee80211_node *ni;
4096	struct iwm_node *in;
4097	struct iwm_vap *iv = IWM_VAP(vap);
4098	uint32_t duration;
4099	int error;
4100
4101	/*
4102	 * XXX i have a feeling that the vap node is being
4103	 * freed from underneath us. Grr.
4104	 */
4105	ni = ieee80211_ref_node(vap->iv_bss);
4106	in = IWM_NODE(ni);
4107	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4108	    "%s: called; vap=%p, bss ni=%p\n",
4109	    __func__,
4110	    vap,
4111	    ni);
4112
4113	in->in_assoc = 0;
4114
4115	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4116	if (error != 0)
4117		return error;
4118
4119	error = iwm_allow_mcast(vap, sc);
4120	if (error) {
4121		device_printf(sc->sc_dev,
4122		    "%s: failed to set multicast\n", __func__);
4123		goto out;
4124	}
4125
4126	/*
4127	 * This is where it deviates from what Linux does.
4128	 *
4129	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4130	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4131	 * and always does a mac_ctx_changed().
4132	 *
4133	 * The openbsd port doesn't attempt to do that - it reset things
4134	 * at odd states and does the add here.
4135	 *
4136	 * So, until the state handling is fixed (ie, we never reset
4137	 * the NIC except for a firmware failure, which should drag
4138	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4139	 * contexts that are required), let's do a dirty hack here.
4140	 */
4141	if (iv->is_uploaded) {
4142		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4143			device_printf(sc->sc_dev,
4144			    "%s: failed to update MAC\n", __func__);
4145			goto out;
4146		}
4147		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4148		    in->in_ni.ni_chan, 1, 1)) != 0) {
4149			device_printf(sc->sc_dev,
4150			    "%s: failed update phy ctxt\n", __func__);
4151			goto out;
4152		}
4153		iv->phy_ctxt = &sc->sc_phyctxt[0];
4154
4155		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4156			device_printf(sc->sc_dev,
4157			    "%s: binding update cmd\n", __func__);
4158			goto out;
4159		}
4160		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4161			device_printf(sc->sc_dev,
4162			    "%s: failed to update sta\n", __func__);
4163			goto out;
4164		}
4165	} else {
4166		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4167			device_printf(sc->sc_dev,
4168			    "%s: failed to add MAC\n", __func__);
4169			goto out;
4170		}
4171		if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4172			device_printf(sc->sc_dev,
4173			    "%s: failed to update power management\n",
4174			    __func__);
4175			goto out;
4176		}
4177		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4178		    in->in_ni.ni_chan, 1, 1)) != 0) {
4179			device_printf(sc->sc_dev,
4180			    "%s: failed add phy ctxt!\n", __func__);
4181			error = ETIMEDOUT;
4182			goto out;
4183		}
4184		iv->phy_ctxt = &sc->sc_phyctxt[0];
4185
4186		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4187			device_printf(sc->sc_dev,
4188			    "%s: binding add cmd\n", __func__);
4189			goto out;
4190		}
4191		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4192			device_printf(sc->sc_dev,
4193			    "%s: failed to add sta\n", __func__);
4194			goto out;
4195		}
4196	}
4197
4198	/*
4199	 * Prevent the FW from wandering off channel during association
4200	 * by "protecting" the session with a time event.
4201	 */
4202	/* XXX duration is in units of TU, not MS */
4203	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4204	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4205	DELAY(100);
4206
4207	error = 0;
4208out:
4209	ieee80211_free_node(ni);
4210	return (error);
4211}
4212
4213static int
4214iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4215{
4216	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4217	int error;
4218
4219	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4220		device_printf(sc->sc_dev,
4221		    "%s: failed to update STA\n", __func__);
4222		return error;
4223	}
4224
4225	in->in_assoc = 1;
4226	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4227		device_printf(sc->sc_dev,
4228		    "%s: failed to update MAC\n", __func__);
4229		return error;
4230	}
4231
4232	return 0;
4233}
4234
/*
 * Tear down the firmware association state when leaving RUN.  Rather
 * than removing contexts one by one (which wedges the device, see the
 * comment below), flush the tx path and do a full device reset/reinit.
 * Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Unreachable reference implementation of the "proper" teardown. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4305
4306static struct ieee80211_node *
4307iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4308{
4309	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4310	    M_NOWAIT | M_ZERO);
4311}
4312
4313uint8_t
4314iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4315{
4316	int i;
4317	uint8_t rval;
4318
4319	for (i = 0; i < rs->rs_nrates; i++) {
4320		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4321		if (rval == iwm_rates[ridx].rate)
4322			return rs->rs_rates[i];
4323	}
4324
4325	return 0;
4326}
4327
/*
 * Build the firmware link-quality (rate-selection) command for a node:
 * translate the node's negotiated legacy rates into hardware rate
 * indices (in_ridx[]), then fill in_lq's rate table from highest to
 * lowest rate.  The command itself is sent later by the caller.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	/* Sanity: bail if the rate set cannot fit the firmware table. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* No HW equivalent; leave in_ridx[i] at -1. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		/* Disabled: rotate through the valid tx antennas. */
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4446
4447static int
4448iwm_media_change(struct ifnet *ifp)
4449{
4450	struct ieee80211vap *vap = ifp->if_softc;
4451	struct ieee80211com *ic = vap->iv_ic;
4452	struct iwm_softc *sc = ic->ic_softc;
4453	int error;
4454
4455	error = ieee80211_media_change(ifp);
4456	if (error != ENETRESET)
4457		return error;
4458
4459	IWM_LOCK(sc);
4460	if (ic->ic_nrunning > 0) {
4461		iwm_stop(sc);
4462		iwm_init(sc);
4463	}
4464	IWM_UNLOCK(sc);
4465	return error;
4466}
4467
4468
/*
 * net80211 state-change hook.  Drives the firmware through the driver's
 * AUTH/ASSOC/RUN setup, and forces RUN -> {SCAN,AUTH,ASSOC} transitions
 * through INIT (with a full device reset) because the firmware cannot
 * be walked back directly.  Note the careful lock juggling: the
 * 802.11 com lock is dropped while the driver lock is held.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Stop the scan LED blink when leaving SCAN. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * Run the net80211 transition first (under the com
			 * lock), then reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Enable beacon filtering, refresh power/quota, set rates. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		iwm_setrates(sc, in);

		/* Push the link-quality command built by iwm_setrates(). */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4606
4607void
4608iwm_endscan_cb(void *arg, int pending)
4609{
4610	struct iwm_softc *sc = arg;
4611	struct ieee80211com *ic = &sc->sc_ic;
4612
4613	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4614	    "%s: scan ended\n",
4615	    __func__);
4616
4617	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4618}
4619
4620/*
4621 * Aging and idle timeouts for the different possible scenarios
4622 * in default configuration
4623 */
4624static const uint32_t
4625iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4626	{
4627		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4628		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4629	},
4630	{
4631		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4632		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4633	},
4634	{
4635		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4636		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4637	},
4638	{
4639		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4640		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4641	},
4642	{
4643		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4644		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4645	},
4646};
4647
4648/*
4649 * Aging and idle timeouts for the different possible scenarios
4650 * in single BSS MAC configuration.
4651 */
4652static const uint32_t
4653iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4654	{
4655		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4656		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4657	},
4658	{
4659		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4660		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4661	},
4662	{
4663		htole32(IWM_SF_MCAST_AGING_TIMER),
4664		htole32(IWM_SF_MCAST_IDLE_TIMER)
4665	},
4666	{
4667		htole32(IWM_SF_BA_AGING_TIMER),
4668		htole32(IWM_SF_BA_IDLE_TIMER)
4669	},
4670	{
4671		htole32(IWM_SF_TX_RE_AGING_TIMER),
4672		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4673	},
4674};
4675
/*
 * Fill a Smart Fifo configuration command: pick tx watermarks based on
 * the (possible) AP node's capabilities and select the full-on timeout
 * table.  ni == NULL means "unassociated" and selects defaults.
 */
static void
iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermark selection once rx MCS info is wired up. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else {
			watermark = IWM_SF_W_MARK_LEGACY;
		}
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are uniform for all scenarios. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts: BSS table when associated, defaults otherwise. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
4722
4723static int
4724iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4725{
4726	struct ieee80211com *ic = &sc->sc_ic;
4727	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4728	struct iwm_sf_cfg_cmd sf_cmd = {
4729		.state = htole32(IWM_SF_FULL_ON),
4730	};
4731	int ret = 0;
4732
4733	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4734		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4735
4736	switch (new_state) {
4737	case IWM_SF_UNINIT:
4738	case IWM_SF_INIT_OFF:
4739		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4740		break;
4741	case IWM_SF_FULL_ON:
4742		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4743		break;
4744	default:
4745		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4746		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4747			  new_state);
4748		return EINVAL;
4749	}
4750
4751	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4752				   sizeof(sf_cmd), &sf_cmd);
4753	return ret;
4754}
4755
4756static int
4757iwm_send_bt_init_conf(struct iwm_softc *sc)
4758{
4759	struct iwm_bt_coex_cmd bt_cmd;
4760
4761	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4762	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4763
4764	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4765	    &bt_cmd);
4766}
4767
/*
 * Send an MCC (mobile country code) update to the firmware to select
 * the regulatory domain for the given two-letter alpha2 code.  The
 * response is consumed only for debug logging; its format (v1 vs v2)
 * depends on the firmware's LAR capability bits.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Firmware capability decides which response layout we'll get. */
	int resp_v2 = fw_has_capa(&sc->ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two ASCII letters into a big-endian-style 16-bit code. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* Older firmware takes the shorter v1 command struct. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer requested via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4835
4836static void
4837iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4838{
4839	struct iwm_host_cmd cmd = {
4840		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4841		.len = { sizeof(uint32_t), },
4842		.data = { &backoff, },
4843	};
4844
4845	if (iwm_send_cmd(sc, &cmd) != 0) {
4846		device_printf(sc->sc_dev,
4847		    "failed to change thermal tx backoff\n");
4848	}
4849}
4850
4851static int
4852iwm_init_hw(struct iwm_softc *sc)
4853{
4854	struct ieee80211com *ic = &sc->sc_ic;
4855	int error, i, ac;
4856
4857	if ((error = iwm_start_hw(sc)) != 0) {
4858		printf("iwm_start_hw: failed %d\n", error);
4859		return error;
4860	}
4861
4862	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4863		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4864		return error;
4865	}
4866
4867	/*
4868	 * should stop and start HW since that INIT
4869	 * image just loaded
4870	 */
4871	iwm_stop_device(sc);
4872	sc->sc_ps_disabled = FALSE;
4873	if ((error = iwm_start_hw(sc)) != 0) {
4874		device_printf(sc->sc_dev, "could not initialize hardware\n");
4875		return error;
4876	}
4877
4878	/* omstart, this time with the regular firmware */
4879	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4880	if (error) {
4881		device_printf(sc->sc_dev, "could not load firmware\n");
4882		goto error;
4883	}
4884
4885	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4886		device_printf(sc->sc_dev, "bt init conf failed\n");
4887		goto error;
4888	}
4889
4890	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4891	if (error != 0) {
4892		device_printf(sc->sc_dev, "antenna config failed\n");
4893		goto error;
4894	}
4895
4896	/* Send phy db control command and then phy db calibration */
4897	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4898		goto error;
4899
4900	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4901		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4902		goto error;
4903	}
4904
4905	/* Add auxiliary station for scanning */
4906	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4907		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4908		goto error;
4909	}
4910
4911	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4912		/*
4913		 * The channel used here isn't relevant as it's
4914		 * going to be overwritten in the other flows.
4915		 * For now use the first channel we have.
4916		 */
4917		if ((error = iwm_mvm_phy_ctxt_add(sc,
4918		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4919			goto error;
4920	}
4921
4922	/* Initialize tx backoffs to the minimum. */
4923	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4924		iwm_mvm_tt_tx_backoff(sc, 0);
4925
4926	error = iwm_mvm_power_update_device(sc);
4927	if (error)
4928		goto error;
4929
4930	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4931		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4932			goto error;
4933	}
4934
4935	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4936		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4937			goto error;
4938	}
4939
4940	/* Enable Tx queues. */
4941	for (ac = 0; ac < WME_NUM_AC; ac++) {
4942		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4943		    iwm_mvm_ac_to_tx_fifo[ac]);
4944		if (error)
4945			goto error;
4946	}
4947
4948	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4949		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4950		goto error;
4951	}
4952
4953	return 0;
4954
4955 error:
4956	iwm_stop_device(sc);
4957	return error;
4958}
4959
4960/* Allow multicast from our BSSID. */
4961static int
4962iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4963{
4964	struct ieee80211_node *ni = vap->iv_bss;
4965	struct iwm_mcast_filter_cmd *cmd;
4966	size_t size;
4967	int error;
4968
4969	size = roundup(sizeof(*cmd), 4);
4970	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4971	if (cmd == NULL)
4972		return ENOMEM;
4973	cmd->filter_own = 1;
4974	cmd->port_id = 0;
4975	cmd->count = 0;
4976	cmd->pass_all = 1;
4977	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4978
4979	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4980	    IWM_CMD_SYNC, size, cmd);
4981	free(cmd, M_DEVBUF);
4982
4983	return (error);
4984}
4985
4986/*
4987 * ifnet interfaces
4988 */
4989
4990static void
4991iwm_init(struct iwm_softc *sc)
4992{
4993	int error;
4994
4995	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4996		return;
4997	}
4998	sc->sc_generation++;
4999	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5000
5001	if ((error = iwm_init_hw(sc)) != 0) {
5002		printf("iwm_init_hw failed %d\n", error);
5003		iwm_stop(sc);
5004		return;
5005	}
5006
5007	/*
5008	 * Ok, firmware loaded and we are jogging
5009	 */
5010	sc->sc_flags |= IWM_FLAG_HW_INITED;
5011	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5012}
5013
5014static int
5015iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5016{
5017	struct iwm_softc *sc;
5018	int error;
5019
5020	sc = ic->ic_softc;
5021
5022	IWM_LOCK(sc);
5023	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5024		IWM_UNLOCK(sc);
5025		return (ENXIO);
5026	}
5027	error = mbufq_enqueue(&sc->sc_snd, m);
5028	if (error) {
5029		IWM_UNLOCK(sc);
5030		return (error);
5031	}
5032	iwm_start(sc);
5033	IWM_UNLOCK(sc);
5034	return (0);
5035}
5036
5037/*
5038 * Dequeue packets from sendq and call send.
5039 */
5040static void
5041iwm_start(struct iwm_softc *sc)
5042{
5043	struct ieee80211_node *ni;
5044	struct mbuf *m;
5045	int ac = 0;
5046
5047	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5048	while (sc->qfullmsk == 0 &&
5049		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5050		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5051		if (iwm_tx(sc, m, ni, ac) != 0) {
5052			if_inc_counter(ni->ni_vap->iv_ifp,
5053			    IFCOUNTER_OERRORS, 1);
5054			ieee80211_free_node(ni);
5055			continue;
5056		}
5057		sc->sc_tx_timer = 15;
5058	}
5059	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5060}
5061
/*
 * Take the interface down: clear the running flags, bump the generation
 * counter (invalidates in-flight work), stop LED blinking and the tx
 * watchdog, then shut the device down.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5074
/*
 * Once-per-second watchdog callout.  sc_tx_timer is armed (to 15) by
 * iwm_start() whenever a frame is handed to the hardware; if it counts
 * down to zero before TX completes, dump the firmware error log (debug
 * kernels only) and restart the 802.11 stack.
 */
static void
iwm_watchdog(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
			iwm_nic_error(sc);
#endif
			ieee80211_restart_all(ic);
			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
			/*
			 * Deliberately no reschedule here: the restart path
			 * re-arms the callout via iwm_init().
			 */
			return;
		}
	}
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
5094
5095static void
5096iwm_parent(struct ieee80211com *ic)
5097{
5098	struct iwm_softc *sc = ic->ic_softc;
5099	int startall = 0;
5100
5101	IWM_LOCK(sc);
5102	if (ic->ic_nrunning > 0) {
5103		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5104			iwm_init(sc);
5105			startall = 1;
5106		}
5107	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5108		iwm_stop(sc);
5109	IWM_UNLOCK(sc);
5110	if (startall)
5111		ieee80211_start_all(ic);
5112}
5113
5114/*
5115 * The interrupt side of things
5116 */
5117
5118/*
5119 * error dumping routines are from iwlwifi/mvm/utils.c
5120 */
5121
5122/*
5123 * Note: This structure is read from the device with IO accesses,
5124 * and the reading already does the endian conversion. As it is
5125 * read with uint32_t-sized accesses, any members with a different size
5126 * need to be ordered correctly though!
5127 */
/* Layout must match the firmware's LOG_ERROR_TABLE_API_S_VER_3 exactly. */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5175
5176/*
5177 * UMAC error struct - relevant starting from family 8000 chip.
5178 * Note: This structure is read from the device with IO accesses,
5179 * and the reading already does the endian conversion. As it is
5180 * read with u32-sized accesses, any members with a different size
5181 * need to be ordered correctly though!
5182 */
/* Read out of device memory by iwm_nic_umac_error() in 32-bit words. */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5200
/* Byte offsets/sizes used when sanity-checking the firmware error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5203
5204#ifdef IWM_DEBUG
/*
 * Human-readable names for firmware assert codes, used by
 * iwm_desc_lookup().  The last entry ("ADVANCED_SYSASSERT") is the
 * catch-all for unknown codes.  Declared static const: the table is
 * private to this file and never modified.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5226
5227static const char *
5228iwm_desc_lookup(uint32_t num)
5229{
5230	int i;
5231
5232	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5233		if (advanced_lookup[i].num == num)
5234			return advanced_lookup[i].name;
5235
5236	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5237	return advanced_lookup[i].name;
5238}
5239
/*
 * Dump the UMAC firmware error log (family 8000+ parts) to the console.
 * The table is fetched from device memory at sc->umac_error_event_table;
 * see struct iwm_umac_error_event_table for the layout.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Sanity check: firmware places the table well above 0x800000. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5286
5287/*
5288 * Support for dumping the error log seemed like a good idea ...
5289 * but it's mostly hex junk and the only sensible thing is the
5290 * hw/ucode revision (which we know anyway).  Since it's here,
5291 * I'll just leave it in, just in case e.g. the Intel guys want to
5292 * help us decipher some "ADVANCED_SYSASSERT" later.
5293 */
/*
 * Dump the LMAC firmware error log to the console after a SW_ERR
 * interrupt or a watchdog timeout.  Chains to iwm_nic_umac_error()
 * when a UMAC log is also present (family 8000+).
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Sanity check: firmware places the table well above 0x800000. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5366#endif
5367
/*
 * Walk one RX buffer mbuf and dispatch every firmware packet it contains.
 * The firmware packs multiple responses/notifications into a single
 * receive buffer; each packet is aligned to IWM_FH_RSCSR_FRAME_ALIGN and
 * 'offset' advances packet by packet until the buffer is exhausted or a
 * terminator (zeroed header / FRAME_INVALID) is seen.
 *
 * Mbuf ownership: 'm' normally stays owned by the RX ring.  MPDU frames
 * are either handed up directly (last packet in the buffer) or via an
 * m_copym() clone; 'stolen' tracks whether a clone took over the
 * underlying storage, in which case 'm' is freed here at the end.
 * Called with the softc lock held from iwm_notif_intr().
 */
static void
iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_cmd_response *cresp;
	struct mbuf *m1;
	uint32_t offset = 0;
	uint32_t maxoff = IWM_RBUF_SIZE;
	uint32_t nextoff;
	boolean_t stolen = FALSE;

/* True if a full packet header still fits between (a) and the buffer end. */
#define HAVEROOM(a)	\
    ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)

	while (HAVEROOM(offset)) {
		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
		    offset);
		int qid, idx, code, len;

		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
			break;
		}

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x\n",
		    qid & ~0x80, pkt->hdr.idx, code);

		len = le32toh(pkt->len_n_flags) & IWM_FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(uint32_t); /* account for status word */
		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);

		/* Wake any synchronous waiter blocked on this response. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
			break;

		case IWM_REPLY_RX_MPDU_CMD: {
			/*
			 * If this is the last frame in the RX buffer, we
			 * can directly feed the mbuf to the sharks here.
			 */
			struct iwm_rx_packet *nextpkt = mtodoff(m,
			    struct iwm_rx_packet *, nextoff);
			if (!HAVEROOM(nextoff) ||
			    (nextpkt->hdr.code == 0 &&
			     (nextpkt->hdr.qid & ~0x80) == 0 &&
			     nextpkt->hdr.idx == 0) ||
			    (nextpkt->len_n_flags ==
			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
					/* 'm' was consumed; don't free it. */
					stolen = FALSE;
					/* Make sure we abort the loop */
					nextoff = maxoff;
				}
				break;
			}

			/*
			 * Use m_copym instead of m_split, because that
			 * makes it easier to keep a valid rx buffer in
			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
			 *
			 * We need to start m_copym() at offset 0, to get the
			 * M_PKTHDR flag preserved.
			 */
			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m1) {
				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
					stolen = TRUE;
				else
					m_freem(m1);
			}
			break;
		}

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break;
		}

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			stats = (void *)pkt->data;
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
			break;
		}

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* Stash the response for the synchronous requester. */
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Country code arrives as two ASCII bytes. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_ABORT_UMAC:
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_FW_PAGING_BLOCK_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid & ~0x80, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(qid & (1 << 7)))
			iwm_cmd_done(sc, pkt);

		offset = nextoff;
	}
	/* A clone took over the storage; release our reference. */
	if (stolen)
		m_freem(m);
#undef HAVEROOM
}
5699
5700/*
5701 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5702 * Basic structure from if_iwn
5703 */
/*
 * Drain the RX ring: process every receive buffer the firmware has
 * closed since our last pass, then hand the processed entries back.
 * Called with the softc lock held from iwm_intr().
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Firmware's write pointer, from the DMA'd status page. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
		iwm_handle_rxb(sc, data->m);

		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
	}

	/*
	 * Tell the firmware that it can reuse the ring entries that
	 * we have just processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
}
5740
/*
 * Main interrupt handler.  Interrupts are masked on entry and restored
 * via iwm_restore_interrupts() on the out_ena path.  Interrupt causes
 * are gathered either from the ICT table (DMA'd by the device) or, when
 * ICT is disabled, straight from the CSR_INT / FH_INT_STATUS registers.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	/* NOTE(review): 'rv' is assigned but never consumed in this void
	 * handler; kept for parity with the code this was derived from. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Zero the consumed slot so it isn't seen twice. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake iwm_firmware_load_chunk()'s sleeper. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5894
5895/*
5896 * Autoconf glue-sniffing
5897 */
/*
 * PCI vendor/device IDs of the Intel 3160/3165/7260/7265/8260 adapters
 * this driver attaches to; see the iwm_devices[] table below.
 */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5909
/* PCI device ID -> per-chip configuration, used by probe and dev_check. */
static const struct iwm_devices {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5925
5926static int
5927iwm_probe(device_t dev)
5928{
5929	int i;
5930
5931	for (i = 0; i < nitems(iwm_devices); i++) {
5932		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5933		    pci_get_device(dev) == iwm_devices[i].device) {
5934			device_set_desc(dev, iwm_devices[i].cfg->name);
5935			return (BUS_PROBE_DEFAULT);
5936		}
5937	}
5938
5939	return (ENXIO);
5940}
5941
5942static int
5943iwm_dev_check(device_t dev)
5944{
5945	struct iwm_softc *sc;
5946	uint16_t devid;
5947	int i;
5948
5949	sc = device_get_softc(dev);
5950
5951	devid = pci_get_device(dev);
5952	for (i = 0; i < nitems(iwm_devices); i++) {
5953		if (iwm_devices[i].device == devid) {
5954			sc->cfg = iwm_devices[i].cfg;
5955			return (0);
5956		}
5957	}
5958	device_printf(dev, "unknown adapter type\n");
5959	return ENXIO;
5960}
5961
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041	/* cleared in iwm_pci_attach() */
5964
5965static int
5966iwm_pci_attach(device_t dev)
5967{
5968	struct iwm_softc *sc;
5969	int count, error, rid;
5970	uint16_t reg;
5971
5972	sc = device_get_softc(dev);
5973
5974	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5975	 * PCI Tx retries from interfering with C3 CPU state */
5976	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5977
5978	/* Enable bus-mastering and hardware bug workaround. */
5979	pci_enable_busmaster(dev);
5980	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5981	/* if !MSI */
5982	if (reg & PCIM_STATUS_INTxSTATE) {
5983		reg &= ~PCIM_STATUS_INTxSTATE;
5984	}
5985	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5986
5987	rid = PCIR_BAR(0);
5988	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5989	    RF_ACTIVE);
5990	if (sc->sc_mem == NULL) {
5991		device_printf(sc->sc_dev, "can't map mem space\n");
5992		return (ENXIO);
5993	}
5994	sc->sc_st = rman_get_bustag(sc->sc_mem);
5995	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5996
5997	/* Install interrupt handler. */
5998	count = 1;
5999	rid = 0;
6000	if (pci_alloc_msi(dev, &count) == 0)
6001		rid = 1;
6002	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
6003	    (rid != 0 ? 0 : RF_SHAREABLE));
6004	if (sc->sc_irq == NULL) {
6005		device_printf(dev, "can't map interrupt\n");
6006			return (ENXIO);
6007	}
6008	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
6009	    NULL, iwm_intr, sc, &sc->sc_ih);
6010	if (sc->sc_ih == NULL) {
6011		device_printf(dev, "can't establish interrupt");
6012			return (ENXIO);
6013	}
6014	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
6015
6016	return (0);
6017}
6018
6019static void
6020iwm_pci_detach(device_t dev)
6021{
6022	struct iwm_softc *sc = device_get_softc(dev);
6023
6024	if (sc->sc_irq != NULL) {
6025		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
6026		bus_release_resource(dev, SYS_RES_IRQ,
6027		    rman_get_rid(sc->sc_irq), sc->sc_irq);
6028		pci_release_msi(dev);
6029        }
6030	if (sc->sc_mem != NULL)
6031		bus_release_resource(dev, SYS_RES_MEMORY,
6032		    rman_get_rid(sc->sc_mem), sc->sc_mem);
6033}
6034
6035
6036
/*
 * device_attach(9) method.
 *
 * Allocates all software state (lock, queues, callouts, notification
 * machinery), maps the device, identifies the exact chip revision, and
 * allocates the DMA rings.  net80211 attachment and firmware loading
 * are deferred to iwm_preinit() via a config intrhook, since firmware
 * cannot be loaded this early in boot.
 *
 * Returns 0 on success, ENXIO on any failure; on failure all
 * partially-allocated state is released through iwm_detach_local().
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	/* Mark attached up-front so iwm_detach_local() runs on failure. */
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait for the MAC clock before touching the AUX bus. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			/* Fold a detected C-step into the stored revision. */
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/* Defer firmware load + net80211 attach until interrupts work. */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6234
6235static int
6236iwm_is_valid_ether_addr(uint8_t *addr)
6237{
6238	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6239
6240	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6241		return (FALSE);
6242
6243	return (TRUE);
6244}
6245
6246static int
6247iwm_update_edca(struct ieee80211com *ic)
6248{
6249	struct iwm_softc *sc = ic->ic_softc;
6250
6251	device_printf(sc->sc_dev, "%s: called\n", __func__);
6252	return (0);
6253}
6254
/*
 * Deferred attach stage, run from the config intrhook established in
 * iwm_attach() once interrupts are available: start the hardware, run
 * the init firmware to read NVM data, then attach net80211 state and
 * radiotap.  On any failure everything is torn down via
 * iwm_detach_local() (net80211 is not attached yet on those paths,
 * hence do_net80211 = 0).
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once (reads NVM), then stop the device again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 method overrides. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* Success or failure, the intrhook must be removed exactly once. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6325
6326/*
6327 * Attach the interface to 802.11 radiotap.
6328 */
6329static void
6330iwm_radiotap_attach(struct iwm_softc *sc)
6331{
6332        struct ieee80211com *ic = &sc->sc_ic;
6333
6334	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6335	    "->%s begin\n", __func__);
6336        ieee80211_radiotap_attach(ic,
6337            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6338                IWM_TX_RADIOTAP_PRESENT,
6339            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6340                IWM_RX_RADIOTAP_PRESENT);
6341	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6342	    "->%s end\n", __func__);
6343}
6344
6345static struct ieee80211vap *
6346iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6347    enum ieee80211_opmode opmode, int flags,
6348    const uint8_t bssid[IEEE80211_ADDR_LEN],
6349    const uint8_t mac[IEEE80211_ADDR_LEN])
6350{
6351	struct iwm_vap *ivp;
6352	struct ieee80211vap *vap;
6353
6354	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6355		return NULL;
6356	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6357	vap = &ivp->iv_vap;
6358	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6359	vap->iv_bmissthreshold = 10;            /* override default */
6360	/* Override with driver methods. */
6361	ivp->iv_newstate = vap->iv_newstate;
6362	vap->iv_newstate = iwm_newstate;
6363
6364	ivp->id = IWM_DEFAULT_MACID;
6365	ivp->color = IWM_DEFAULT_COLOR;
6366
6367	ieee80211_ratectl_init(vap);
6368	/* Complete setup. */
6369	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6370	    mac);
6371	ic->ic_opmode = opmode;
6372
6373	return vap;
6374}
6375
/*
 * net80211 vap-delete hook: detach rate control and net80211 vap
 * state, then free the enclosing iwm_vap allocation made in
 * iwm_vap_create().
 */
static void
iwm_vap_delete(struct ieee80211vap *vap)
{
	struct iwm_vap *ivp = IWM_VAP(vap);

	ieee80211_ratectl_deinit(vap);
	ieee80211_vap_detach(vap);
	free(ivp, M_80211_VAP);
}
6385
6386static void
6387iwm_scan_start(struct ieee80211com *ic)
6388{
6389	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6390	struct iwm_softc *sc = ic->ic_softc;
6391	int error;
6392
6393	IWM_LOCK(sc);
6394	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6395		/* This should not be possible */
6396		device_printf(sc->sc_dev,
6397		    "%s: Previous scan not completed yet\n", __func__);
6398	}
6399	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6400		error = iwm_mvm_umac_scan(sc);
6401	else
6402		error = iwm_mvm_lmac_scan(sc);
6403	if (error != 0) {
6404		device_printf(sc->sc_dev, "could not initiate scan\n");
6405		IWM_UNLOCK(sc);
6406		ieee80211_cancel_scan(vap);
6407	} else {
6408		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6409		iwm_led_blink_start(sc);
6410		IWM_UNLOCK(sc);
6411	}
6412}
6413
/*
 * net80211 scan-end hook: stop the scan LED blink, re-enable the LED
 * when associated, clear the scan-running flag, and wait for the
 * firmware to stop any scan still in flight.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6442
/* net80211 multicast-filter update hook; intentionally a no-op. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6447
/* net80211 set-channel hook; intentionally a no-op. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6452
/* net80211 per-channel scan hook; intentionally a no-op. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6457
/* net80211 minimum-dwell scan hook; intentionally a no-op. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6463
/*
 * Restart task: stop the device and, if any interface is still marked
 * running, bring it back up.  Concurrent invocations serialize on the
 * IWM_FLAG_BUSY flag with msleep()/wakeup() on &sc->sc_flags, all
 * under the softc lock.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait for any other start/stop in progress to finish. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6480
6481static int
6482iwm_resume(device_t dev)
6483{
6484	struct iwm_softc *sc = device_get_softc(dev);
6485	int do_reinit = 0;
6486
6487	/*
6488	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6489	 * PCI Tx retries from interfering with C3 CPU state.
6490	 */
6491	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6492	iwm_init_task(device_get_softc(dev));
6493
6494	IWM_LOCK(sc);
6495	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6496		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6497		do_reinit = 1;
6498	}
6499	IWM_UNLOCK(sc);
6500
6501	if (do_reinit)
6502		ieee80211_resume_all(&sc->sc_ic);
6503
6504	return 0;
6505}
6506
6507static int
6508iwm_suspend(device_t dev)
6509{
6510	int do_stop = 0;
6511	struct iwm_softc *sc = device_get_softc(dev);
6512
6513	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6514
6515	ieee80211_suspend_all(&sc->sc_ic);
6516
6517	if (do_stop) {
6518		IWM_LOCK(sc);
6519		iwm_stop(sc);
6520		sc->sc_flags |= IWM_FLAG_SCANNING;
6521		IWM_UNLOCK(sc);
6522	}
6523
6524	return (0);
6525}
6526
/*
 * Common teardown path shared by device_detach and attach-failure
 * handling.
 *
 * do_net80211 selects whether net80211 state must also be detached;
 * the attach/preinit failure paths pass 0 because net80211 may not
 * have been attached yet.
 *
 * Idempotent: the sc_attached flag makes repeated calls no-ops.
 * Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Run the teardown only once. */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Quiesce timers and the hardware before freeing anything. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6583
6584static int
6585iwm_detach(device_t dev)
6586{
6587	struct iwm_softc *sc = device_get_softc(dev);
6588
6589	return (iwm_detach_local(sc, 1));
6590}
6591
6592static device_method_t iwm_pci_methods[] = {
6593        /* Device interface */
6594        DEVMETHOD(device_probe,         iwm_probe),
6595        DEVMETHOD(device_attach,        iwm_attach),
6596        DEVMETHOD(device_detach,        iwm_detach),
6597        DEVMETHOD(device_suspend,       iwm_suspend),
6598        DEVMETHOD(device_resume,        iwm_resume),
6599
6600        DEVMETHOD_END
6601};
6602
6603static driver_t iwm_pci_driver = {
6604        "iwm",
6605        iwm_pci_methods,
6606        sizeof (struct iwm_softc)
6607};
6608
6609static devclass_t iwm_devclass;
6610
6611DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6612MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6613MODULE_DEPEND(iwm, pci, 1, 1, 1);
6614MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6615