/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 330223 2018-03-01 06:55:46Z eadler $");

#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_sta.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

/* From DragonflyBSD */
#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
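
/*
 * Illustrative usage (a sketch, not new driver code): mtodoff() is mtod()
 * with a byte offset added, e.g. to get at a packet header that sits some
 * way into an mbuf's data area:
 *
 *	pkt = mtodoff(m, struct iwm_rx_packet *, offset);
 */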

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
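/*
 * Worked example of the table above: 'rate' is in units of 500 kb/s, so
 * iwm_rates[IWM_RIDX_OFDM] = { 12, IWM_RATE_6M_PLCP } is the 6 Mb/s OFDM
 * base rate, while the four entries below IWM_RIDX_OFDM are the CCK (11b)
 * rates 1/2/5.5/11 Mb/s.
 */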

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)
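/* i.e. one second and two seconds respectively, expressed in kernel ticks */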

struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_sects *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_sects *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_sects *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_sects *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *);
static int	iwm_get_noise(struct iwm_softc *sc,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
				    uint32_t, boolean_t);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
					struct iwm_sf_cfg_cmd *,
					struct ieee80211_node *);
static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_xmit_queue_drain(struct iwm_softc *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

static int	iwm_lar_disable = 0;
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc,
    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_desc *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* the first 32 bits are the device load offset */
	memcpy(&fwone->offset, data, sizeof(uint32_t));

	/* the rest is the section data itself */
	fwone->data = data + sizeof(uint32_t);
	fwone->len = dlen - sizeof(uint32_t);

	fws->fw_count++;

	return 0;
}
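
/*
 * Sketch of the section payload layout consumed above (derived from the
 * code itself, not from a separate format document):
 *
 *	bytes 0..3	device load offset (little endian)
 *	bytes 4..dlen-1	image data, to be loaded at that device offset
 */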

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}
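
/*
 * Worked example of the bitmap math above (illustrative): with
 * api_index == 1 and bit 3 set in api_flags, the TLV marks API bit
 * 3 + 32 * 1 == 35; each TLV thus contributes one 32-bit window of the
 * overall API/capability bitmap.
 */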

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	const struct iwm_ucode_tlv *tlv;
	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t tlv_len;
	uint32_t usniffer_img;
	const uint8_t *tlv_data;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_INIT)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	IWM_UNLOCK(sc);
	fwp = firmware_get(sc->cfg->fw_name);
	IWM_LOCK(sc);
	if (fwp == NULL) {
		error = ENOENT;
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */
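	/*
	 * Assumed on-disk layout of the TLV image, as implied by the parser
	 * below: a struct iwm_tlv_ucode_header (leading zero word, magic,
	 * version) followed by a sequence of records, each a le32 type, a
	 * le32 length, then 'length' bytes of data padded out to a 4-byte
	 * boundary.
	 */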

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32toh(tlv->length);
		tlv_type = le32toh(tlv->type);
		tlv_data = tlv->data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}
		len -= roundup2(tlv_len, 4);
		/* advance past the TLV header and its 4-byte padded payload */
		data += sizeof(*tlv) + roundup2(tlv_len, 4);

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32_to_cpup((const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
				    __func__,
				    (int) tlv_len,
				    (int) sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32_to_cpup((const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32_to_cpup((const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

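	/*
	 * Carve the contiguous cmd area into one struct iwm_device_cmd slot
	 * per TX descriptor.  scratch_paddr records the bus address of the
	 * scratch field inside each slot, which is later handed to the
	 * firmware in the TX command.
	 */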
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
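
/*
 * Rough sketch (for orientation, not new driver code) of how the ICT table
 * set up above is consumed at interrupt time: the device appends 32-bit
 * cause words at the driver's read index, and the handler ORs entries
 * together until it reads a zero, along the lines of:
 *
 *	while ((v = le32toh(ict_table[sc->ict_cur])) != 0) {
 *		cause |= v;
 *		sc->ict_cur = (sc->ict_cur + 1) % count;
 *	}
 */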

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
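
/*
 * Usage note: the command queue takes the first branch above and is
 * programmed directly through the scheduler registers, e.g.
 * iwm_enable_txq(sc, 0, IWM_MVM_CMD_QUEUE, 7) in iwm_trans_pcie_fw_alive()
 * below; data queues take the second branch and are configured via the
 * IWM_SCD_QUEUE_CFG firmware command instead.
 */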

static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
1743		if ((offset != 0) &&
1744		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1745			/*
			 * NOT_VALID_ADDRESS means the driver tried to read a
			 * chunk from an address that is a multiple of 2K and
			 * got an error because that address is empty.
			 * (offset != 0) means the driver has already read
			 * valid data from another chunk, so this case is not
			 * an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x; section size is a multiple of 2K\n",
1755				    offset);
1756			*len = 0;
1757			ret = 0;
1758		} else {
1759			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1760				    "NVM access command failed with status %d\n", ret);
1761			ret = EIO;
1762		}
1763		goto exit;
1764	}
1765
1766	if (offset_read != offset) {
1767		device_printf(sc->sc_dev,
1768		    "NVM ACCESS response with invalid offset %d\n",
1769		    offset_read);
1770		ret = EINVAL;
1771		goto exit;
1772	}
1773
1774	if (bytes_read > length) {
1775		device_printf(sc->sc_dev,
1776		    "NVM ACCESS response with too much data "
1777		    "(%d bytes requested, %d bytes received)\n",
1778		    length, bytes_read);
1779		ret = EINVAL;
1780		goto exit;
1781	}
1782
1783	/* Write data to NVM */
1784	memcpy(data + offset, resp_data, bytes_read);
1785	*len = bytes_read;
1786
1787 exit:
1788	iwm_free_resp(sc, &cmd);
1789	return ret;
1790}
1791
1792/*
1793 * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
 * by the uCode, in that case we must check manually that we don't
 * read more than the EEPROM size.
 * For 7000-family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as fits without
 * overflowing, so no check is needed.
1801 */
1802static int
1803iwm_nvm_read_section(struct iwm_softc *sc,
1804	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1805{
1806	uint16_t seglen, length, offset = 0;
1807	int ret;
1808
1809	/* Set nvm section read length */
1810	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1811
1812	seglen = length;
1813
1814	/* Read the NVM until exhausted (reading less than requested) */
1815	while (seglen == length) {
		/* Make sure we don't read past the end of the EEPROM buffer. */
1817		if ((size_read + offset + length) >
1818		    sc->cfg->eeprom_size) {
1819			device_printf(sc->sc_dev,
1820			    "EEPROM size is too small for NVM\n");
1821			return ENOBUFS;
1822		}
1823
1824		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1825		if (ret) {
1826			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1827				    "Cannot read NVM from section %d offset %d, length %d\n",
1828				    section, offset, length);
1829			return ret;
1830		}
1831		offset += seglen;
1832	}
1833
1834	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1835		    "NVM section %d read completed\n", section);
1836	*len = offset;
1837	return 0;
1838}
1839
1840/*
1841 * BEGIN IWM_NVM_PARSE
1842 */
1843
1844/* iwlwifi/iwl-nvm-parse.c */
1845
1846/* NVM offsets (in words) definitions */
1847enum iwm_nvm_offsets {
1848	/* NVM HW-Section offset (in words) definitions */
1849	IWM_HW_ADDR = 0x15,
1850
1851/* NVM SW-Section offset (in words) definitions */
1852	IWM_NVM_SW_SECTION = 0x1C0,
1853	IWM_NVM_VERSION = 0,
1854	IWM_RADIO_CFG = 1,
1855	IWM_SKU = 2,
1856	IWM_N_HW_ADDRS = 3,
1857	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1858
1859/* NVM calibration section offset (in words) definitions */
1860	IWM_NVM_CALIB_SECTION = 0x2B8,
1861	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1862};
1863
1864enum iwm_8000_nvm_offsets {
1865	/* NVM HW-Section offset (in words) definitions */
1866	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1867	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1868	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1869	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1870	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1871
1872	/* NVM SW-Section offset (in words) definitions */
1873	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1874	IWM_NVM_VERSION_8000 = 0,
1875	IWM_RADIO_CFG_8000 = 0,
1876	IWM_SKU_8000 = 2,
1877	IWM_N_HW_ADDRS_8000 = 3,
1878
1879	/* NVM REGULATORY -Section offset (in words) definitions */
1880	IWM_NVM_CHANNELS_8000 = 0,
1881	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1882	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1883	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1884
1885	/* NVM calibration section offset (in words) definitions */
1886	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1887	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1888};
1889
1890/* SKU Capabilities (actual values from NVM definition) */
1891enum nvm_sku_bits {
1892	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1893	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1894	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1895	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1896};
1897
1898/* radio config bits (actual values from NVM definition) */
1899#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1900#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1901#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1902#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1903#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1904#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1905
1906#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1907#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1908#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1909#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1910#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1911#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
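
/*
 * Worked example (hypothetical value): a 7000-family radio_cfg word of
 * 0x0D63 decomposes under the masks above as DASH = 0x3, STEP = 0x0,
 * TYPE = 0x2, PNUM = 0x1, TX_ANT = 0xD and RX_ANT = 0x0.
 */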
1912
1913#define DEFAULT_MAX_TX_POWER 16
1914
1915/**
1916 * enum iwm_nvm_channel_flags - channel flags in NVM
1917 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1918 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1919 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1920 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwl-nvm-parse.c
1922 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1923 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1924 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1925 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1926 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1927 */
1928enum iwm_nvm_channel_flags {
1929	IWM_NVM_CHANNEL_VALID = (1 << 0),
1930	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1931	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1932	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1933	IWM_NVM_CHANNEL_DFS = (1 << 7),
1934	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1935	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1936	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1937	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1938};
1939
1940/*
1941 * Translate EEPROM flags to net80211.
1942 */
1943static uint32_t
1944iwm_eeprom_channel_flags(uint16_t ch_flags)
1945{
1946	uint32_t nflags;
1947
1948	nflags = 0;
1949	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1950		nflags |= IEEE80211_CHAN_PASSIVE;
1951	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1952		nflags |= IEEE80211_CHAN_NOADHOC;
1953	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1954		nflags |= IEEE80211_CHAN_DFS;
1955		/* Just in case. */
1956		nflags |= IEEE80211_CHAN_NOADHOC;
1957	}
1958
1959	return (nflags);
1960}
1961
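/*
 * Add all usable channels of one band (NVM channel table indices
 * [ch_idx, ch_num)) to the net80211 channel list, translating the NVM
 * channel flags as we go and skipping channels marked invalid.
 */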
1962static void
1963iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1964    int maxchans, int *nchans, int ch_idx, size_t ch_num,
1965    const uint8_t bands[])
1966{
1967	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1968	uint32_t nflags;
1969	uint16_t ch_flags;
1970	uint8_t ieee;
1971	int error;
1972
1973	for (; ch_idx < ch_num; ch_idx++) {
1974		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1975		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1976			ieee = iwm_nvm_channels[ch_idx];
1977		else
1978			ieee = iwm_nvm_channels_8000[ch_idx];
1979
1980		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1981			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1982			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1983			    ieee, ch_flags,
1984			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1985			    "5.2" : "2.4");
1986			continue;
1987		}
1988
1989		nflags = iwm_eeprom_channel_flags(ch_flags);
1990		error = ieee80211_add_channel(chans, maxchans, nchans,
1991		    ieee, 0, 0, nflags, bands);
1992		if (error != 0)
1993			break;
1994
1995		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1996		    "Ch. %d Flags %x [%sGHz] - Added\n",
1997		    ieee, ch_flags,
1998		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1999		    "5.2" : "2.4");
2000	}
2001}
2002
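/*
 * Build the channel list for net80211 from the NVM channel flags:
 * channels 1-13 are 11b/g, channel 14 is 11b-only, and the 5GHz
 * channels are added only if the SKU enables the 5.2GHz band.
 */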
2003static void
2004iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2005    struct ieee80211_channel chans[])
2006{
2007	struct iwm_softc *sc = ic->ic_softc;
2008	struct iwm_nvm_data *data = sc->nvm_data;
2009	uint8_t bands[IEEE80211_MODE_BYTES];
2010	size_t ch_num;
2011
2012	memset(bands, 0, sizeof(bands));
2013	/* 1-13: 11b/g channels. */
2014	setbit(bands, IEEE80211_MODE_11B);
2015	setbit(bands, IEEE80211_MODE_11G);
2016	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2017	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2018
2019	/* 14: 11b channel only. */
2020	clrbit(bands, IEEE80211_MODE_11G);
2021	iwm_add_channel_band(sc, chans, maxchans, nchans,
2022	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2023
2024	if (data->sku_cap_band_52GHz_enable) {
2025		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2026			ch_num = nitems(iwm_nvm_channels);
2027		else
2028			ch_num = nitems(iwm_nvm_channels_8000);
2029		memset(bands, 0, sizeof(bands));
2030		setbit(bands, IEEE80211_MODE_11A);
2031		iwm_add_channel_band(sc, chans, maxchans, nchans,
2032		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2033	}
2034}
2035
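/*
 * Derive the MAC address on family-8000 devices: prefer the MAC
 * override (MAO) section, and fall back to the address stored in the
 * WFMP registers when the override is reserved, invalid or missing.
 */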
2036static void
2037iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2038	const uint16_t *mac_override, const uint16_t *nvm_hw)
2039{
2040	const uint8_t *hw_addr;
2041
2042	if (mac_override) {
2043		static const uint8_t reserved_mac[] = {
2044			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2045		};
2046
2047		hw_addr = (const uint8_t *)(mac_override +
2048				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2049
2050		/*
		 * Store the MAC address from the MAO section.
		 * No byte swapping is required in the MAO section.
2053		 */
2054		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2055
2056		/*
2057		 * Force the use of the OTP MAC address in case of reserved MAC
2058		 * address in the NVM, or if address is given but invalid.
2059		 */
2060		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2061		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2062		    iwm_is_valid_ether_addr(data->hw_addr) &&
2063		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2064			return;
2065
2066		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2067		    "%s: mac address from nvm override section invalid\n",
2068		    __func__);
2069	}
2070
2071	if (nvm_hw) {
2072		/* read the mac address from WFMP registers */
2073		uint32_t mac_addr0 =
2074		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2075		uint32_t mac_addr1 =
2076		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2077
2078		hw_addr = (const uint8_t *)&mac_addr0;
2079		data->hw_addr[0] = hw_addr[3];
2080		data->hw_addr[1] = hw_addr[2];
2081		data->hw_addr[2] = hw_addr[1];
2082		data->hw_addr[3] = hw_addr[0];
2083
2084		hw_addr = (const uint8_t *)&mac_addr1;
2085		data->hw_addr[4] = hw_addr[1];
2086		data->hw_addr[5] = hw_addr[0];
2087
2088		return;
2089	}
2090
2091	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2092	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2093}
2094
2095static int
2096iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2097	    const uint16_t *phy_sku)
2098{
2099	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2100		return le16_to_cpup(nvm_sw + IWM_SKU);
2101
2102	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2103}
2104
2105static int
2106iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2107{
2108	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2109		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2110	else
2111		return le32_to_cpup((const uint32_t *)(nvm_sw +
2112						IWM_NVM_VERSION_8000));
2113}
2114
2115static int
2116iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2117		  const uint16_t *phy_sku)
2118{
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);

	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2123}
2124
2125static int
2126iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2127{
2128	int n_hw_addr;
2129
2130	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2131		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2132
2133	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2134
	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2136}
2137
2138static void
2139iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2140		  uint32_t radio_cfg)
2141{
2142	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2143		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2144		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2145		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2146		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2147		return;
2148	}
2149
2150	/* set the radio configuration for family 8000 */
2151	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2152	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2153	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2154	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2155	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2156	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2157}
2158
2159static int
2160iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2161		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2162{
2163#ifdef notyet /* for FAMILY 9000 */
2164	if (cfg->mac_addr_from_csr) {
2165		iwm_set_hw_address_from_csr(sc, data);
	} else
2167#endif
2168	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2169		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2170
		/* Stored little-endian in 16-bit words, i.e. byte order 2-1-4-3-6-5. */
2172		data->hw_addr[0] = hw_addr[1];
2173		data->hw_addr[1] = hw_addr[0];
2174		data->hw_addr[2] = hw_addr[3];
2175		data->hw_addr[3] = hw_addr[2];
2176		data->hw_addr[4] = hw_addr[5];
2177		data->hw_addr[5] = hw_addr[4];
2178	} else {
2179		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2180	}
2181
2182	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2183		device_printf(sc->sc_dev, "no valid mac address was found\n");
2184		return EINVAL;
2185	}
2186
2187	return 0;
2188}
2189
2190static struct iwm_nvm_data *
2191iwm_parse_nvm_data(struct iwm_softc *sc,
2192		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2193		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2194		   const uint16_t *phy_sku, const uint16_t *regulatory)
2195{
2196	struct iwm_nvm_data *data;
2197	uint32_t sku, radio_cfg;
2198	uint16_t lar_config;
2199
2200	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2201		data = malloc(sizeof(*data) +
2202		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2203		    M_DEVBUF, M_NOWAIT | M_ZERO);
2204	} else {
2205		data = malloc(sizeof(*data) +
2206		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2207		    M_DEVBUF, M_NOWAIT | M_ZERO);
2208	}
2209	if (!data)
2210		return NULL;
2211
2212	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2213
2214	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2215	iwm_set_radio_cfg(sc, data, radio_cfg);
2216
2217	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2218	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2219	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2220	data->sku_cap_11n_enable = 0;
2221
2222	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2223
2224	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2225		uint16_t lar_offset = data->nvm_version < 0xE39 ?
2226				       IWM_NVM_LAR_OFFSET_8000_OLD :
2227				       IWM_NVM_LAR_OFFSET_8000;
2228
2229		lar_config = le16_to_cpup(regulatory + lar_offset);
2230		data->lar_enabled = !!(lar_config &
2231				       IWM_NVM_LAR_ENABLED_8000);
2232	}
2233
2234	/* If no valid mac address was found - bail out */
2235	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2236		free(data, M_DEVBUF);
2237		return NULL;
2238	}
2239
2240	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2241		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2242		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2243	} else {
2244		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2245		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2246	}
2247
2248	return data;
2249}
2250
2251static void
2252iwm_free_nvm_data(struct iwm_nvm_data *data)
2253{
2254	if (data != NULL)
2255		free(data, M_DEVBUF);
2256}
2257
2258static struct iwm_nvm_data *
2259iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2260{
2261	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2262
2263	/* Checking for required sections */
2264	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2265		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2266		    !sections[sc->cfg->nvm_hw_section_num].data) {
2267			device_printf(sc->sc_dev,
2268			    "Can't parse empty OTP/NVM sections\n");
2269			return NULL;
2270		}
2271	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2272		/* SW and REGULATORY sections are mandatory */
2273		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2274		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2275			device_printf(sc->sc_dev,
2276			    "Can't parse empty OTP/NVM sections\n");
2277			return NULL;
2278		}
2279		/* MAC_OVERRIDE or at least HW section must exist */
2280		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2281		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2282			device_printf(sc->sc_dev,
2283			    "Can't parse mac_address, empty sections\n");
2284			return NULL;
2285		}
2286
2287		/* PHY_SKU section is mandatory in B0 */
2288		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2289			device_printf(sc->sc_dev,
2290			    "Can't parse phy_sku in B0, empty sections\n");
2291			return NULL;
2292		}
2293	} else {
2294		panic("unknown device family %d\n", sc->cfg->device_family);
2295	}
2296
2297	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2298	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2299	calib = (const uint16_t *)
2300	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2301	regulatory = (const uint16_t *)
2302	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2303	mac_override = (const uint16_t *)
2304	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2305	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2306
2307	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2308	    phy_sku, regulatory);
2309}
2310
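/*
 * Read all NVM sections from the device into temporary buffers and
 * parse them into sc->nvm_data.  Sections that fail to read are
 * skipped; parsing fails only if mandatory sections are missing.
 */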
2311static int
2312iwm_nvm_init(struct iwm_softc *sc)
2313{
2314	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2315	int i, ret, section;
2316	uint32_t size_read = 0;
2317	uint8_t *nvm_buffer, *temp;
2318	uint16_t len;
2319
2320	memset(nvm_sections, 0, sizeof(nvm_sections));
2321
2322	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2323		return EINVAL;
2324
	/* Load the NVM values from the NIC. */
2327	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2328
2329	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2330	if (!nvm_buffer)
2331		return ENOMEM;
2332	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2333		/* we override the constness for initial read */
2334		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2335					   &len, size_read);
2336		if (ret)
2337			continue;
2338		size_read += len;
2339		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2340		if (!temp) {
2341			ret = ENOMEM;
2342			break;
2343		}
2344		memcpy(temp, nvm_buffer, len);
2345
2346		nvm_sections[section].data = temp;
2347		nvm_sections[section].length = len;
2348	}
2349	if (!size_read)
2350		device_printf(sc->sc_dev, "OTP is blank\n");
2351	free(nvm_buffer, M_DEVBUF);
2352
2353	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2354	if (!sc->nvm_data)
2355		return EINVAL;
2356	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2357		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2358
2359	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2360		if (nvm_sections[i].data != NULL)
2361			free(nvm_sections[i].data, M_DEVBUF);
2362	}
2363
2364	return 0;
2365}
2366
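/*
 * Upload one firmware section to the device, bouncing it through the
 * fw_dma buffer in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes.
 * Destinations in the extended SRAM range need the LMPM_CHICK
 * extended-address bit set around the transfer.
 */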
2367static int
2368iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2369	const struct iwm_fw_desc *section)
2370{
2371	struct iwm_dma_info *dma = &sc->fw_dma;
2372	uint8_t *v_addr;
2373	bus_addr_t p_addr;
2374	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2375	int ret = 0;
2376
2377	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2378		    "%s: [%d] uCode section being loaded...\n",
2379		    __func__, section_num);
2380
2381	v_addr = dma->vaddr;
2382	p_addr = dma->paddr;
2383
2384	for (offset = 0; offset < section->len; offset += chunk_sz) {
2385		uint32_t copy_size, dst_addr;
2386		int extended_addr = FALSE;
2387
2388		copy_size = MIN(chunk_sz, section->len - offset);
2389		dst_addr = section->offset + offset;
2390
2391		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2392		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2393			extended_addr = TRUE;
2394
2395		if (extended_addr)
2396			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2397					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2398
2399		memcpy(v_addr, (const uint8_t *)section->data + offset,
2400		    copy_size);
2401		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2402		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2403						   copy_size);
2404
2405		if (extended_addr)
2406			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2407					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2408
2409		if (ret) {
2410			device_printf(sc->sc_dev,
2411			    "%s: Could not load the [%d] uCode section\n",
2412			    __func__, section_num);
2413			break;
2414		}
2415	}
2416
2417	return ret;
2418}
2419
2420/*
2421 * ucode
2422 */
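/*
 * DMA a single firmware chunk into device memory via the FH service
 * channel, then sleep on sc_fw until the interrupt handler marks the
 * chunk done (sc_fw_chunk_done) or the wait times out.
 */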
2423static int
2424iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2425			     bus_addr_t phy_addr, uint32_t byte_cnt)
2426{
2427	int ret;
2428
2429	sc->sc_fw_chunk_done = 0;
2430
2431	if (!iwm_nic_lock(sc))
2432		return EBUSY;
2433
2434	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2435	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2436
2437	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2438	    dst_addr);
2439
2440	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2441	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2442
2443	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2444	    (iwm_get_dma_hi_addr(phy_addr)
2445	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2446
2447	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2448	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2449	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2450	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2451
2452	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2453	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2454	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2455	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2456
2457	iwm_nic_unlock(sc);
2458
2459	/* wait up to 5s for this segment to load */
2460	ret = 0;
2461	while (!sc->sc_fw_chunk_done) {
2462		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2463		if (ret)
2464			break;
2465	}
2466
2467	if (ret != 0) {
2468		device_printf(sc->sc_dev,
2469		    "fw chunk addr 0x%x len %d failed to load\n",
2470		    dst_addr, byte_cnt);
2471		return ETIMEDOUT;
2472	}
2473
2474	return 0;
2475}
2476
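/*
 * Load the firmware sections for one CPU on family-8000 devices,
 * reporting each loaded section to the uCode through the
 * IWM_FH_UCODE_LOAD_STATUS register (CPU2 status bits are shifted up
 * by 16).
 */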
2477static int
2478iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2479	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2480{
2481	int shift_param;
2482	int i, ret = 0, sec_num = 0x1;
2483	uint32_t val, last_read_idx = 0;
2484
2485	if (cpu == 1) {
2486		shift_param = 0;
2487		*first_ucode_section = 0;
2488	} else {
2489		shift_param = 16;
2490		(*first_ucode_section)++;
2491	}
2492
2493	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2494		last_read_idx = i;
2495
2496		/*
		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
		 * CPU1 sections from the CPU2 sections.
		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
2501		 */
2502		if (!image->fw_sect[i].data ||
2503		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2504		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2505			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Stopping load: section %d is empty or not valid\n",
2507				    i);
2508			break;
2509		}
2510		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2511		if (ret)
2512			return ret;
2513
2514		/* Notify the ucode of the loaded section number and status */
2515		if (iwm_nic_lock(sc)) {
2516			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2517			val = val | (sec_num << shift_param);
2518			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2519			sec_num = (sec_num << 1) | 0x1;
2520			iwm_nic_unlock(sc);
2521		}
2522	}
2523
2524	*first_ucode_section = last_read_idx;
2525
2526	iwm_enable_interrupts(sc);
2527
2528	if (iwm_nic_lock(sc)) {
2529		if (cpu == 1)
2530			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2531		else
2532			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2533		iwm_nic_unlock(sc);
2534	}
2535
2536	return 0;
2537}
2538
2539static int
2540iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2541	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2542{
2543	int shift_param;
2544	int i, ret = 0;
2545	uint32_t last_read_idx = 0;
2546
2547	if (cpu == 1) {
2548		shift_param = 0;
2549		*first_ucode_section = 0;
2550	} else {
2551		shift_param = 16;
2552		(*first_ucode_section)++;
2553	}
2554
2555	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2556		last_read_idx = i;
2557
2558		/*
		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
		 * CPU1 sections from the CPU2 sections.
		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
2563		 */
2564		if (!image->fw_sect[i].data ||
2565		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2566		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2567			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Stopping load: section %d is empty or not valid\n",
				    i);
2570			break;
2571		}
2572
2573		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2574		if (ret)
2575			return ret;
2576	}
2577
2578	*first_ucode_section = last_read_idx;
2579
2580	return 0;
2581
2582}
2583
2584static int
2585iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2586	const struct iwm_fw_sects *image)
2587{
2588	int ret = 0;
2589	int first_ucode_section;
2590
2591	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2592		     image->is_dual_cpus ? "Dual" : "Single");
2593
2594	/* load to FW the binary non secured sections of CPU1 */
2595	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2596	if (ret)
2597		return ret;
2598
2599	if (image->is_dual_cpus) {
2600		/* set CPU2 header address */
2601		if (iwm_nic_lock(sc)) {
2602			iwm_write_prph(sc,
2603				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2604				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2605			iwm_nic_unlock(sc);
2606		}
2607
2608		/* load to FW the binary sections of CPU2 */
2609		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2610						 &first_ucode_section);
2611		if (ret)
2612			return ret;
2613	}
2614
2615	iwm_enable_interrupts(sc);
2616
2617	/* release CPU reset */
2618	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2619
2620	return 0;
2621}
2622
2623int
2624iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2625	const struct iwm_fw_sects *image)
2626{
2627	int ret = 0;
2628	int first_ucode_section;
2629
2630	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2631		    image->is_dual_cpus ? "Dual" : "Single");
2632
2633	/* configure the ucode to be ready to get the secured image */
2634	/* release CPU reset */
2635	if (iwm_nic_lock(sc)) {
2636		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2637		    IWM_RELEASE_CPU_RESET_BIT);
2638		iwm_nic_unlock(sc);
2639	}
2640
2641	/* load to FW the binary Secured sections of CPU1 */
2642	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2643	    &first_ucode_section);
2644	if (ret)
2645		return ret;
2646
2647	/* load to FW the binary sections of CPU2 */
2648	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2649	    &first_ucode_section);
2650}
2651
2652/* XXX Get rid of this definition */
2653static inline void
2654iwm_enable_fw_load_int(struct iwm_softc *sc)
2655{
2656	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2657	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2658	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2659}
2660
2661/* XXX Add proper rfkill support code */
2662static int
2663iwm_start_fw(struct iwm_softc *sc,
2664	const struct iwm_fw_sects *fw)
2665{
2666	int ret;
2667
2668	/* This may fail if AMT took ownership of the device */
2669	if (iwm_prepare_card_hw(sc)) {
2670		device_printf(sc->sc_dev,
2671		    "%s: Exit HW not ready\n", __func__);
2672		ret = EIO;
2673		goto out;
2674	}
2675
2676	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2677
2678	iwm_disable_interrupts(sc);
2679
2680	/* make sure rfkill handshake bits are cleared */
2681	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2682	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2683	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2684
2685	/* clear (again), then enable host interrupts */
2686	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2687
2688	ret = iwm_nic_init(sc);
2689	if (ret) {
2690		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2691		goto out;
2692	}
2693
2694	/*
2695	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all interrupts besides the
	 * FH_TX interrupt, which is needed to load the firmware). If the
2698	 * RF-Kill switch is toggled, we will find out after having loaded
2699	 * the firmware and return the proper value to the caller.
2700	 */
2701	iwm_enable_fw_load_int(sc);
2702
2703	/* really make sure rfkill handshake bits are cleared */
2704	/* maybe we should write a few times more?  just to make sure */
2705	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2706	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2707
2708	/* Load the given image to the HW */
2709	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2710		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2711	else
2712		ret = iwm_pcie_load_given_ucode(sc, fw);
2713
2714	/* XXX re-check RF-Kill state */
2715
2716out:
2717	return ret;
2718}
2719
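/* Tell the firmware which TX antenna chains are usable (bitmask). */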
2720static int
2721iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2722{
2723	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2724		.valid = htole32(valid_tx_ant),
2725	};
2726
2727	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2728	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2729}
2730
2731/* iwlwifi: mvm/fw.c */
2732static int
2733iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2734{
2735	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2736	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2737
2738	/* Set parameters */
2739	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2740	phy_cfg_cmd.calib_control.event_trigger =
2741	    sc->sc_default_calib[ucode_type].event_trigger;
2742	phy_cfg_cmd.calib_control.flow_trigger =
2743	    sc->sc_default_calib[ucode_type].flow_trigger;
2744
2745	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2746	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2747	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2748	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2749}
2750
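/*
 * Notification-wait callback for the ALIVE notification.  The firmware
 * has used three layouts for this response over time; we tell them
 * apart by payload size and extract the error/log event table pointers
 * and the scheduler base address from whichever version we received.
 */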
2751static int
2752iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2753{
2754	struct iwm_mvm_alive_data *alive_data = data;
2755	struct iwm_mvm_alive_resp_ver1 *palive1;
2756	struct iwm_mvm_alive_resp_ver2 *palive2;
2757	struct iwm_mvm_alive_resp *palive;
2758
2759	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2760		palive1 = (void *)pkt->data;
2761
2762		sc->support_umac_log = FALSE;
		sc->error_event_table =
			le32toh(palive1->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

		alive_data->valid = le16toh(palive1->status) ==
				    IWM_ALIVE_STATUS_OK;
		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive1->status), palive1->ver_type,
			    palive1->ver_subtype, palive1->flags);
2775	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2776		palive2 = (void *)pkt->data;
2777		sc->error_event_table =
2778			le32toh(palive2->error_event_table_ptr);
2779		sc->log_event_table =
2780			le32toh(palive2->log_event_table_ptr);
2781		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2782		sc->umac_error_event_table =
			le32toh(palive2->error_info_addr);
2784
2785		alive_data->valid = le16toh(palive2->status) ==
2786				    IWM_ALIVE_STATUS_OK;
2787		if (sc->umac_error_event_table)
2788			sc->support_umac_log = TRUE;
2789
2790		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2791			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2792			    le16toh(palive2->status), palive2->ver_type,
2793			    palive2->ver_subtype, palive2->flags);
2794
2795		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2796			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2797			    palive2->umac_major, palive2->umac_minor);
2798	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2799		palive = (void *)pkt->data;
2800
2801		sc->error_event_table =
2802			le32toh(palive->error_event_table_ptr);
2803		sc->log_event_table =
2804			le32toh(palive->log_event_table_ptr);
2805		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2806		sc->umac_error_event_table =
2807			le32toh(palive->error_info_addr);
2808
2809		alive_data->valid = le16toh(palive->status) ==
2810				    IWM_ALIVE_STATUS_OK;
2811		if (sc->umac_error_event_table)
2812			sc->support_umac_log = TRUE;
2813
2814		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2815			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2816			    le16toh(palive->status), palive->ver_type,
2817			    palive->ver_subtype, palive->flags);
2818
2819		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2820			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2821			    le32toh(palive->umac_major),
2822			    le32toh(palive->umac_minor));
2823	}
2824
2825	return TRUE;
2826}
2827
2828static int
2829iwm_wait_phy_db_entry(struct iwm_softc *sc,
2830	struct iwm_rx_packet *pkt, void *data)
2831{
2832	struct iwm_phy_db *phy_db = data;
2833
2834	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2835		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2836			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2837			    __func__, pkt->hdr.code);
2838		}
2839		return TRUE;
2840	}
2841
2842	if (iwm_phy_db_set_section(phy_db, pkt)) {
2843		device_printf(sc->sc_dev,
2844		    "%s: iwm_phy_db_set_section failed\n", __func__);
2845	}
2846
2847	return FALSE;
2848}
2849
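/*
 * Load the requested uCode image and block until the ALIVE
 * notification arrives or times out.  On failure, sc->cur_ucode is
 * restored to the previously running image type.
 */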
2850static int
2851iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2852	enum iwm_ucode_type ucode_type)
2853{
2854	struct iwm_notification_wait alive_wait;
2855	struct iwm_mvm_alive_data alive_data;
2856	const struct iwm_fw_sects *fw;
2857	enum iwm_ucode_type old_type = sc->cur_ucode;
2858	int error;
2859	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2860
2861	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2862		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2863			error);
2864		return error;
2865	}
2866	fw = &sc->sc_fw.fw_sects[ucode_type];
2867	sc->cur_ucode = ucode_type;
2868	sc->ucode_loaded = FALSE;
2869
2870	memset(&alive_data, 0, sizeof(alive_data));
2871	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2872				   alive_cmd, nitems(alive_cmd),
2873				   iwm_alive_fn, &alive_data);
2874
2875	error = iwm_start_fw(sc, fw);
2876	if (error) {
2877		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2878		sc->cur_ucode = old_type;
2879		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2880		return error;
2881	}
2882
2883	/*
2884	 * Some things may run in the background now, but we
2885	 * just wait for the ALIVE notification here.
2886	 */
2887	IWM_UNLOCK(sc);
2888	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2889				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2890	IWM_LOCK(sc);
2891	if (error) {
2892		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2893			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2894			if (iwm_nic_lock(sc)) {
2895				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2896				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2897				iwm_nic_unlock(sc);
2898			}
2899			device_printf(sc->sc_dev,
2900			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2901			    a, b);
2902		}
2903		sc->cur_ucode = old_type;
2904		return error;
2905	}
2906
2907	if (!alive_data.valid) {
2908		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2909		    __func__);
2910		sc->cur_ucode = old_type;
2911		return EIO;
2912	}
2913
2914	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2915
2916	/*
	 * Configure and operate the firmware paging mechanism.
	 * The driver configures the paging flow only once; the CPU2 paging
	 * image is included in the IWM_UCODE_INIT image.
2920	 */
2921	if (fw->paging_mem_size) {
2922		error = iwm_save_fw_paging(sc, fw);
2923		if (error) {
2924			device_printf(sc->sc_dev,
2925			    "%s: failed to save the FW paging image\n",
2926			    __func__);
2927			return error;
2928		}
2929
2930		error = iwm_send_paging_cmd(sc, fw);
2931		if (error) {
2932			device_printf(sc->sc_dev,
2933			    "%s: failed to send the paging cmd\n", __func__);
2934			iwm_free_fw_paging(sc);
2935			return error;
2936		}
2937	}
2938
2939	if (!error)
2940		sc->ucode_loaded = TRUE;
2941	return error;
2942}
2943
2944/*
2945 * mvm misc bits
2946 */
2947
2948/*
2949 * follows iwlwifi/fw.c
2950 */
2951static int
2952iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2953{
2954	struct iwm_notification_wait calib_wait;
2955	static const uint16_t init_complete[] = {
2956		IWM_INIT_COMPLETE_NOTIF,
2957		IWM_CALIB_RES_NOTIF_PHY_DB
2958	};
2959	int ret;
2960
2961	/* do not operate with rfkill switch turned on */
2962	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2963		device_printf(sc->sc_dev,
2964		    "radio is disabled by hardware switch\n");
2965		return EPERM;
2966	}
2967
2968	iwm_init_notification_wait(sc->sc_notif_wait,
2969				   &calib_wait,
2970				   init_complete,
2971				   nitems(init_complete),
2972				   iwm_wait_phy_db_entry,
2973				   sc->sc_phy_db);
2974
2975	/* Will also start the device */
2976	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2977	if (ret) {
2978		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2979		    ret);
2980		goto error;
2981	}
2982
2983	if (justnvm) {
2984		/* Read nvm */
2985		ret = iwm_nvm_init(sc);
2986		if (ret) {
2987			device_printf(sc->sc_dev, "failed to read nvm\n");
2988			goto error;
2989		}
2990		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2991		goto error;
2992	}
2993
2994	ret = iwm_send_bt_init_conf(sc);
2995	if (ret) {
2996		device_printf(sc->sc_dev,
2997		    "failed to send bt coex configuration: %d\n", ret);
2998		goto error;
2999	}
3000
3001	/* Init Smart FIFO. */
3002	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
3003	if (ret)
3004		goto error;
3005
3006	/* Send TX valid antennas before triggering calibrations */
3007	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3008	if (ret) {
3009		device_printf(sc->sc_dev,
3010		    "failed to send antennas before calibration: %d\n", ret);
3011		goto error;
3012	}
3013
3014	/*
3015	 * Send phy configurations command to init uCode
3016	 * to start the 16.0 uCode init image internal calibrations.
3017	 */
3018	ret = iwm_send_phy_cfg_cmd(sc);
3019	if (ret) {
3020		device_printf(sc->sc_dev,
3021		    "%s: Failed to run INIT calibrations: %d\n",
3022		    __func__, ret);
3023		goto error;
3024	}
3025
3026	/*
3027	 * Nothing to do but wait for the init complete notification
3028	 * from the firmware.
3029	 */
3030	IWM_UNLOCK(sc);
3031	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3032	    IWM_MVM_UCODE_CALIB_TIMEOUT);
3033	IWM_LOCK(sc);
3034
3036	goto out;
3037
3038error:
3039	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3040out:
3041	return ret;
3042}
3043
3044/*
3045 * receive side
3046 */
3047
3048/* (re)stock rx ring, called at init-time and at runtime */
3049static int
3050iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3051{
3052	struct iwm_rx_ring *ring = &sc->rxq;
3053	struct iwm_rx_data *data = &ring->data[idx];
3054	struct mbuf *m;
3055	bus_dmamap_t dmamap;
3056	bus_dma_segment_t seg;
3057	int nsegs, error;
3058
3059	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3060	if (m == NULL)
3061		return ENOBUFS;
3062
3063	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3064	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3065	    &seg, &nsegs, BUS_DMA_NOWAIT);
3066	if (error != 0) {
3067		device_printf(sc->sc_dev,
3068		    "%s: can't map mbuf, error %d\n", __func__, error);
3069		m_freem(m);
3070		return error;
3071	}
3072
3073	if (data->m != NULL)
3074		bus_dmamap_unload(ring->data_dmat, data->map);
3075
3076	/* Swap ring->spare_map with data->map */
3077	dmamap = data->map;
3078	data->map = ring->spare_map;
3079	ring->spare_map = dmamap;
3080
3081	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3082	data->m = m;
3083
3084	/* Update RX descriptor. */
3085	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3086	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3087	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3088	    BUS_DMASYNC_PREWRITE);
3089
3090	return 0;
3091}
3092
3093/* iwlwifi: mvm/rx.c */
3094/*
3095 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3096 * values are reported by the fw as positive values - need to negate
3097 * to obtain their dBM.  Account for missing antennas by replacing 0
3098 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3099 */
3100static int
3101iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3102{
3103	int energy_a, energy_b, energy_c, max_energy;
3104	uint32_t val;
3105
3106	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3107	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3108	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3109	energy_a = energy_a ? -energy_a : -256;
3110	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3111	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3112	energy_b = energy_b ? -energy_b : -256;
3113	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3114	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3115	energy_c = energy_c ? -energy_c : -256;
3116	max_energy = MAX(energy_a, energy_b);
3117	max_energy = MAX(max_energy, energy_c);
3118
3119	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3120	    "energy In A %d B %d C %d , and max %d\n",
3121	    energy_a, energy_b, energy_c, max_energy);
3122
3123	return max_energy;
3124}
3125
3126static void
3127iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3128{
3129	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3130
3131	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3132
3133	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3134}
3135
3136/*
3137 * Retrieve the average noise (in dBm) among receivers.
3138 */
3139static int
3140iwm_get_noise(struct iwm_softc *sc,
3141    const struct iwm_mvm_statistics_rx_non_phy *stats)
3142{
3143	int i, total, nbant, noise;
3144
3145	total = nbant = noise = 0;
3146	for (i = 0; i < 3; i++) {
3147		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3148		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3149		    __func__,
3150		    i,
3151		    noise);
3152
3153		if (noise) {
3154			total += noise;
3155			nbant++;
3156		}
3157	}
3158
3159	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3160	    __func__, nbant, total);
3161#if 0
3162	/* There should be at least one antenna but check anyway. */
3163	return (nbant == 0) ? -127 : (total / nbant) - 107;
3164#else
3165	/* For now, just hard-code it to -96 to be safe */
3166	return (-96);
3167#endif
3168}
3169
3170/*
3171 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3172 *
3173 * Handles the actual data of the Rx packet from the fw
3174 */
3175static boolean_t
3176iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3177	boolean_t stolen)
3178{
3179	struct ieee80211com *ic = &sc->sc_ic;
3180	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3181	struct ieee80211_frame *wh;
3182	struct ieee80211_node *ni;
3183	struct ieee80211_rx_stats rxs;
3184	struct iwm_rx_phy_info *phy_info;
3185	struct iwm_rx_mpdu_res_start *rx_res;
3186	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3187	uint32_t len;
3188	uint32_t rx_pkt_status;
3189	int rssi;
3190
3191	phy_info = &sc->sc_last_phy_info;
3192	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3193	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3194	len = le16toh(rx_res->byte_count);
3195	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3196
3197	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3198		device_printf(sc->sc_dev,
3199		    "dsp size out of range [0,20]: %d\n",
3200		    phy_info->cfg_phy_cnt);
3201		goto fail;
3202	}
3203
3204	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3205	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3206		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3207		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3208		goto fail;
3209	}
3210
3211	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3212
3213	/* Map it to relative value */
3214	rssi = rssi - sc->sc_noise;
3215
3216	/* replenish ring for the buffer we're going to feed to the sharks */
3217	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3218		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3219		    __func__);
3220		goto fail;
3221	}
3222
3223	m->m_data = pkt->data + sizeof(*rx_res);
3224	m->m_pkthdr.len = m->m_len = len;
3225
3226	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3227	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3228
3229	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3230
3231	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3232	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3233	    __func__,
3234	    le16toh(phy_info->channel),
3235	    le16toh(phy_info->phy_flags));
3236
3237	/*
3238	 * Populate an RX state struct with the provided information.
3239	 */
3240	bzero(&rxs, sizeof(rxs));
3241	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3242	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3243	rxs.c_ieee = le16toh(phy_info->channel);
	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3245		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3246	} else {
3247		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3248	}
3249
3250	/* rssi is in 1/2db units */
3251	rxs.rssi = rssi * 2;
3252	rxs.nf = sc->sc_noise;
3253
3254	if (ieee80211_radiotap_active_vap(vap)) {
3255		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3256
3257		tap->wr_flags = 0;
3258		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3259			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3260		tap->wr_chan_freq = htole16(rxs.c_freq);
3261		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3262		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3263		tap->wr_dbm_antsignal = (int8_t)rssi;
3264		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3265		tap->wr_tsft = phy_info->system_timestamp;
3266		switch (phy_info->rate) {
3267		/* CCK rates. */
3268		case  10: tap->wr_rate =   2; break;
3269		case  20: tap->wr_rate =   4; break;
3270		case  55: tap->wr_rate =  11; break;
3271		case 110: tap->wr_rate =  22; break;
3272		/* OFDM rates. */
3273		case 0xd: tap->wr_rate =  12; break;
3274		case 0xf: tap->wr_rate =  18; break;
3275		case 0x5: tap->wr_rate =  24; break;
3276		case 0x7: tap->wr_rate =  36; break;
3277		case 0x9: tap->wr_rate =  48; break;
3278		case 0xb: tap->wr_rate =  72; break;
3279		case 0x1: tap->wr_rate =  96; break;
3280		case 0x3: tap->wr_rate = 108; break;
3281		/* Unknown rate: should not happen. */
3282		default:  tap->wr_rate =   0;
3283		}
3284	}
3285
3286	IWM_UNLOCK(sc);
3287	if (ni != NULL) {
3288		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3289		ieee80211_input_mimo(ni, m, &rxs);
3290		ieee80211_free_node(ni);
3291	} else {
3292		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3293		ieee80211_input_mimo_all(ic, m, &rxs);
3294	}
3295	IWM_LOCK(sc);
3296
3297	return TRUE;
3298
3299fail:	counter_u64_add(ic->ic_ierrors, 1);
3300	return FALSE;
3301}
3302
3303static int
3304iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3305	struct iwm_node *in)
3306{
3307	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3308	struct ieee80211_node *ni = &in->in_ni;
3309	struct ieee80211vap *vap = ni->ni_vap;
3310	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3311	int failack = tx_resp->failure_frame;
3312
3313	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3314
3315	/* Update rate control statistics. */
3316	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3317	    __func__,
3318	    (int) le16toh(tx_resp->status.status),
3319	    (int) le16toh(tx_resp->status.sequence),
3320	    tx_resp->frame_count,
3321	    tx_resp->bt_kill_count,
3322	    tx_resp->failure_rts,
3323	    tx_resp->failure_frame,
3324	    le32toh(tx_resp->initial_rate),
3325	    (int) le16toh(tx_resp->wireless_media_time));
3326
3327	if (status != IWM_TX_STATUS_SUCCESS &&
3328	    status != IWM_TX_STATUS_DIRECT_DONE) {
3329		ieee80211_ratectl_tx_complete(vap, ni,
3330		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3331		return (1);
3332	} else {
3333		ieee80211_ratectl_tx_complete(vap, ni,
3334		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3335		return (0);
3336	}
3337}
3338
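/*
 * Handle a TX completion notification: feed the result to rate
 * control, unmap and complete the mbuf, and restart transmission once
 * the ring drains below the low watermark.
 */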
3339static void
3340iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3341{
3342	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3343	int idx = cmd_hdr->idx;
3344	int qid = cmd_hdr->qid;
3345	struct iwm_tx_ring *ring = &sc->txq[qid];
3346	struct iwm_tx_data *txd = &ring->data[idx];
3347	struct iwm_node *in = txd->in;
3348	struct mbuf *m = txd->m;
3349	int status;
3350
3351	KASSERT(txd->done == 0, ("txd not done"));
3352	KASSERT(txd->in != NULL, ("txd without node"));
3353	KASSERT(txd->m != NULL, ("txd without mbuf"));
3354
3355	sc->sc_tx_timer = 0;
3356
3357	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3358
3359	/* Unmap and free mbuf. */
3360	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3361	bus_dmamap_unload(ring->data_dmat, txd->map);
3362
3363	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3364	    "free txd %p, in %p\n", txd, txd->in);
3365	txd->done = 1;
3366	txd->m = NULL;
3367	txd->in = NULL;
3368
3369	ieee80211_tx_complete(&in->in_ni, m, status);
3370
3371	if (--ring->queued < IWM_TX_RING_LOMARK) {
3372		sc->qfullmsk &= ~(1 << ring->qid);
3373		if (sc->qfullmsk == 0) {
3374			iwm_start(sc);
3375		}
3376	}
3377}
3378
3379/*
3380 * transmit side
3381 */
3382
3383/*
3384 * Process a "command done" firmware notification.  This is where we wakeup
3385 * processes waiting for a synchronous command completion.
3386 * from if_iwn
3387 */
3388static void
3389iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3390{
3391	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3392	struct iwm_tx_data *data;
3393
3394	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3395		return;	/* Not a command ack. */
3396	}
3397
3398	/* XXX wide commands? */
3399	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3400	    "cmd notification type 0x%x qid %d idx %d\n",
3401	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3402
3403	data = &ring->data[pkt->hdr.idx];
3404
3405	/* If the command was mapped in an mbuf, free it. */
3406	if (data->m != NULL) {
3407		bus_dmamap_sync(ring->data_dmat, data->map,
3408		    BUS_DMASYNC_POSTWRITE);
3409		bus_dmamap_unload(ring->data_dmat, data->map);
3410		m_freem(data->m);
3411		data->m = NULL;
3412	}
3413	wakeup(&ring->desc[pkt->hdr.idx]);
3414
3415	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3416		device_printf(sc->sc_dev,
3417		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3418		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3419		/* XXX call iwm_force_nmi() */
3420	}
3421
3422	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3423	ring->queued--;
3424	if (ring->queued == 0)
3425		iwm_pcie_clear_cmd_in_flight(sc);
3426}
3427
3428#if 0
3429/*
3430 * necessary only for block ack mode
3431 */
3432void
3433iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3434	uint16_t len)
3435{
3436	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3437	uint16_t w_val;
3438
3439	scd_bc_tbl = sc->sched_dma.vaddr;
3440
3441	len += 8; /* magic numbers came naturally from paris */
3442	len = roundup(len, 4) / 4;
3443
3444	w_val = htole16(sta_id << 12 | len);
3445
3446	/* Update TX scheduler. */
3447	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3448	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3449	    BUS_DMASYNC_PREWRITE);
3450
3451	/* I really wonder what this is ?!? */
3452	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3453		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3454		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3455		    BUS_DMASYNC_PREWRITE);
3456	}
3457}
3458#endif
3459
3460/*
3461 * Take an 802.11 (non-n) rate, find the relevant rate
3462 * table entry.  return the index into in_ridx[].
3463 *
3464 * The caller then uses that index back into in_ridx
3465 * to figure out the rate index programmed /into/
3466 * the firmware for this given node.
3467 */
3468static int
3469iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3470    uint8_t rate)
3471{
3472	int i;
3473	uint8_t r;
3474
3475	for (i = 0; i < nitems(in->in_ridx); i++) {
3476		r = iwm_rates[in->in_ridx[i]].rate;
3477		if (rate == r)
3478			return (i);
3479	}
3480
3481	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3482	    "%s: couldn't find an entry for rate=%d\n",
3483	    __func__,
3484	    rate);
3485
3486	/* XXX Return the first */
3487	/* XXX TODO: have it return the /lowest/ */
3488	return (0);
3489}
3490
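/*
 * Like iwm_tx_rateidx_lookup(), but searches the full iwm_rates[]
 * table instead of the per-node rate set; used for management,
 * multicast, EAPOL and fixed-rate frames.
 */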
3491static int
3492iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3493{
3494	int i;
3495
3496	for (i = 0; i < nitems(iwm_rates); i++) {
3497		if (iwm_rates[i].rate == rate)
3498			return (i);
3499	}
3500	/* XXX error? */
3501	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3502	    "%s: couldn't find an entry for rate=%d\n",
3503	    __func__,
3504	    rate);
3505	return (0);
3506}
3507
3508/*
3509 * Fill in the rate related information for a transmit command.
3510 */
3511static const struct iwm_rate *
3512iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3513	struct mbuf *m, struct iwm_tx_cmd *tx)
3514{
3515	struct ieee80211_node *ni = &in->in_ni;
3516	struct ieee80211_frame *wh;
3517	const struct ieee80211_txparam *tp = ni->ni_txparms;
3518	const struct iwm_rate *rinfo;
3519	int type;
3520	int ridx, rate_flags;
3521
3522	wh = mtod(m, struct ieee80211_frame *);
3523	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3524
3525	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3526	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3527
3528	if (type == IEEE80211_FC0_TYPE_MGT) {
3529		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3530		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3531		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3532	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3533		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3534		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3535		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3536	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3537		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3538		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3539		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3540	} else if (m->m_flags & M_EAPOL) {
3541		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3542		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3543		    "%s: EAPOL\n", __func__);
3544	} else if (type == IEEE80211_FC0_TYPE_DATA) {
3545		int i;
3546
3547		/* for data frames, use RS table */
3548		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3549		/* XXX pass pktlen */
3550		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3551		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3552		ridx = in->in_ridx[i];
3553
3554		/* This is the index into the programmed table */
3555		tx->initial_rate_index = i;
3556		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3557
3558		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3559		    "%s: start with i=%d, txrate %d\n",
3560		    __func__, i, iwm_rates[ridx].rate);
3561	} else {
3562		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3563		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3564		    __func__, tp->mgmtrate);
3565	}
3566
3567	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3568	    "%s: frame type=%d txrate %d\n",
3569	    __func__, type, iwm_rates[ridx].rate);
3570
3571	rinfo = &iwm_rates[ridx];
3572
3573	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3574	    __func__, ridx,
3575	    rinfo->rate,
3576	    !! (IWM_RIDX_IS_CCK(ridx))
3577	    );
3578
3579	/* XXX TODO: hard-coded TX antenna? */
3580	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3581	if (IWM_RIDX_IS_CCK(ridx))
3582		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3583	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3584
3585	return rinfo;
3586}
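
/*
 * Worked example of the rate_n_flags encoding above, assuming
 * IWM_RATE_MCS_ANT_POS is 14 and the 1 Mbps CCK entry carries the
 * legacy PLCP value 10 (both taken from iwlwifi conventions, not
 * re-verified against this header): antenna A gives
 * rate_flags = 1 << 14 = 0x4000, CCK adds bit 9 (0x200), and the
 * final word is htole32(0x4000 | 0x200 | 10) = htole32(0x420a).
 */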
3587
3588#define TB0_SIZE 16
3589static int
3590iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3591{
3592	struct ieee80211com *ic = &sc->sc_ic;
3593	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3594	struct iwm_node *in = IWM_NODE(ni);
3595	struct iwm_tx_ring *ring;
3596	struct iwm_tx_data *data;
3597	struct iwm_tfd *desc;
3598	struct iwm_device_cmd *cmd;
3599	struct iwm_tx_cmd *tx;
3600	struct ieee80211_frame *wh;
3601	struct ieee80211_key *k = NULL;
3602	struct mbuf *m1;
3603	const struct iwm_rate *rinfo;
3604	uint32_t flags;
3605	u_int hdrlen;
3606	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3607	int nsegs;
3608	uint8_t tid, type;
3609	int i, totlen, error, pad;
3610
3611	wh = mtod(m, struct ieee80211_frame *);
3612	hdrlen = ieee80211_anyhdrsize(wh);
3613	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3614	tid = 0;
3615	ring = &sc->txq[ac];
3616	desc = &ring->desc[ring->cur];
3617	memset(desc, 0, sizeof(*desc));
3618	data = &ring->data[ring->cur];
3619
3620	/* Fill out iwm_tx_cmd to send to the firmware */
3621	cmd = &ring->cmd[ring->cur];
3622	cmd->hdr.code = IWM_TX_CMD;
3623	cmd->hdr.flags = 0;
3624	cmd->hdr.qid = ring->qid;
3625	cmd->hdr.idx = ring->cur;
3626
3627	tx = (void *)cmd->data;
3628	memset(tx, 0, sizeof(*tx));
3629
3630	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3631
3632	/* Encrypt the frame if need be. */
3633	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3634		/* Retrieve key for TX && do software encryption. */
3635		k = ieee80211_crypto_encap(ni, m);
3636		if (k == NULL) {
3637			m_freem(m);
3638			return (ENOBUFS);
3639		}
3640		/* 802.11 header may have moved. */
3641		wh = mtod(m, struct ieee80211_frame *);
3642	}
3643
3644	if (ieee80211_radiotap_active_vap(vap)) {
3645		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3646
3647		tap->wt_flags = 0;
3648		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3649		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3650		tap->wt_rate = rinfo->rate;
3651		if (k != NULL)
3652			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3653		ieee80211_radiotap_tx(vap, m);
3654	}
3655
3656
3657	totlen = m->m_pkthdr.len;
3658
3659	flags = 0;
3660	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3661		flags |= IWM_TX_CMD_FLG_ACK;
3662	}
3663
3664	if (type == IEEE80211_FC0_TYPE_DATA
3665	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3666	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3667		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3668	}
3669
3670	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3671	    type != IEEE80211_FC0_TYPE_DATA)
3672		tx->sta_id = sc->sc_aux_sta.sta_id;
3673	else
3674		tx->sta_id = IWM_STATION_ID;
3675
3676	if (type == IEEE80211_FC0_TYPE_MGT) {
3677		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3678
3679		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3680		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3681			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3682		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3683			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3684		} else {
3685			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3686		}
3687	} else {
3688		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3689	}
3690
3691	if (hdrlen & 3) {
3692		/* First segment length must be a multiple of 4. */
3693		flags |= IWM_TX_CMD_FLG_MH_PAD;
3694		pad = 4 - (hdrlen & 3);
3695	} else
3696		pad = 0;
3697
3698	tx->driver_txop = 0;
3699	tx->next_frame_len = 0;
3700
3701	tx->len = htole16(totlen);
3702	tx->tid_tspec = tid;
3703	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3704
3705	/* Set physical address of "scratch area". */
3706	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3707	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3708
3709	/* Copy 802.11 header in TX command. */
3710	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3711
3712	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3713
3714	tx->sec_ctl = 0;
3715	tx->tx_flags |= htole32(flags);
3716
3717	/* Trim 802.11 header. */
3718	m_adj(m, hdrlen);
3719	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3720	    segs, &nsegs, BUS_DMA_NOWAIT);
3721	if (error != 0) {
3722		if (error != EFBIG) {
3723			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3724			    error);
3725			m_freem(m);
3726			return error;
3727		}
3728		/* Too many DMA segments, linearize mbuf. */
3729		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3730		if (m1 == NULL) {
3731			device_printf(sc->sc_dev,
3732			    "%s: could not defrag mbuf\n", __func__);
3733			m_freem(m);
3734			return (ENOBUFS);
3735		}
3736		m = m1;
3737
3738		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3739		    segs, &nsegs, BUS_DMA_NOWAIT);
3740		if (error != 0) {
3741			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3742			    error);
3743			m_freem(m);
3744			return error;
3745		}
3746	}
3747	data->m = m;
3748	data->in = in;
3749	data->done = 0;
3750
3751	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3752	    "sending txd %p, in %p\n", data, data->in);
3753	KASSERT(data->in != NULL, ("node is NULL"));
3754
3755	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3756	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3757	    ring->qid, ring->cur, totlen, nsegs,
3758	    le32toh(tx->tx_flags),
3759	    le32toh(tx->rate_n_flags),
3760	    tx->initial_rate_index
3761	    );
3762
3763	/* Fill TX descriptor. */
3764	desc->num_tbs = 2 + nsegs;
3765
3766	desc->tbs[0].lo = htole32(data->cmd_paddr);
3767	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3768	    (TB0_SIZE << 4);
3769	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3770	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3771	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3772	      + hdrlen + pad - TB0_SIZE) << 4);
3773
3774	/* Other DMA segments are for data payload. */
3775	for (i = 0; i < nsegs; i++) {
3776		seg = &segs[i];
3777		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3778		desc->tbs[i+2].hi_n_len =
3779		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3780		    | ((seg->ds_len) << 4);
3781	}
3782
3783	bus_dmamap_sync(ring->data_dmat, data->map,
3784	    BUS_DMASYNC_PREWRITE);
3785	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3786	    BUS_DMASYNC_PREWRITE);
3787	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3788	    BUS_DMASYNC_PREWRITE);
3789
3790#if 0
3791	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3792#endif
3793
3794	/* Kick TX ring. */
3795	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3796	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3797
3798	/* Mark TX ring as full if we reach a certain threshold. */
3799	if (++ring->queued > IWM_TX_RING_HIMARK) {
3800		sc->qfullmsk |= 1 << ring->qid;
3801	}
3802
3803	return 0;
3804}
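
/*
 * For reference, the TFD assembled above describes the frame with
 * 2 + nsegs buffers, laid out roughly like this (a sketch, not an
 * exhaustive description of the hardware format):
 *
 *   tbs[0]           first TB0_SIZE (16) bytes of the command,
 *                    at data->cmd_paddr
 *   tbs[1]           rest of cmd header + tx cmd + 802.11 header + pad
 *   tbs[2..nsegs+1]  the mbuf payload segments
 *
 * Each hi_n_len halfword packs the upper address bits in the low
 * nibble and the byte length shifted left by four; e.g. a 64-byte
 * segment entirely below 4GB encodes as htole16(64 << 4), i.e. 0x400.
 */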
3805
3806static int
3807iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3808    const struct ieee80211_bpf_params *params)
3809{
3810	struct ieee80211com *ic = ni->ni_ic;
3811	struct iwm_softc *sc = ic->ic_softc;
3812	int error = 0;
3813
3814	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3815	    "->%s begin\n", __func__);
3816
3817	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3818		m_freem(m);
3819		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3820		    "<-%s not RUNNING\n", __func__);
3821		return (ENETDOWN);
3822	}
3823
3824	IWM_LOCK(sc);
3825	/* XXX fix this: params is currently ignored and both paths match */
3826	if (params == NULL) {
3827		error = iwm_tx(sc, m, ni, 0);
3828	} else {
3829		error = iwm_tx(sc, m, ni, 0);
3830	}
3831	sc->sc_tx_timer = 5;
3832	IWM_UNLOCK(sc);
3833
3834	return (error);
3835}
3836
3837/*
3838 * mvm/tx.c
3839 */
3840
3841/*
3842 * Note that there are transports that buffer frames before they reach
3843 * the firmware. This means that after flush_tx_path is called, the
3844 * queue might not be empty. The race-free way to handle this is to:
3845 * 1) set the station as draining
3846 * 2) flush the Tx path
3847 * 3) wait for the transport queues to be empty
3848 */
3849int
3850iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3851{
3852	int ret;
3853	struct iwm_tx_path_flush_cmd flush_cmd = {
3854		.queues_ctl = htole32(tfd_msk),
3855		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3856	};
3857
3858	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3859	    sizeof(flush_cmd), &flush_cmd);
3860	if (ret)
3861		device_printf(sc->sc_dev,
3862		    "Flushing tx queue failed: %d\n", ret);
3863	return ret;
3864}
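
/*
 * A minimal sketch of the race-free drain sequence described above.
 * The iwm_mvm_drain_sta() name is hypothetical for this driver
 * revision; step 3 matches the call that iwm_release() currently
 * leaves commented out.
 */
#if 0
	iwm_mvm_drain_sta(sc, in, TRUE);		/* 1) mark as draining */
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC); /* 2) flush Tx path */
	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);	/* 3) wait for queues */
#endif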
3865
3866/*
3867 * BEGIN mvm/quota.c
3868 */
3869
3870static int
3871iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3872{
3873	struct iwm_time_quota_cmd cmd;
3874	int i, idx, ret, num_active_macs, quota, quota_rem;
3875	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3876	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3877	uint16_t id;
3878
3879	memset(&cmd, 0, sizeof(cmd));
3880
3881	/* currently, PHY ID == binding ID */
3882	if (ivp) {
3883		id = ivp->phy_ctxt->id;
3884		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3885		colors[id] = ivp->phy_ctxt->color;
3886
3887		if (1)
3888			n_ifs[id] = 1;
3889	}
3890
3891	/*
3892	 * The FW's scheduling session consists of
3893	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3894	 * equally between all the bindings that require quota
3895	 * equally between all the bindings that require quota.
3896	num_active_macs = 0;
3897	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3898		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3899		num_active_macs += n_ifs[i];
3900	}
3901
3902	quota = 0;
3903	quota_rem = 0;
3904	if (num_active_macs) {
3905		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3906		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3907	}
3908
3909	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3910		if (colors[i] < 0)
3911			continue;
3912
3913		cmd.quotas[idx].id_and_color =
3914			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3915
3916		if (n_ifs[i] <= 0) {
3917			cmd.quotas[idx].quota = htole32(0);
3918			cmd.quotas[idx].max_duration = htole32(0);
3919		} else {
3920			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3921			cmd.quotas[idx].max_duration = htole32(0);
3922		}
3923		idx++;
3924	}
3925
3926	/* Give the remainder of the session to the first binding */
3927	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3928
3929	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3930	    sizeof(cmd), &cmd);
3931	if (ret)
3932		device_printf(sc->sc_dev,
3933		    "%s: Failed to send quota: %d\n", __func__, ret);
3934	return ret;
3935}
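
/*
 * Worked example of the split above, assuming IWM_MVM_MAX_QUOTA is
 * 128: with three active bindings, quota = 128 / 3 = 42 and
 * quota_rem = 128 % 3 = 2, so every binding is programmed with 42
 * fragments and the first one gets 42 + 2 = 44.
 */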
3936
3937/*
3938 * END mvm/quota.c
3939 */
3940
3941/*
3942 * ieee80211 routines
3943 */
3944
3945/*
3946 * Change to AUTH state in 80211 state machine.  Roughly matches what
3947 * Linux does in bss_info_changed().
3948 */
3949static int
3950iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3951{
3952	struct ieee80211_node *ni;
3953	struct iwm_node *in;
3954	struct iwm_vap *iv = IWM_VAP(vap);
3955	uint32_t duration;
3956	int error;
3957
3958	/*
3959	 * XXX i have a feeling that the vap node is being
3960	 * freed from underneath us. Grr.
3961	 */
3962	ni = ieee80211_ref_node(vap->iv_bss);
3963	in = IWM_NODE(ni);
3964	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3965	    "%s: called; vap=%p, bss ni=%p\n",
3966	    __func__,
3967	    vap,
3968	    ni);
3969
3970	in->in_assoc = 0;
3971
3972	/*
3973	 * Firmware bug - it'll crash if the beacon interval is less
3974	 * than 16. We can't avoid connecting at all, so refuse the
3975	 * station state change, this will cause net80211 to abandon
3976	 * station state change; this will cause net80211 to abandon
3977	 * blacklist the AP...
3978	 */
3979	if (ni->ni_intval < 16) {
3980		device_printf(sc->sc_dev,
3981		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3982		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3983		error = EINVAL;
3984		goto out;
3985	}
3986
3987	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3988	if (error != 0)
3989		return error;
3990
3991	error = iwm_allow_mcast(vap, sc);
3992	if (error) {
3993		device_printf(sc->sc_dev,
3994		    "%s: failed to set multicast\n", __func__);
3995		goto out;
3996	}
3997
3998	/*
3999	 * This is where it deviates from what Linux does.
4000	 *
4001	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4002	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4003	 * and always does a mac_ctx_changed().
4004	 *
4005	 * The openbsd port doesn't attempt to do that - it reset things
4006	 * The OpenBSD port doesn't attempt to do that - it resets things
4007	 *
4008	 * So, until the state handling is fixed (ie, we never reset
4009	 * the NIC except for a firmware failure, which should drag
4010	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4011	 * contexts that are required), let's do a dirty hack here.
4012	 */
4013	if (iv->is_uploaded) {
4014		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4015			device_printf(sc->sc_dev,
4016			    "%s: failed to update MAC\n", __func__);
4017			goto out;
4018		}
4019		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4020		    in->in_ni.ni_chan, 1, 1)) != 0) {
4021			device_printf(sc->sc_dev,
4022			    "%s: failed update phy ctxt\n", __func__);
4023			goto out;
4024		}
4025		iv->phy_ctxt = &sc->sc_phyctxt[0];
4026
4027		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4028			device_printf(sc->sc_dev,
4029			    "%s: binding update cmd\n", __func__);
4030			goto out;
4031		}
4032		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4033			device_printf(sc->sc_dev,
4034			    "%s: failed to update sta\n", __func__);
4035			goto out;
4036		}
4037	} else {
4038		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4039			device_printf(sc->sc_dev,
4040			    "%s: failed to add MAC\n", __func__);
4041			goto out;
4042		}
4043		if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4044			device_printf(sc->sc_dev,
4045			    "%s: failed to update power management\n",
4046			    __func__);
4047			goto out;
4048		}
4049		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4050		    in->in_ni.ni_chan, 1, 1)) != 0) {
4051			device_printf(sc->sc_dev,
4052			    "%s: failed add phy ctxt!\n", __func__);
4053			error = ETIMEDOUT;
4054			goto out;
4055		}
4056		iv->phy_ctxt = &sc->sc_phyctxt[0];
4057
4058		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4059			device_printf(sc->sc_dev,
4060			    "%s: binding add cmd\n", __func__);
4061			goto out;
4062		}
4063		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4064			device_printf(sc->sc_dev,
4065			    "%s: failed to add sta\n", __func__);
4066			goto out;
4067		}
4068	}
4069
4070	/*
4071	 * Prevent the FW from wandering off channel during association
4072	 * by "protecting" the session with a time event.
4073	 */
4074	/* XXX duration is in units of TU, not MS */
4075	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4076	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4077	DELAY(100);
4078
4079	error = 0;
4080out:
4081	ieee80211_free_node(ni);
4082	return (error);
4083}
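
/*
 * For reference on the TU/ms mismatch flagged above: one TU is
 * 1024 microseconds, so a value meant as 500 ms but interpreted by
 * the firmware as 500 TU actually protects the session for
 * 500 * 1024 us = 512000 us, i.e. 512 ms.
 */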
4084
4085static int
4086iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4087{
4088	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4089	int error;
4090
4091	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4092		device_printf(sc->sc_dev,
4093		    "%s: failed to update STA\n", __func__);
4094		return error;
4095	}
4096
4097	in->in_assoc = 1;
4098	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4099		device_printf(sc->sc_dev,
4100		    "%s: failed to update MAC\n", __func__);
4101		return error;
4102	}
4103
4104	return 0;
4105}
4106
4107static int
4108iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4109{
4110	uint32_t tfd_msk;
4111
4112	/*
4113	 * Ok, so *technically* the proper set of calls for going
4114	 * from RUN back to SCAN is:
4115	 *
4116	 * iwm_mvm_power_mac_disable(sc, in);
4117	 * iwm_mvm_mac_ctxt_changed(sc, vap);
4118	 * iwm_mvm_rm_sta(sc, in);
4119	 * iwm_mvm_update_quotas(sc, NULL);
4120	 * iwm_mvm_mac_ctxt_changed(sc, in);
4121	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4122	 * iwm_mvm_mac_ctxt_remove(sc, in);
4123	 *
4124	 * However, that freezes the device no matter which permutations
4125	 * and modifications are attempted.  Obviously, this driver is missing
4126	 * something since it works in the Linux driver, but figuring out what
4127	 * is missing is a little more complicated.  Now, since we're going
4128	 * back to nothing anyway, we'll just do a complete device reset.
4129	 * Up yours, device!
4130	 */
4131	/*
4132	 * Just using 0xf for the queues mask is fine as long as we only
4133	 * get here from RUN state.
4134	 */
4135	tfd_msk = 0xf;
4136	iwm_xmit_queue_drain(sc);
4137	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4138	/*
4139	 * We seem to get away with just synchronously sending the
4140	 * IWM_TXPATH_FLUSH command.
4141	 */
4142//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4143	iwm_stop_device(sc);
4144	iwm_init_hw(sc);
4145	if (in)
4146		in->in_assoc = 0;
4147	return 0;
4148
4149#if 0
4150	int error;
4151
4152	iwm_mvm_power_mac_disable(sc, in);
4153
4154	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4155		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4156		return error;
4157	}
4158
4159	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4160		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4161		return error;
4162	}
4164	in->in_assoc = 0;
4165	iwm_mvm_update_quotas(sc, NULL);
4166	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4167		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4168		return error;
4169	}
4170	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4171
4172	iwm_mvm_mac_ctxt_remove(sc, in);
4173
4174	return error;
4175#endif
4176}
4177
4178static struct ieee80211_node *
4179iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4180{
4181	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4182	    M_NOWAIT | M_ZERO);
4183}
4184
4185uint8_t
4186iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4187{
4188	int i;
4189	uint8_t rval;
4190
4191	for (i = 0; i < rs->rs_nrates; i++) {
4192		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4193		if (rval == iwm_rates[ridx].rate)
4194			return rs->rs_rates[i];
4195	}
4196
4197	return 0;
4198}
4199
4200static void
4201iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4202{
4203	struct ieee80211_node *ni = &in->in_ni;
4204	struct iwm_lq_cmd *lq = &in->in_lq;
4205	int nrates = ni->ni_rates.rs_nrates;
4206	int i, ridx, tab = 0;
4207//	int txant = 0;
4208
4209	if (nrates > nitems(lq->rs_table)) {
4210		device_printf(sc->sc_dev,
4211		    "%s: node supports %d rates, driver handles "
4212		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4213		return;
4214	}
4215	if (nrates == 0) {
4216		device_printf(sc->sc_dev,
4217		    "%s: node supports 0 rates, odd!\n", __func__);
4218		return;
4219	}
4220
4221	/*
4222	 * XXX .. and most of iwm_node is not initialised explicitly;
4223	 * it's all just 0x0 passed to the firmware.
4224	 */
4225
4226	/* first figure out which rates we should support */
4227	/* XXX TODO: this isn't 11n aware /at all/ */
4228	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4229	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4230	    "%s: nrates=%d\n", __func__, nrates);
4231
4232	/*
4233	 * Loop over nrates and populate in_ridx from the highest
4234	 * rate to the lowest rate.  Remember, in_ridx[] has
4235	 * IEEE80211_RATE_MAXSIZE entries!
4236	 */
4237	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4238		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4239
4240		/* Map 802.11 rate to HW rate index. */
4241		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4242			if (iwm_rates[ridx].rate == rate)
4243				break;
4244		if (ridx > IWM_RIDX_MAX) {
4245			device_printf(sc->sc_dev,
4246			    "%s: WARNING: device rate for %d not found!\n",
4247			    __func__, rate);
4248		} else {
4249			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4250			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4251			    __func__,
4252			    i,
4253			    rate,
4254			    ridx);
4255			in->in_ridx[i] = ridx;
4256		}
4257	}
4258
4259	/* then construct a lq_cmd based on those */
4260	memset(lq, 0, sizeof(*lq));
4261	lq->sta_id = IWM_STATION_ID;
4262
4263	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4264	if (ni->ni_flags & IEEE80211_NODE_HT)
4265		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4266
4267	/*
4268	 * Are these used? (We don't do SISO or MIMO.)
4269	 * They need to be set to non-zero, though, or we get an error.
4270	 */
4271	lq->single_stream_ant_msk = 1;
4272	lq->dual_stream_ant_msk = 1;
4273
4274	/*
4275	 * Build the actual rate selection table.
4276	 * The lowest bits are the rates.  Additionally,
4277	 * CCK needs bit 9 to be set.  The rest of the bits
4278	 * we add to the table select the tx antenna.
4279	 * Note that we add the rates from highest to lowest
4280	 * (the opposite order of ni_rates).
4281	 */
4282	/*
4283	 * XXX TODO: this should be looping over the min of nrates
4284	 * and LQ_MAX_RETRY_NUM.  Sigh.
4285	 */
4286	for (i = 0; i < nrates; i++) {
4287		int nextant;
4288
4289#if 0
4290		if (txant == 0)
4291			txant = iwm_mvm_get_valid_tx_ant(sc);
4292		nextant = 1<<(ffs(txant)-1);
4293		txant &= ~nextant;
4294#else
4295		nextant = iwm_mvm_get_valid_tx_ant(sc);
4296#endif
4297		/*
4298		 * Map the rate id into a rate index into
4299		 * our hardware table containing the
4300		 * configuration to use for this rate.
4301		 */
4302		ridx = in->in_ridx[i];
4303		tab = iwm_rates[ridx].plcp;
4304		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4305		if (IWM_RIDX_IS_CCK(ridx))
4306			tab |= IWM_RATE_MCS_CCK_MSK;
4307		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4308		    "station rate i=%d, rate=%d, hw=%x\n",
4309		    i, iwm_rates[ridx].rate, tab);
4310		lq->rs_table[i] = htole32(tab);
4311	}
4312	/* then fill the rest with the lowest possible rate */
4313	for (i = nrates; i < nitems(lq->rs_table); i++) {
4314		KASSERT(tab != 0, ("invalid tab"));
4315		lq->rs_table[i] = htole32(tab);
4316	}
4317}
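
/*
 * Worked example of one rs_table entry built above, assuming the
 * 11 Mbps CCK rate uses the legacy PLCP value 110 (an iwlwifi
 * convention, not re-verified here) and antenna A is selected
 * (nextant = 1, IWM_RATE_MCS_ANT_POS = 14):
 *
 *   tab = 110 | (1 << 14) | IWM_RATE_MCS_CCK_MSK
 *       = 0x6e | 0x4000 | 0x200 = 0x426e
 *
 * which is stored little-endian via htole32().
 */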
4318
4319static int
4320iwm_media_change(struct ifnet *ifp)
4321{
4322	struct ieee80211vap *vap = ifp->if_softc;
4323	struct ieee80211com *ic = vap->iv_ic;
4324	struct iwm_softc *sc = ic->ic_softc;
4325	int error;
4326
4327	error = ieee80211_media_change(ifp);
4328	if (error != ENETRESET)
4329		return error;
4330
4331	IWM_LOCK(sc);
4332	if (ic->ic_nrunning > 0) {
4333		iwm_stop(sc);
4334		iwm_init(sc);
4335	}
4336	IWM_UNLOCK(sc);
4337	return error;
4338}
4339
4340
4341static int
4342iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4343{
4344	struct iwm_vap *ivp = IWM_VAP(vap);
4345	struct ieee80211com *ic = vap->iv_ic;
4346	struct iwm_softc *sc = ic->ic_softc;
4347	struct iwm_node *in;
4348	int error;
4349
4350	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4351	    "switching state %s -> %s\n",
4352	    ieee80211_state_name[vap->iv_state],
4353	    ieee80211_state_name[nstate]);
4354	IEEE80211_UNLOCK(ic);
4355	IWM_LOCK(sc);
4356
4357	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4358		iwm_led_blink_stop(sc);
4359
4360	/* disable beacon filtering if we're hopping out of RUN */
4361	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4362		iwm_mvm_disable_beacon_filter(sc);
4363
4364		if ((in = IWM_NODE(vap->iv_bss)) != NULL)
4365			in->in_assoc = 0;
4366
4367		if (nstate == IEEE80211_S_INIT) {
4368			IWM_UNLOCK(sc);
4369			IEEE80211_LOCK(ic);
4370			error = ivp->iv_newstate(vap, nstate, arg);
4371			IEEE80211_UNLOCK(ic);
4372			IWM_LOCK(sc);
4373			iwm_release(sc, NULL);
4374			IWM_UNLOCK(sc);
4375			IEEE80211_LOCK(ic);
4376			return error;
4377		}
4378
4379		/*
4380		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4381		 * above then the card will be completely reinitialized,
4382		 * so the driver must do everything necessary to bring the card
4383		 * from INIT to SCAN.
4384		 *
4385		 * Additionally, upon receiving deauth frame from AP,
4386		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4387		 * state. This will also fail with this driver, so bring the FSM
4388		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4389		 *
4390		 * XXX TODO: fix this for FreeBSD!
4391		 */
4392		if (nstate == IEEE80211_S_SCAN ||
4393		    nstate == IEEE80211_S_AUTH ||
4394		    nstate == IEEE80211_S_ASSOC) {
4395			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4396			    "Force transition to INIT; MGT=%d\n", arg);
4397			IWM_UNLOCK(sc);
4398			IEEE80211_LOCK(ic);
4399			/* Always pass arg as -1 since we can't Tx right now. */
4400			/*
4401			 * XXX arg is just ignored anyway when transitioning
4402			 *     to IEEE80211_S_INIT.
4403			 */
4404			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4405			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4406			    "Going INIT->SCAN\n");
4407			nstate = IEEE80211_S_SCAN;
4408			IEEE80211_UNLOCK(ic);
4409			IWM_LOCK(sc);
4410		}
4411	}
4412
4413	switch (nstate) {
4414	case IEEE80211_S_INIT:
4415	case IEEE80211_S_SCAN:
4416		if (vap->iv_state == IEEE80211_S_AUTH ||
4417		    vap->iv_state == IEEE80211_S_ASSOC) {
4418			int myerr;
4419			IWM_UNLOCK(sc);
4420			IEEE80211_LOCK(ic);
4421			myerr = ivp->iv_newstate(vap, nstate, arg);
4422			IEEE80211_UNLOCK(ic);
4423			IWM_LOCK(sc);
4424			error = iwm_mvm_rm_sta(sc, vap, FALSE);
4425			if (error) {
4426				device_printf(sc->sc_dev,
4427				    "%s: Failed to remove station: %d\n",
4428				    __func__, error);
4429			}
4430			error = iwm_mvm_mac_ctxt_changed(sc, vap);
4431			if (error) {
4432				device_printf(sc->sc_dev,
4433				    "%s: Failed to change mac context: %d\n",
4434				    __func__, error);
4435			}
4436			error = iwm_mvm_binding_remove_vif(sc, ivp);
4437			if (error) {
4438				device_printf(sc->sc_dev,
4439				    "%s: Failed to remove channel ctx: %d\n",
4440				    __func__, error);
4441			}
4442			ivp->phy_ctxt = NULL;
4443			IWM_UNLOCK(sc);
4444			IEEE80211_LOCK(ic);
4445			return myerr;
4446		}
4447		break;
4448
4449	case IEEE80211_S_AUTH:
4450		if ((error = iwm_auth(vap, sc)) != 0) {
4451			device_printf(sc->sc_dev,
4452			    "%s: could not move to auth state: %d\n",
4453			    __func__, error);
4454			break;
4455		}
4456		break;
4457
4458	case IEEE80211_S_ASSOC:
4459		if ((error = iwm_assoc(vap, sc)) != 0) {
4460			device_printf(sc->sc_dev,
4461			    "%s: failed to associate: %d\n", __func__,
4462			    error);
4463			break;
4464		}
4465		break;
4466
4467	case IEEE80211_S_RUN:
4468	{
4469		struct iwm_host_cmd cmd = {
4470			.id = IWM_LQ_CMD,
4471			.len = { sizeof(in->in_lq), },
4472			.flags = IWM_CMD_SYNC,
4473		};
4474
4475		/* Update the association state, now we have it all */
4476		/* (e.g., the associd comes in at this point). */
4477		error = iwm_assoc(vap, sc);
4478		if (error != 0) {
4479			device_printf(sc->sc_dev,
4480			    "%s: failed to update association state: %d\n",
4481			    __func__,
4482			    error);
4483			break;
4484		}
4485
4486		in = IWM_NODE(vap->iv_bss);
4487		iwm_mvm_enable_beacon_filter(sc, in);
4488		iwm_mvm_power_update_mac(sc);
4489		iwm_mvm_update_quotas(sc, ivp);
4490		iwm_setrates(sc, in);
4491
4492		cmd.data[0] = &in->in_lq;
4493		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4494			device_printf(sc->sc_dev,
4495			    "%s: IWM_LQ_CMD failed\n", __func__);
4496		}
4497
4498		iwm_mvm_led_enable(sc);
4499		break;
4500	}
4501
4502	default:
4503		break;
4504	}
4505	IWM_UNLOCK(sc);
4506	IEEE80211_LOCK(ic);
4507
4508	return (ivp->iv_newstate(vap, nstate, arg));
4509}
4510
4511void
4512iwm_endscan_cb(void *arg, int pending)
4513{
4514	struct iwm_softc *sc = arg;
4515	struct ieee80211com *ic = &sc->sc_ic;
4516
4517	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4518	    "%s: scan ended\n",
4519	    __func__);
4520
4521	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4522}
4523
4524/*
4525 * Aging and idle timeouts for the different possible scenarios
4526 * in the default configuration.
4527 */
4528static const uint32_t
4529iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4530	{
4531		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4532		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4533	},
4534	{
4535		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4536		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4537	},
4538	{
4539		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4540		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4541	},
4542	{
4543		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4544		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4545	},
4546	{
4547		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4548		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4549	},
4550};
4551
4552/*
4553 * Aging and idle timeouts for the different possible scenarios
4554 * in single BSS MAC configuration.
4555 */
4556static const uint32_t
4557iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4558	{
4559		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4560		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4561	},
4562	{
4563		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4564		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4565	},
4566	{
4567		htole32(IWM_SF_MCAST_AGING_TIMER),
4568		htole32(IWM_SF_MCAST_IDLE_TIMER)
4569	},
4570	{
4571		htole32(IWM_SF_BA_AGING_TIMER),
4572		htole32(IWM_SF_BA_IDLE_TIMER)
4573	},
4574	{
4575		htole32(IWM_SF_TX_RE_AGING_TIMER),
4576		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4577	},
4578};
4579
4580static void
4581iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4582    struct ieee80211_node *ni)
4583{
4584	int i, j, watermark;
4585
4586	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4587
4588	/*
4589	 * If we are in the association flow, check the antenna configuration
4590	 * capabilities of the AP station and choose the watermark accordingly.
4591	 */
4592	if (ni) {
4593		if (ni->ni_flags & IEEE80211_NODE_HT) {
4594#ifdef notyet
4595			if (ni->ni_rxmcs[2] != 0)
4596				watermark = IWM_SF_W_MARK_MIMO3;
4597			else if (ni->ni_rxmcs[1] != 0)
4598				watermark = IWM_SF_W_MARK_MIMO2;
4599			else
4600#endif
4601				watermark = IWM_SF_W_MARK_SISO;
4602		} else {
4603			watermark = IWM_SF_W_MARK_LEGACY;
4604		}
4605	/* default watermark value for unassociated mode. */
4606	} else {
4607		watermark = IWM_SF_W_MARK_MIMO2;
4608	}
4609	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4610
4611	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4612		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4613			sf_cmd->long_delay_timeouts[i][j] =
4614					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4615		}
4616	}
4617
4618	if (ni) {
4619		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4620		       sizeof(iwm_sf_full_timeout));
4621	} else {
4622		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4623		       sizeof(iwm_sf_full_timeout_def));
4624	}
4625}
4626
4627static int
4628iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4629{
4630	struct ieee80211com *ic = &sc->sc_ic;
4631	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4632	struct iwm_sf_cfg_cmd sf_cmd = {
4633		.state = htole32(IWM_SF_FULL_ON),
4634	};
4635	int ret = 0;
4636
4637	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4638		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4639
4640	switch (new_state) {
4641	case IWM_SF_UNINIT:
4642	case IWM_SF_INIT_OFF:
4643		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4644		break;
4645	case IWM_SF_FULL_ON:
4646		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4647		break;
4648	default:
4649		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4650		    "Invalid state: %d, not sending Smart Fifo cmd\n",
4651		    new_state);
4652		return EINVAL;
4653	}
4654
4655	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4656				   sizeof(sf_cmd), &sf_cmd);
4657	return ret;
4658}
4659
4660static int
4661iwm_send_bt_init_conf(struct iwm_softc *sc)
4662{
4663	struct iwm_bt_coex_cmd bt_cmd;
4664
4665	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4666	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4667
4668	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4669	    &bt_cmd);
4670}
4671
4672static boolean_t
4673iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4674{
4675	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4676	boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4677					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4678
4679	if (iwm_lar_disable)
4680		return FALSE;
4681
4682	/*
4683	 * Enable LAR only if it is supported by the FW (TLV) &&
4684	 * enabled in the NVM
4685	 */
4686	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4687		return nvm_lar && tlv_lar;
4688	else
4689		return tlv_lar;
4690}
4691
4692static boolean_t
4693iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4694{
4695	return fw_has_api(&sc->ucode_capa,
4696			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4697	       fw_has_capa(&sc->ucode_capa,
4698			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4699}
4700
4701static int
4702iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4703{
4704	struct iwm_mcc_update_cmd mcc_cmd;
4705	struct iwm_host_cmd hcmd = {
4706		.id = IWM_MCC_UPDATE_CMD,
4707		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4708		.data = { &mcc_cmd },
4709	};
4710	int ret;
4711#ifdef IWM_DEBUG
4712	struct iwm_rx_packet *pkt;
4713	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4714	struct iwm_mcc_update_resp *mcc_resp;
4715	int n_channels;
4716	uint16_t mcc;
4717#endif
4718	int resp_v2 = fw_has_capa(&sc->ucode_capa,
4719	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4720
4721	if (!iwm_mvm_is_lar_supported(sc)) {
4722		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4723		    __func__);
4724		return 0;
4725	}
4726
4727	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4728	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4729	if (iwm_mvm_is_wifi_mcc_supported(sc))
4730		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4731	else
4732		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4733
4734	if (resp_v2)
4735		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4736	else
4737		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4738
4739	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4740	    "send MCC update to FW with '%c%c' src = %d\n",
4741	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4742
4743	ret = iwm_send_cmd(sc, &hcmd);
4744	if (ret)
4745		return ret;
4746
4747#ifdef IWM_DEBUG
4748	pkt = hcmd.resp_pkt;
4749
4750	/* Extract MCC response */
4751	if (resp_v2) {
4752		mcc_resp = (void *)pkt->data;
4753		mcc = mcc_resp->mcc;
4754		n_channels =  le32toh(mcc_resp->n_channels);
4755	} else {
4756		mcc_resp_v1 = (void *)pkt->data;
4757		mcc = mcc_resp_v1->mcc;
4758		n_channels =  le32toh(mcc_resp_v1->n_channels);
4759	}
4760
4761	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4762	if (mcc == 0)
4763		mcc = 0x3030;  /* "00" - world */
4764
4765	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4766	    "regulatory domain '%c%c' (%d channels available)\n",
4767	    mcc >> 8, mcc & 0xff, n_channels);
4768#endif
4769	iwm_free_resp(sc, &hcmd);
4770
4771	return 0;
4772}
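
/*
 * Worked example of the MCC encoding above: the alpha2 country code
 * is packed big-endian into 16 bits, so "ZZ" becomes
 * ('Z' << 8) | 'Z' = 0x5a5a, and the world domain "00" is
 * ('0' << 8) | '0' = 0x3030 -- the same value the workaround above
 * substitutes when the firmware reports 0.
 */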
4773
4774static void
4775iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4776{
4777	struct iwm_host_cmd cmd = {
4778		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4779		.len = { sizeof(uint32_t), },
4780		.data = { &backoff, },
4781	};
4782
4783	if (iwm_send_cmd(sc, &cmd) != 0) {
4784		device_printf(sc->sc_dev,
4785		    "failed to change thermal tx backoff\n");
4786	}
4787}
4788
4789static int
4790iwm_init_hw(struct iwm_softc *sc)
4791{
4792	struct ieee80211com *ic = &sc->sc_ic;
4793	int error, i, ac;
4794
4795	if ((error = iwm_start_hw(sc)) != 0) {
4796		printf("iwm_start_hw: failed %d\n", error);
4797		return error;
4798	}
4799
4800	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4801		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4802		return error;
4803	}
4804
4805	/*
4806	 * We should stop and restart the HW, since the INIT
4807	 * image was just loaded.
4808	 */
4809	iwm_stop_device(sc);
4810	sc->sc_ps_disabled = FALSE;
4811	if ((error = iwm_start_hw(sc)) != 0) {
4812		device_printf(sc->sc_dev, "could not initialize hardware\n");
4813		return error;
4814	}
4815
4816	/* Restart, this time with the regular firmware. */
4817	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4818	if (error) {
4819		device_printf(sc->sc_dev, "could not load firmware\n");
4820		goto error;
4821	}
4822
4823	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4824		device_printf(sc->sc_dev, "bt init conf failed\n");
4825		goto error;
4826	}
4827
4828	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4829	if (error != 0) {
4830		device_printf(sc->sc_dev, "antenna config failed\n");
4831		goto error;
4832	}
4833
4834	/* Send phy db control command and then phy db calibration */
4835	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4836		goto error;
4837
4838	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4839		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4840		goto error;
4841	}
4842
4843	/* Add auxiliary station for scanning */
4844	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4845		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4846		goto error;
4847	}
4848
4849	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4850		/*
4851		 * The channel used here isn't relevant as it's
4852		 * going to be overwritten in the other flows.
4853		 * For now use the first channel we have.
4854		 */
4855		if ((error = iwm_mvm_phy_ctxt_add(sc,
4856		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4857			goto error;
4858	}
4859
4860	/* Initialize tx backoffs to the minimum. */
4861	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4862		iwm_mvm_tt_tx_backoff(sc, 0);
4863
4864	error = iwm_mvm_power_update_device(sc);
4865	if (error)
4866		goto error;
4867
4868	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4869		goto error;
4870
4871	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4872		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4873			goto error;
4874	}
4875
4876	/* Enable Tx queues. */
4877	for (ac = 0; ac < WME_NUM_AC; ac++) {
4878		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4879		    iwm_mvm_ac_to_tx_fifo[ac]);
4880		if (error)
4881			goto error;
4882	}
4883
4884	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4885		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4886		goto error;
4887	}
4888
4889	return 0;
4890
4891 error:
4892	iwm_stop_device(sc);
4893	return error;
4894}
4895
4896/* Allow multicast from our BSSID. */
4897static int
4898iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4899{
4900	struct ieee80211_node *ni = vap->iv_bss;
4901	struct iwm_mcast_filter_cmd *cmd;
4902	size_t size;
4903	int error;
4904
4905	size = roundup(sizeof(*cmd), 4);
4906	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4907	if (cmd == NULL)
4908		return ENOMEM;
4909	cmd->filter_own = 1;
4910	cmd->port_id = 0;
4911	cmd->count = 0;
4912	cmd->pass_all = 1;
4913	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4914
4915	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4916	    IWM_CMD_SYNC, size, cmd);
4917	free(cmd, M_DEVBUF);
4918
4919	return (error);
4920}
4921
4922/*
4923 * ifnet interfaces
4924 */
4925
4926static void
4927iwm_init(struct iwm_softc *sc)
4928{
4929	int error;
4930
4931	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4932		return;
4933	}
4934	sc->sc_generation++;
4935	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4936
4937	if ((error = iwm_init_hw(sc)) != 0) {
4938		printf("iwm_init_hw failed %d\n", error);
4939		iwm_stop(sc);
4940		return;
4941	}
4942
4943	/*
4944	 * Ok, firmware loaded and we are up and running.
4945	 */
4946	sc->sc_flags |= IWM_FLAG_HW_INITED;
4947	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4948}
4949
4950static int
4951iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4952{
4953	struct iwm_softc *sc;
4954	int error;
4955
4956	sc = ic->ic_softc;
4957
4958	IWM_LOCK(sc);
4959	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4960		IWM_UNLOCK(sc);
4961		return (ENXIO);
4962	}
4963	error = mbufq_enqueue(&sc->sc_snd, m);
4964	if (error) {
4965		IWM_UNLOCK(sc);
4966		return (error);
4967	}
4968	iwm_start(sc);
4969	IWM_UNLOCK(sc);
4970	return (0);
4971}
4972
4973/*
4974 * Dequeue packets from sendq and call send.
4975 */
4976static void
4977iwm_start(struct iwm_softc *sc)
4978{
4979	struct ieee80211_node *ni;
4980	struct mbuf *m;
4981	int ac = 0;
4982
4983	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4984	while (sc->qfullmsk == 0 &&
4985		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4986		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4987		if (iwm_tx(sc, m, ni, ac) != 0) {
4988			if_inc_counter(ni->ni_vap->iv_ifp,
4989			    IFCOUNTER_OERRORS, 1);
4990			ieee80211_free_node(ni);
4991			continue;
4992		}
4993		sc->sc_tx_timer = 15;
4994	}
4995	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4996}
4997
4998static void
4999iwm_stop(struct iwm_softc *sc)
5000{
5001
5002	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5003	sc->sc_flags |= IWM_FLAG_STOPPED;
5004	sc->sc_generation++;
5005	iwm_led_blink_stop(sc);
5006	sc->sc_tx_timer = 0;
5007	iwm_stop_device(sc);
5008	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5009}
5010
5011static void
5012iwm_watchdog(void *arg)
5013{
5014	struct iwm_softc *sc = arg;
5015	struct ieee80211com *ic = &sc->sc_ic;
5016
5017	if (sc->sc_tx_timer > 0) {
5018		if (--sc->sc_tx_timer == 0) {
5019			device_printf(sc->sc_dev, "device timeout\n");
5020#ifdef IWM_DEBUG
5021			iwm_nic_error(sc);
5022#endif
5023			ieee80211_restart_all(ic);
5024			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5025			return;
5026		}
5027	}
5028	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5029}
5030
5031static void
5032iwm_parent(struct ieee80211com *ic)
5033{
5034	struct iwm_softc *sc = ic->ic_softc;
5035	int startall = 0;
5036
5037	IWM_LOCK(sc);
5038	if (ic->ic_nrunning > 0) {
5039		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5040			iwm_init(sc);
5041			startall = 1;
5042		}
5043	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5044		iwm_stop(sc);
5045	IWM_UNLOCK(sc);
5046	if (startall)
5047		ieee80211_start_all(ic);
5048}
5049
5050/*
5051 * The interrupt side of things
5052 */
5053
5054/*
5055 * error dumping routines are from iwlwifi/mvm/utils.c
5056 */
5057
5058/*
5059 * Note: This structure is read from the device with IO accesses,
5060 * and the reading already does the endian conversion. As it is
5061 * read with uint32_t-sized accesses, any members with a different size
5062 * need to be ordered correctly though!
5063 */
5064struct iwm_error_event_table {
5065	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5066	uint32_t error_id;		/* type of error */
5067	uint32_t trm_hw_status0;	/* TRM HW status */
5068	uint32_t trm_hw_status1;	/* TRM HW status */
5069	uint32_t blink2;		/* branch link */
5070	uint32_t ilink1;		/* interrupt link */
5071	uint32_t ilink2;		/* interrupt link */
5072	uint32_t data1;		/* error-specific data */
5073	uint32_t data2;		/* error-specific data */
5074	uint32_t data3;		/* error-specific data */
5075	uint32_t bcon_time;		/* beacon timer */
5076	uint32_t tsf_low;		/* network timestamp function timer */
5077	uint32_t tsf_hi;		/* network timestamp function timer */
5078	uint32_t gp1;		/* GP1 timer register */
5079	uint32_t gp2;		/* GP2 timer register */
5080	uint32_t fw_rev_type;	/* firmware revision type */
5081	uint32_t major;		/* uCode version major */
5082	uint32_t minor;		/* uCode version minor */
5083	uint32_t hw_ver;		/* HW Silicon version */
5084	uint32_t brd_ver;		/* HW board version */
5085	uint32_t log_pc;		/* log program counter */
5086	uint32_t frame_ptr;		/* frame pointer */
5087	uint32_t stack_ptr;		/* stack pointer */
5088	uint32_t hcmd;		/* last host command header */
5089	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5090				 * rxtx_flag */
5091	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5092				 * host_flag */
5093	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5094				 * enc_flag */
5095	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5096				 * time_flag */
5097	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5098				 * wico interrupt */
5099	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5100	uint32_t wait_event;		/* wait event() caller address */
5101	uint32_t l2p_control;	/* L2pControlField */
5102	uint32_t l2p_duration;	/* L2pDurationField */
5103	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5104	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5105	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5106				 * (LMPM_PMG_SEL) */
5107	uint32_t u_timestamp;	/* timestamp of the compilation date
5108				 * and time */
5109	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5110} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
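
/*
 * Note for the error dumps below: iwm_read_mem() is called with a
 * count of 32-bit words, hence the sizeof(table)/sizeof(uint32_t)
 * arithmetic there -- this 38-dword table is fetched with a count
 * of 38, not with its 152-byte size.
 */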
5111
5112/*
5113 * UMAC error struct - relevant starting from family 8000 chip.
5114 * Note: This structure is read from the device with IO accesses,
5115 * and the reading already does the endian conversion. As it is
5116 * read with u32-sized accesses, any members with a different size
5117 * need to be ordered correctly though!
5118 */
5119struct iwm_umac_error_event_table {
5120	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5121	uint32_t error_id;	/* type of error */
5122	uint32_t blink1;	/* branch link */
5123	uint32_t blink2;	/* branch link */
5124	uint32_t ilink1;	/* interrupt link */
5125	uint32_t ilink2;	/* interrupt link */
5126	uint32_t data1;		/* error-specific data */
5127	uint32_t data2;		/* error-specific data */
5128	uint32_t data3;		/* error-specific data */
5129	uint32_t umac_major;
5130	uint32_t umac_minor;
5131	uint32_t frame_pointer;	/* core register 27 */
5132	uint32_t stack_pointer;	/* core register 28 */
5133	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5134	uint32_t nic_isr_pref;	/* ISR status register */
5135} __packed;
5136
5137#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5138#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5139
5140#ifdef IWM_DEBUG
5141struct {
5142	const char *name;
5143	uint8_t num;
5144} advanced_lookup[] = {
5145	{ "NMI_INTERRUPT_WDG", 0x34 },
5146	{ "SYSASSERT", 0x35 },
5147	{ "UCODE_VERSION_MISMATCH", 0x37 },
5148	{ "BAD_COMMAND", 0x38 },
5149	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5150	{ "FATAL_ERROR", 0x3D },
5151	{ "NMI_TRM_HW_ERR", 0x46 },
5152	{ "NMI_INTERRUPT_TRM", 0x4C },
5153	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5154	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5155	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5156	{ "NMI_INTERRUPT_HOST", 0x66 },
5157	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5158	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5159	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5160	{ "ADVANCED_SYSASSERT", 0 },
5161};
5162
5163static const char *
5164iwm_desc_lookup(uint32_t num)
5165{
5166	int i;
5167
5168	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5169		if (advanced_lookup[i].num == num)
5170			return advanced_lookup[i].name;
5171
5172	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5173	return advanced_lookup[i].name;
5174}
5175
5176static void
5177iwm_nic_umac_error(struct iwm_softc *sc)
5178{
5179	struct iwm_umac_error_event_table table;
5180	uint32_t base;
5181
5182	base = sc->umac_error_event_table;
5183
5184	if (base < 0x800000) {
5185		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5186		    base);
5187		return;
5188	}
5189
5190	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5191		device_printf(sc->sc_dev, "reading errlog failed\n");
5192		return;
5193	}
5194
5195	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5196		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5197		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5198		    sc->sc_flags, table.valid);
5199	}
5200
5201	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5202		iwm_desc_lookup(table.error_id));
5203	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5204	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5205	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5206	    table.ilink1);
5207	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5208	    table.ilink2);
5209	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5210	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5211	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5212	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5213	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5214	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5215	    table.frame_pointer);
5216	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5217	    table.stack_pointer);
5218	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5219	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5220	    table.nic_isr_pref);
5221}
5222
5223/*
5224 * Support for dumping the error log seemed like a good idea ...
5225 * but it's mostly hex junk and the only sensible thing is the
5226 * hw/ucode revision (which we know anyway).  Since it's here,
5227 * I'll just leave it in, just in case e.g. the Intel guys want to
5228 * help us decipher some "ADVANCED_SYSASSERT" later.
5229 */
5230static void
5231iwm_nic_error(struct iwm_softc *sc)
5232{
5233	struct iwm_error_event_table table;
5234	uint32_t base;
5235
5236	device_printf(sc->sc_dev, "dumping device error log\n");
5237	base = sc->error_event_table;
5238	if (base < 0x800000) {
5239		device_printf(sc->sc_dev,
5240		    "Invalid error log pointer 0x%08x\n", base);
5241		return;
5242	}
5243
5244	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5245		device_printf(sc->sc_dev, "reading errlog failed\n");
5246		return;
5247	}
5248
5249	if (!table.valid) {
5250		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5251		return;
5252	}
5253
5254	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5255		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5256		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5257		    sc->sc_flags, table.valid);
5258	}
5259
5260	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5261	    iwm_desc_lookup(table.error_id));
5262	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5263	    table.trm_hw_status0);
5264	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5265	    table.trm_hw_status1);
5266	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5267	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5268	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5269	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5270	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5271	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5272	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5273	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5274	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5275	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5276	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5277	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5278	    table.fw_rev_type);
5279	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5280	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5281	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5282	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5283	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5284	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5285	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5286	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5287	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5288	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5289	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5290	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5291	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5292	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5293	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5294	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5295	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5296	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5297	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5298
5299	if (sc->umac_error_event_table)
5300		iwm_nic_umac_error(sc);
5301}
5302#endif
5303
5304static void
5305iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5306{
5307	struct ieee80211com *ic = &sc->sc_ic;
5308	struct iwm_cmd_response *cresp;
5309	struct mbuf *m1;
5310	uint32_t offset = 0;
5311	uint32_t maxoff = IWM_RBUF_SIZE;
5312	uint32_t nextoff;
5313	boolean_t stolen = FALSE;
5314
5315#define HAVEROOM(a)	\
5316    ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
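
/*
 * Worked example of the frame walk below, assuming
 * IWM_FH_RSCSR_FRAME_ALIGN is 0x40: a 52-byte packet plus the 4-byte
 * status word advances the offset by roundup2(56, 64) = 64 bytes to
 * reach the next packet header in the receive buffer.
 */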
5317
5318	while (HAVEROOM(offset)) {
5319		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5320		    offset);
5321		int qid, idx, code, len;
5322
5323		qid = pkt->hdr.qid;
5324		idx = pkt->hdr.idx;
5325
5326		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5327
5328		/*
5329		 * We randomly get these from the firmware, no idea why.
5330		 * They at least seem harmless, so just ignore them for now.
5331		 */
5332		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5333		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5334			break;
5335		}
5336
5337		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5338		    "rx packet qid=%d idx=%d type=%x\n",
5339		    qid & ~0x80, pkt->hdr.idx, code);
5340
5341		len = iwm_rx_packet_len(pkt);
5342		len += sizeof(uint32_t); /* account for status word */
5343		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5344
5345		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5346
5347		switch (code) {
5348		case IWM_REPLY_RX_PHY_CMD:
5349			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
5350			break;
5351
5352		case IWM_REPLY_RX_MPDU_CMD: {
5353			/*
5354			 * If this is the last frame in the RX buffer, we
5355			 * can directly feed the mbuf to the sharks here.
5356			 */
5357			struct iwm_rx_packet *nextpkt = mtodoff(m,
5358			    struct iwm_rx_packet *, nextoff);
5359			if (!HAVEROOM(nextoff) ||
5360			    (nextpkt->hdr.code == 0 &&
5361			     (nextpkt->hdr.qid & ~0x80) == 0 &&
5362			     nextpkt->hdr.idx == 0) ||
5363			    (nextpkt->len_n_flags ==
5364			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5365				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
5366					stolen = FALSE;
5367					/* Make sure we abort the loop */
5368					nextoff = maxoff;
5369				}
5370				break;
5371			}
5372
5373			/*
5374			 * Use m_copym instead of m_split, because that
5375			 * makes it easier to keep a valid rx buffer in
5376			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
5377			 *
5378			 * We need to start m_copym() at offset 0, to get the
5379			 * M_PKTHDR flag preserved.
5380			 */
5381			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5382			if (m1) {
5383				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
5384					stolen = TRUE;
5385				else
5386					m_freem(m1);
5387			}
5388			break;
5389		}
5390
5391		case IWM_TX_CMD:
5392			iwm_mvm_rx_tx_cmd(sc, pkt);
5393			break;
5394
5395		case IWM_MISSED_BEACONS_NOTIFICATION: {
5396			struct iwm_missed_beacons_notif *resp;
5397			int missed;
5398
5399			/* XXX look at mac_id to determine interface ID */
5400			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5401
5402			resp = (void *)pkt->data;
5403			missed = le32toh(resp->consec_missed_beacons);
5404
5405			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5406			    "%s: MISSED_BEACON: mac_id=%d, "
5407			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5408			    "num_rx=%d\n",
5409			    __func__,
5410			    le32toh(resp->mac_id),
5411			    le32toh(resp->consec_missed_beacons_since_last_rx),
5412			    le32toh(resp->consec_missed_beacons),
5413			    le32toh(resp->num_expected_beacons),
5414			    le32toh(resp->num_recvd_beacons));
5415
5416			/* Be paranoid */
5417			if (vap == NULL)
5418				break;
5419
5420			/* XXX no net80211 locking? */
5421			if (vap->iv_state == IEEE80211_S_RUN &&
5422			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5423				if (missed > vap->iv_bmissthreshold) {
5424					/* XXX bad locking; turn into task */
5425					IWM_UNLOCK(sc);
5426					ieee80211_beacon_miss(ic);
5427					IWM_LOCK(sc);
5428				}
5429			}
5430
5431			break;
5432		}
5433
5434		case IWM_MFUART_LOAD_NOTIFICATION:
5435			break;
5436
5437		case IWM_MVM_ALIVE:
5438			break;
5439
5440		case IWM_CALIB_RES_NOTIF_PHY_DB:
5441			break;
5442
5443		case IWM_STATISTICS_NOTIFICATION: {
5444			struct iwm_notif_statistics *stats;
5445			stats = (void *)pkt->data;
5446			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5447			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5448			break;
5449		}
5450
5451		case IWM_NVM_ACCESS_CMD:
5452		case IWM_MCC_UPDATE_CMD:
5453			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5454				memcpy(sc->sc_cmd_resp,
5455				    pkt, sizeof(sc->sc_cmd_resp));
5456			}
5457			break;
5458
5459		case IWM_MCC_CHUB_UPDATE_CMD: {
5460			struct iwm_mcc_chub_notif *notif;
5461			notif = (void *)pkt->data;
5462
5463			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5464			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5465			sc->sc_fw_mcc[2] = '\0';
5466			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5467			    "fw source %d sent CC '%s'\n",
5468			    notif->source_id, sc->sc_fw_mcc);
5469			break;
5470		}
5471
5472		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5473		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5474				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5475			struct iwm_dts_measurement_notif_v1 *notif;
5476
5477			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5478				device_printf(sc->sc_dev,
5479				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5480				break;
5481			}
5482			notif = (void *)pkt->data;
5483			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5484			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5485			    notif->temp);
5486			break;
5487		}
5488
5489		case IWM_PHY_CONFIGURATION_CMD:
5490		case IWM_TX_ANT_CONFIGURATION_CMD:
5491		case IWM_ADD_STA:
5492		case IWM_MAC_CONTEXT_CMD:
5493		case IWM_REPLY_SF_CFG_CMD:
5494		case IWM_POWER_TABLE_CMD:
5495		case IWM_PHY_CONTEXT_CMD:
5496		case IWM_BINDING_CONTEXT_CMD:
5497		case IWM_TIME_EVENT_CMD:
5498		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5499		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5500		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
5501		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5502		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5503		case IWM_REPLY_BEACON_FILTERING_CMD:
5504		case IWM_MAC_PM_POWER_TABLE:
5505		case IWM_TIME_QUOTA_CMD:
5506		case IWM_REMOVE_STA:
5507		case IWM_TXPATH_FLUSH:
5508		case IWM_LQ_CMD:
5509		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
5510				 IWM_FW_PAGING_BLOCK_CMD):
5511		case IWM_BT_CONFIG:
5512		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5513			cresp = (void *)pkt->data;
5514			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5515				memcpy(sc->sc_cmd_resp,
5516				    pkt, sizeof(*pkt)+sizeof(*cresp));
5517			}
5518			break;
5519
5520		/* ignore */
5521		case IWM_PHY_DB_CMD:
5522			break;
5523
5524		case IWM_INIT_COMPLETE_NOTIF:
5525			break;
5526
5527		case IWM_SCAN_OFFLOAD_COMPLETE: {
5528			struct iwm_periodic_scan_complete *notif;
5529			notif = (void *)pkt->data;
5530			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5531				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5532				ieee80211_runtask(ic, &sc->sc_es_task);
5533			}
5534			break;
5535		}
5536
5537		case IWM_SCAN_ITERATION_COMPLETE: {
5538			struct iwm_lmac_scan_complete_notif *notif;
5539			notif = (void *)pkt->data;
5540			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5541			break;
5542		}
5543
5544		case IWM_SCAN_COMPLETE_UMAC: {
5545			struct iwm_umac_scan_complete *notif;
5546			notif = (void *)pkt->data;
5547
5548			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5549			    "UMAC scan complete, status=0x%x\n",
5550			    notif->status);
5551			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5552				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5553				ieee80211_runtask(ic, &sc->sc_es_task);
5554			}
5555			break;
5556		}
5557
5558		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5559			struct iwm_umac_scan_iter_complete_notif *notif;
5560			notif = (void *)pkt->data;
5561
5562			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5563			    "complete, status=0x%x, %d channels scanned\n",
5564			    notif->status, notif->scanned_channels);
5565			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5566			break;
5567		}
5568
5569		case IWM_REPLY_ERROR: {
5570			struct iwm_error_resp *resp;
5571			resp = (void *)pkt->data;
5572
5573			device_printf(sc->sc_dev,
5574			    "firmware error 0x%x, cmd 0x%x\n",
5575			    le32toh(resp->error_type),
5576			    resp->cmd_id);
5577			break;
5578		}
5579
5580		case IWM_TIME_EVENT_NOTIFICATION: {
5581			struct iwm_time_event_notif *notif;
5582			notif = (void *)pkt->data;
5583
5584			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5585			    "TE notif status = 0x%x action = 0x%x\n",
5586			    notif->status, notif->action);
5587			break;
5588		}
5589
5590		/*
5591		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5592		 * messages. Just ignore them for now.
5593		 */
5594		case IWM_DEBUG_LOG_MSG:
5595			break;
5596
5597		case IWM_MCAST_FILTER_CMD:
5598			break;
5599
5600		case IWM_SCD_QUEUE_CFG: {
5601			struct iwm_scd_txq_cfg_rsp *rsp;
5602			rsp = (void *)pkt->data;
5603
5604			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5605			    "queue cfg token=0x%x sta_id=%d "
5606			    "tid=%d scd_queue=%d\n",
5607			    rsp->token, rsp->sta_id, rsp->tid,
5608			    rsp->scd_queue);
5609			break;
5610		}
5611
5612		default:
5613			device_printf(sc->sc_dev,
5614			    "frame %d/%d %x UNHANDLED (this should "
5615			    "not happen)\n", qid & ~0x80, idx,
5616			    pkt->len_n_flags);
5617			break;
5618		}
5619
5620		/*
5621		 * Why test bit 0x80?  The Linux driver:
5622		 *
5623		 * There is one exception:  uCode sets bit 15 when it
5624		 * originates the response/notification, i.e. when the
5625		 * response/notification is not a direct response to a
5626		 * command sent by the driver.  For example, uCode issues
5627		 * IWM_REPLY_RX when it sends a received frame to the driver;
5628		 * it is not a direct response to any driver command.
5629		 *
5630		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5631		 * uses a slightly different format for pkt->hdr, and "qid"
5632		 * is actually the upper byte of a two-byte field.
5633		 */
5634		if (!(qid & (1 << 7)))
5635			iwm_cmd_done(sc, pkt);
5636
5637		offset = nextoff;
5638	}
5639	if (stolen)
5640		m_freem(m);
5641#undef HAVEROOM
5642}
5643
5644/*
5645 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5646 * Basic structure from if_iwn
5647 */
5648static void
5649iwm_notif_intr(struct iwm_softc *sc)
5650{
5651	uint16_t hw;
5652
5653	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5654	    BUS_DMASYNC_POSTREAD);
5655
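	/*
	 * closed_rb_num is the firmware's (12-bit) index of the rx
	 * buffer it most recently filled; entries from rxq.cur up to
	 * that index are ready to be processed.
	 */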
5656	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5657
5658	/*
5659	 * Process responses
5660	 */
5661	while (sc->rxq.cur != hw) {
5662		struct iwm_rx_ring *ring = &sc->rxq;
5663		struct iwm_rx_data *data = &ring->data[ring->cur];
5664
5665		bus_dmamap_sync(ring->data_dmat, data->map,
5666		    BUS_DMASYNC_POSTREAD);
5667
5668		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5669		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5670		iwm_handle_rxb(sc, data->m);
5671
5672		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5673	}
5674
5675	/*
5676	 * Tell the firmware that it can reuse the ring entries that
5677	 * we have just processed.
5678	 * Seems like the hardware gets upset unless we align
5679	 * the write by 8??
5680	 */
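	/*
	 * Step back one entry (with wraparound) and round down to a
	 * multiple of 8, e.g. hw == 37 yields a write pointer of 32.
	 */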
5681	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5682	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5683}
5684
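/*
 * Main interrupt handler: collect the pending interrupt causes, either
 * from the ICT table or from the INT/FH_INT_STATUS registers, and
 * service each one.
 */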
5685static void
5686iwm_intr(void *arg)
5687{
5688	struct iwm_softc *sc = arg;
5689	int handled = 0;
5690	int r1, r2, rv = 0;
5691	int isperiodic = 0;
5692
5693	IWM_LOCK(sc);
5694	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5695
5696	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5697		uint32_t *ict = sc->ict_dma.vaddr;
5698		uint32_t tmp;
5699
5700		tmp = le32toh(ict[sc->ict_cur]);
5701		if (!tmp)
5702			goto out_ena;
5703
5704		/*
5705		 * ok, there was something.  keep plowing until we have all.
5706		 */
5707		r1 = r2 = 0;
5708		while (tmp) {
5709			r1 |= tmp;
5710			ict[sc->ict_cur] = 0;
5711			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5712			tmp = le32toh(ict[sc->ict_cur]);
5713		}
5714
5715		/* this is where the fun begins.  don't ask */
5716		if (r1 == 0xffffffff)
5717			r1 = 0;
5718
5719		/* i am not expected to understand this */
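		/*
		 * Per the Linux iwlwifi driver: interrupt coalescing can
		 * spuriously clear the RX bit, but bits 18/19 stay set
		 * when that happens, so treat either of them as RX.  The
		 * ICT table also packs the 32 CSR_INT bits into 16 (low
		 * byte = bits 0-7, high byte = bits 24-31), which the
		 * expansion below undoes.
		 */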
5720		if (r1 & 0xc0000)
5721			r1 |= 0x8000;
5722		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5723	} else {
5724		r1 = IWM_READ(sc, IWM_CSR_INT);
5725		/* "hardware gone" (where, fishing?) */
5726		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5727			goto out;
5728		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5729	}
5730	if (r1 == 0 && r2 == 0) {
5731		goto out_ena;
5732	}
5733
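	/*
	 * Ack the interrupts we are handling; bits outside our mask are
	 * acked too (the Linux driver does the same), presumably so
	 * disabled sources cannot remain pending.
	 */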
5734	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5735
5736	/* Safely ignore these bits for debug checks below */
5737	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5738
5739	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5740		int i;
5741		struct ieee80211com *ic = &sc->sc_ic;
5742		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5743
5744#ifdef IWM_DEBUG
5745		iwm_nic_error(sc);
5746#endif
5747		/* Dump driver status (TX and RX rings) while we're here. */
5748		device_printf(sc->sc_dev, "driver status:\n");
5749		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5750			struct iwm_tx_ring *ring = &sc->txq[i];
5751			device_printf(sc->sc_dev,
5752			    "  tx ring %2d: qid=%-2d cur=%-3d "
5753			    "queued=%-3d\n",
5754			    i, ring->qid, ring->cur, ring->queued);
5755		}
5756		device_printf(sc->sc_dev,
5757		    "  rx ring: cur=%d\n", sc->rxq.cur);
5758		device_printf(sc->sc_dev,
5759		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5760
5761		/* Don't stop the device; just do a VAP restart */
5762		IWM_UNLOCK(sc);
5763
5764		if (vap == NULL) {
5765			printf("%s: null vap\n", __func__);
5766			return;
5767		}
5768
5769		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5770		    "restarting\n", __func__, vap->iv_state);
5771
5772		ieee80211_restart_all(ic);
5773		return;
5774	}
5775
5776	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5777		handled |= IWM_CSR_INT_BIT_HW_ERR;
5778		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5779		iwm_stop(sc);
5780		rv = 1;
5781		goto out;
5782	}
5783
5784	/* firmware chunk loaded */
5785	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5786		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5787		handled |= IWM_CSR_INT_BIT_FH_TX;
5788		sc->sc_fw_chunk_done = 1;
5789		wakeup(&sc->sc_fw);
5790	}
5791
5792	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5793		handled |= IWM_CSR_INT_BIT_RF_KILL;
5794		if (iwm_check_rfkill(sc)) {
5795			device_printf(sc->sc_dev,
5796			    "%s: rfkill switch, disabling interface\n",
5797			    __func__);
5798			iwm_stop(sc);
5799		}
5800	}
5801
5802	/*
5803	 * The Linux driver uses periodic interrupts to avoid races.
5804	 * We cargo-cult like it's going out of fashion.
5805	 */
5806	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5807		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5808		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5809		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5810			IWM_WRITE_1(sc,
5811			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5812		isperiodic = 1;
5813	}
5814
5815	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5816		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5817		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5818
5819		iwm_notif_intr(sc);
5820
5821		/* enable periodic interrupt, see above */
5822		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5823			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5824			    IWM_CSR_INT_PERIODIC_ENA);
5825	}
5826
5827	if (__predict_false(r1 & ~handled))
5828		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5829		    "%s: unhandled interrupts: %x\n", __func__, r1);
5830	rv = 1;
5831
5832 out_ena:
5833	iwm_restore_interrupts(sc);
5834 out:
5835	IWM_UNLOCK(sc);
5836	return;
5837}
5838
5839/*
5840 * Autoconf glue-sniffing
5841 */
5842#define	PCI_VENDOR_INTEL		0x8086
5843#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5844#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5845#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5846#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5847#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5848#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5849#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5850#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5851#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5852#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5853
5854static const struct iwm_devices {
5855	uint16_t		device;
5856	const struct iwm_cfg	*cfg;
5857} iwm_devices[] = {
5858	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5859	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5860	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5861	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5862	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5863	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5864	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5865	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5866	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5867	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5868};
5869
5870static int
5871iwm_probe(device_t dev)
5872{
5873	int i;
5874
5875	for (i = 0; i < nitems(iwm_devices); i++) {
5876		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5877		    pci_get_device(dev) == iwm_devices[i].device) {
5878			device_set_desc(dev, iwm_devices[i].cfg->name);
5879			return (BUS_PROBE_DEFAULT);
5880		}
5881	}
5882
5883	return (ENXIO);
5884}
5885
5886static int
5887iwm_dev_check(device_t dev)
5888{
5889	struct iwm_softc *sc;
5890	uint16_t devid;
5891	int i;
5892
5893	sc = device_get_softc(dev);
5894
5895	devid = pci_get_device(dev);
5896	for (i = 0; i < nitems(iwm_devices); i++) {
5897		if (iwm_devices[i].device == devid) {
5898			sc->cfg = iwm_devices[i].cfg;
5899			return (0);
5900		}
5901	}
5902	device_printf(dev, "unknown adapter type\n");
5903	return (ENXIO);
5904}
5905
5906/* PCI registers */
5907#define PCI_CFG_RETRY_TIMEOUT	0x041
5908
5909static int
5910iwm_pci_attach(device_t dev)
5911{
5912	struct iwm_softc *sc;
5913	int count, error, rid;
5914	uint16_t reg;
5915
5916	sc = device_get_softc(dev);
5917
5918	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5919	 * PCI Tx retries from interfering with C3 CPU state */
5920	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5921
5922	/* Enable bus-mastering and hardware bug workaround. */
5923	pci_enable_busmaster(dev);
5924	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5925	/* if !MSI */
5926	if (reg & PCIM_STATUS_INTxSTATE) {
5927		reg &= ~PCIM_STATUS_INTxSTATE;
5928	}
5929	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5930
5931	rid = PCIR_BAR(0);
5932	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5933	    RF_ACTIVE);
5934	if (sc->sc_mem == NULL) {
5935		device_printf(sc->sc_dev, "can't map mem space\n");
5936		return (ENXIO);
5937	}
5938	sc->sc_st = rman_get_bustag(sc->sc_mem);
5939	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5940
5941	/* Install interrupt handler. */
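	/*
	 * Prefer a single MSI vector (resource id 1); fall back to the
	 * shared legacy INTx line at rid 0 if MSI allocation fails.
	 */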
5942	count = 1;
5943	rid = 0;
5944	if (pci_alloc_msi(dev, &count) == 0)
5945		rid = 1;
5946	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5947	    (rid != 0 ? 0 : RF_SHAREABLE));
5948	if (sc->sc_irq == NULL) {
5949		device_printf(dev, "can't map interrupt\n");
5950		return (ENXIO);
5951	}
5952	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5953	    NULL, iwm_intr, sc, &sc->sc_ih);
5954	if (error != 0 || sc->sc_ih == NULL) {
5955		device_printf(dev, "can't establish interrupt\n");
5956		return (ENXIO);
5957	}
5958	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5959
5960	return (0);
5961}
5962
5963static void
5964iwm_pci_detach(device_t dev)
5965{
5966	struct iwm_softc *sc = device_get_softc(dev);
5967
5968	if (sc->sc_irq != NULL) {
5969		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5970		bus_release_resource(dev, SYS_RES_IRQ,
5971		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5972		pci_release_msi(dev);
5973	}
5974	if (sc->sc_mem != NULL)
5975		bus_release_resource(dev, SYS_RES_MEMORY,
5976		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5977}
5978
5981static int
5982iwm_attach(device_t dev)
5983{
5984	struct iwm_softc *sc = device_get_softc(dev);
5985	struct ieee80211com *ic = &sc->sc_ic;
5986	int error;
5987	int txq_i, i;
5988
5989	sc->sc_dev = dev;
5990	sc->sc_attached = 1;
5991	IWM_LOCK_INIT(sc);
5992	mbufq_init(&sc->sc_snd, ifqmaxlen);
5993	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5994	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5995	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5996
5997	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5998	if (sc->sc_notif_wait == NULL) {
5999		device_printf(dev, "failed to init notification wait struct\n");
6000		goto fail;
6001	}
6002
6003	/* Init phy db */
6004	sc->sc_phy_db = iwm_phy_db_init(sc);
6005	if (!sc->sc_phy_db) {
6006		device_printf(dev, "Cannot init phy_db\n");
6007		goto fail;
6008	}
6009
6010	/* PCI attach */
6011	error = iwm_pci_attach(dev);
6012	if (error != 0)
6013		goto fail;
6014
6015	sc->sc_wantresp = -1;
6016
6017	/* Check device type */
6018	error = iwm_dev_check(dev);
6019	if (error != 0)
6020		goto fail;
6021
6022	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6023	/*
6024	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6025	 * changed, and now the revision step also includes bit 0-1 (no more
6026	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6027	 * in the old format.
6028	 */
6029	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
6030		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6031				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
6032
6033	if (iwm_prepare_card_hw(sc) != 0) {
6034		device_printf(dev, "could not initialize hardware\n");
6035		goto fail;
6036	}
6037
6038	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
6039		int ret;
6040		uint32_t hw_step;
6041
6042		/*
6043		 * In order to recognize C step the driver should read the
6044		 * chip version id located at the AUX bus MISC address.
6045		 */
6046		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6047			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6048		DELAY(2);
6049
6050		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6051				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6052				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6053				   25000);
6054		if (!ret) {
6055			device_printf(sc->sc_dev,
6056			    "Failed to wake up the nic\n");
6057			goto fail;
6058		}
6059
6060		if (iwm_nic_lock(sc)) {
6061			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6062			hw_step |= IWM_ENABLE_WFPM;
6063			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6064			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6065			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6066			if (hw_step == 0x3)
6067				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6068						(IWM_SILICON_C_STEP << 2);
6069			iwm_nic_unlock(sc);
6070		} else {
6071			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6072			goto fail;
6073		}
6074	}
6075
6076	/* special-case 7265D, it has the same PCI IDs. */
6077	if (sc->cfg == &iwm7265_cfg &&
6078	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6079		sc->cfg = &iwm7265d_cfg;
6080	}
6081
6082	/* Allocate DMA memory for firmware transfers. */
6083	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6084		device_printf(dev, "could not allocate memory for firmware\n");
6085		goto fail;
6086	}
6087
6088	/* Allocate "Keep Warm" page. */
6089	if ((error = iwm_alloc_kw(sc)) != 0) {
6090		device_printf(dev, "could not allocate keep warm page\n");
6091		goto fail;
6092	}
6093
6094	/* We use ICT interrupts */
6095	if ((error = iwm_alloc_ict(sc)) != 0) {
6096		device_printf(dev, "could not allocate ICT table\n");
6097		goto fail;
6098	}
6099
6100	/* Allocate TX scheduler "rings". */
6101	if ((error = iwm_alloc_sched(sc)) != 0) {
6102		device_printf(dev, "could not allocate TX scheduler rings\n");
6103		goto fail;
6104	}
6105
6106	/* Allocate TX rings */
6107	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6108		if ((error = iwm_alloc_tx_ring(sc,
6109		    &sc->txq[txq_i], txq_i)) != 0) {
6110			device_printf(dev,
6111			    "could not allocate TX ring %d\n",
6112			    txq_i);
6113			goto fail;
6114		}
6115	}
6116
6117	/* Allocate RX ring. */
6118	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6119		device_printf(dev, "could not allocate RX ring\n");
6120		goto fail;
6121	}
6122
6123	/* Clear pending interrupts. */
6124	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6125
6126	ic->ic_softc = sc;
6127	ic->ic_name = device_get_nameunit(sc->sc_dev);
6128	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6129	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6130
6131	/* Set device capabilities. */
6132	ic->ic_caps =
6133	    IEEE80211_C_STA |
6134	    IEEE80211_C_WPA |		/* WPA/RSN */
6135	    IEEE80211_C_WME |
6136	    IEEE80211_C_PMGT |
6137	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6138	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6139//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6140	    ;
6141	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6142		sc->sc_phyctxt[i].id = i;
6143		sc->sc_phyctxt[i].color = 0;
6144		sc->sc_phyctxt[i].ref = 0;
6145		sc->sc_phyctxt[i].channel = NULL;
6146	}
6147
6148	/* Default noise floor */
6149	sc->sc_noise = -96;
6150
6151	/* Max RSSI */
6152	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6153
6154	sc->sc_preinit_hook.ich_func = iwm_preinit;
6155	sc->sc_preinit_hook.ich_arg = sc;
6156	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6157		device_printf(dev, "config_intrhook_establish failed\n");
6158		goto fail;
6159	}
6160
6161#ifdef IWM_DEBUG
6162	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6163	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6164	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6165#endif
6166
6167	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6168	    "<-%s\n", __func__);
6169
6170	return 0;
6171
6172	/* Free allocated memory if something failed during attachment. */
6173fail:
6174	iwm_detach_local(sc, 0);
6175
6176	return ENXIO;
6177}
6178
6179static int
6180iwm_is_valid_ether_addr(uint8_t *addr)
6181{
6182	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6183
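	/* Reject group (multicast) addresses and the all-zeros address. */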
6184	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6185		return (FALSE);
6186
6187	return (TRUE);
6188}
6189
6190static int
6191iwm_wme_update(struct ieee80211com *ic)
6192{
6193#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
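	/* e.g. an ECWmin of 4 yields a CWmin of 2^4 - 1 = 15 slots */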
6194	struct iwm_softc *sc = ic->ic_softc;
6195	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6196	struct iwm_vap *ivp;
6197	struct iwm_node *in;
6198	struct wmeParams tmp[WME_NUM_AC];
6199	int aci, error;
6200
6201	if (vap == NULL)
6202		return (0);
	ivp = IWM_VAP(vap);
6203
6204	IEEE80211_LOCK(ic);
6205	for (aci = 0; aci < WME_NUM_AC; aci++)
6206		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6207	IEEE80211_UNLOCK(ic);
6208
6209	IWM_LOCK(sc);
6210	for (aci = 0; aci < WME_NUM_AC; aci++) {
6211		const struct wmeParams *ac = &tmp[aci];
6212		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6213		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6214		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6215		ivp->queue_params[aci].edca_txop =
6216		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6217	}
6218	ivp->have_wme = TRUE;
6219	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6220		in = IWM_NODE(vap->iv_bss);
6221		if (in->in_assoc) {
6222			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6223				device_printf(sc->sc_dev,
6224				    "%s: failed to update MAC\n", __func__);
6225			}
6226		}
6227	}
6228	IWM_UNLOCK(sc);
6229
6230	return (0);
6231#undef IWM_EXP2
6232}
6233
6234static void
6235iwm_preinit(void *arg)
6236{
6237	struct iwm_softc *sc = arg;
6238	device_t dev = sc->sc_dev;
6239	struct ieee80211com *ic = &sc->sc_ic;
6240	int error;
6241
6242	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6243	    "->%s\n", __func__);
6244
6245	IWM_LOCK(sc);
6246	if ((error = iwm_start_hw(sc)) != 0) {
6247		device_printf(dev, "could not initialize hardware\n");
6248		IWM_UNLOCK(sc);
6249		goto fail;
6250	}
6251
6252	error = iwm_run_init_mvm_ucode(sc, 1);
6253	iwm_stop_device(sc);
6254	if (error) {
6255		IWM_UNLOCK(sc);
6256		goto fail;
6257	}
6258	device_printf(dev,
6259	    "hw rev 0x%x, fw ver %s, address %s\n",
6260	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6261	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6262
6263	/* not all hardware can do 5GHz band */
6264	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6265		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6266		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6267	IWM_UNLOCK(sc);
6268
6269	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6270	    ic->ic_channels);
6271
6272	/*
6273	 * At this point we've committed - if we fail to do setup,
6274	 * we now also have to tear down the net80211 state.
6275	 */
6276	ieee80211_ifattach(ic);
6277	ic->ic_vap_create = iwm_vap_create;
6278	ic->ic_vap_delete = iwm_vap_delete;
6279	ic->ic_raw_xmit = iwm_raw_xmit;
6280	ic->ic_node_alloc = iwm_node_alloc;
6281	ic->ic_scan_start = iwm_scan_start;
6282	ic->ic_scan_end = iwm_scan_end;
6283	ic->ic_update_mcast = iwm_update_mcast;
6284	ic->ic_getradiocaps = iwm_init_channel_map;
6285	ic->ic_set_channel = iwm_set_channel;
6286	ic->ic_scan_curchan = iwm_scan_curchan;
6287	ic->ic_scan_mindwell = iwm_scan_mindwell;
6288	ic->ic_wme.wme_update = iwm_wme_update;
6289	ic->ic_parent = iwm_parent;
6290	ic->ic_transmit = iwm_transmit;
6291	iwm_radiotap_attach(sc);
6292	if (bootverbose)
6293		ieee80211_announce(ic);
6294
6295	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6296	    "<-%s\n", __func__);
6297	config_intrhook_disestablish(&sc->sc_preinit_hook);
6298
6299	return;
6300fail:
6301	config_intrhook_disestablish(&sc->sc_preinit_hook);
6302	iwm_detach_local(sc, 0);
6303}
6304
6305/*
6306 * Attach the interface to 802.11 radiotap.
6307 */
6308static void
6309iwm_radiotap_attach(struct iwm_softc *sc)
6310{
6311	struct ieee80211com *ic = &sc->sc_ic;
6312
6313	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6314	    "->%s begin\n", __func__);
6315	ieee80211_radiotap_attach(ic,
6316	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6317	    IWM_TX_RADIOTAP_PRESENT,
6318	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6319	    IWM_RX_RADIOTAP_PRESENT);
6320	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6321	    "->%s end\n", __func__);
6322}
6323
6324static struct ieee80211vap *
6325iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6326    enum ieee80211_opmode opmode, int flags,
6327    const uint8_t bssid[IEEE80211_ADDR_LEN],
6328    const uint8_t mac[IEEE80211_ADDR_LEN])
6329{
6330	struct iwm_vap *ivp;
6331	struct ieee80211vap *vap;
6332
6333	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6334		return NULL;
6335	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6336	vap = &ivp->iv_vap;
6337	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6338	vap->iv_bmissthreshold = 10;            /* override default */
6339	/* Override with driver methods. */
6340	ivp->iv_newstate = vap->iv_newstate;
6341	vap->iv_newstate = iwm_newstate;
6342
6343	ivp->id = IWM_DEFAULT_MACID;
6344	ivp->color = IWM_DEFAULT_COLOR;
6345
6346	ivp->have_wme = FALSE;
6347
6348	ieee80211_ratectl_init(vap);
6349	/* Complete setup. */
6350	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6351	    mac);
6352	ic->ic_opmode = opmode;
6353
6354	return vap;
6355}
6356
6357static void
6358iwm_vap_delete(struct ieee80211vap *vap)
6359{
6360	struct iwm_vap *ivp = IWM_VAP(vap);
6361
6362	ieee80211_ratectl_deinit(vap);
6363	ieee80211_vap_detach(vap);
6364	free(ivp, M_80211_VAP);
6365}
6366
6367static void
6368iwm_xmit_queue_drain(struct iwm_softc *sc)
6369{
6370	struct mbuf *m;
6371	struct ieee80211_node *ni;
6372
6373	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6374		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6375		ieee80211_free_node(ni);
6376		m_freem(m);
6377	}
6378}
6379
6380static void
6381iwm_scan_start(struct ieee80211com *ic)
6382{
6383	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6384	struct iwm_softc *sc = ic->ic_softc;
6385	int error;
6386
6387	IWM_LOCK(sc);
6388	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6389		/* This should not be possible */
6390		device_printf(sc->sc_dev,
6391		    "%s: Previous scan not completed yet\n", __func__);
6392	}
6393	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6394		error = iwm_mvm_umac_scan(sc);
6395	else
6396		error = iwm_mvm_lmac_scan(sc);
6397	if (error != 0) {
6398		device_printf(sc->sc_dev, "could not initiate scan\n");
6399		IWM_UNLOCK(sc);
6400		ieee80211_cancel_scan(vap);
6401	} else {
6402		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6403		iwm_led_blink_start(sc);
6404		IWM_UNLOCK(sc);
6405	}
6406}
6407
6408static void
6409iwm_scan_end(struct ieee80211com *ic)
6410{
6411	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6412	struct iwm_softc *sc = ic->ic_softc;
6413
6414	IWM_LOCK(sc);
6415	iwm_led_blink_stop(sc);
6416	if (vap->iv_state == IEEE80211_S_RUN)
6417		iwm_mvm_led_enable(sc);
6418	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6419		/*
6420		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
6421		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6422		 * taskqueue.
6423		 */
6424		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6425		iwm_mvm_scan_stop_wait(sc);
6426	}
6427	IWM_UNLOCK(sc);
6428
6429	/*
6430	 * Make sure we don't race, if sc_es_task is still enqueued here.
6431	 * This is to make sure that it won't call ieee80211_scan_done
6432	 * when we have already started the next scan.
6433	 */
6434	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6435}
6436
6437static void
6438iwm_update_mcast(struct ieee80211com *ic)
6439{
6440}
6441
6442static void
6443iwm_set_channel(struct ieee80211com *ic)
6444{
6445}
6446
6447static void
6448iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6449{
6450}
6451
6452static void
6453iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6454{
6455	return;
6456}
6457
6458void
6459iwm_init_task(void *arg1)
6460{
6461	struct iwm_softc *sc = arg1;
6462
6463	IWM_LOCK(sc);
6464	while (sc->sc_flags & IWM_FLAG_BUSY)
6465		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6466	sc->sc_flags |= IWM_FLAG_BUSY;
6467	iwm_stop(sc);
6468	if (sc->sc_ic.ic_nrunning > 0)
6469		iwm_init(sc);
6470	sc->sc_flags &= ~IWM_FLAG_BUSY;
6471	wakeup(&sc->sc_flags);
6472	IWM_UNLOCK(sc);
6473}
6474
6475static int
6476iwm_resume(device_t dev)
6477{
6478	struct iwm_softc *sc = device_get_softc(dev);
6479	int do_reinit = 0;
6480
6481	/*
6482	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6483	 * PCI Tx retries from interfering with C3 CPU state.
6484	 */
6485	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6486	iwm_init_task(sc);
6487
6488	IWM_LOCK(sc);
6489	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6490		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6491		do_reinit = 1;
6492	}
6493	IWM_UNLOCK(sc);
6494
6495	if (do_reinit)
6496		ieee80211_resume_all(&sc->sc_ic);
6497
6498	return 0;
6499}
6500
6501static int
6502iwm_suspend(device_t dev)
6503{
6504	int do_stop = 0;
6505	struct iwm_softc *sc = device_get_softc(dev);
6506
6507	do_stop = (sc->sc_ic.ic_nrunning > 0);
6508
6509	ieee80211_suspend_all(&sc->sc_ic);
6510
6511	if (do_stop) {
6512		IWM_LOCK(sc);
6513		iwm_stop(sc);
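		/*
		 * Reuse IWM_FLAG_SCANNING as a "was running" marker so
		 * that iwm_resume() knows to do a full reinit.
		 */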
6514		sc->sc_flags |= IWM_FLAG_SCANNING;
6515		IWM_UNLOCK(sc);
6516	}
6517
6518	return (0);
6519}
6520
6521static int
6522iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6523{
6524	struct iwm_fw_info *fw = &sc->sc_fw;
6525	device_t dev = sc->sc_dev;
6526	int i;
6527
6528	if (!sc->sc_attached)
6529		return 0;
6530	sc->sc_attached = 0;
6531
6532	if (do_net80211)
6533		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6534
6535	callout_drain(&sc->sc_led_blink_to);
6536	callout_drain(&sc->sc_watchdog_to);
6537	iwm_stop_device(sc);
6538	if (do_net80211) {
6539		IWM_LOCK(sc);
6540		iwm_xmit_queue_drain(sc);
6541		IWM_UNLOCK(sc);
6542		ieee80211_ifdetach(&sc->sc_ic);
6543	}
6544
6545	iwm_phy_db_free(sc->sc_phy_db);
6546	sc->sc_phy_db = NULL;
6547
6548	iwm_free_nvm_data(sc->nvm_data);
6549
6550	/* Free descriptor rings */
6551	iwm_free_rx_ring(sc, &sc->rxq);
6552	for (i = 0; i < nitems(sc->txq); i++)
6553		iwm_free_tx_ring(sc, &sc->txq[i]);
6554
6555	/* Free firmware */
6556	if (fw->fw_fp != NULL)
6557		iwm_fw_info_free(fw);
6558
6559	/* Free scheduler */
6560	iwm_dma_contig_free(&sc->sched_dma);
6561	iwm_dma_contig_free(&sc->ict_dma);
6562	iwm_dma_contig_free(&sc->kw_dma);
6563	iwm_dma_contig_free(&sc->fw_dma);
6564
6565	iwm_free_fw_paging(sc);
6566
6567	/* Finished with the hardware - detach things */
6568	iwm_pci_detach(dev);
6569
6570	if (sc->sc_notif_wait != NULL) {
6571		iwm_notification_wait_free(sc->sc_notif_wait);
6572		sc->sc_notif_wait = NULL;
6573	}
6574
6575	IWM_LOCK_DESTROY(sc);
6576
6577	return (0);
6578}
6579
6580static int
6581iwm_detach(device_t dev)
6582{
6583	struct iwm_softc *sc = device_get_softc(dev);
6584
6585	return (iwm_detach_local(sc, 1));
6586}
6587
6588static device_method_t iwm_pci_methods[] = {
6589	/* Device interface */
6590	DEVMETHOD(device_probe,		iwm_probe),
6591	DEVMETHOD(device_attach,	iwm_attach),
6592	DEVMETHOD(device_detach,	iwm_detach),
6593	DEVMETHOD(device_suspend,	iwm_suspend),
6594	DEVMETHOD(device_resume,	iwm_resume),
6595
6596	DEVMETHOD_END
6597};
6598
6599static driver_t iwm_pci_driver = {
6600	"iwm",
6601	iwm_pci_methods,
6602	sizeof(struct iwm_softc)
6603};
6604
6605static devclass_t iwm_devclass;
6606
6607DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6608MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6609MODULE_DEPEND(iwm, pci, 1, 1, 1);
6610MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6611