if_iwm.c revision 330454
1/*	$OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $	*/
2
3/*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
23 *
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26 *
27 ***********************************************************************
28 *
29 * This file is provided under a dual BSD/GPLv2 license.  When using or
30 * redistributing this file, you may do so under either license.
31 *
32 * GPL LICENSE SUMMARY
33 *
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43 * General Public License for more details.
44 *
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
49 *
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
52 *
53 * Contact Information:
54 *  Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56 *
57 *
58 * BSD LICENSE
59 *
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 *
67 *  * Redistributions of source code must retain the above copyright
68 *    notice, this list of conditions and the following disclaimer.
69 *  * Redistributions in binary form must reproduce the above copyright
70 *    notice, this list of conditions and the following disclaimer in
71 *    the documentation and/or other materials provided with the
72 *    distribution.
73 *  * Neither the name Intel Corporation nor the names of its
74 *    contributors may be used to endorse or promote products derived
75 *    from this software without specific prior written permission.
76 *
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88 */
89
90/*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92 *
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
96 *
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104 */
105#include <sys/cdefs.h>
106__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 330454 2018-03-05 08:01:08Z eadler $");
107
108#include "opt_wlan.h"
109
110#include <sys/param.h>
111#include <sys/bus.h>
112#include <sys/conf.h>
113#include <sys/endian.h>
114#include <sys/firmware.h>
115#include <sys/kernel.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/module.h>
120#include <sys/proc.h>
121#include <sys/rman.h>
122#include <sys/socket.h>
123#include <sys/sockio.h>
124#include <sys/sysctl.h>
125#include <sys/linker.h>
126
127#include <machine/bus.h>
128#include <machine/endian.h>
129#include <machine/resource.h>
130
131#include <dev/pci/pcivar.h>
132#include <dev/pci/pcireg.h>
133
134#include <net/bpf.h>
135
136#include <net/if.h>
137#include <net/if_var.h>
138#include <net/if_arp.h>
139#include <net/if_dl.h>
140#include <net/if_media.h>
141#include <net/if_types.h>
142
143#include <netinet/in.h>
144#include <netinet/in_systm.h>
145#include <netinet/if_ether.h>
146#include <netinet/ip.h>
147
148#include <net80211/ieee80211_var.h>
149#include <net80211/ieee80211_regdomain.h>
150#include <net80211/ieee80211_ratectl.h>
151#include <net80211/ieee80211_radiotap.h>
152
153#include <dev/iwm/if_iwmreg.h>
154#include <dev/iwm/if_iwmvar.h>
155#include <dev/iwm/if_iwm_config.h>
156#include <dev/iwm/if_iwm_debug.h>
157#include <dev/iwm/if_iwm_notif_wait.h>
158#include <dev/iwm/if_iwm_util.h>
159#include <dev/iwm/if_iwm_binding.h>
160#include <dev/iwm/if_iwm_phy_db.h>
161#include <dev/iwm/if_iwm_mac_ctxt.h>
162#include <dev/iwm/if_iwm_phy_ctxt.h>
163#include <dev/iwm/if_iwm_time_event.h>
164#include <dev/iwm/if_iwm_power.h>
165#include <dev/iwm/if_iwm_scan.h>
166#include <dev/iwm/if_iwm_sta.h>
167
168#include <dev/iwm/if_iwm_pcie_trans.h>
169#include <dev/iwm/if_iwm_led.h>
170#include <dev/iwm/if_iwm_fw.h>
171
172/* From DragonflyBSD */
173#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
174
175const uint8_t iwm_nvm_channels[] = {
176	/* 2.4 GHz */
177	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
178	/* 5 GHz */
179	36, 40, 44, 48, 52, 56, 60, 64,
180	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
181	149, 153, 157, 161, 165
182};
183_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
184    "IWM_NUM_CHANNELS is too small");
185
186const uint8_t iwm_nvm_channels_8000[] = {
187	/* 2.4 GHz */
188	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
189	/* 5 GHz */
190	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
191	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
192	149, 153, 157, 161, 165, 169, 173, 177, 181
193};
194_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
195    "IWM_NUM_CHANNELS_8000 is too small");
196
197#define IWM_NUM_2GHZ_CHANNELS	14
198#define IWM_N_HW_ADDR_MASK	0xF
199
200/*
201 * XXX For now, there's simply a fixed set of rate table entries
202 * that are populated.
203 */
const struct iwm_rate {
	uint8_t rate;	/* rate in units of 500 kb/s (2 => 1 Mb/s, 108 => 54 Mb/s) */
	uint8_t plcp;	/* corresponding PLCP signal value programmed into the hw */
} iwm_rates[] = {
	/* Legacy CCK rates. */
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	/* Legacy OFDM rates. */
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0	/* index of the first CCK entry above */
#define IWM_RIDX_OFDM	4	/* index of the first OFDM entry above */
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
226
/* One NVM section as fetched by iwm_nvm_read_section(). */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents (driver-allocated) */
};

/* How long to wait for the firmware "alive" / calibration notifications. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a usable alive response arrived */
	uint32_t scd_base_addr;	/* scheduler base address reported by firmware */
};
239
240static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
241static int	iwm_firmware_store_section(struct iwm_softc *,
242                                           enum iwm_ucode_type,
243                                           const uint8_t *, size_t);
244static int	iwm_set_default_calib(struct iwm_softc *, const void *);
245static void	iwm_fw_info_free(struct iwm_fw_info *);
246static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
247static int	iwm_alloc_fwmem(struct iwm_softc *);
248static int	iwm_alloc_sched(struct iwm_softc *);
249static int	iwm_alloc_kw(struct iwm_softc *);
250static int	iwm_alloc_ict(struct iwm_softc *);
251static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
255                                  int);
256static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
257static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258static void	iwm_enable_interrupts(struct iwm_softc *);
259static void	iwm_restore_interrupts(struct iwm_softc *);
260static void	iwm_disable_interrupts(struct iwm_softc *);
261static void	iwm_ict_reset(struct iwm_softc *);
262static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
263static void	iwm_stop_device(struct iwm_softc *);
264static void	iwm_mvm_nic_config(struct iwm_softc *);
265static int	iwm_nic_rx_init(struct iwm_softc *);
266static int	iwm_nic_tx_init(struct iwm_softc *);
267static int	iwm_nic_init(struct iwm_softc *);
268static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
269static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
270                                   uint16_t, uint8_t *, uint16_t *);
271static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
272				     uint16_t *, uint32_t);
273static uint32_t	iwm_eeprom_channel_flags(uint16_t);
274static void	iwm_add_channel_band(struct iwm_softc *,
275		    struct ieee80211_channel[], int, int *, int, size_t,
276		    const uint8_t[]);
277static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
278		    struct ieee80211_channel[]);
279static struct iwm_nvm_data *
280	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281			   const uint16_t *, const uint16_t *,
282			   const uint16_t *, const uint16_t *,
283			   const uint16_t *);
284static void	iwm_free_nvm_data(struct iwm_nvm_data *);
285static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
286					       struct iwm_nvm_data *,
287					       const uint16_t *,
288					       const uint16_t *);
289static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
290			    const uint16_t *);
291static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
292static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
293				  const uint16_t *);
294static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
295				   const uint16_t *);
296static void	iwm_set_radio_cfg(const struct iwm_softc *,
297				  struct iwm_nvm_data *, uint32_t);
298static struct iwm_nvm_data *
299	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
300static int	iwm_nvm_init(struct iwm_softc *);
301static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
302				      const struct iwm_fw_desc *);
303static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
304					     bus_addr_t, uint32_t);
305static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
306						const struct iwm_fw_sects *,
307						int, int *);
308static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
309					   const struct iwm_fw_sects *,
310					   int, int *);
311static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
312					       const struct iwm_fw_sects *);
313static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
314					  const struct iwm_fw_sects *);
315static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
316static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
317static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
318static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
319                                              enum iwm_ucode_type);
320static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
321static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
322static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
323					    struct iwm_rx_phy_info *);
324static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
325                                      struct iwm_rx_packet *);
326static int	iwm_get_noise(struct iwm_softc *,
327		    const struct iwm_mvm_statistics_rx_non_phy *);
328static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
329		    struct iwm_rx_packet *);
330static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
331				    uint32_t, boolean_t);
332static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
333                                         struct iwm_rx_packet *,
334				         struct iwm_node *);
335static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
336static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
337#if 0
338static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
339                                 uint16_t);
340#endif
341static const struct iwm_rate *
342	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
343			struct mbuf *, struct iwm_tx_cmd *);
344static int	iwm_tx(struct iwm_softc *, struct mbuf *,
345                       struct ieee80211_node *, int);
346static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
347			     const struct ieee80211_bpf_params *);
348static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
349static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
350static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
351static int	iwm_release(struct iwm_softc *, struct iwm_node *);
352static struct ieee80211_node *
353		iwm_node_alloc(struct ieee80211vap *,
354		               const uint8_t[IEEE80211_ADDR_LEN]);
355static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
356static int	iwm_media_change(struct ifnet *);
357static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
358static void	iwm_endscan_cb(void *, int);
359static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
360					struct iwm_sf_cfg_cmd *,
361					struct ieee80211_node *);
362static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
363static int	iwm_send_bt_init_conf(struct iwm_softc *);
364static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
365static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
366static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
367static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
368static int	iwm_init_hw(struct iwm_softc *);
369static void	iwm_init(struct iwm_softc *);
370static void	iwm_start(struct iwm_softc *);
371static void	iwm_stop(struct iwm_softc *);
372static void	iwm_watchdog(void *);
373static void	iwm_parent(struct ieee80211com *);
374#ifdef IWM_DEBUG
375static const char *
376		iwm_desc_lookup(uint32_t);
377static void	iwm_nic_error(struct iwm_softc *);
378static void	iwm_nic_umac_error(struct iwm_softc *);
379#endif
380static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
381static void	iwm_notif_intr(struct iwm_softc *);
382static void	iwm_intr(void *);
383static int	iwm_attach(device_t);
384static int	iwm_is_valid_ether_addr(uint8_t *);
385static void	iwm_preinit(void *);
386static int	iwm_detach_local(struct iwm_softc *sc, int);
387static void	iwm_init_task(void *);
388static void	iwm_radiotap_attach(struct iwm_softc *);
389static struct ieee80211vap *
390		iwm_vap_create(struct ieee80211com *,
391		               const char [IFNAMSIZ], int,
392		               enum ieee80211_opmode, int,
393		               const uint8_t [IEEE80211_ADDR_LEN],
394		               const uint8_t [IEEE80211_ADDR_LEN]);
395static void	iwm_vap_delete(struct ieee80211vap *);
396static void	iwm_xmit_queue_drain(struct iwm_softc *);
397static void	iwm_scan_start(struct ieee80211com *);
398static void	iwm_scan_end(struct ieee80211com *);
399static void	iwm_update_mcast(struct ieee80211com *);
400static void	iwm_set_channel(struct ieee80211com *);
401static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
402static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
403static int	iwm_detach(device_t);
404
405static int	iwm_lar_disable = 0;
406TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
407
408/*
409 * Firmware parser.
410 */
411
412static int
413iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
414{
415	const struct iwm_fw_cscheme_list *l = (const void *)data;
416
417	if (dlen < sizeof(*l) ||
418	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
419		return EINVAL;
420
421	/* we don't actually store anything for now, always use s/w crypto */
422
423	return 0;
424}
425
426static int
427iwm_firmware_store_section(struct iwm_softc *sc,
428    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
429{
430	struct iwm_fw_sects *fws;
431	struct iwm_fw_desc *fwone;
432
433	if (type >= IWM_UCODE_TYPE_MAX)
434		return EINVAL;
435	if (dlen < sizeof(uint32_t))
436		return EINVAL;
437
438	fws = &sc->sc_fw.fw_sects[type];
439	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
440		return EINVAL;
441
442	fwone = &fws->fw_sect[fws->fw_count];
443
444	/* first 32bit are device load offset */
445	memcpy(&fwone->offset, data, sizeof(uint32_t));
446
447	/* rest is data */
448	fwone->data = data + sizeof(uint32_t);
449	fwone->len = dlen - sizeof(uint32_t);
450
451	fws->fw_count++;
452
453	return 0;
454}
455
456#define IWM_DEFAULT_SCAN_CHANNELS 40
457
458/* iwlwifi: iwl-drv.c */
459struct iwm_tlv_calib_data {
460	uint32_t ucode_type;
461	struct iwm_tlv_calib_ctrl calib;
462} __packed;
463
464static int
465iwm_set_default_calib(struct iwm_softc *sc, const void *data)
466{
467	const struct iwm_tlv_calib_data *def_calib = data;
468	uint32_t ucode_type = le32toh(def_calib->ucode_type);
469
470	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
471		device_printf(sc->sc_dev,
472		    "Wrong ucode_type %u for default "
473		    "calibration.\n", ucode_type);
474		return EINVAL;
475	}
476
477	sc->sc_default_calib[ucode_type].flow_trigger =
478	    def_calib->calib.flow_trigger;
479	sc->sc_default_calib[ucode_type].event_trigger =
480	    def_calib->calib.event_trigger;
481
482	return 0;
483}
484
485static int
486iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
487			struct iwm_ucode_capabilities *capa)
488{
489	const struct iwm_ucode_api *ucode_api = (const void *)data;
490	uint32_t api_index = le32toh(ucode_api->api_index);
491	uint32_t api_flags = le32toh(ucode_api->api_flags);
492	int i;
493
494	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
495		device_printf(sc->sc_dev,
496		    "api flags index %d larger than supported by driver\n",
497		    api_index);
498		/* don't return an error so we can load FW that has more bits */
499		return 0;
500	}
501
502	for (i = 0; i < 32; i++) {
503		if (api_flags & (1U << i))
504			setbit(capa->enabled_api, i + 32 * api_index);
505	}
506
507	return 0;
508}
509
510static int
511iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
512			   struct iwm_ucode_capabilities *capa)
513{
514	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
515	uint32_t api_index = le32toh(ucode_capa->api_index);
516	uint32_t api_flags = le32toh(ucode_capa->api_capa);
517	int i;
518
519	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
520		device_printf(sc->sc_dev,
521		    "capa flags index %d larger than supported by driver\n",
522		    api_index);
523		/* don't return an error so we can load FW that has more bits */
524		return 0;
525	}
526
527	for (i = 0; i < 32; i++) {
528		if (api_flags & (1U << i))
529			setbit(capa->enabled_capa, i + 32 * api_index);
530	}
531
532	return 0;
533}
534
/*
 * Drop the firmware(9) image reference and clear the parsed section
 * bookkeeping.  fw->fw_status is deliberately left alone: it is the
 * handshake state that iwm_read_firmware() sleepers key off of.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
543
544static int
545iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
546{
547	struct iwm_fw_info *fw = &sc->sc_fw;
548	const struct iwm_tlv_ucode_header *uhdr;
549	const struct iwm_ucode_tlv *tlv;
550	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
551	enum iwm_ucode_tlv_type tlv_type;
552	const struct firmware *fwp;
553	const uint8_t *data;
554	uint32_t tlv_len;
555	uint32_t usniffer_img;
556	const uint8_t *tlv_data;
557	uint32_t paging_mem_size;
558	int num_of_cpus;
559	int error = 0;
560	size_t len;
561
562	if (fw->fw_status == IWM_FW_STATUS_DONE &&
563	    ucode_type != IWM_UCODE_INIT)
564		return 0;
565
566	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
567		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
568	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
569
570	if (fw->fw_fp != NULL)
571		iwm_fw_info_free(fw);
572
573	/*
574	 * Load firmware into driver memory.
575	 * fw_fp will be set.
576	 */
577	IWM_UNLOCK(sc);
578	fwp = firmware_get(sc->cfg->fw_name);
579	IWM_LOCK(sc);
580	if (fwp == NULL) {
581		device_printf(sc->sc_dev,
582		    "could not read firmware %s (error %d)\n",
583		    sc->cfg->fw_name, error);
584		goto out;
585	}
586	fw->fw_fp = fwp;
587
588	/* (Re-)Initialize default values. */
589	capa->flags = 0;
590	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
591	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
592	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
593	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
594	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
595
596	/*
597	 * Parse firmware contents
598	 */
599
600	uhdr = (const void *)fw->fw_fp->data;
601	if (*(const uint32_t *)fw->fw_fp->data != 0
602	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
603		device_printf(sc->sc_dev, "invalid firmware %s\n",
604		    sc->cfg->fw_name);
605		error = EINVAL;
606		goto out;
607	}
608
609	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
610	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
611	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
612	    IWM_UCODE_API(le32toh(uhdr->ver)));
613	data = uhdr->data;
614	len = fw->fw_fp->datasize - sizeof(*uhdr);
615
616	while (len >= sizeof(*tlv)) {
617		len -= sizeof(*tlv);
618		tlv = (const void *)data;
619
620		tlv_len = le32toh(tlv->length);
621		tlv_type = le32toh(tlv->type);
622		tlv_data = tlv->data;
623
624		if (len < tlv_len) {
625			device_printf(sc->sc_dev,
626			    "firmware too short: %zu bytes\n",
627			    len);
628			error = EINVAL;
629			goto parse_out;
630		}
631		len -= roundup2(tlv_len, 4);
632		data += sizeof(tlv) + roundup2(tlv_len, 4);
633
634		switch ((int)tlv_type) {
635		case IWM_UCODE_TLV_PROBE_MAX_LEN:
636			if (tlv_len != sizeof(uint32_t)) {
637				device_printf(sc->sc_dev,
638				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
639				    __func__,
640				    (int) tlv_len);
641				error = EINVAL;
642				goto parse_out;
643			}
644			capa->max_probe_length =
645			    le32_to_cpup((const uint32_t *)tlv_data);
646			/* limit it to something sensible */
647			if (capa->max_probe_length >
648			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
649				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
650				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
651				    "ridiculous\n", __func__);
652				error = EINVAL;
653				goto parse_out;
654			}
655			break;
656		case IWM_UCODE_TLV_PAN:
657			if (tlv_len) {
658				device_printf(sc->sc_dev,
659				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
660				    __func__,
661				    (int) tlv_len);
662				error = EINVAL;
663				goto parse_out;
664			}
665			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
666			break;
667		case IWM_UCODE_TLV_FLAGS:
668			if (tlv_len < sizeof(uint32_t)) {
669				device_printf(sc->sc_dev,
670				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
671				    __func__,
672				    (int) tlv_len);
673				error = EINVAL;
674				goto parse_out;
675			}
676			if (tlv_len % sizeof(uint32_t)) {
677				device_printf(sc->sc_dev,
678				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
679				    __func__,
680				    (int) tlv_len);
681				error = EINVAL;
682				goto parse_out;
683			}
684			/*
685			 * Apparently there can be many flags, but Linux driver
686			 * parses only the first one, and so do we.
687			 *
688			 * XXX: why does this override IWM_UCODE_TLV_PAN?
689			 * Intentional or a bug?  Observations from
690			 * current firmware file:
691			 *  1) TLV_PAN is parsed first
692			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
693			 * ==> this resets TLV_PAN to itself... hnnnk
694			 */
695			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
696			break;
697		case IWM_UCODE_TLV_CSCHEME:
698			if ((error = iwm_store_cscheme(sc,
699			    tlv_data, tlv_len)) != 0) {
700				device_printf(sc->sc_dev,
701				    "%s: iwm_store_cscheme(): returned %d\n",
702				    __func__,
703				    error);
704				goto parse_out;
705			}
706			break;
707		case IWM_UCODE_TLV_NUM_OF_CPU:
708			if (tlv_len != sizeof(uint32_t)) {
709				device_printf(sc->sc_dev,
710				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
711				    __func__,
712				    (int) tlv_len);
713				error = EINVAL;
714				goto parse_out;
715			}
716			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
717			if (num_of_cpus == 2) {
718				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
719					TRUE;
720				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
721					TRUE;
722				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
723					TRUE;
724			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
725				device_printf(sc->sc_dev,
726				    "%s: Driver supports only 1 or 2 CPUs\n",
727				    __func__);
728				error = EINVAL;
729				goto parse_out;
730			}
731			break;
732		case IWM_UCODE_TLV_SEC_RT:
733			if ((error = iwm_firmware_store_section(sc,
734			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
735				device_printf(sc->sc_dev,
736				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
737				    __func__,
738				    error);
739				goto parse_out;
740			}
741			break;
742		case IWM_UCODE_TLV_SEC_INIT:
743			if ((error = iwm_firmware_store_section(sc,
744			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
745				device_printf(sc->sc_dev,
746				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
747				    __func__,
748				    error);
749				goto parse_out;
750			}
751			break;
752		case IWM_UCODE_TLV_SEC_WOWLAN:
753			if ((error = iwm_firmware_store_section(sc,
754			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
755				device_printf(sc->sc_dev,
756				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
757				    __func__,
758				    error);
759				goto parse_out;
760			}
761			break;
762		case IWM_UCODE_TLV_DEF_CALIB:
763			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
764				device_printf(sc->sc_dev,
765				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
766				    __func__,
767				    (int) tlv_len,
768				    (int) sizeof(struct iwm_tlv_calib_data));
769				error = EINVAL;
770				goto parse_out;
771			}
772			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
773				device_printf(sc->sc_dev,
774				    "%s: iwm_set_default_calib() failed: %d\n",
775				    __func__,
776				    error);
777				goto parse_out;
778			}
779			break;
780		case IWM_UCODE_TLV_PHY_SKU:
781			if (tlv_len != sizeof(uint32_t)) {
782				error = EINVAL;
783				device_printf(sc->sc_dev,
784				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
785				    __func__,
786				    (int) tlv_len);
787				goto parse_out;
788			}
789			sc->sc_fw.phy_config =
790			    le32_to_cpup((const uint32_t *)tlv_data);
791			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
792						  IWM_FW_PHY_CFG_TX_CHAIN) >>
793						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
794			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
795						  IWM_FW_PHY_CFG_RX_CHAIN) >>
796						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
797			break;
798
799		case IWM_UCODE_TLV_API_CHANGES_SET: {
800			if (tlv_len != sizeof(struct iwm_ucode_api)) {
801				error = EINVAL;
802				goto parse_out;
803			}
804			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
805				error = EINVAL;
806				goto parse_out;
807			}
808			break;
809		}
810
811		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
812			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
813				error = EINVAL;
814				goto parse_out;
815			}
816			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
817				error = EINVAL;
818				goto parse_out;
819			}
820			break;
821		}
822
823		case 48: /* undocumented TLV */
824		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
825		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
826			/* ignore, not used by current driver */
827			break;
828
829		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
830			if ((error = iwm_firmware_store_section(sc,
831			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
832			    tlv_len)) != 0)
833				goto parse_out;
834			break;
835
836		case IWM_UCODE_TLV_PAGING:
837			if (tlv_len != sizeof(uint32_t)) {
838				error = EINVAL;
839				goto parse_out;
840			}
841			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
842
843			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
844			    "%s: Paging: paging enabled (size = %u bytes)\n",
845			    __func__, paging_mem_size);
846			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
847				device_printf(sc->sc_dev,
848					"%s: Paging: driver supports up to %u bytes for paging image\n",
849					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
850				error = EINVAL;
851				goto out;
852			}
853			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
854				device_printf(sc->sc_dev,
855				    "%s: Paging: image isn't multiple %u\n",
856				    __func__, IWM_FW_PAGING_SIZE);
857				error = EINVAL;
858				goto out;
859			}
860
861			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
862			    paging_mem_size;
863			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
864			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
865			    paging_mem_size;
866			break;
867
868		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
869			if (tlv_len != sizeof(uint32_t)) {
870				error = EINVAL;
871				goto parse_out;
872			}
873			capa->n_scan_channels =
874			    le32_to_cpup((const uint32_t *)tlv_data);
875			break;
876
877		case IWM_UCODE_TLV_FW_VERSION:
878			if (tlv_len != sizeof(uint32_t) * 3) {
879				error = EINVAL;
880				goto parse_out;
881			}
882			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
883			    "%d.%d.%d",
884			    le32toh(((const uint32_t *)tlv_data)[0]),
885			    le32toh(((const uint32_t *)tlv_data)[1]),
886			    le32toh(((const uint32_t *)tlv_data)[2]));
887			break;
888
889		case IWM_UCODE_TLV_FW_MEM_SEG:
890			break;
891
892		default:
893			device_printf(sc->sc_dev,
894			    "%s: unknown firmware section %d, abort\n",
895			    __func__, tlv_type);
896			error = EINVAL;
897			goto parse_out;
898		}
899	}
900
901	KASSERT(error == 0, ("unhandled error"));
902
903 parse_out:
904	if (error) {
905		device_printf(sc->sc_dev, "firmware parse error %d, "
906		    "section type %d\n", error, tlv_type);
907	}
908
909 out:
910	if (error) {
911		fw->fw_status = IWM_FW_STATUS_NONE;
912		if (fw->fw_fp != NULL)
913			iwm_fw_info_free(fw);
914	} else
915		fw->fw_status = IWM_FW_STATUS_DONE;
916	wakeup(&sc->sc_fw);
917
918	return error;
919}
920
921/*
922 * DMA resource routines
923 */
924
925/* fwmem is used to load firmware onto the card */
926static int
927iwm_alloc_fwmem(struct iwm_softc *sc)
928{
929	/* Must be aligned on a 16-byte boundary. */
930	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
931	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
932}
933
934/* tx scheduler rings.  not used? */
935static int
936iwm_alloc_sched(struct iwm_softc *sc)
937{
938	/* TX scheduler rings must be aligned on a 1KB boundary. */
939	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
940	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
941}
942
943/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
944static int
945iwm_alloc_kw(struct iwm_softc *sc)
946{
947	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
948}
949
950/* interrupt cause table */
951static int
952iwm_alloc_ict(struct iwm_softc *sc)
953{
954	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
955	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
956}
957
958static int
959iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
960{
961	bus_size_t size;
962	int i, error;
963
964	ring->cur = 0;
965
966	/* Allocate RX descriptors (256-byte aligned). */
967	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
968	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
969	if (error != 0) {
970		device_printf(sc->sc_dev,
971		    "could not allocate RX ring DMA memory\n");
972		goto fail;
973	}
974	ring->desc = ring->desc_dma.vaddr;
975
976	/* Allocate RX status area (16-byte aligned). */
977	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
978	    sizeof(*ring->stat), 16);
979	if (error != 0) {
980		device_printf(sc->sc_dev,
981		    "could not allocate RX status DMA memory\n");
982		goto fail;
983	}
984	ring->stat = ring->stat_dma.vaddr;
985
986        /* Create RX buffer DMA tag. */
987        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
988            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
989            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
990        if (error != 0) {
991                device_printf(sc->sc_dev,
992                    "%s: could not create RX buf DMA tag, error %d\n",
993                    __func__, error);
994                goto fail;
995        }
996
997	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
998	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
999	if (error != 0) {
1000		device_printf(sc->sc_dev,
1001		    "%s: could not create RX buf DMA map, error %d\n",
1002		    __func__, error);
1003		goto fail;
1004	}
1005	/*
1006	 * Allocate and map RX buffers.
1007	 */
1008	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1009		struct iwm_rx_data *data = &ring->data[i];
1010		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1011		if (error != 0) {
1012			device_printf(sc->sc_dev,
1013			    "%s: could not create RX buf DMA map, error %d\n",
1014			    __func__, error);
1015			goto fail;
1016		}
1017		data->m = NULL;
1018
1019		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1020			goto fail;
1021		}
1022	}
1023	return 0;
1024
1025fail:	iwm_free_rx_ring(sc, ring);
1026	return error;
1027}
1028
1029static void
1030iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1031{
1032	/* Reset the ring state */
1033	ring->cur = 0;
1034
1035	/*
1036	 * The hw rx ring index in shared memory must also be cleared,
1037	 * otherwise the discrepancy can cause reprocessing chaos.
1038	 */
1039	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1040}
1041
1042static void
1043iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1044{
1045	int i;
1046
1047	iwm_dma_contig_free(&ring->desc_dma);
1048	iwm_dma_contig_free(&ring->stat_dma);
1049
1050	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1051		struct iwm_rx_data *data = &ring->data[i];
1052
1053		if (data->m != NULL) {
1054			bus_dmamap_sync(ring->data_dmat, data->map,
1055			    BUS_DMASYNC_POSTREAD);
1056			bus_dmamap_unload(ring->data_dmat, data->map);
1057			m_freem(data->m);
1058			data->m = NULL;
1059		}
1060		if (data->map != NULL) {
1061			bus_dmamap_destroy(ring->data_dmat, data->map);
1062			data->map = NULL;
1063		}
1064	}
1065	if (ring->spare_map != NULL) {
1066		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1067		ring->spare_map = NULL;
1068	}
1069	if (ring->data_dmat != NULL) {
1070		bus_dma_tag_destroy(ring->data_dmat);
1071		ring->data_dmat = NULL;
1072	}
1073}
1074
1075static int
1076iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1077{
1078	bus_addr_t paddr;
1079	bus_size_t size;
1080	size_t maxsize;
1081	int nsegments;
1082	int i, error;
1083
1084	ring->qid = qid;
1085	ring->queued = 0;
1086	ring->cur = 0;
1087
1088	/* Allocate TX descriptors (256-byte aligned). */
1089	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1090	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1091	if (error != 0) {
1092		device_printf(sc->sc_dev,
1093		    "could not allocate TX ring DMA memory\n");
1094		goto fail;
1095	}
1096	ring->desc = ring->desc_dma.vaddr;
1097
1098	/*
1099	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1100	 * to allocate commands space for other rings.
1101	 */
1102	if (qid > IWM_MVM_CMD_QUEUE)
1103		return 0;
1104
1105	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1106	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1107	if (error != 0) {
1108		device_printf(sc->sc_dev,
1109		    "could not allocate TX cmd DMA memory\n");
1110		goto fail;
1111	}
1112	ring->cmd = ring->cmd_dma.vaddr;
1113
1114	/* FW commands may require more mapped space than packets. */
1115	if (qid == IWM_MVM_CMD_QUEUE) {
1116		maxsize = IWM_RBUF_SIZE;
1117		nsegments = 1;
1118	} else {
1119		maxsize = MCLBYTES;
1120		nsegments = IWM_MAX_SCATTER - 2;
1121	}
1122
1123	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1124	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1125            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1126	if (error != 0) {
1127		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1128		goto fail;
1129	}
1130
1131	paddr = ring->cmd_dma.paddr;
1132	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1133		struct iwm_tx_data *data = &ring->data[i];
1134
1135		data->cmd_paddr = paddr;
1136		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1137		    + offsetof(struct iwm_tx_cmd, scratch);
1138		paddr += sizeof(struct iwm_device_cmd);
1139
1140		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1141		if (error != 0) {
1142			device_printf(sc->sc_dev,
1143			    "could not create TX buf DMA map\n");
1144			goto fail;
1145		}
1146	}
1147	KASSERT(paddr == ring->cmd_dma.paddr + size,
1148	    ("invalid physical address"));
1149	return 0;
1150
1151fail:	iwm_free_tx_ring(sc, ring);
1152	return error;
1153}
1154
1155static void
1156iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1157{
1158	int i;
1159
1160	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1161		struct iwm_tx_data *data = &ring->data[i];
1162
1163		if (data->m != NULL) {
1164			bus_dmamap_sync(ring->data_dmat, data->map,
1165			    BUS_DMASYNC_POSTWRITE);
1166			bus_dmamap_unload(ring->data_dmat, data->map);
1167			m_freem(data->m);
1168			data->m = NULL;
1169		}
1170	}
1171	/* Clear TX descriptors. */
1172	memset(ring->desc, 0, ring->desc_dma.size);
1173	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1174	    BUS_DMASYNC_PREWRITE);
1175	sc->qfullmsk &= ~(1 << ring->qid);
1176	ring->queued = 0;
1177	ring->cur = 0;
1178
1179	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1180		iwm_pcie_clear_cmd_in_flight(sc);
1181}
1182
1183static void
1184iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1185{
1186	int i;
1187
1188	iwm_dma_contig_free(&ring->desc_dma);
1189	iwm_dma_contig_free(&ring->cmd_dma);
1190
1191	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1192		struct iwm_tx_data *data = &ring->data[i];
1193
1194		if (data->m != NULL) {
1195			bus_dmamap_sync(ring->data_dmat, data->map,
1196			    BUS_DMASYNC_POSTWRITE);
1197			bus_dmamap_unload(ring->data_dmat, data->map);
1198			m_freem(data->m);
1199			data->m = NULL;
1200		}
1201		if (data->map != NULL) {
1202			bus_dmamap_destroy(ring->data_dmat, data->map);
1203			data->map = NULL;
1204		}
1205	}
1206	if (ring->data_dmat != NULL) {
1207		bus_dma_tag_destroy(ring->data_dmat);
1208		ring->data_dmat = NULL;
1209	}
1210}
1211
1212/*
1213 * High-level hardware frobbing routines
1214 */
1215
1216static void
1217iwm_enable_interrupts(struct iwm_softc *sc)
1218{
1219	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1220	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1221}
1222
1223static void
1224iwm_restore_interrupts(struct iwm_softc *sc)
1225{
1226	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1227}
1228
1229static void
1230iwm_disable_interrupts(struct iwm_softc *sc)
1231{
1232	/* disable interrupts */
1233	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1234
1235	/* acknowledge all interrupts */
1236	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1237	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1238}
1239
1240static void
1241iwm_ict_reset(struct iwm_softc *sc)
1242{
1243	iwm_disable_interrupts(sc);
1244
1245	/* Reset ICT table. */
1246	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1247	sc->ict_cur = 0;
1248
1249	/* Set physical address of ICT table (4KB aligned). */
1250	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1251	    IWM_CSR_DRAM_INT_TBL_ENABLE
1252	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1253	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1254	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1255
1256	/* Switch to ICT interrupt mode in driver. */
1257	sc->sc_flags |= IWM_FLAG_USE_ICT;
1258
1259	/* Re-enable interrupts. */
1260	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1261	iwm_enable_interrupts(sc);
1262}
1263
1264/* iwlwifi pcie/trans.c */
1265
1266/*
1267 * Since this .. hard-resets things, it's time to actually
1268 * mark the first vap (if any) as having no mac context.
1269 * It's annoying, but since the driver is potentially being
1270 * stop/start'ed whilst active (thanks openbsd port!) we
1271 * have to correctly track this.
1272 */
1273static void
1274iwm_stop_device(struct iwm_softc *sc)
1275{
1276	struct ieee80211com *ic = &sc->sc_ic;
1277	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1278	int chnl, qid;
1279	uint32_t mask = 0;
1280
1281	/* tell the device to stop sending interrupts */
1282	iwm_disable_interrupts(sc);
1283
1284	/*
1285	 * FreeBSD-local: mark the first vap as not-uploaded,
1286	 * so the next transition through auth/assoc
1287	 * will correctly populate the MAC context.
1288	 */
1289	if (vap) {
1290		struct iwm_vap *iv = IWM_VAP(vap);
1291		iv->phy_ctxt = NULL;
1292		iv->is_uploaded = 0;
1293	}
1294
1295	/* device going down, Stop using ICT table */
1296	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1297
1298	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1299
1300	if (iwm_nic_lock(sc)) {
1301		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1302
1303		/* Stop each Tx DMA channel */
1304		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1305			IWM_WRITE(sc,
1306			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1307			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1308		}
1309
1310		/* Wait for DMA channels to be idle */
1311		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1312		    5000)) {
1313			device_printf(sc->sc_dev,
1314			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1315			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1316		}
1317		iwm_nic_unlock(sc);
1318	}
1319	iwm_pcie_rx_stop(sc);
1320
1321	/* Stop RX ring. */
1322	iwm_reset_rx_ring(sc, &sc->rxq);
1323
1324	/* Reset all TX rings. */
1325	for (qid = 0; qid < nitems(sc->txq); qid++)
1326		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1327
1328	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1329		/* Power-down device's busmaster DMA clocks */
1330		if (iwm_nic_lock(sc)) {
1331			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1332			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1333			iwm_nic_unlock(sc);
1334		}
1335		DELAY(5);
1336	}
1337
1338	/* Make sure (redundant) we've released our request to stay awake */
1339	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1340	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1341
1342	/* Stop the device, and put it in low power state */
1343	iwm_apm_stop(sc);
1344
1345	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
1346	 * Clean again the interrupt here
1347	 */
1348	iwm_disable_interrupts(sc);
1349	/* stop and reset the on-board processor */
1350	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1351
1352	/*
1353	 * Even if we stop the HW, we still want the RF kill
1354	 * interrupt
1355	 */
1356	iwm_enable_rfkill_int(sc);
1357	iwm_check_rfkill(sc);
1358}
1359
1360/* iwlwifi: mvm/ops.c */
1361static void
1362iwm_mvm_nic_config(struct iwm_softc *sc)
1363{
1364	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1365	uint32_t reg_val = 0;
1366	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1367
1368	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1369	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1370	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1371	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1372	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1373	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1374
1375	/* SKU control */
1376	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1377	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1378	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1379	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1380
1381	/* radio configuration */
1382	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1383	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1384	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1385
1386	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1387
1388	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1389	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1390	    radio_cfg_step, radio_cfg_dash);
1391
1392	/*
1393	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1394	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1395	 * to lose ownership and not being able to obtain it back.
1396	 */
1397	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1398		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1399		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1400		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1401	}
1402}
1403
1404static int
1405iwm_nic_rx_init(struct iwm_softc *sc)
1406{
1407	/*
1408	 * Initialize RX ring.  This is from the iwn driver.
1409	 */
1410	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1411
1412	/* Stop Rx DMA */
1413	iwm_pcie_rx_stop(sc);
1414
1415	if (!iwm_nic_lock(sc))
1416		return EBUSY;
1417
1418	/* reset and flush pointers */
1419	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1420	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1421	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1422	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1423
1424	/* Set physical address of RX ring (256-byte aligned). */
1425	IWM_WRITE(sc,
1426	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1427
1428	/* Set physical address of RX status (16-byte aligned). */
1429	IWM_WRITE(sc,
1430	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1431
1432	/* Enable Rx DMA
1433	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1434	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1435	 *      the credit mechanism in 5000 HW RX FIFO
1436	 * Direct rx interrupts to hosts
1437	 * Rx buffer size 4 or 8k or 12k
1438	 * RB timeout 0x10
1439	 * 256 RBDs
1440	 */
1441	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1442	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1443	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1444	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1445	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1446	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1447	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1448
1449	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1450
1451	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1452	if (sc->cfg->host_interrupt_operation_mode)
1453		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1454
1455	/*
1456	 * Thus sayeth el jefe (iwlwifi) via a comment:
1457	 *
1458	 * This value should initially be 0 (before preparing any
1459	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1460	 */
1461	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1462
1463	iwm_nic_unlock(sc);
1464
1465	return 0;
1466}
1467
1468static int
1469iwm_nic_tx_init(struct iwm_softc *sc)
1470{
1471	int qid;
1472
1473	if (!iwm_nic_lock(sc))
1474		return EBUSY;
1475
1476	/* Deactivate TX scheduler. */
1477	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1478
1479	/* Set physical address of "keep warm" page (16-byte aligned). */
1480	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1481
1482	/* Initialize TX rings. */
1483	for (qid = 0; qid < nitems(sc->txq); qid++) {
1484		struct iwm_tx_ring *txq = &sc->txq[qid];
1485
1486		/* Set physical address of TX ring (256-byte aligned). */
1487		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1488		    txq->desc_dma.paddr >> 8);
1489		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1490		    "%s: loading ring %d descriptors (%p) at %lx\n",
1491		    __func__,
1492		    qid, txq->desc,
1493		    (unsigned long) (txq->desc_dma.paddr >> 8));
1494	}
1495
1496	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1497
1498	iwm_nic_unlock(sc);
1499
1500	return 0;
1501}
1502
1503static int
1504iwm_nic_init(struct iwm_softc *sc)
1505{
1506	int error;
1507
1508	iwm_apm_init(sc);
1509	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1510		iwm_set_pwr(sc);
1511
1512	iwm_mvm_nic_config(sc);
1513
1514	if ((error = iwm_nic_rx_init(sc)) != 0)
1515		return error;
1516
1517	/*
1518	 * Ditto for TX, from iwn
1519	 */
1520	if ((error = iwm_nic_tx_init(sc)) != 0)
1521		return error;
1522
1523	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1524	    "%s: shadow registers enabled\n", __func__);
1525	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1526
1527	return 0;
1528}
1529
1530int
1531iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1532{
1533	if (!iwm_nic_lock(sc)) {
1534		device_printf(sc->sc_dev,
1535		    "%s: cannot enable txq %d\n",
1536		    __func__,
1537		    qid);
1538		return EBUSY;
1539	}
1540
1541	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1542
1543	if (qid == IWM_MVM_CMD_QUEUE) {
1544		/* unactivate before configuration */
1545		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1546		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1547		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1548
1549		iwm_nic_unlock(sc);
1550
1551		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1552
1553		if (!iwm_nic_lock(sc)) {
1554			device_printf(sc->sc_dev,
1555			    "%s: cannot enable txq %d\n", __func__, qid);
1556			return EBUSY;
1557		}
1558		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1559		iwm_nic_unlock(sc);
1560
1561		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1562		/* Set scheduler window size and frame limit. */
1563		iwm_write_mem32(sc,
1564		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1565		    sizeof(uint32_t),
1566		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1567		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1568		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1569		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1570
1571		if (!iwm_nic_lock(sc)) {
1572			device_printf(sc->sc_dev,
1573			    "%s: cannot enable txq %d\n", __func__, qid);
1574			return EBUSY;
1575		}
1576		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1577		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1578		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1579		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1580		    IWM_SCD_QUEUE_STTS_REG_MSK);
1581	} else {
1582		struct iwm_scd_txq_cfg_cmd cmd;
1583		int error;
1584
1585		iwm_nic_unlock(sc);
1586
1587		memset(&cmd, 0, sizeof(cmd));
1588		cmd.scd_queue = qid;
1589		cmd.enable = 1;
1590		cmd.sta_id = sta_id;
1591		cmd.tx_fifo = fifo;
1592		cmd.aggregate = 0;
1593		cmd.window = IWM_FRAME_LIMIT;
1594
1595		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1596		    sizeof(cmd), &cmd);
1597		if (error) {
1598			device_printf(sc->sc_dev,
1599			    "cannot enable txq %d\n", qid);
1600			return error;
1601		}
1602
1603		if (!iwm_nic_lock(sc))
1604			return EBUSY;
1605	}
1606
1607	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1608	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1609
1610	iwm_nic_unlock(sc);
1611
1612	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1613	    __func__, qid, fifo);
1614
1615	return 0;
1616}
1617
1618static int
1619iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1620{
1621	int error, chnl;
1622
1623	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1624	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1625
1626	if (!iwm_nic_lock(sc))
1627		return EBUSY;
1628
1629	iwm_ict_reset(sc);
1630
1631	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1632	if (scd_base_addr != 0 &&
1633	    scd_base_addr != sc->scd_base_addr) {
1634		device_printf(sc->sc_dev,
1635		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1636		    __func__, sc->scd_base_addr, scd_base_addr);
1637	}
1638
1639	iwm_nic_unlock(sc);
1640
1641	/* reset context data, TX status and translation data */
1642	error = iwm_write_mem(sc,
1643	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1644	    NULL, clear_dwords);
1645	if (error)
1646		return EBUSY;
1647
1648	if (!iwm_nic_lock(sc))
1649		return EBUSY;
1650
1651	/* Set physical address of TX scheduler rings (1KB aligned). */
1652	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1653
1654	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1655
1656	iwm_nic_unlock(sc);
1657
1658	/* enable command channel */
1659	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1660	if (error)
1661		return error;
1662
1663	if (!iwm_nic_lock(sc))
1664		return EBUSY;
1665
1666	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1667
1668	/* Enable DMA channels. */
1669	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1670		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1671		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1672		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1673	}
1674
1675	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1676	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1677
1678	iwm_nic_unlock(sc);
1679
1680	/* Enable L1-Active */
1681	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1682		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1683		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1684	}
1685
1686	return error;
1687}
1688
1689/*
1690 * NVM read access and content parsing.  We do not support
1691 * external NVM or writing NVM.
1692 * iwlwifi/mvm/nvm.c
1693 */
1694
1695/* Default NVM size to read */
1696#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1697
1698#define IWM_NVM_WRITE_OPCODE 1
1699#define IWM_NVM_READ_OPCODE 0
1700
1701/* load nvm chunk response */
1702enum {
1703	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1704	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1705};
1706
1707static int
1708iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1709	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1710{
1711	struct iwm_nvm_access_cmd nvm_access_cmd = {
1712		.offset = htole16(offset),
1713		.length = htole16(length),
1714		.type = htole16(section),
1715		.op_code = IWM_NVM_READ_OPCODE,
1716	};
1717	struct iwm_nvm_access_resp *nvm_resp;
1718	struct iwm_rx_packet *pkt;
1719	struct iwm_host_cmd cmd = {
1720		.id = IWM_NVM_ACCESS_CMD,
1721		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1722		.data = { &nvm_access_cmd, },
1723	};
1724	int ret, bytes_read, offset_read;
1725	uint8_t *resp_data;
1726
1727	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1728
1729	ret = iwm_send_cmd(sc, &cmd);
1730	if (ret) {
1731		device_printf(sc->sc_dev,
1732		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1733		return ret;
1734	}
1735
1736	pkt = cmd.resp_pkt;
1737
1738	/* Extract NVM response */
1739	nvm_resp = (void *)pkt->data;
1740	ret = le16toh(nvm_resp->status);
1741	bytes_read = le16toh(nvm_resp->length);
1742	offset_read = le16toh(nvm_resp->offset);
1743	resp_data = nvm_resp->data;
1744	if (ret) {
1745		if ((offset != 0) &&
1746		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1747			/*
1748			 * meaning of NOT_VALID_ADDRESS:
1749			 * driver try to read chunk from address that is
1750			 * multiple of 2K and got an error since addr is empty.
1751			 * meaning of (offset != 0): driver already
1752			 * read valid data from another chunk so this case
1753			 * is not an error.
1754			 */
1755			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1756				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1757				    offset);
1758			*len = 0;
1759			ret = 0;
1760		} else {
1761			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1762				    "NVM access command failed with status %d\n", ret);
1763			ret = EIO;
1764		}
1765		goto exit;
1766	}
1767
1768	if (offset_read != offset) {
1769		device_printf(sc->sc_dev,
1770		    "NVM ACCESS response with invalid offset %d\n",
1771		    offset_read);
1772		ret = EINVAL;
1773		goto exit;
1774	}
1775
1776	if (bytes_read > length) {
1777		device_printf(sc->sc_dev,
1778		    "NVM ACCESS response with too much data "
1779		    "(%d bytes requested, %d bytes received)\n",
1780		    length, bytes_read);
1781		ret = EINVAL;
1782		goto exit;
1783	}
1784
1785	/* Write data to NVM */
1786	memcpy(data + offset, resp_data, bytes_read);
1787	*len = bytes_read;
1788
1789 exit:
1790	iwm_free_resp(sc, &cmd);
1791	return ret;
1792}
1793
1794/*
1795 * Reads an NVM section completely.
1796 * NICs prior to 7000 family don't have a real NVM, but just read
1797 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1798 * by uCode, we need to manually check in this case that we don't
1799 * overflow and try to read more than the EEPROM size.
1800 * For 7000 family NICs, we supply the maximal size we can read, and
1801 * the uCode fills the response with as much data as we can,
1802 * without overflowing, so no check is needed.
1803 */
1804static int
1805iwm_nvm_read_section(struct iwm_softc *sc,
1806	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1807{
1808	uint16_t seglen, length, offset = 0;
1809	int ret;
1810
1811	/* Set nvm section read length */
1812	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1813
1814	seglen = length;
1815
1816	/* Read the NVM until exhausted (reading less than requested) */
1817	while (seglen == length) {
1818		/* Check no memory assumptions fail and cause an overflow */
1819		if ((size_read + offset + length) >
1820		    sc->cfg->eeprom_size) {
1821			device_printf(sc->sc_dev,
1822			    "EEPROM size is too small for NVM\n");
1823			return ENOBUFS;
1824		}
1825
1826		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1827		if (ret) {
1828			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1829				    "Cannot read NVM from section %d offset %d, length %d\n",
1830				    section, offset, length);
1831			return ret;
1832		}
1833		offset += seglen;
1834	}
1835
1836	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1837		    "NVM section %d read completed\n", section);
1838	*len = offset;
1839	return 0;
1840}
1841
1842/*
1843 * BEGIN IWM_NVM_PARSE
1844 */
1845
1846/* iwlwifi/iwl-nvm-parse.c */
1847
1848/* NVM offsets (in words) definitions */
1849enum iwm_nvm_offsets {
1850	/* NVM HW-Section offset (in words) definitions */
1851	IWM_HW_ADDR = 0x15,
1852
1853/* NVM SW-Section offset (in words) definitions */
1854	IWM_NVM_SW_SECTION = 0x1C0,
1855	IWM_NVM_VERSION = 0,
1856	IWM_RADIO_CFG = 1,
1857	IWM_SKU = 2,
1858	IWM_N_HW_ADDRS = 3,
1859	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1860
1861/* NVM calibration section offset (in words) definitions */
1862	IWM_NVM_CALIB_SECTION = 0x2B8,
1863	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1864};
1865
1866enum iwm_8000_nvm_offsets {
1867	/* NVM HW-Section offset (in words) definitions */
1868	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1869	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1870	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1871	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1872	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1873
1874	/* NVM SW-Section offset (in words) definitions */
1875	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1876	IWM_NVM_VERSION_8000 = 0,
1877	IWM_RADIO_CFG_8000 = 0,
1878	IWM_SKU_8000 = 2,
1879	IWM_N_HW_ADDRS_8000 = 3,
1880
1881	/* NVM REGULATORY -Section offset (in words) definitions */
1882	IWM_NVM_CHANNELS_8000 = 0,
1883	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1884	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1885	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1886
1887	/* NVM calibration section offset (in words) definitions */
1888	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1889	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1890};
1891
1892/* SKU Capabilities (actual values from NVM definition) */
1893enum nvm_sku_bits {
1894	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1895	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1896	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1897	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1898};
1899
1900/* radio config bits (actual values from NVM definition) */
1901#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1902#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1903#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1904#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1905#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1906#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1907
1908#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1909#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1910#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1911#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1912#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1913#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1914
1915#define DEFAULT_MAX_TX_POWER 16
1916
1917/**
1918 * enum iwm_nvm_channel_flags - channel flags in NVM
1919 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1920 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1921 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1922 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1923 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1924 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1925 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1926 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1927 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1928 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1929 */
1930enum iwm_nvm_channel_flags {
1931	IWM_NVM_CHANNEL_VALID = (1 << 0),
1932	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1933	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1934	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1935	IWM_NVM_CHANNEL_DFS = (1 << 7),
1936	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1937	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1938	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1939	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1940};
1941
1942/*
1943 * Translate EEPROM flags to net80211.
1944 */
1945static uint32_t
1946iwm_eeprom_channel_flags(uint16_t ch_flags)
1947{
1948	uint32_t nflags;
1949
1950	nflags = 0;
1951	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1952		nflags |= IEEE80211_CHAN_PASSIVE;
1953	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1954		nflags |= IEEE80211_CHAN_NOADHOC;
1955	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1956		nflags |= IEEE80211_CHAN_DFS;
1957		/* Just in case. */
1958		nflags |= IEEE80211_CHAN_NOADHOC;
1959	}
1960
1961	return (nflags);
1962}
1963
/*
 * Register NVM channels [ch_idx, ch_num) with net80211.
 *
 * chans/maxchans/nchans form the output channel array passed through to
 * ieee80211_add_channel(); bands selects the PHY modes each channel is
 * added for.  Channels lacking IWM_NVM_CHANNEL_VALID are skipped.
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* The 7000 and 8000 families use different channel tables. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		/* Stop on error, e.g. when the channel array is full. */
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
2004
/*
 * Build the net80211 channel list from the channel flags stored in the
 * parsed NVM data (2GHz 11b/g channels, channel 14 as 11b-only, and
 * 5GHz 11a channels when the SKU enables the 5.2GHz band).
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[IEEE80211_MODE_BYTES];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	/* 5GHz channels, gated on the SKU's 5.2GHz capability bit. */
	if (data->sku_cap_band_52GHz_enable) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2037
/*
 * Determine the MAC address on family-8000 devices.
 *
 * Preference order: the MAC_OVERRIDE NVM section (if present and it holds
 * a plausible, non-reserved unicast address), then the WFMP PRPH
 * registers.  On total failure the address is zeroed and an error is
 * logged; the caller (iwm_set_hw_address) rejects the all-zero address.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address some NVM images ship with. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Bytes are stored most-significant first in the registers. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2096
2097static int
2098iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2099	    const uint16_t *phy_sku)
2100{
2101	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2102		return le16_to_cpup(nvm_sw + IWM_SKU);
2103
2104	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2105}
2106
2107static int
2108iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2109{
2110	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2111		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2112	else
2113		return le32_to_cpup((const uint32_t *)(nvm_sw +
2114						IWM_NVM_VERSION_8000));
2115}
2116
2117static int
2118iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2119		  const uint16_t *phy_sku)
2120{
2121        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2122                return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2123
2124        return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2125}
2126
2127static int
2128iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2129{
2130	int n_hw_addr;
2131
2132	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2133		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2134
2135	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2136
2137        return n_hw_addr & IWM_N_HW_ADDR_MASK;
2138}
2139
/*
 * Decode the radio configuration word into the nvm_data fields.  The
 * bit layout differs between pre-8000 and family-8000 devices; only the
 * 8000 layout carries the antenna masks.
 */
static void
iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
		  uint32_t radio_cfg)
{
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
		return;
	}

	/* set the radio configuration for family 8000 */
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
	/* NOTE: the 8000 "flavor" field is stored in radio_cfg_pnum. */
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
}
2160
/*
 * Populate data->hw_addr from the NVM.  Pre-8000 devices store the
 * address 16-bit-little-endian swapped in the HW section; family 8000
 * uses the MAC_OVERRIDE section or WFMP registers instead.
 *
 * Returns 0 on success, EINVAL if no valid address could be derived.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2191
/*
 * Build an iwm_nvm_data structure from the raw NVM sections.
 *
 * The returned structure (with its trailing per-channel flags array) is
 * malloc'd with M_DEVBUF; ownership passes to the caller, who frees it
 * with iwm_free_nvm_data().  Returns NULL on allocation failure or when
 * no valid MAC address is found.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;
	uint16_t lar_config;

	/* The channel-flags array trails the struct; size per family. */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	} else {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	}
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is forced off here regardless of the NVM SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* The LAR-config word moved in NVM version 0xE39. */
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
				       IWM_NVM_LAR_OFFSET_8000_OLD :
				       IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       IWM_NVM_LAR_ENABLED_8000);
	}

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		free(data, M_DEVBUF);
		return NULL;
	}

	/* Channel flags live in the SW section pre-8000, REGULATORY after. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2252
2253static void
2254iwm_free_nvm_data(struct iwm_nvm_data *data)
2255{
2256	if (data != NULL)
2257		free(data, M_DEVBUF);
2258}
2259
/*
 * Validate that the mandatory NVM sections for this device family are
 * present, then hand the section pointers to iwm_parse_nvm_data().
 * Returns the parsed (malloc'd) NVM data, or NULL on a missing section
 * or parse failure.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2312
2313static int
2314iwm_nvm_init(struct iwm_softc *sc)
2315{
2316	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2317	int i, ret, section;
2318	uint32_t size_read = 0;
2319	uint8_t *nvm_buffer, *temp;
2320	uint16_t len;
2321
2322	memset(nvm_sections, 0, sizeof(nvm_sections));
2323
2324	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2325		return EINVAL;
2326
2327	/* load NVM values from nic */
2328	/* Read From FW NVM */
2329	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2330
2331	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2332	if (!nvm_buffer)
2333		return ENOMEM;
2334	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2335		/* we override the constness for initial read */
2336		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2337					   &len, size_read);
2338		if (ret)
2339			continue;
2340		size_read += len;
2341		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2342		if (!temp) {
2343			ret = ENOMEM;
2344			break;
2345		}
2346		memcpy(temp, nvm_buffer, len);
2347
2348		nvm_sections[section].data = temp;
2349		nvm_sections[section].length = len;
2350	}
2351	if (!size_read)
2352		device_printf(sc->sc_dev, "OTP is blank\n");
2353	free(nvm_buffer, M_DEVBUF);
2354
2355	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2356	if (!sc->nvm_data)
2357		return EINVAL;
2358	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2359		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2360
2361	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2362		if (nvm_sections[i].data != NULL)
2363			free(nvm_sections[i].data, M_DEVBUF);
2364	}
2365
2366	return 0;
2367}
2368
/*
 * Upload one firmware section to device memory via the service DMA
 * channel, in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes.  Each
 * chunk is staged through the pre-allocated sc->fw_dma bounce buffer.
 * Destination addresses in the extended range need the
 * LMPM_CHICK_EXTENDED_ADDR_SPACE bit set for the duration of the write.
 *
 * Returns 0 on success or an errno from the chunk loader.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then DMA it out. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2421
2422/*
2423 * ucode
2424 */
2425static int
2426iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2427			     bus_addr_t phy_addr, uint32_t byte_cnt)
2428{
2429	int ret;
2430
2431	sc->sc_fw_chunk_done = 0;
2432
2433	if (!iwm_nic_lock(sc))
2434		return EBUSY;
2435
2436	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2437	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2438
2439	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2440	    dst_addr);
2441
2442	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2443	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2444
2445	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2446	    (iwm_get_dma_hi_addr(phy_addr)
2447	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2448
2449	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2450	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2451	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2452	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2453
2454	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2455	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2456	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2457	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2458
2459	iwm_nic_unlock(sc);
2460
2461	/* wait up to 5s for this segment to load */
2462	ret = 0;
2463	while (!sc->sc_fw_chunk_done) {
2464		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2465		if (ret)
2466			break;
2467	}
2468
2469	if (ret != 0) {
2470		device_printf(sc->sc_dev,
2471		    "fw chunk addr 0x%x len %d failed to load\n",
2472		    dst_addr, byte_cnt);
2473		return ETIMEDOUT;
2474	}
2475
2476	return 0;
2477}
2478
/*
 * Load the firmware sections belonging to one CPU on family-8000
 * devices.  *first_ucode_section is the cursor into image->fw_sect[]:
 * reset to 0 for CPU 1, advanced past the separator for CPU 2, and
 * updated on return so the next call resumes where this one stopped.
 * After each section the ucode is notified via FH_UCODE_LOAD_STATUS
 * (CPU 2 bits are shifted up by 16).
 *
 * Returns 0 on success or an errno from the section loader.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Mark this CPU's sections fully loaded in the status register. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2540
2541static int
2542iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2543	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2544{
2545	int shift_param;
2546	int i, ret = 0;
2547	uint32_t last_read_idx = 0;
2548
2549	if (cpu == 1) {
2550		shift_param = 0;
2551		*first_ucode_section = 0;
2552	} else {
2553		shift_param = 16;
2554		(*first_ucode_section)++;
2555	}
2556
2557	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2558		last_read_idx = i;
2559
2560		/*
2561		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2562		 * CPU1 to CPU2.
2563		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2564		 * CPU2 non paged to CPU2 paging sec.
2565		 */
2566		if (!image->fw_sect[i].data ||
2567		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2568		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2569			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2570				    "Break since Data not valid or Empty section, sec = %d\n",
2571				     i);
2572			break;
2573		}
2574
2575		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2576		if (ret)
2577			return ret;
2578	}
2579
2580	*first_ucode_section = last_read_idx;
2581
2582	return 0;
2583
2584}
2585
/*
 * Load a complete (non-family-8000) firmware image: CPU1's sections,
 * then - on dual-CPU images - CPU2's header address and sections.
 * Finally enables interrupts and releases the CPU reset to start the
 * firmware.  Returns 0 on success or an errno from the section loader.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc,
				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
			iwm_nic_unlock(sc);
		}

		/* load to FW the binary sections of CPU2 */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
2624
/*
 * Load a complete family-8000 firmware image.  Unlike the pre-8000
 * path, the CPU reset is released first (the secure boot ROM then
 * pulls the sections), after which CPU1's and CPU2's sections are
 * uploaded.  Returns 0 on success or an errno from the section loader.
 */
int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		    image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
		    IWM_RELEASE_CPU_RESET_BIT);
		iwm_nic_unlock(sc);
	}

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
	    &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
	    &first_ucode_section);
}
2653
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which is the only one needed while
 * firmware is being loaded (see the comment in iwm_start_fw()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2662
/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware, initialize the NIC, and upload the given
 * firmware image.  Interrupts other than FH_TX are masked for the
 * duration of the load; rfkill state is re-checked by the caller.
 * Returns 0 on success or an errno (EIO if the HW never becomes ready).
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack any pending interrupts before masking them. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2721
/*
 * Tell the firmware which TX antennas are valid, via a synchronous
 * TX_ANT_CONFIGURATION command.  Returns 0 or an errno.
 */
static int
iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
{
	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = htole32(valid_tx_ant),
	};

	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
}
2732
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the calibration-trigger masks for the
 * currently running ucode type to the firmware (synchronous command).
 * Returns 0 or an errno.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2752
/*
 * Notification-wait callback for the ALIVE response.  The response
 * layout version is inferred from the payload length (ver1, ver2, or
 * the current layout).  Extracts the error/log event table pointers,
 * the scheduler base address, and - where present - the UMAC error
 * table, and records whether the firmware reported ALIVE_STATUS_OK in
 * alive_data->valid.  Always returns TRUE (done waiting).
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		/* ver1 has no UMAC error table. */
		sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2829
2830static int
2831iwm_wait_phy_db_entry(struct iwm_softc *sc,
2832	struct iwm_rx_packet *pkt, void *data)
2833{
2834	struct iwm_phy_db *phy_db = data;
2835
2836	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2837		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2838			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2839			    __func__, pkt->hdr.code);
2840		}
2841		return TRUE;
2842	}
2843
2844	if (iwm_phy_db_set_section(phy_db, pkt)) {
2845		device_printf(sc->sc_dev,
2846		    "%s: iwm_phy_db_set_section failed\n", __func__);
2847	}
2848
2849	return FALSE;
2850}
2851
/*
 * Read the requested ucode image, start the firmware, and block until
 * the ALIVE notification arrives (or times out).  On success the
 * scheduler is configured from the ALIVE data and, if the image uses
 * paging, the paging mechanism is set up.  sc->cur_ucode is restored to
 * its previous value on any failure.  Returns 0 or an errno.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* Dump the secure boot status registers for diagnosis. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2945
2946/*
2947 * mvm misc bits
2948 */
2949
2950/*
2951 * follows iwlwifi/fw.c
2952 */
/*
 * Run the INIT firmware image.  With justnvm != 0, only the NVM is
 * read (to obtain the MAC address) and the function returns early;
 * otherwise BT-coex, Smart FIFO, TX antenna and PHY configuration are
 * sent and the function waits for the calibration-complete
 * notification.  Returns 0 on success or an errno value.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	/* Either of these notifications ends the INIT/calibration phase. */
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Arm the wait before the firmware can post a notification. */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/*
		 * NB: success path too — the "error" label merely tears
		 * down the (now unneeded) notification wait.
		 */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);

	/* Wait succeeded or timed out; the wait entry is consumed either way. */
	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3045
3046/*
3047 * receive side
3048 */
3049
3050/* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	/* NB: 'size' is unused; buffers are always IWM_RBUF_SIZE. */
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load into the spare map first so that a failure leaves the
	 * slot's current mapping (if any) untouched.
	 */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The descriptor takes a 256-byte-aligned DMA address, shifted by 8. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3094
3095/* iwlwifi: mvm/rx.c */
3096/*
3097 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3098 * values are reported by the fw as positive values - need to negate
3099 * to obtain their dBM.  Account for missing antennas by replacing 0
3100 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3101 */
3102static int
3103iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3104{
3105	int energy_a, energy_b, energy_c, max_energy;
3106	uint32_t val;
3107
3108	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3109	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3110	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3111	energy_a = energy_a ? -energy_a : -256;
3112	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3113	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3114	energy_b = energy_b ? -energy_b : -256;
3115	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3116	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3117	energy_c = energy_c ? -energy_c : -256;
3118	max_energy = MAX(energy_a, energy_b);
3119	max_energy = MAX(max_energy, energy_c);
3120
3121	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3122	    "energy In A %d B %d C %d , and max %d\n",
3123	    energy_a, energy_b, energy_c, max_energy);
3124
3125	return max_energy;
3126}
3127
3128static void
3129iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3130{
3131	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3132
3133	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3134
3135	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3136}
3137
3138/*
3139 * Retrieve the average noise (in dBm) among receivers.
3140 */
3141static int
3142iwm_get_noise(struct iwm_softc *sc,
3143    const struct iwm_mvm_statistics_rx_non_phy *stats)
3144{
3145	int i, total, nbant, noise;
3146
3147	total = nbant = noise = 0;
3148	for (i = 0; i < 3; i++) {
3149		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3150		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3151		    __func__,
3152		    i,
3153		    noise);
3154
3155		if (noise) {
3156			total += noise;
3157			nbant++;
3158		}
3159	}
3160
3161	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3162	    __func__, nbant, total);
3163#if 0
3164	/* There should be at least one antenna but check anyway. */
3165	return (nbant == 0) ? -127 : (total / nbant) - 107;
3166#else
3167	/* For now, just hard-code it to -96 to be safe */
3168	return (-96);
3169#endif
3170}
3171
3172static void
3173iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3174{
3175	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3176
3177	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3178	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3179}
3180
3181/*
3182 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3183 *
3184 * Handles the actual data of the Rx packet from the fw
3185 */
3186static boolean_t
3187iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3188	boolean_t stolen)
3189{
3190	struct ieee80211com *ic = &sc->sc_ic;
3191	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3192	struct ieee80211_frame *wh;
3193	struct ieee80211_node *ni;
3194	struct ieee80211_rx_stats rxs;
3195	struct iwm_rx_phy_info *phy_info;
3196	struct iwm_rx_mpdu_res_start *rx_res;
3197	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3198	uint32_t len;
3199	uint32_t rx_pkt_status;
3200	int rssi;
3201
3202	phy_info = &sc->sc_last_phy_info;
3203	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3204	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3205	len = le16toh(rx_res->byte_count);
3206	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3207
3208	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3209		device_printf(sc->sc_dev,
3210		    "dsp size out of range [0,20]: %d\n",
3211		    phy_info->cfg_phy_cnt);
3212		goto fail;
3213	}
3214
3215	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3216	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3217		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3219		goto fail;
3220	}
3221
3222	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3223
3224	/* Map it to relative value */
3225	rssi = rssi - sc->sc_noise;
3226
3227	/* replenish ring for the buffer we're going to feed to the sharks */
3228	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3229		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3230		    __func__);
3231		goto fail;
3232	}
3233
3234	m->m_data = pkt->data + sizeof(*rx_res);
3235	m->m_pkthdr.len = m->m_len = len;
3236
3237	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3238	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3239
3240	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3241
3242	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3243	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3244	    __func__,
3245	    le16toh(phy_info->channel),
3246	    le16toh(phy_info->phy_flags));
3247
3248	/*
3249	 * Populate an RX state struct with the provided information.
3250	 */
3251	bzero(&rxs, sizeof(rxs));
3252	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3253	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3254	rxs.c_ieee = le16toh(phy_info->channel);
3255	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3256		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3257	} else {
3258		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3259	}
3260
3261	/* rssi is in 1/2db units */
3262	rxs.c_rssi = rssi * 2;
3263	rxs.c_nf = sc->sc_noise;
3264	if (ieee80211_add_rx_params(m, &rxs) == 0) {
3265		if (ni)
3266			ieee80211_free_node(ni);
3267		goto fail;
3268	}
3269
3270	if (ieee80211_radiotap_active_vap(vap)) {
3271		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3272
3273		tap->wr_flags = 0;
3274		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3275			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3276		tap->wr_chan_freq = htole16(rxs.c_freq);
3277		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3278		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3279		tap->wr_dbm_antsignal = (int8_t)rssi;
3280		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3281		tap->wr_tsft = phy_info->system_timestamp;
3282		switch (phy_info->rate) {
3283		/* CCK rates. */
3284		case  10: tap->wr_rate =   2; break;
3285		case  20: tap->wr_rate =   4; break;
3286		case  55: tap->wr_rate =  11; break;
3287		case 110: tap->wr_rate =  22; break;
3288		/* OFDM rates. */
3289		case 0xd: tap->wr_rate =  12; break;
3290		case 0xf: tap->wr_rate =  18; break;
3291		case 0x5: tap->wr_rate =  24; break;
3292		case 0x7: tap->wr_rate =  36; break;
3293		case 0x9: tap->wr_rate =  48; break;
3294		case 0xb: tap->wr_rate =  72; break;
3295		case 0x1: tap->wr_rate =  96; break;
3296		case 0x3: tap->wr_rate = 108; break;
3297		/* Unknown rate: should not happen. */
3298		default:  tap->wr_rate =   0;
3299		}
3300	}
3301
3302	IWM_UNLOCK(sc);
3303	if (ni != NULL) {
3304		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3305		ieee80211_input_mimo(ni, m);
3306		ieee80211_free_node(ni);
3307	} else {
3308		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3309		ieee80211_input_mimo_all(ic, m);
3310	}
3311	IWM_LOCK(sc);
3312
3313	return TRUE;
3314
3315fail:
3316	counter_u64_add(ic->ic_ierrors, 1);
3317	return FALSE;
3318}
3319
3320static int
3321iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3322	struct iwm_node *in)
3323{
3324	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3325	struct ieee80211_node *ni = &in->in_ni;
3326	struct ieee80211vap *vap = ni->ni_vap;
3327	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3328	int failack = tx_resp->failure_frame;
3329
3330	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3331
3332	/* Update rate control statistics. */
3333	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3334	    __func__,
3335	    (int) le16toh(tx_resp->status.status),
3336	    (int) le16toh(tx_resp->status.sequence),
3337	    tx_resp->frame_count,
3338	    tx_resp->bt_kill_count,
3339	    tx_resp->failure_rts,
3340	    tx_resp->failure_frame,
3341	    le32toh(tx_resp->initial_rate),
3342	    (int) le16toh(tx_resp->wireless_media_time));
3343
3344	if (status != IWM_TX_STATUS_SUCCESS &&
3345	    status != IWM_TX_STATUS_DIRECT_DONE) {
3346		ieee80211_ratectl_tx_complete(vap, ni,
3347		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3348		return (1);
3349	} else {
3350		ieee80211_ratectl_tx_complete(vap, ni,
3351		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3352		return (0);
3353	}
3354}
3355
/*
 * Handle a TX command response: complete the corresponding TX slot,
 * release its DMA mapping and mbuf, and restart transmission if the
 * queue drops below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A completion means the hardware is making progress. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Hand the mbuf (and its node reference) back to net80211. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Unthrottle the queue once it drains below the low-water mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3395
3396/*
3397 * transmit side
3398 */
3399
3400/*
3401 * Process a "command done" firmware notification.  This is where we wakeup
3402 * processes waiting for a synchronous command completion.
3403 * from if_iwn
3404 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake anyone sleeping in iwm_send_cmd() on this slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/* Sanity check: acks should arrive in ring order. */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: release the cmd-in-flight state. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3444
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * NOTE(review): compiled out.  Writes the per-queue byte-count entry
 * for slot 'idx' into the scheduler table shared with the firmware.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* presumably a duplicate entry the hardware reads past the ring end;
	 * TODO confirm against the firmware interface description. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3476
3477/*
3478 * Take an 802.11 (non-n) rate, find the relevant rate
3479 * table entry.  return the index into in_ridx[].
3480 *
3481 * The caller then uses that index back into in_ridx
3482 * to figure out the rate index programmed /into/
3483 * the firmware for this given node.
3484 */
3485static int
3486iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3487    uint8_t rate)
3488{
3489	int i;
3490	uint8_t r;
3491
3492	for (i = 0; i < nitems(in->in_ridx); i++) {
3493		r = iwm_rates[in->in_ridx[i]].rate;
3494		if (rate == r)
3495			return (i);
3496	}
3497
3498	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3499	    "%s: couldn't find an entry for rate=%d\n",
3500	    __func__,
3501	    rate);
3502
3503	/* XXX Return the first */
3504	/* XXX TODO: have it return the /lowest/ */
3505	return (0);
3506}
3507
3508static int
3509iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3510{
3511	int i;
3512
3513	for (i = 0; i < nitems(iwm_rates); i++) {
3514		if (iwm_rates[i].rate == rate)
3515			return (i);
3516	}
3517	/* XXX error? */
3518	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3519	    "%s: couldn't find an entry for rate=%d\n",
3520	    __func__,
3521	    rate);
3522	return (0);
3523}
3524
3525/*
3526 * Fill in the rate related information for a transmit command.
3527 */
/*
 * Choose a TX rate for 'm' and fill the rate-related fields of the
 * firmware TX command (retry limits, initial rate index and
 * rate_n_flags).  Returns a pointer to the chosen iwm_rates[] entry so
 * the caller can report it (e.g. via radiotap).
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type;
	int ridx, rate_flags;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/* Fixed rates for management/multicast/EAPOL; RS table for data. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		/* EAPOL frames use the (robust) management rate. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL\n", __func__);
	} else if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;

		/* for data frames, use RS table */
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);

		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
		    __func__, tp->mgmtrate);
	}

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: frame type=%d txrate %d\n",
	        __func__, type, iwm_rates[ridx].rate);

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3604
/* Length of TX buffer 0 (the start of the command) in the TFD. */
#define TB0_SIZE 16
/*
 * Build and queue one frame on TX ring 'ac': fill the firmware TX
 * command, encrypt if required, DMA-map the payload and write the TFD,
 * then advance the ring's write pointer.  Returns 0 on success or an
 * errno value; on failure the mbuf has been freed.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Power-management frame timeout depends on the frame subtype. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0+TB1 carry the command/header; the rest carry the payload. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush data, command and descriptor before the doorbell write. */
	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3822
3823static int
3824iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3825    const struct ieee80211_bpf_params *params)
3826{
3827	struct ieee80211com *ic = ni->ni_ic;
3828	struct iwm_softc *sc = ic->ic_softc;
3829	int error = 0;
3830
3831	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3832	    "->%s begin\n", __func__);
3833
3834	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3835		m_freem(m);
3836		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3837		    "<-%s not RUNNING\n", __func__);
3838		return (ENETDOWN);
3839        }
3840
3841	IWM_LOCK(sc);
3842	/* XXX fix this */
3843        if (params == NULL) {
3844		error = iwm_tx(sc, m, ni, 0);
3845	} else {
3846		error = iwm_tx(sc, m, ni, 0);
3847	}
3848	sc->sc_tx_timer = 5;
3849	IWM_UNLOCK(sc);
3850
3851        return (error);
3852}
3853
3854/*
3855 * mvm/tx.c
3856 */
3857
3858/*
3859 * Note that there are transports that buffer frames before they reach
3860 * the firmware. This means that after flush_tx_path is called, the
3861 * queue might not be empty. The race-free way to handle this is to:
3862 * 1) set the station as draining
3863 * 2) flush the Tx path
3864 * 3) wait for the transport queues to be empty
3865 */
3866int
3867iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3868{
3869	int ret;
3870	struct iwm_tx_path_flush_cmd flush_cmd = {
3871		.queues_ctl = htole32(tfd_msk),
3872		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3873	};
3874
3875	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3876	    sizeof(flush_cmd), &flush_cmd);
3877	if (ret)
3878                device_printf(sc->sc_dev,
3879		    "Flushing tx queue failed: %d\n", ret);
3880	return ret;
3881}
3882
3883/*
3884 * BEGIN mvm/quota.c
3885 */
3886
3887static int
3888iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3889{
3890	struct iwm_time_quota_cmd cmd;
3891	int i, idx, ret, num_active_macs, quota, quota_rem;
3892	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3893	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3894	uint16_t id;
3895
3896	memset(&cmd, 0, sizeof(cmd));
3897
3898	/* currently, PHY ID == binding ID */
3899	if (ivp) {
3900		id = ivp->phy_ctxt->id;
3901		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3902		colors[id] = ivp->phy_ctxt->color;
3903
3904		if (1)
3905			n_ifs[id] = 1;
3906	}
3907
3908	/*
3909	 * The FW's scheduling session consists of
3910	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3911	 * equally between all the bindings that require quota
3912	 */
3913	num_active_macs = 0;
3914	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3915		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3916		num_active_macs += n_ifs[i];
3917	}
3918
3919	quota = 0;
3920	quota_rem = 0;
3921	if (num_active_macs) {
3922		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3923		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3924	}
3925
3926	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3927		if (colors[i] < 0)
3928			continue;
3929
3930		cmd.quotas[idx].id_and_color =
3931			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3932
3933		if (n_ifs[i] <= 0) {
3934			cmd.quotas[idx].quota = htole32(0);
3935			cmd.quotas[idx].max_duration = htole32(0);
3936		} else {
3937			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3938			cmd.quotas[idx].max_duration = htole32(0);
3939		}
3940		idx++;
3941	}
3942
3943	/* Give the remainder of the session to the first binding */
3944	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3945
3946	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3947	    sizeof(cmd), &cmd);
3948	if (ret)
3949		device_printf(sc->sc_dev,
3950		    "%s: Failed to send quota: %d\n", __func__, ret);
3951	return ret;
3952}
3953
3954/*
3955 * END mvm/quota.c
3956 */
3957
3958/*
3959 * ieee80211 routines
3960 */
3961
3962/*
3963 * Change to AUTH state in 80211 state machine.  Roughly matches what
3964 * Linux does in bss_info_changed().
3965 */
3966static int
3967iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3968{
3969	struct ieee80211_node *ni;
3970	struct iwm_node *in;
3971	struct iwm_vap *iv = IWM_VAP(vap);
3972	uint32_t duration;
3973	int error;
3974
3975	/*
3976	 * XXX i have a feeling that the vap node is being
3977	 * freed from underneath us. Grr.
3978	 */
3979	ni = ieee80211_ref_node(vap->iv_bss);
3980	in = IWM_NODE(ni);
3981	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3982	    "%s: called; vap=%p, bss ni=%p\n",
3983	    __func__,
3984	    vap,
3985	    ni);
3986
3987	in->in_assoc = 0;
3988
3989	/*
3990	 * Firmware bug - it'll crash if the beacon interval is less
3991	 * than 16. We can't avoid connecting at all, so refuse the
3992	 * station state change, this will cause net80211 to abandon
3993	 * attempts to connect to this AP, and eventually wpa_s will
3994	 * blacklist the AP...
3995	 */
3996	if (ni->ni_intval < 16) {
3997		device_printf(sc->sc_dev,
3998		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3999		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
4000		error = EINVAL;
4001		goto out;
4002	}
4003
4004	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4005	if (error != 0)
4006		return error;
4007
4008	error = iwm_allow_mcast(vap, sc);
4009	if (error) {
4010		device_printf(sc->sc_dev,
4011		    "%s: failed to set multicast\n", __func__);
4012		goto out;
4013	}
4014
4015	/*
4016	 * This is where it deviates from what Linux does.
4017	 *
4018	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4019	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4020	 * and always does a mac_ctx_changed().
4021	 *
4022	 * The openbsd port doesn't attempt to do that - it reset things
4023	 * at odd states and does the add here.
4024	 *
4025	 * So, until the state handling is fixed (ie, we never reset
4026	 * the NIC except for a firmware failure, which should drag
4027	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4028	 * contexts that are required), let's do a dirty hack here.
4029	 */
4030	if (iv->is_uploaded) {
4031		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4032			device_printf(sc->sc_dev,
4033			    "%s: failed to update MAC\n", __func__);
4034			goto out;
4035		}
4036		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4037		    in->in_ni.ni_chan, 1, 1)) != 0) {
4038			device_printf(sc->sc_dev,
4039			    "%s: failed update phy ctxt\n", __func__);
4040			goto out;
4041		}
4042		iv->phy_ctxt = &sc->sc_phyctxt[0];
4043
4044		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4045			device_printf(sc->sc_dev,
4046			    "%s: binding update cmd\n", __func__);
4047			goto out;
4048		}
4049		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4050			device_printf(sc->sc_dev,
4051			    "%s: failed to update sta\n", __func__);
4052			goto out;
4053		}
4054	} else {
4055		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4056			device_printf(sc->sc_dev,
4057			    "%s: failed to add MAC\n", __func__);
4058			goto out;
4059		}
4060		if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4061			device_printf(sc->sc_dev,
4062			    "%s: failed to update power management\n",
4063			    __func__);
4064			goto out;
4065		}
4066		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4067		    in->in_ni.ni_chan, 1, 1)) != 0) {
4068			device_printf(sc->sc_dev,
4069			    "%s: failed add phy ctxt!\n", __func__);
4070			error = ETIMEDOUT;
4071			goto out;
4072		}
4073		iv->phy_ctxt = &sc->sc_phyctxt[0];
4074
4075		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4076			device_printf(sc->sc_dev,
4077			    "%s: binding add cmd\n", __func__);
4078			goto out;
4079		}
4080		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4081			device_printf(sc->sc_dev,
4082			    "%s: failed to add sta\n", __func__);
4083			goto out;
4084		}
4085	}
4086
4087	/*
4088	 * Prevent the FW from wandering off channel during association
4089	 * by "protecting" the session with a time event.
4090	 */
4091	/* XXX duration is in units of TU, not MS */
4092	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4093	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4094	DELAY(100);
4095
4096	error = 0;
4097out:
4098	ieee80211_free_node(ni);
4099	return (error);
4100}
4101
4102static int
4103iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4104{
4105	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4106	int error;
4107
4108	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4109		device_printf(sc->sc_dev,
4110		    "%s: failed to update STA\n", __func__);
4111		return error;
4112	}
4113
4114	in->in_assoc = 1;
4115	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4116		device_printf(sc->sc_dev,
4117		    "%s: failed to update MAC\n", __func__);
4118		return error;
4119	}
4120
4121	return 0;
4122}
4123
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Tear down the firmware association state by doing a full device
	 * reset (drain + flush TX, stop, re-init) instead of the "proper"
	 * MVM teardown sequence, which hangs this driver (see below).
	 * 'in' may be NULL; if set, its in_assoc flag is cleared.
	 * Always returns 0.
	 */
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, vap);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	iwm_xmit_queue_drain(sc);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* Dead code: the "proper" teardown kept for reference (see above). */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4194
4195static struct ieee80211_node *
4196iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4197{
4198	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4199	    M_NOWAIT | M_ZERO);
4200}
4201
4202uint8_t
4203iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4204{
4205	int i;
4206	uint8_t rval;
4207
4208	for (i = 0; i < rs->rs_nrates; i++) {
4209		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4210		if (rval == iwm_rates[ridx].rate)
4211			return rs->rs_rates[i];
4212	}
4213
4214	return 0;
4215}
4216
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Build the firmware link-quality (rate selection) command for
	 * this node: map the node's 802.11 rate set onto hardware rate
	 * indices (highest rate first) and fill in->in_lq's rs_table.
	 * The command itself is sent later by the caller.
	 */
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/*
	 * NOTE(review): if a rate was not found above, in_ridx[i] keeps
	 * the memset(-1) value and the iwm_rates[ridx] lookup in the loop
	 * below would index before the table — TODO confirm whether all
	 * negotiated rates are guaranteed to be in iwm_rates[].
	 */

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4335
4336static int
4337iwm_media_change(struct ifnet *ifp)
4338{
4339	struct ieee80211vap *vap = ifp->if_softc;
4340	struct ieee80211com *ic = vap->iv_ic;
4341	struct iwm_softc *sc = ic->ic_softc;
4342	int error;
4343
4344	error = ieee80211_media_change(ifp);
4345	if (error != ENETRESET)
4346		return error;
4347
4348	IWM_LOCK(sc);
4349	if (ic->ic_nrunning > 0) {
4350		iwm_stop(sc);
4351		iwm_init(sc);
4352	}
4353	IWM_UNLOCK(sc);
4354	return error;
4355}
4356
4357
/*
 * net80211 state-machine hook.  Drives the firmware through the
 * INIT/SCAN/AUTH/ASSOC/RUN transitions and chains to the saved
 * net80211 handler (ivp->iv_newstate) at the end.  Note the careful
 * lock juggling: net80211 calls us with the IEEE80211 lock held, but
 * driver work needs the IWM lock, so the two are swapped repeatedly.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * RUN -> INIT: run the net80211 transition first,
			 * then fully reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			/*
			 * Leaving AUTH/ASSOC: tear down the station,
			 * MAC context and binding created by iwm_auth().
			 * Errors are logged but not fatal here.
			 */
			int myerr;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
                        if (error) {
                                device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to change mac context: %d\n",
                                    __func__, error);
                        }
                        error = iwm_mvm_binding_remove_vif(sc, ivp);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to remove channel ctx: %d\n",
                                    __func__, error);
                        }
			ivp->phy_ctxt = NULL;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* net80211 transition already done above. */
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		iwm_setrates(sc, in);

		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
		}

		iwm_mvm_led_enable(sc);
		break;

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4518
4519void
4520iwm_endscan_cb(void *arg, int pending)
4521{
4522	struct iwm_softc *sc = arg;
4523	struct ieee80211com *ic = &sc->sc_ic;
4524
4525	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4526	    "%s: scan ended\n",
4527	    __func__);
4528
4529	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4530}
4531
4532/*
4533 * Aging and idle timeouts for the different possible scenarios
4534 * in default configuration
4535 */
4536static const uint32_t
4537iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4538	{
4539		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4540		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4541	},
4542	{
4543		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4544		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4545	},
4546	{
4547		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4548		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4549	},
4550	{
4551		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4552		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4553	},
4554	{
4555		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4556		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4557	},
4558};
4559
4560/*
4561 * Aging and idle timeouts for the different possible scenarios
4562 * in single BSS MAC configuration.
4563 */
4564static const uint32_t
4565iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4566	{
4567		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4568		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4569	},
4570	{
4571		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4572		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4573	},
4574	{
4575		htole32(IWM_SF_MCAST_AGING_TIMER),
4576		htole32(IWM_SF_MCAST_IDLE_TIMER)
4577	},
4578	{
4579		htole32(IWM_SF_BA_AGING_TIMER),
4580		htole32(IWM_SF_BA_IDLE_TIMER)
4581	},
4582	{
4583		htole32(IWM_SF_TX_RE_AGING_TIMER),
4584		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4585	},
4586};
4587
4588static void
4589iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4590    struct ieee80211_node *ni)
4591{
4592	int i, j, watermark;
4593
4594	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4595
4596	/*
4597	 * If we are in association flow - check antenna configuration
4598	 * capabilities of the AP station, and choose the watermark accordingly.
4599	 */
4600	if (ni) {
4601		if (ni->ni_flags & IEEE80211_NODE_HT) {
4602#ifdef notyet
4603			if (ni->ni_rxmcs[2] != 0)
4604				watermark = IWM_SF_W_MARK_MIMO3;
4605			else if (ni->ni_rxmcs[1] != 0)
4606				watermark = IWM_SF_W_MARK_MIMO2;
4607			else
4608#endif
4609				watermark = IWM_SF_W_MARK_SISO;
4610		} else {
4611			watermark = IWM_SF_W_MARK_LEGACY;
4612		}
4613	/* default watermark value for unassociated mode. */
4614	} else {
4615		watermark = IWM_SF_W_MARK_MIMO2;
4616	}
4617	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4618
4619	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4620		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4621			sf_cmd->long_delay_timeouts[i][j] =
4622					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4623		}
4624	}
4625
4626	if (ni) {
4627		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4628		       sizeof(iwm_sf_full_timeout));
4629	} else {
4630		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4631		       sizeof(iwm_sf_full_timeout_def));
4632	}
4633}
4634
4635static int
4636iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4637{
4638	struct ieee80211com *ic = &sc->sc_ic;
4639	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4640	struct iwm_sf_cfg_cmd sf_cmd = {
4641		.state = htole32(IWM_SF_FULL_ON),
4642	};
4643	int ret = 0;
4644
4645	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4646		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4647
4648	switch (new_state) {
4649	case IWM_SF_UNINIT:
4650	case IWM_SF_INIT_OFF:
4651		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4652		break;
4653	case IWM_SF_FULL_ON:
4654		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4655		break;
4656	default:
4657		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4658		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4659			  new_state);
4660		return EINVAL;
4661	}
4662
4663	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4664				   sizeof(sf_cmd), &sf_cmd);
4665	return ret;
4666}
4667
4668static int
4669iwm_send_bt_init_conf(struct iwm_softc *sc)
4670{
4671	struct iwm_bt_coex_cmd bt_cmd;
4672
4673	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4674	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4675
4676	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4677	    &bt_cmd);
4678}
4679
4680static boolean_t
4681iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4682{
4683	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4684	boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4685					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4686
4687	if (iwm_lar_disable)
4688		return FALSE;
4689
4690	/*
4691	 * Enable LAR only if it is supported by the FW (TLV) &&
4692	 * enabled in the NVM
4693	 */
4694	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4695		return nvm_lar && tlv_lar;
4696	else
4697		return tlv_lar;
4698}
4699
4700static boolean_t
4701iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4702{
4703	return fw_has_api(&sc->ucode_capa,
4704			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4705	       fw_has_capa(&sc->ucode_capa,
4706			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4707}
4708
4709static int
4710iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4711{
4712	struct iwm_mcc_update_cmd mcc_cmd;
4713	struct iwm_host_cmd hcmd = {
4714		.id = IWM_MCC_UPDATE_CMD,
4715		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4716		.data = { &mcc_cmd },
4717	};
4718	int ret;
4719#ifdef IWM_DEBUG
4720	struct iwm_rx_packet *pkt;
4721	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4722	struct iwm_mcc_update_resp *mcc_resp;
4723	int n_channels;
4724	uint16_t mcc;
4725#endif
4726	int resp_v2 = fw_has_capa(&sc->ucode_capa,
4727	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4728
4729	if (!iwm_mvm_is_lar_supported(sc)) {
4730		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4731		    __func__);
4732		return 0;
4733	}
4734
4735	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4736	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4737	if (iwm_mvm_is_wifi_mcc_supported(sc))
4738		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4739	else
4740		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4741
4742	if (resp_v2)
4743		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4744	else
4745		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4746
4747	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4748	    "send MCC update to FW with '%c%c' src = %d\n",
4749	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4750
4751	ret = iwm_send_cmd(sc, &hcmd);
4752	if (ret)
4753		return ret;
4754
4755#ifdef IWM_DEBUG
4756	pkt = hcmd.resp_pkt;
4757
4758	/* Extract MCC response */
4759	if (resp_v2) {
4760		mcc_resp = (void *)pkt->data;
4761		mcc = mcc_resp->mcc;
4762		n_channels =  le32toh(mcc_resp->n_channels);
4763	} else {
4764		mcc_resp_v1 = (void *)pkt->data;
4765		mcc = mcc_resp_v1->mcc;
4766		n_channels =  le32toh(mcc_resp_v1->n_channels);
4767	}
4768
4769	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4770	if (mcc == 0)
4771		mcc = 0x3030;  /* "00" - world */
4772
4773	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4774	    "regulatory domain '%c%c' (%d channels available)\n",
4775	    mcc >> 8, mcc & 0xff, n_channels);
4776#endif
4777	iwm_free_resp(sc, &hcmd);
4778
4779	return 0;
4780}
4781
4782static void
4783iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4784{
4785	struct iwm_host_cmd cmd = {
4786		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4787		.len = { sizeof(uint32_t), },
4788		.data = { &backoff, },
4789	};
4790
4791	if (iwm_send_cmd(sc, &cmd) != 0) {
4792		device_printf(sc->sc_dev,
4793		    "failed to change thermal tx backoff\n");
4794	}
4795}
4796
/*
 * Full hardware bring-up: run the INIT firmware, restart, load the
 * regular firmware and configure BT coex, antennas, PHY contexts,
 * power, MCC, scan and TX queues.  On any failure after firmware load
 * the device is stopped again.  The step order below matters.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	sc->sc_ps_disabled = FALSE;
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc))) != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" asks the firmware for the world regulatory domain. */
	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
		goto error;

	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4903
4904/* Allow multicast from our BSSID. */
4905static int
4906iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4907{
4908	struct ieee80211_node *ni = vap->iv_bss;
4909	struct iwm_mcast_filter_cmd *cmd;
4910	size_t size;
4911	int error;
4912
4913	size = roundup(sizeof(*cmd), 4);
4914	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4915	if (cmd == NULL)
4916		return ENOMEM;
4917	cmd->filter_own = 1;
4918	cmd->port_id = 0;
4919	cmd->count = 0;
4920	cmd->pass_all = 1;
4921	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4922
4923	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4924	    IWM_CMD_SYNC, size, cmd);
4925	free(cmd, M_DEVBUF);
4926
4927	return (error);
4928}
4929
4930/*
4931 * ifnet interfaces
4932 */
4933
4934static void
4935iwm_init(struct iwm_softc *sc)
4936{
4937	int error;
4938
4939	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4940		return;
4941	}
4942	sc->sc_generation++;
4943	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4944
4945	if ((error = iwm_init_hw(sc)) != 0) {
4946		printf("iwm_init_hw failed %d\n", error);
4947		iwm_stop(sc);
4948		return;
4949	}
4950
4951	/*
4952	 * Ok, firmware loaded and we are jogging
4953	 */
4954	sc->sc_flags |= IWM_FLAG_HW_INITED;
4955	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4956}
4957
4958static int
4959iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4960{
4961	struct iwm_softc *sc;
4962	int error;
4963
4964	sc = ic->ic_softc;
4965
4966	IWM_LOCK(sc);
4967	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4968		IWM_UNLOCK(sc);
4969		return (ENXIO);
4970	}
4971	error = mbufq_enqueue(&sc->sc_snd, m);
4972	if (error) {
4973		IWM_UNLOCK(sc);
4974		return (error);
4975	}
4976	iwm_start(sc);
4977	IWM_UNLOCK(sc);
4978	return (0);
4979}
4980
4981/*
4982 * Dequeue packets from sendq and call send.
4983 */
static void
iwm_start(struct iwm_softc *sc)
{
	/*
	 * Drain the software send queue into the hardware while TX
	 * queues have room (qfullmsk == 0).  Frames that fail iwm_tx()
	 * are counted as output errors and their node reference dropped.
	 * Each successful transmit re-arms the 15-second TX watchdog.
	 */
	struct ieee80211_node *ni;
	struct mbuf *m;
	int ac = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
	while (sc->qfullmsk == 0 &&
		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
		/* The destination node rides in the mbuf's rcvif field. */
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (iwm_tx(sc, m, ni, ac) != 0) {
			if_inc_counter(ni->ni_vap->iv_ifp,
			    IFCOUNTER_OERRORS, 1);
			ieee80211_free_node(ni);
			continue;
		}
		sc->sc_tx_timer = 15;
	}
	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
}
5005
static void
iwm_stop(struct iwm_softc *sc)
{
	/*
	 * Take the interface down: clear the inited flag first so
	 * concurrent paths see us as stopped, cancel LED blinking and
	 * the TX watchdog, then power the device off.  Callers in this
	 * file invoke this with the IWM lock held.
	 */

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5018
5019static void
5020iwm_watchdog(void *arg)
5021{
5022	struct iwm_softc *sc = arg;
5023	struct ieee80211com *ic = &sc->sc_ic;
5024
5025	if (sc->sc_tx_timer > 0) {
5026		if (--sc->sc_tx_timer == 0) {
5027			device_printf(sc->sc_dev, "device timeout\n");
5028#ifdef IWM_DEBUG
5029			iwm_nic_error(sc);
5030#endif
5031			ieee80211_restart_all(ic);
5032			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5033			return;
5034		}
5035	}
5036	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5037}
5038
5039static void
5040iwm_parent(struct ieee80211com *ic)
5041{
5042	struct iwm_softc *sc = ic->ic_softc;
5043	int startall = 0;
5044
5045	IWM_LOCK(sc);
5046	if (ic->ic_nrunning > 0) {
5047		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5048			iwm_init(sc);
5049			startall = 1;
5050		}
5051	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5052		iwm_stop(sc);
5053	IWM_UNLOCK(sc);
5054	if (startall)
5055		ieee80211_start_all(ic);
5056}
5057
5058/*
5059 * The interrupt side of things
5060 */
5061
5062/*
5063 * error dumping routines are from iwlwifi/mvm/utils.c
5064 */
5065
5066/*
5067 * Note: This structure is read from the device with IO accesses,
5068 * and the reading already does the endian conversion. As it is
5069 * read with uint32_t-sized accesses, any members with a different size
5070 * need to be ordered correctly though!
5071 */
struct iwm_error_event_table {
	/*
	 * Keep field order and sizes exactly in sync with the firmware
	 * layout (LOG_ERROR_TABLE_API_S_VER_3); the table is read with
	 * uint32_t-sized IO accesses, see the comment above.
	 */
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5119
5120/*
5121 * UMAC error struct - relevant starting from family 8000 chip.
5122 * Note: This structure is read from the device with IO accesses,
5123 * and the reading already does the endian conversion. As it is
5124 * read with u32-sized accesses, any members with a different size
5125 * need to be ordered correctly though!
5126 */
struct iwm_umac_error_event_table {
	/*
	 * Keep field order and sizes exactly in sync with the firmware
	 * layout; read with u32-sized accesses, see the comment above.
	 */
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5144
/* Byte offsets/sizes used when validating the firmware error log dump. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5147
5148#ifdef IWM_DEBUG
/*
 * Firmware error id -> human-readable name map, consulted only by
 * iwm_desc_lookup().  The terminating "ADVANCED_SYSASSERT" entry is the
 * catch-all for unknown ids.  File-local and read-only, so give it
 * static const linkage instead of exporting a mutable global.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5170
5171static const char *
5172iwm_desc_lookup(uint32_t num)
5173{
5174	int i;
5175
5176	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5177		if (advanced_lookup[i].num == num)
5178			return advanced_lookup[i].name;
5179
5180	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5181	return advanced_lookup[i].name;
5182}
5183
/*
 * Read and pretty-print the UMAC firmware error log.  Called from
 * iwm_nic_error() when the firmware advertised a UMAC error table
 * address (family 8000 and later; see struct comment above).
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Firmware pointers below 0x800000 are treated as bogus. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* NOTE(review): the count argument appears to be in 32-bit words,
	 * not bytes (sizeof(table)/sizeof(uint32_t)) — confirm against
	 * iwm_read_mem(). */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5230
5231/*
5232 * Support for dumping the error log seemed like a good idea ...
5233 * but it's mostly hex junk and the only sensible thing is the
5234 * hw/ucode revision (which we know anyway).  Since it's here,
5235 * I'll just leave it in, just in case e.g. the Intel guys want to
5236 * help us decipher some "ADVANCED_SYSASSERT" later.
5237 */
/*
 * Read and pretty-print the (LMAC) firmware error log, then chain to
 * the UMAC log if the firmware advertised one.  Debug builds only;
 * called on SW_ERR interrupts and TX watchdog timeouts.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Firmware pointers below 0x800000 are treated as bogus. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* NOTE(review): count argument appears to be in 32-bit words,
	 * not bytes — confirm against iwm_read_mem(). */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5310#endif
5311
/*
 * Process one receive buffer, which may contain several firmware
 * response/notification packets packed back to back.  Packets are
 * walked front to back and dispatched by command code until the
 * buffer space runs out or an invalid/empty header terminates the
 * sequence.  Called with the IWM lock held (from iwm_notif_intr()).
 */
static void
iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_cmd_response *cresp;
	struct mbuf *m1;
	uint32_t offset = 0;
	uint32_t maxoff = IWM_RBUF_SIZE;
	uint32_t nextoff;
	boolean_t stolen = FALSE;

/* True if a packet header starting at offset (a) still fits the buffer. */
#define HAVEROOM(a)	\
    ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)

	while (HAVEROOM(offset)) {
		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
		    offset);
		int qid, idx, code, len;

		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
			break;
		}

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x\n",
		    qid & ~0x80, pkt->hdr.idx, code);

		len = iwm_rx_packet_len(pkt);
		len += sizeof(uint32_t); /* account for status word */
		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);

		/* Wake any synchronous waiter blocked on this notification. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
			break;

		case IWM_REPLY_RX_MPDU_CMD: {
			/*
			 * If this is the last frame in the RX buffer, we
			 * can directly feed the mbuf to the sharks here.
			 */
			struct iwm_rx_packet *nextpkt = mtodoff(m,
			    struct iwm_rx_packet *, nextoff);
			if (!HAVEROOM(nextoff) ||
			    (nextpkt->hdr.code == 0 &&
			     (nextpkt->hdr.qid & ~0x80) == 0 &&
			     nextpkt->hdr.idx == 0) ||
			    (nextpkt->len_n_flags ==
			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
					/* m was consumed; don't free it below. */
					stolen = FALSE;
					/* Make sure we abort the loop */
					nextoff = maxoff;
				}
				break;
			}

			/*
			 * Use m_copym instead of m_split, because that
			 * makes it easier to keep a valid rx buffer in
			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
			 *
			 * We need to start m_copym() at offset 0, to get the
			 * M_PKTHDR flag preserved.
			 */
			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m1) {
				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
					stolen = TRUE;
				else
					m_freem(m1);
			}
			break;
		}

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break;
		}

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION:
			iwm_mvm_handle_rx_statistics(sc, pkt);
			break;

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* NOTE(review): copies the full sc_cmd_resp size
			 * regardless of the actual packet length — appears
			 * to rely on the RX buffer being large enough;
			 * confirm. */
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Extract the two-letter country code from the MCC. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		/* Plain command acknowledgements: stash the response if a
		 * synchronous sender is waiting for it. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
				 IWM_FW_PAGING_BLOCK_CMD):
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case IWM_PHY_DB_CMD:
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
 			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWM_DEBUG_LOG_MSG:
			break;

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid & ~0x80, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(qid & (1 << 7)))
			iwm_cmd_done(sc, pkt);

		offset = nextoff;
	}
	/*
	 * NOTE(review): when stolen is TRUE a copy (m1) was handed to the
	 * stack and the original ring mbuf is freed here — presumably
	 * iwm_mvm_rx_rx_mpdu() replaced the ring slot's mbuf in that
	 * case; confirm against its implementation.
	 */
	if (stolen)
		m_freem(m);
#undef HAVEROOM
}
5647
5648/*
5649 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5650 * Basic structure from if_iwn
5651 */
/*
 * Drain the RX ring: walk entries from our cursor up to the firmware's
 * closed_rb_num write pointer, handing each buffer to iwm_handle_rxb().
 * Called from iwm_intr() with the IWM lock held.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Make the firmware-written status area visible to the CPU. */
	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Firmware's most recently closed receive-buffer index. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];

		/* Sync the buffer's DMA memory before reading it. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
		iwm_handle_rxb(sc, data->m);

		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
	}

	/*
	 * Tell the firmware that it can reuse the ring entries that
	 * we have just processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
}
5688
/*
 * Main interrupt handler.  Collects the interrupt cause bits (either
 * from the ICT table or directly from CSR registers), then dispatches:
 * firmware SW errors trigger a VAP restart, HW errors and rfkill stop
 * the device, FH_TX wakes the firmware-load waiter, and RX causes are
 * drained via iwm_notif_intr().  Interrupts are re-enabled on exit
 * except on the fatal-error paths.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	/* NOTE(review): rv is written but never read in this function. */
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we figure out the cause. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* NOTE(review): htole32 where le32toh seems intended;
		 * both are the same byte swap, so behavior is identical. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the consumed ICT slot and advance. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* Returns without re-enabling interrupts; the restart
		 * path re-initializes the device. */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5842
5843/*
5844 * Autoconf glue-sniffing
5845 */
/* PCI ids of all adapters supported by this driver. */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
#define	PCI_PRODUCT_INTEL_WL_8265_1	0x24fd
5858
/*
 * Device id -> per-chip configuration map, consulted by both
 * iwm_probe() and iwm_dev_check().
 */
static const struct iwm_devices {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
};
5875
5876static int
5877iwm_probe(device_t dev)
5878{
5879	int i;
5880
5881	for (i = 0; i < nitems(iwm_devices); i++) {
5882		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5883		    pci_get_device(dev) == iwm_devices[i].device) {
5884			device_set_desc(dev, iwm_devices[i].cfg->name);
5885			return (BUS_PROBE_DEFAULT);
5886		}
5887	}
5888
5889	return (ENXIO);
5890}
5891
5892static int
5893iwm_dev_check(device_t dev)
5894{
5895	struct iwm_softc *sc;
5896	uint16_t devid;
5897	int i;
5898
5899	sc = device_get_softc(dev);
5900
5901	devid = pci_get_device(dev);
5902	for (i = 0; i < nitems(iwm_devices); i++) {
5903		if (iwm_devices[i].device == devid) {
5904			sc->cfg = iwm_devices[i].cfg;
5905			return (0);
5906		}
5907	}
5908	device_printf(dev, "unknown adapter type\n");
5909	return ENXIO;
5910}
5911
/* PCI registers */
/* Retry-timeout config register; cleared in iwm_pci_attach(). */
#define PCI_CFG_RETRY_TIMEOUT	0x041
5914
5915static int
5916iwm_pci_attach(device_t dev)
5917{
5918	struct iwm_softc *sc;
5919	int count, error, rid;
5920	uint16_t reg;
5921
5922	sc = device_get_softc(dev);
5923
5924	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5925	 * PCI Tx retries from interfering with C3 CPU state */
5926	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5927
5928	/* Enable bus-mastering and hardware bug workaround. */
5929	pci_enable_busmaster(dev);
5930	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5931	/* if !MSI */
5932	if (reg & PCIM_STATUS_INTxSTATE) {
5933		reg &= ~PCIM_STATUS_INTxSTATE;
5934	}
5935	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5936
5937	rid = PCIR_BAR(0);
5938	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5939	    RF_ACTIVE);
5940	if (sc->sc_mem == NULL) {
5941		device_printf(sc->sc_dev, "can't map mem space\n");
5942		return (ENXIO);
5943	}
5944	sc->sc_st = rman_get_bustag(sc->sc_mem);
5945	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5946
5947	/* Install interrupt handler. */
5948	count = 1;
5949	rid = 0;
5950	if (pci_alloc_msi(dev, &count) == 0)
5951		rid = 1;
5952	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5953	    (rid != 0 ? 0 : RF_SHAREABLE));
5954	if (sc->sc_irq == NULL) {
5955		device_printf(dev, "can't map interrupt\n");
5956			return (ENXIO);
5957	}
5958	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5959	    NULL, iwm_intr, sc, &sc->sc_ih);
5960	if (sc->sc_ih == NULL) {
5961		device_printf(dev, "can't establish interrupt");
5962			return (ENXIO);
5963	}
5964	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5965
5966	return (0);
5967}
5968
5969static void
5970iwm_pci_detach(device_t dev)
5971{
5972	struct iwm_softc *sc = device_get_softc(dev);
5973
5974	if (sc->sc_irq != NULL) {
5975		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5976		bus_release_resource(dev, SYS_RES_IRQ,
5977		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5978		pci_release_msi(dev);
5979        }
5980	if (sc->sc_mem != NULL)
5981		bus_release_resource(dev, SYS_RES_MEMORY,
5982		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5983}
5984
5985
5986
/*
 * Device attach: set up all software state (lock, queues, callouts,
 * tasks), allocate the DMA resources the device needs, map the device
 * and determine its exact hardware revision, then defer the rest of
 * bring-up (firmware init, NVM read, net80211 attach) to iwm_preinit()
 * via a config intrhook.  Returns 0 on success; on any failure,
 * previously allocated state is torn down via iwm_detach_local() and
 * ENXIO is returned.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	/* Mark attached early so the fail path runs iwm_detach_local(). */
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait for the MAC clock before touching the AUX bus. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			/* Fold a detected C-step into the stored hw_rev. */
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Advertise full-offload scanning */
	ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
	/* Pre-initialize the PHY context bookkeeping slots. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/*
	 * Defer the remainder of attach (firmware init, NVM read,
	 * net80211 attach) to iwm_preinit(), run from a config intrhook.
	 */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6186
6187static int
6188iwm_is_valid_ether_addr(uint8_t *addr)
6189{
6190	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6191
6192	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6193		return (FALSE);
6194
6195	return (TRUE);
6196}
6197
6198static int
6199iwm_wme_update(struct ieee80211com *ic)
6200{
6201#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6202	struct iwm_softc *sc = ic->ic_softc;
6203	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6204	struct iwm_vap *ivp = IWM_VAP(vap);
6205	struct iwm_node *in;
6206	struct wmeParams tmp[WME_NUM_AC];
6207	int aci, error;
6208
6209	if (vap == NULL)
6210		return (0);
6211
6212	IEEE80211_LOCK(ic);
6213	for (aci = 0; aci < WME_NUM_AC; aci++)
6214		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6215	IEEE80211_UNLOCK(ic);
6216
6217	IWM_LOCK(sc);
6218	for (aci = 0; aci < WME_NUM_AC; aci++) {
6219		const struct wmeParams *ac = &tmp[aci];
6220		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6221		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6222		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6223		ivp->queue_params[aci].edca_txop =
6224		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6225	}
6226	ivp->have_wme = TRUE;
6227	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6228		in = IWM_NODE(vap->iv_bss);
6229		if (in->in_assoc) {
6230			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6231				device_printf(sc->sc_dev,
6232				    "%s: failed to update MAC\n", __func__);
6233			}
6234		}
6235	}
6236	IWM_UNLOCK(sc);
6237
6238	return (0);
6239#undef IWM_EXP2
6240}
6241
/*
 * Deferred attach continuation, run from the config intrhook installed
 * by iwm_attach(): power the NIC up, run the init firmware once to
 * obtain NVM data (MAC address, supported bands), then complete the
 * net80211 attach.  On failure the whole device is torn down with
 * iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init ucode to read the NVM, then stop the device
	 * again; it stays down until the interface is brought up.
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 methods. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* Attach finished: release the boot-time hook. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6312
6313/*
6314 * Attach the interface to 802.11 radiotap.
6315 */
6316static void
6317iwm_radiotap_attach(struct iwm_softc *sc)
6318{
6319        struct ieee80211com *ic = &sc->sc_ic;
6320
6321	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6322	    "->%s begin\n", __func__);
6323        ieee80211_radiotap_attach(ic,
6324            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6325                IWM_TX_RADIOTAP_PRESENT,
6326            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6327                IWM_RX_RADIOTAP_PRESENT);
6328	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6329	    "->%s end\n", __func__);
6330}
6331
6332static struct ieee80211vap *
6333iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6334    enum ieee80211_opmode opmode, int flags,
6335    const uint8_t bssid[IEEE80211_ADDR_LEN],
6336    const uint8_t mac[IEEE80211_ADDR_LEN])
6337{
6338	struct iwm_vap *ivp;
6339	struct ieee80211vap *vap;
6340
6341	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6342		return NULL;
6343	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6344	vap = &ivp->iv_vap;
6345	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6346	vap->iv_bmissthreshold = 10;            /* override default */
6347	/* Override with driver methods. */
6348	ivp->iv_newstate = vap->iv_newstate;
6349	vap->iv_newstate = iwm_newstate;
6350
6351	ivp->id = IWM_DEFAULT_MACID;
6352	ivp->color = IWM_DEFAULT_COLOR;
6353
6354	ivp->have_wme = FALSE;
6355
6356	ieee80211_ratectl_init(vap);
6357	/* Complete setup. */
6358	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6359	    mac);
6360	ic->ic_opmode = opmode;
6361
6362	return vap;
6363}
6364
6365static void
6366iwm_vap_delete(struct ieee80211vap *vap)
6367{
6368	struct iwm_vap *ivp = IWM_VAP(vap);
6369
6370	ieee80211_ratectl_deinit(vap);
6371	ieee80211_vap_detach(vap);
6372	free(ivp, M_80211_VAP);
6373}
6374
6375static void
6376iwm_xmit_queue_drain(struct iwm_softc *sc)
6377{
6378	struct mbuf *m;
6379	struct ieee80211_node *ni;
6380
6381	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6382		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6383		ieee80211_free_node(ni);
6384		m_freem(m);
6385	}
6386}
6387
/*
 * net80211 scan_start method: start a firmware-offloaded scan, using
 * the UMAC or LMAC variant depending on the ucode's capabilities.
 * On failure the scan is cancelled back into net80211.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		/* Drop the driver lock before calling back into net80211. */
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}
6415
/*
 * net80211 scan_end method: stop LED blinking (re-enabling the LED if
 * the vap is running), tell the firmware to stop any scan still marked
 * as running, and cancel a possibly-pending end-scan task.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6444
/* net80211 update_mcast method: intentionally a no-op in this driver. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6449
/*
 * net80211 set_channel method: intentionally a no-op; presumably channel
 * programming happens via firmware PHY-context commands elsewhere —
 * confirm against the mvm code.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6454
/*
 * net80211 scan_curchan method: no-op.  Scanning is advertised as
 * full-offload (IEEE80211_FEXT_SCAN_OFFLOAD), so the firmware handles
 * per-channel stepping itself.
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6459
/*
 * net80211 scan_mindwell method: no-op.  Scanning is advertised as
 * full-offload (IEEE80211_FEXT_SCAN_OFFLOAD), so there is no per-channel
 * minimum-dwell handling to do.  The superfluous bare "return;" was
 * dropped for consistency with the other empty scan callbacks.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6465
/*
 * Serialized stop/restart of the device: waits until no other thread
 * holds IWM_FLAG_BUSY, stops the interface, and restarts it if it is
 * still marked running.  Called directly from iwm_resume() as well.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* IWM_FLAG_BUSY serializes init/stop sequences; wait our turn. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	/* Wake any thread sleeping in the loop above. */
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6482
6483static int
6484iwm_resume(device_t dev)
6485{
6486	struct iwm_softc *sc = device_get_softc(dev);
6487	int do_reinit = 0;
6488
6489	/*
6490	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6491	 * PCI Tx retries from interfering with C3 CPU state.
6492	 */
6493	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6494	iwm_init_task(device_get_softc(dev));
6495
6496	IWM_LOCK(sc);
6497	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6498		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6499		do_reinit = 1;
6500	}
6501	IWM_UNLOCK(sc);
6502
6503	if (do_reinit)
6504		ieee80211_resume_all(&sc->sc_ic);
6505
6506	return 0;
6507}
6508
6509static int
6510iwm_suspend(device_t dev)
6511{
6512	int do_stop = 0;
6513	struct iwm_softc *sc = device_get_softc(dev);
6514
6515	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6516
6517	ieee80211_suspend_all(&sc->sc_ic);
6518
6519	if (do_stop) {
6520		IWM_LOCK(sc);
6521		iwm_stop(sc);
6522		sc->sc_flags |= IWM_FLAG_SCANNING;
6523		IWM_UNLOCK(sc);
6524	}
6525
6526	return (0);
6527}
6528
/*
 * Common teardown for both device_detach and attach-failure paths.
 * do_net80211 selects whether the net80211 state (end-scan task, xmit
 * queue, ifattach) is also dismantled; the attach/preinit failure
 * paths pass 0 since they fail before ieee80211_ifattach().  The
 * sc_attached guard makes repeated calls harmless.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Run the teardown at most once. */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Quiesce timers and the hardware before freeing anything. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}
6587
6588static int
6589iwm_detach(device_t dev)
6590{
6591	struct iwm_softc *sc = device_get_softc(dev);
6592
6593	return (iwm_detach_local(sc, 1));
6594}
6595
/* newbus glue: device method table for the PCI-attached iwm driver. */
static device_method_t iwm_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         iwm_probe),
        DEVMETHOD(device_attach,        iwm_attach),
        DEVMETHOD(device_detach,        iwm_detach),
        DEVMETHOD(device_suspend,       iwm_suspend),
        DEVMETHOD(device_resume,        iwm_resume),

        DEVMETHOD_END
};

static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register on the PCI bus; requires firmware(9), pci and wlan modules. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6619