/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/

/*
 * Copyright (c) 2014 genua mbh <info@genua.de>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 * Driver version we are currently based off of is
 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 330204 2018-03-01 06:36:41Z eadler $");

#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <net/bpf.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>

#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_config.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_notif_wait.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>

#include <dev/iwm/if_iwm_pcie_trans.h>
#include <dev/iwm/if_iwm_led.h>
#include <dev/iwm/if_iwm_fw.h>

const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");

const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");

#define IWM_NUM_2GHZ_CHANNELS	14
#define IWM_N_HW_ADDR_MASK	0xF

/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
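
/*
 * Illustrative sketch (not compiled in): looking up the PLCP value for a
 * net80211 rate given in 500 kbps units via the table above, e.g. rate 12
 * (6 Mbps) yields IWM_RATE_6M_PLCP.  The helper name is hypothetical and
 * not part of the driver.
 */
#if 0
static uint8_t
iwm_rate_to_plcp_example(uint8_t rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return iwm_rates[i].plcp;
	}
	return 0xff;	/* no match; 0xff as a hypothetical invalid marker */
}
#endif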

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};

static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
                                           enum iwm_ucode_type,
                                           const uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static void	iwm_fw_info_free(struct iwm_fw_info *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_alloc_fwmem(struct iwm_softc *);
static int	iwm_alloc_sched(struct iwm_softc *);
static int	iwm_alloc_kw(struct iwm_softc *);
static int	iwm_alloc_ict(struct iwm_softc *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
                                  int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_mvm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
                                   uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
				     uint16_t *, uint32_t);
static uint32_t	iwm_eeprom_channel_flags(uint16_t);
static void	iwm_add_channel_band(struct iwm_softc *,
		    struct ieee80211_channel[], int, int *, int, size_t,
		    const uint8_t[]);
static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
		    struct ieee80211_channel[]);
static struct iwm_nvm_data *
	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *, const uint16_t *,
			   const uint16_t *);
static void	iwm_free_nvm_data(struct iwm_nvm_data *);
static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
					       struct iwm_nvm_data *,
					       const uint16_t *,
					       const uint16_t *);
static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
			    const uint16_t *);
static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
				  const uint16_t *);
static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
				   const uint16_t *);
static void	iwm_set_radio_cfg(const struct iwm_softc *,
				  struct iwm_nvm_data *, uint32_t);
static struct iwm_nvm_data *
	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
				      const struct iwm_fw_desc *);
static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
					     bus_addr_t, uint32_t);
static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
						const struct iwm_fw_sects *,
						int, int *);
static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
					   const struct iwm_fw_sects *,
					   int, int *);
static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
					       const struct iwm_fw_sects *);
static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
					  const struct iwm_fw_sects *);
static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
                                              enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
					    struct iwm_rx_phy_info *);
static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
                                      struct iwm_rx_packet *,
                                      struct iwm_rx_data *);
static int	iwm_get_noise(struct iwm_softc *sc,
		    const struct iwm_mvm_statistics_rx_non_phy *);
static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *);
static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
                                         struct iwm_rx_packet *,
				         struct iwm_node *);
static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
                                  struct iwm_rx_data *);
static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
                                 uint16_t);
#endif
static const struct iwm_rate *
	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
			struct mbuf *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
                       struct ieee80211_node *, int);
static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
			     const struct ieee80211_bpf_params *);
static int	iwm_mvm_flush_tx_path(struct iwm_softc *sc,
				      uint32_t tfd_msk, uint32_t flags);
static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
					        struct iwm_mvm_add_sta_cmd *,
                                                int *);
static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
                                       int);
static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
                                           struct iwm_int_sta *,
				           const uint8_t *, uint16_t, uint16_t);
static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int	iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
		iwm_node_alloc(struct ieee80211vap *,
		               const uint8_t[IEEE80211_ADDR_LEN]);
static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int	iwm_media_change(struct ifnet *);
static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	iwm_endscan_cb(void *, int);
static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
					struct iwm_sf_cfg_cmd *,
					struct ieee80211_node *);
static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static void	iwm_init(struct iwm_softc *);
static void	iwm_start(struct iwm_softc *);
static void	iwm_stop(struct iwm_softc *);
static void	iwm_watchdog(void *);
static void	iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
		iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static void	iwm_intr(void *);
static int	iwm_attach(device_t);
static int	iwm_is_valid_ether_addr(uint8_t *);
static void	iwm_preinit(void *);
static int	iwm_detach_local(struct iwm_softc *sc, int);
static void	iwm_init_task(void *);
static void	iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
		iwm_vap_create(struct ieee80211com *,
		               const char [IFNAMSIZ], int,
		               enum ieee80211_opmode, int,
		               const uint8_t [IEEE80211_ADDR_LEN],
		               const uint8_t [IEEE80211_ADDR_LEN]);
static void	iwm_vap_delete(struct ieee80211vap *);
static void	iwm_scan_start(struct ieee80211com *);
static void	iwm_scan_end(struct ieee80211com *);
static void	iwm_update_mcast(struct ieee80211com *);
static void	iwm_set_channel(struct ieee80211com *);
static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
static int	iwm_detach(device_t);

/*
 * Firmware parser.
 */

static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
	const struct iwm_fw_cscheme_list *l = (const void *)data;

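	/*
	 * The payload is a counted list: check that both the list header
	 * and all l->size cipher scheme entries fit within dlen.
	 */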
420	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
421		return EINVAL;
422
423	/* we don't actually store anything for now, always use s/w crypto */
424
425	return 0;
426}
427
428static int
429iwm_firmware_store_section(struct iwm_softc *sc,
430    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
431{
432	struct iwm_fw_sects *fws;
433	struct iwm_fw_desc *fwone;
434
435	if (type >= IWM_UCODE_TYPE_MAX)
436		return EINVAL;
437	if (dlen < sizeof(uint32_t))
438		return EINVAL;
439
440	fws = &sc->sc_fw.fw_sects[type];
441	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
442		return EINVAL;
443
444	fwone = &fws->fw_sect[fws->fw_count];
445
446	/* first 32bit are device load offset */
447	memcpy(&fwone->offset, data, sizeof(uint32_t));
448
449	/* rest is data */
450	fwone->data = data + sizeof(uint32_t);
451	fwone->len = dlen - sizeof(uint32_t);
452
453	fws->fw_count++;
454
455	return 0;
456}
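
/*
 * Illustrative sketch (not compiled in): the layout each
 * IWM_UCODE_TLV_SEC_* payload follows, as parsed above -- a 32-bit
 * little-endian device load address followed by the section image.
 * The struct name is hypothetical.
 */
#if 0
struct iwm_fw_section_blob {
	uint32_t offset;	/* device load address (little-endian) */
	uint8_t  data[];	/* dlen - sizeof(uint32_t) bytes of image */
} __packed;
#endif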

#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		device_printf(sc->sc_dev,
		    "Wrong ucode_type %u for default "
		    "calibration.\n", ucode_type);
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
			struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_api *ucode_api = (const void *)data;
	uint32_t api_index = le32toh(ucode_api->api_index);
	uint32_t api_flags = le32toh(ucode_api->api_flags);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
		device_printf(sc->sc_dev,
		    "api flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_api, i + 32 * api_index);
	}

	return 0;
}

static int
iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
			   struct iwm_ucode_capabilities *capa)
{
	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
	uint32_t api_index = le32toh(ucode_capa->api_index);
	uint32_t api_flags = le32toh(ucode_capa->api_capa);
	int i;

	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
		device_printf(sc->sc_dev,
		    "capa flags index %d larger than supported by driver\n",
		    api_index);
		/* don't return an error so we can load FW that has more bits */
		return 0;
	}

	for (i = 0; i < 32; i++) {
		if (api_flags & (1U << i))
			setbit(capa->enabled_capa, i + 32 * api_index);
	}

	return 0;
}
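
/*
 * Note: both TLV handlers above use the same scheme -- a 32-bit word of
 * flags plus an index selecting which 32-bit slice of the capability
 * bitmap that word describes.  For example, api_index 1 with bit 5 set
 * enables overall bit 1 * 32 + 5 = 37 of enabled_api.
 */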

static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	const struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
	enum iwm_ucode_tlv_type tlv_type;
	const struct firmware *fwp;
	const uint8_t *data;
	uint32_t usniffer_img;
	uint32_t paging_mem_size;
	int num_of_cpus;
	int error = 0;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_DONE &&
	    ucode_type != IWM_UCODE_INIT)
		return 0;

	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
	fw->fw_status = IWM_FW_STATUS_INPROGRESS;

	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/*
	 * Load firmware into driver memory.
	 * fw_fp will be set.
	 */
	IWM_UNLOCK(sc);
	fwp = firmware_get(sc->cfg->fw_name);
	IWM_LOCK(sc);
	if (fwp == NULL) {
		device_printf(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->cfg->fw_name, error);
		goto out;
	}
	fw->fw_fp = fwp;

	/* (Re-)Initialize default values. */
	capa->flags = 0;
	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Parse firmware contents
	 */

	uhdr = (const void *)fw->fw_fp->data;
	if (*(const uint32_t *)fw->fw_fp->data != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		device_printf(sc->sc_dev, "invalid firmware %s\n",
		    sc->cfg->fw_name);
		error = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_fp->datasize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		const void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			device_printf(sc->sc_dev,
			    "firmware too short: %zu bytes\n",
			    len);
			error = EINVAL;
			goto parse_out;
		}

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->max_probe_length =
			    le32toh(*(const uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (capa->max_probe_length >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from the
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			capa->flags = le32toh(*(const uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_store_cscheme(): returned %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				error = EINVAL;
				goto parse_out;
			}
			num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
			if (num_of_cpus == 2) {
				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
					TRUE;
				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
					TRUE;
			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
				device_printf(sc->sc_dev,
				    "%s: Driver supports only 1 or 2 CPUs\n",
				    __func__);
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_DEF_CALIB: tlv_len (%d) != sizeof(iwm_tlv_calib_data) (%d)\n",
				    __func__,
				    (int) tlv_len,
				    (int) sizeof(struct iwm_tlv_calib_data));
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
				device_printf(sc->sc_dev,
				    "%s: iwm_set_default_calib() failed: %d\n",
				    __func__,
				    error);
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				device_printf(sc->sc_dev,
				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) != sizeof(uint32_t)\n",
				    __func__,
				    (int) tlv_len);
				goto parse_out;
			}
			sc->sc_fw.phy_config =
			    le32toh(*(const uint32_t *)tlv_data);
			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_TX_CHAIN) >>
						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
						  IWM_FW_PHY_CFG_RX_CHAIN) >>
						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			if (tlv_len != sizeof(struct iwm_ucode_api)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
				error = EINVAL;
				goto parse_out;
			}
			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
				error = EINVAL;
				goto parse_out;
			}
			break;
		}

		case 48: /* undocumented TLV */
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
			    tlv_len)) != 0)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);

			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
			    "%s: Paging: paging enabled (size = %u bytes)\n",
			    __func__, paging_mem_size);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				device_printf(sc->sc_dev,
					"%s: Paging: driver supports up to %u bytes for paging image\n",
					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
				error = EINVAL;
				goto out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				device_printf(sc->sc_dev,
				    "%s: Paging: image size isn't a multiple of %u\n",
				    __func__, IWM_FW_PAGING_SIZE);
				error = EINVAL;
				goto out;
			}

			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
			    paging_mem_size;
			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
			    paging_mem_size;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			capa->n_scan_channels =
			    le32toh(*(const uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				error = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((const uint32_t *)tlv_data)[0]),
			    le32toh(((const uint32_t *)tlv_data)[1]),
			    le32toh(((const uint32_t *)tlv_data)[2]));
			break;

		case IWM_UCODE_TLV_FW_MEM_SEG:
			break;

		default:
			device_printf(sc->sc_dev,
			    "%s: unknown firmware section %d, abort\n",
			    __func__, tlv_type);
			error = EINVAL;
			goto parse_out;
		}

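		/* TLV payloads are padded to 32-bit alignment; skip the padding too. */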
		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(error == 0, ("unhandled error"));

 parse_out:
	if (error) {
		device_printf(sc->sc_dev, "firmware parse error %d, "
		    "section type %d\n", error, tlv_type);
	}

 out:
	if (error) {
		fw->fw_status = IWM_FW_STATUS_NONE;
		if (fw->fw_fp != NULL)
			iwm_fw_info_free(fw);
	} else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	return error;
}

/*
 * DMA resource routines
 */

/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}

/* tx scheduler rings.  not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}

/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}

/* interrupt cause table */
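/*
 * The table must be aligned to 1 << IWM_ICT_PADDR_SHIFT bytes so that
 * its physical base address, shifted right by IWM_ICT_PADDR_SHIFT,
 * fits the base field of IWM_CSR_DRAM_INT_TBL_REG (see iwm_ict_reset()).
 */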
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA tag, error %d\n",
		    __func__, error);
		goto fail;
	}

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;
	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate command space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
	    nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

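		/*
		 * Each slot borrows its command buffer from cmd_dma;
		 * scratch_paddr records the bus address of the scratch
		 * field inside that command.
		 */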
		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}

/*
 * High-level hardware frobbing routines
 */

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

/* iwlwifi pcie/trans.c */

/*
 * Since this .. hard-resets things, it's time to actually
 * mark the first vap (if any) as having no mac context.
 * It's annoying, but since the driver is potentially being
 * stop/start'ed whilst active (thanks openbsd port!) we
 * have to correctly track this.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

/* iwlwifi: mvm/ops.c */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A: the NIC may be stuck in a reset state after an early PCIe
	 * power-off (PCIe power is lost before PERST# is asserted), causing
	 * the ME FW to lose ownership and be unable to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}

static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}

static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}

const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};

static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* deactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}

static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}

/*
 * NVM read access and content parsing.  We do not support
 * external NVM or writing NVM.
 * iwlwifi/mvm/nvm.c
 */

/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
1741	if (ret) {
1742		if ((offset != 0) &&
1743		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1744			/*
1745			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
1746			 * read a chunk from an address that is a multiple of
1747			 * 2K and got an error because that address is empty.
1748			 * Meaning of (offset != 0): the driver has already
1749			 * read valid data from another chunk, so this case
1750			 * is not an error.
1751			 */
1752			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1753				    "NVM access command failed on offset 0x%x since the section size is a multiple of 2K\n",
1754				    offset);
1755			*len = 0;
1756			ret = 0;
1757		} else {
1758			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1759				    "NVM access command failed with status %d\n", ret);
1760			ret = EIO;
1761		}
1762		goto exit;
1763	}
1764
1765	if (offset_read != offset) {
1766		device_printf(sc->sc_dev,
1767		    "NVM ACCESS response with invalid offset %d\n",
1768		    offset_read);
1769		ret = EINVAL;
1770		goto exit;
1771	}
1772
1773	if (bytes_read > length) {
1774		device_printf(sc->sc_dev,
1775		    "NVM ACCESS response with too much data "
1776		    "(%d bytes requested, %d bytes received)\n",
1777		    length, bytes_read);
1778		ret = EINVAL;
1779		goto exit;
1780	}
1781
1782	/* Copy the chunk we just read into the destination buffer. */
1783	memcpy(data + offset, resp_data, bytes_read);
1784	*len = bytes_read;
1785
1786 exit:
1787	iwm_free_resp(sc, &cmd);
1788	return ret;
1789}
1790
1791/*
1792 * Reads an NVM section completely.
1793 * NICs prior to the 7000 family don't have a real NVM, but just read
1794 * section 0, which is the EEPROM.  Because EEPROM reads are not
1795 * bounded by the uCode, in this case we must manually check that we
1796 * don't overflow by trying to read more than the EEPROM size.
1797 * For 7000-family NICs, we supply the maximal size we can read, and
1798 * the uCode fills the response with as much data as fits without
1799 * overflowing, so no check is needed.
1800 */
1801static int
1802iwm_nvm_read_section(struct iwm_softc *sc,
1803	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1804{
1805	uint16_t seglen, length, offset = 0;
1806	int ret;
1807
1808	/* Set nvm section read length */
1809	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1810
1811	seglen = length;
1812
1813	/* Read the NVM until exhausted (reading less than requested) */
1814	while (seglen == length) {
1815		/* Make sure this read cannot overflow the destination buffer. */
1816		if ((size_read + offset + length) >
1817		    sc->cfg->eeprom_size) {
1818			device_printf(sc->sc_dev,
1819			    "EEPROM size is too small for NVM\n");
1820			return ENOBUFS;
1821		}
1822
1823		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1824		if (ret) {
1825			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1826				    "Cannot read NVM from section %d offset %d, length %d\n",
1827				    section, offset, length);
1828			return ret;
1829		}
1830		offset += seglen;
1831	}
1832
1833	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1834		    "NVM section %d read completed\n", section);
1835	*len = offset;
1836	return 0;
1837}
1838
1839/*
1840 * BEGIN IWM_NVM_PARSE
1841 */
1842
1843/* iwlwifi/iwl-nvm-parse.c */
1844
1845/* NVM offsets (in words) definitions */
1846enum iwm_nvm_offsets {
1847	/* NVM HW-Section offset (in words) definitions */
1848	IWM_HW_ADDR = 0x15,
1849
1850/* NVM SW-Section offset (in words) definitions */
1851	IWM_NVM_SW_SECTION = 0x1C0,
1852	IWM_NVM_VERSION = 0,
1853	IWM_RADIO_CFG = 1,
1854	IWM_SKU = 2,
1855	IWM_N_HW_ADDRS = 3,
1856	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1857
1858/* NVM calibration section offset (in words) definitions */
1859	IWM_NVM_CALIB_SECTION = 0x2B8,
1860	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1861};
1862
1863enum iwm_8000_nvm_offsets {
1864	/* NVM HW-Section offset (in words) definitions */
1865	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1866	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1867	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1868	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1869	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1870
1871	/* NVM SW-Section offset (in words) definitions */
1872	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1873	IWM_NVM_VERSION_8000 = 0,
1874	IWM_RADIO_CFG_8000 = 0,
1875	IWM_SKU_8000 = 2,
1876	IWM_N_HW_ADDRS_8000 = 3,
1877
1878	/* NVM REGULATORY -Section offset (in words) definitions */
1879	IWM_NVM_CHANNELS_8000 = 0,
1880	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1881	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1882	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1883
1884	/* NVM calibration section offset (in words) definitions */
1885	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1886	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1887};
1888
1889/* SKU Capabilities (actual values from NVM definition) */
1890enum nvm_sku_bits {
1891	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1892	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1893	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1894	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1895};
1896
1897/* radio config bits (actual values from NVM definition) */
1898#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1899#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1900#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1901#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1902#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1903#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1904
1905#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1906#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1907#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1908#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1909#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1910#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1911
1912#define DEFAULT_MAX_TX_POWER 16
1913
1914/**
1915 * enum iwm_nvm_channel_flags - channel flags in NVM
1916 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1917 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1918 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1919 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1920 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1921 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1922 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1923 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1924 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1925 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1926 */
1927enum iwm_nvm_channel_flags {
1928	IWM_NVM_CHANNEL_VALID = (1 << 0),
1929	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1930	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1931	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1932	IWM_NVM_CHANNEL_DFS = (1 << 7),
1933	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1934	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1935	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1936	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1937};
1938
1939/*
1940 * Translate EEPROM flags to net80211.
1941 */
1942static uint32_t
1943iwm_eeprom_channel_flags(uint16_t ch_flags)
1944{
1945	uint32_t nflags;
1946
1947	nflags = 0;
1948	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1949		nflags |= IEEE80211_CHAN_PASSIVE;
1950	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1951		nflags |= IEEE80211_CHAN_NOADHOC;
1952	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1953		nflags |= IEEE80211_CHAN_DFS;
1954		/* Just in case. */
1955		nflags |= IEEE80211_CHAN_NOADHOC;
1956	}
1957
1958	return (nflags);
1959}
1960
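/*
 * Walk the NVM channel flags from ch_idx up to ch_num, skip channels
 * that are not marked valid, and add the rest to the net80211 channel
 * list with flags translated by iwm_eeprom_channel_flags() above.
 */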
1961static void
1962iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1963    int maxchans, int *nchans, int ch_idx, size_t ch_num,
1964    const uint8_t bands[])
1965{
1966	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1967	uint32_t nflags;
1968	uint16_t ch_flags;
1969	uint8_t ieee;
1970	int error;
1971
1972	for (; ch_idx < ch_num; ch_idx++) {
1973		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1974		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1975			ieee = iwm_nvm_channels[ch_idx];
1976		else
1977			ieee = iwm_nvm_channels_8000[ch_idx];
1978
1979		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1980			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1981			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1982			    ieee, ch_flags,
1983			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1984			    "5.2" : "2.4");
1985			continue;
1986		}
1987
1988		nflags = iwm_eeprom_channel_flags(ch_flags);
1989		error = ieee80211_add_channel(chans, maxchans, nchans,
1990		    ieee, 0, 0, nflags, bands);
1991		if (error != 0)
1992			break;
1993
1994		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1995		    "Ch. %d Flags %x [%sGHz] - Added\n",
1996		    ieee, ch_flags,
1997		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1998		    "5.2" : "2.4");
1999	}
2000}
2001
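/*
 * Build the net80211 channel list from the NVM channel flags:
 * channels 1-13 are 11b/11g, channel 14 is 11b only, and the 5 GHz
 * channels are added only if the SKU enables the 5.2 GHz band.
 */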
2002static void
2003iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2004    struct ieee80211_channel chans[])
2005{
2006	struct iwm_softc *sc = ic->ic_softc;
2007	struct iwm_nvm_data *data = sc->nvm_data;
2008	uint8_t bands[IEEE80211_MODE_BYTES];
2009	size_t ch_num;
2010
2011	memset(bands, 0, sizeof(bands));
2012	/* 1-13: 11b/g channels. */
2013	setbit(bands, IEEE80211_MODE_11B);
2014	setbit(bands, IEEE80211_MODE_11G);
2015	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2016	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2017
2018	/* 14: 11b channel only. */
2019	clrbit(bands, IEEE80211_MODE_11G);
2020	iwm_add_channel_band(sc, chans, maxchans, nchans,
2021	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2022
2023	if (data->sku_cap_band_52GHz_enable) {
2024		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2025			ch_num = nitems(iwm_nvm_channels);
2026		else
2027			ch_num = nitems(iwm_nvm_channels_8000);
2028		memset(bands, 0, sizeof(bands));
2029		setbit(bands, IEEE80211_MODE_11A);
2030		iwm_add_channel_band(sc, chans, maxchans, nchans,
2031		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2032	}
2033}
2034
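/*
 * Derive the MAC address on family-8000 devices.  The MAC address
 * override section is preferred (and used as-is, without byte
 * swapping); the WFMP registers serve as a fallback when the override
 * is the reserved, broadcast, multicast, or an otherwise invalid
 * address.
 */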
2035static void
2036iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2037	const uint16_t *mac_override, const uint16_t *nvm_hw)
2038{
2039	const uint8_t *hw_addr;
2040
2041	if (mac_override) {
2042		static const uint8_t reserved_mac[] = {
2043			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2044		};
2045
2046		hw_addr = (const uint8_t *)(mac_override +
2047				 IWM_MAC_ADDRESS_OVERRIDE_8000);
2048
2049		/*
2050		 * Store the MAC address from the MAC address override (MAO)
2051		 * section.  No byte swapping is required in the MAO section.
2052		 */
2053		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2054
2055		/*
2056		 * Force the use of the OTP MAC address in case of reserved MAC
2057		 * address in the NVM, or if address is given but invalid.
2058		 */
2059		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2060		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2061		    iwm_is_valid_ether_addr(data->hw_addr) &&
2062		    !IEEE80211_IS_MULTICAST(data->hw_addr))
2063			return;
2064
2065		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2066		    "%s: mac address from nvm override section invalid\n",
2067		    __func__);
2068	}
2069
2070	if (nvm_hw) {
2071		/* read the mac address from WFMP registers */
2072		uint32_t mac_addr0 =
2073		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2074		uint32_t mac_addr1 =
2075		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2076
2077		hw_addr = (const uint8_t *)&mac_addr0;
2078		data->hw_addr[0] = hw_addr[3];
2079		data->hw_addr[1] = hw_addr[2];
2080		data->hw_addr[2] = hw_addr[1];
2081		data->hw_addr[3] = hw_addr[0];
2082
2083		hw_addr = (const uint8_t *)&mac_addr1;
2084		data->hw_addr[4] = hw_addr[1];
2085		data->hw_addr[5] = hw_addr[0];
2086
2087		return;
2088	}
2089
2090	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2091	memset(data->hw_addr, 0, sizeof(data->hw_addr));
2092}
2093
2094static int
2095iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2096	    const uint16_t *phy_sku)
2097{
2098	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2099		return le16_to_cpup(nvm_sw + IWM_SKU);
2100
2101	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2102}
2103
2104static int
2105iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2106{
2107	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2108		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2109	else
2110		return le32_to_cpup((const uint32_t *)(nvm_sw +
2111						IWM_NVM_VERSION_8000));
2112}
2113
2114static int
2115iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2116		  const uint16_t *phy_sku)
2117{
2118	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2119		return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2120
2121	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2122}
2123
2124static int
2125iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2126{
2127	int n_hw_addr;
2128
2129	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2130		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2131
2132	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2133
2134	return n_hw_addr & IWM_N_HW_ADDR_MASK;
2135}
2136
2137static void
2138iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2139		  uint32_t radio_cfg)
2140{
2141	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2142		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2143		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2144		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2145		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2146		return;
2147	}
2148
2149	/* set the radio configuration for family 8000 */
2150	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2151	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2152	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2153	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2154	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2155	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2156}
2157
2158static int
2159iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2160		   const uint16_t *nvm_hw, const uint16_t *mac_override)
2161{
2162#ifdef notyet /* for FAMILY 9000 */
2163	if (cfg->mac_addr_from_csr) {
2164		iwm_set_hw_address_from_csr(sc, data);
2165	} else
2166#endif
2167	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2168		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2169
2170		/* The byte order is little endian 16-bit: bytes appear as 2,1,4,3,6,5. */
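		/*
		 * For example, NVM bytes 11:22:33:44:55:66 produce the
		 * MAC address 22:11:44:33:66:55.
		 */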
2171		data->hw_addr[0] = hw_addr[1];
2172		data->hw_addr[1] = hw_addr[0];
2173		data->hw_addr[2] = hw_addr[3];
2174		data->hw_addr[3] = hw_addr[2];
2175		data->hw_addr[4] = hw_addr[5];
2176		data->hw_addr[5] = hw_addr[4];
2177	} else {
2178		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2179	}
2180
2181	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2182		device_printf(sc->sc_dev, "no valid mac address was found\n");
2183		return EINVAL;
2184	}
2185
2186	return 0;
2187}
2188
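/*
 * Parse the NVM sections into a freshly allocated iwm_nvm_data
 * structure: NVM version, radio configuration, SKU capabilities,
 * MAC address, and the per-channel flag words (whose count differs
 * between the 7000 and 8000 families).
 */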
2189static struct iwm_nvm_data *
2190iwm_parse_nvm_data(struct iwm_softc *sc,
2191		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2192		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2193		   const uint16_t *phy_sku, const uint16_t *regulatory)
2194{
2195	struct iwm_nvm_data *data;
2196	uint32_t sku, radio_cfg;
2197
2198	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2199		data = malloc(sizeof(*data) +
2200		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2201		    M_DEVBUF, M_NOWAIT | M_ZERO);
2202	} else {
2203		data = malloc(sizeof(*data) +
2204		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2205		    M_DEVBUF, M_NOWAIT | M_ZERO);
2206	}
2207	if (!data)
2208		return NULL;
2209
2210	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2211
2212	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2213	iwm_set_radio_cfg(sc, data, radio_cfg);
2214
2215	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2216	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2217	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2218	data->sku_cap_11n_enable = 0;
2219
2220	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2221
2222	/* If no valid mac address was found - bail out */
2223	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2224		free(data, M_DEVBUF);
2225		return NULL;
2226	}
2227
2228	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2229		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2230		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2231	} else {
2232		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2233		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2234	}
2235
2236	return data;
2237}
2238
2239static void
2240iwm_free_nvm_data(struct iwm_nvm_data *data)
2241{
2242	if (data != NULL)
2243		free(data, M_DEVBUF);
2244}
2245
2246static struct iwm_nvm_data *
2247iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2248{
2249	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2250
2251	/* Checking for required sections */
2252	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2253		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2254		    !sections[sc->cfg->nvm_hw_section_num].data) {
2255			device_printf(sc->sc_dev,
2256			    "Can't parse empty OTP/NVM sections\n");
2257			return NULL;
2258		}
2259	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2260		/* SW and REGULATORY sections are mandatory */
2261		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2262		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2263			device_printf(sc->sc_dev,
2264			    "Can't parse empty OTP/NVM sections\n");
2265			return NULL;
2266		}
2267		/* MAC_OVERRIDE or at least HW section must exist */
2268		if (!sections[sc->cfg->nvm_hw_section_num].data &&
2269		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2270			device_printf(sc->sc_dev,
2271			    "Can't parse mac_address, empty sections\n");
2272			return NULL;
2273		}
2274
2275		/* PHY_SKU section is mandatory in B0 */
2276		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2277			device_printf(sc->sc_dev,
2278			    "Can't parse phy_sku in B0, empty sections\n");
2279			return NULL;
2280		}
2281	} else {
2282		panic("unknown device family %d\n", sc->cfg->device_family);
2283	}
2284
2285	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2286	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2287	calib = (const uint16_t *)
2288	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2289	regulatory = (const uint16_t *)
2290	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2291	mac_override = (const uint16_t *)
2292	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2293	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2294
2295	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2296	    phy_sku, regulatory);
2297}
2298
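/*
 * Read each NVM section into a scratch buffer, keep a private copy of
 * every section that was actually present, then hand the copies to
 * iwm_parse_nvm_sections() and release them again.
 */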
2299static int
2300iwm_nvm_init(struct iwm_softc *sc)
2301{
2302	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2303	int i, ret, section;
2304	uint32_t size_read = 0;
2305	uint8_t *nvm_buffer, *temp;
2306	uint16_t len;
2307
2308	memset(nvm_sections, 0, sizeof(nvm_sections));
2309
2310	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2311		return EINVAL;
2312
2313	/* Load NVM values from the NIC, via firmware NVM access commands. */
2315	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2316
2317	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2318	if (!nvm_buffer)
2319		return ENOMEM;
2320	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2321		/* Reuse the same scratch buffer for each section read. */
2322		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2323					   &len, size_read);
2324		if (ret)
2325			continue;
2326		size_read += len;
2327		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2328		if (!temp) {
2329			ret = ENOMEM;
2330			break;
2331		}
2332		memcpy(temp, nvm_buffer, len);
2333
2334		nvm_sections[section].data = temp;
2335		nvm_sections[section].length = len;
2336	}
2337	if (!size_read)
2338		device_printf(sc->sc_dev, "OTP is blank\n");
2339	free(nvm_buffer, M_DEVBUF);
2340
2341	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2342	if (!sc->nvm_data)
2343		return EINVAL;
2344	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2345		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2346
2347	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2348		if (nvm_sections[i].data != NULL)
2349			free(nvm_sections[i].data, M_DEVBUF);
2350	}
2351
2352	return 0;
2353}
2354
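/*
 * Upload one firmware section to device memory, bouncing it through
 * the fw_dma buffer in chunks of at most IWM_FH_MEM_TB_MAX_LENGTH
 * bytes.  Destination addresses in the extended SRAM window are
 * reached by temporarily setting IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE.
 */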
2355static int
2356iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2357	const struct iwm_fw_desc *section)
2358{
2359	struct iwm_dma_info *dma = &sc->fw_dma;
2360	uint8_t *v_addr;
2361	bus_addr_t p_addr;
2362	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2363	int ret = 0;
2364
2365	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2366		    "%s: [%d] uCode section being loaded...\n",
2367		    __func__, section_num);
2368
2369	v_addr = dma->vaddr;
2370	p_addr = dma->paddr;
2371
2372	for (offset = 0; offset < section->len; offset += chunk_sz) {
2373		uint32_t copy_size, dst_addr;
2374		int extended_addr = FALSE;
2375
2376		copy_size = MIN(chunk_sz, section->len - offset);
2377		dst_addr = section->offset + offset;
2378
2379		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2380		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
2381			extended_addr = TRUE;
2382
2383		if (extended_addr)
2384			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2385					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2386
2387		memcpy(v_addr, (const uint8_t *)section->data + offset,
2388		    copy_size);
2389		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2390		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2391						   copy_size);
2392
2393		if (extended_addr)
2394			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2395					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2396
2397		if (ret) {
2398			device_printf(sc->sc_dev,
2399			    "%s: Could not load the [%d] uCode section\n",
2400			    __func__, section_num);
2401			break;
2402		}
2403	}
2404
2405	return ret;
2406}
2407
2408/*
2409 * ucode
2410 */
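/*
 * DMA a single chunk into device SRAM over the FH service channel:
 * program the destination SRAM address plus the source physical
 * address and byte count, kick the channel, then sleep until
 * sc_fw_chunk_done is set (from the interrupt path) or the wait
 * times out.
 */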
2411static int
2412iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2413			     bus_addr_t phy_addr, uint32_t byte_cnt)
2414{
2415	int ret;
2416
2417	sc->sc_fw_chunk_done = 0;
2418
2419	if (!iwm_nic_lock(sc))
2420		return EBUSY;
2421
2422	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2423	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2424
2425	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2426	    dst_addr);
2427
2428	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2429	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2430
2431	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2432	    (iwm_get_dma_hi_addr(phy_addr)
2433	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2434
2435	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2436	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2437	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2438	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2439
2440	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2441	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
2442	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2443	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2444
2445	iwm_nic_unlock(sc);
2446
2447	/* Wait for this chunk's DMA to complete (at most hz ticks, ~1s). */
2448	ret = 0;
2449	while (!sc->sc_fw_chunk_done) {
2450		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
2451		if (ret)
2452			break;
2453	}
2454
2455	if (ret != 0) {
2456		device_printf(sc->sc_dev,
2457		    "fw chunk addr 0x%x len %d failed to load\n",
2458		    dst_addr, byte_cnt);
2459		return ETIMEDOUT;
2460	}
2461
2462	return 0;
2463}
2464
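/*
 * Family-8000 section loader.  After each section, the driver reports
 * progress to the uCode through IWM_FH_UCODE_LOAD_STATUS, growing a
 * bitmask (0x1, 0x3, 0x7, ...) that is shifted into the upper 16 bits
 * for CPU2, and finally writes all-ones for the given CPU.
 */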
2465static int
2466iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2467	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2468{
2469	int shift_param;
2470	int i, ret = 0, sec_num = 0x1;
2471	uint32_t val, last_read_idx = 0;
2472
2473	if (cpu == 1) {
2474		shift_param = 0;
2475		*first_ucode_section = 0;
2476	} else {
2477		shift_param = 16;
2478		(*first_ucode_section)++;
2479	}
2480
2481	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2482		last_read_idx = i;
2483
2484		/*
2485		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2486		 * CPU1 sections from the CPU2 sections.
2487		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
2488		 * non-paged sections from the CPU2 paging sections.
2489		 */
2490		if (!image->fw_sect[i].data ||
2491		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2492		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2493			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2494				    "Break since Data not valid or Empty section, sec = %d\n",
2495				    i);
2496			break;
2497		}
2498		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2499		if (ret)
2500			return ret;
2501
2502		/* Notify the ucode of the loaded section number and status */
2503		if (iwm_nic_lock(sc)) {
2504			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2505			val = val | (sec_num << shift_param);
2506			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2507			sec_num = (sec_num << 1) | 0x1;
2508			iwm_nic_unlock(sc);
2509		}
2510	}
2511
2512	*first_ucode_section = last_read_idx;
2513
2514	iwm_enable_interrupts(sc);
2515
2516	if (iwm_nic_lock(sc)) {
2517		if (cpu == 1)
2518			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2519		else
2520			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2521		iwm_nic_unlock(sc);
2522	}
2523
2524	return 0;
2525}
2526
2527static int
2528iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2529	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2530{
2531	int shift_param;
2532	int i, ret = 0;
2533	uint32_t last_read_idx = 0;
2534
2535	if (cpu == 1) {
2536		shift_param = 0;
2537		*first_ucode_section = 0;
2538	} else {
2539		shift_param = 16;
2540		(*first_ucode_section)++;
2541	}
2542
2543	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2544		last_read_idx = i;
2545
2546		/*
2547		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
2548		 * CPU1 sections from the CPU2 sections.
2549		 * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
2550		 * non-paged sections from the CPU2 paging sections.
2551		 */
2552		if (!image->fw_sect[i].data ||
2553		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2554		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2555			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2556				    "Break since Data not valid or Empty section, sec = %d\n",
2557				     i);
2558			break;
2559		}
2560
2561		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2562		if (ret)
2563			return ret;
2564	}
2565
2566	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2567		iwm_set_bits_prph(sc,
2568				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
2569				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
2570				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
2571				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
2572					shift_param);
2573
2574	*first_ucode_section = last_read_idx;
2575
2576	return 0;
2578}
2579
2580static int
2581iwm_pcie_load_given_ucode(struct iwm_softc *sc,
2582	const struct iwm_fw_sects *image)
2583{
2584	int ret = 0;
2585	int first_ucode_section;
2586
2587	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2588		     image->is_dual_cpus ? "Dual" : "Single");
2589
2590	/* Load the non-secured CPU1 sections into the firmware. */
2591	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2592	if (ret)
2593		return ret;
2594
2595	if (image->is_dual_cpus) {
2596		/* set CPU2 header address */
2597		if (iwm_nic_lock(sc)) {
2598			iwm_write_prph(sc,
2599				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2600				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2601			iwm_nic_unlock(sc);
2602		}
2603
2604		/* Load the CPU2 sections into the firmware. */
2605		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2606						 &first_ucode_section);
2607		if (ret)
2608			return ret;
2609	}
2610
2611	iwm_enable_interrupts(sc);
2612
2613	/* release CPU reset */
2614	IWM_WRITE(sc, IWM_CSR_RESET, 0);
2615
2616	return 0;
2617}
2618
2619int
2620iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2621	const struct iwm_fw_sects *image)
2622{
2623	int ret = 0;
2624	int first_ucode_section;
2625
2626	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2627		    image->is_dual_cpus ? "Dual" : "Single");
2628
2629	/* configure the ucode to be ready to get the secured image */
2630	/* release CPU reset */
2631	if (iwm_nic_lock(sc)) {
2632		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2633		    IWM_RELEASE_CPU_RESET_BIT);
2634		iwm_nic_unlock(sc);
2635	}
2636
2637	/* Load the secured CPU1 sections into the firmware. */
2638	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2639	    &first_ucode_section);
2640	if (ret)
2641		return ret;
2642
2643	/* Load the CPU2 sections into the firmware. */
2644	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2645	    &first_ucode_section);
2646}
2647
2648/* XXX Get rid of this definition */
2649static inline void
2650iwm_enable_fw_load_int(struct iwm_softc *sc)
2651{
2652	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2653	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2654	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2655}
2656
2657/* XXX Add proper rfkill support code */
2658static int
2659iwm_start_fw(struct iwm_softc *sc,
2660	const struct iwm_fw_sects *fw)
2661{
2662	int ret;
2663
2664	/* This may fail if AMT took ownership of the device */
2665	if (iwm_prepare_card_hw(sc)) {
2666		device_printf(sc->sc_dev,
2667		    "%s: Exit HW not ready\n", __func__);
2668		ret = EIO;
2669		goto out;
2670	}
2671
2672	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2673
2674	iwm_disable_interrupts(sc);
2675
2676	/* make sure rfkill handshake bits are cleared */
2677	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2678	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2679	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2680
2681	/* clear (again), then enable host interrupts */
2682	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2683
2684	ret = iwm_nic_init(sc);
2685	if (ret) {
2686		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2687		goto out;
2688	}
2689
2690	/*
2691	 * Now we load the firmware and don't want to be interrupted, even
2692	 * by the RF-Kill interrupt (hence we mask all interrupts except
2693	 * FH_TX, which is needed to load the firmware).  If the RF-Kill
2694	 * switch is toggled, we will find out after having loaded the
2695	 * firmware and return the proper value to the caller.
2696	 */
2697	iwm_enable_fw_load_int(sc);
2698
2699	/* really make sure rfkill handshake bits are cleared */
2700	/* maybe we should write a few times more?  just to make sure */
2701	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2702	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2703
2704	/* Load the given image to the HW */
2705	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
2706		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2707	else
2708		ret = iwm_pcie_load_given_ucode(sc, fw);
2709
2710	/* XXX re-check RF-Kill state */
2711
2712out:
2713	return ret;
2714}
2715
2716static int
2717iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2718{
2719	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2720		.valid = htole32(valid_tx_ant),
2721	};
2722
2723	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2724	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2725}
2726
2727/* iwlwifi: mvm/fw.c */
2728static int
2729iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2730{
2731	struct iwm_phy_cfg_cmd phy_cfg_cmd;
2732	enum iwm_ucode_type ucode_type = sc->cur_ucode;
2733
2734	/* Set parameters */
2735	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
2736	phy_cfg_cmd.calib_control.event_trigger =
2737	    sc->sc_default_calib[ucode_type].event_trigger;
2738	phy_cfg_cmd.calib_control.flow_trigger =
2739	    sc->sc_default_calib[ucode_type].flow_trigger;
2740
2741	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2742	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2743	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2744	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2745}
2746
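/*
 * Notification-wait callback for the ALIVE message.  The three ALIVE
 * response layouts (v1, v2, v3) are distinguished purely by payload
 * size; each carries the error/log event table pointers and the
 * scheduler base address recorded here for later use.
 */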
2747static int
2748iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2749{
2750	struct iwm_mvm_alive_data *alive_data = data;
2751	struct iwm_mvm_alive_resp_ver1 *palive1;
2752	struct iwm_mvm_alive_resp_ver2 *palive2;
2753	struct iwm_mvm_alive_resp *palive;
2754
2755	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2756		palive1 = (void *)pkt->data;
2757
2758		sc->support_umac_log = FALSE;
2759		sc->error_event_table =
2760			le32toh(palive1->error_event_table_ptr);
2761		sc->log_event_table =
2762			le32toh(palive1->log_event_table_ptr);
2763		alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2764
2765		alive_data->valid = le16toh(palive1->status) ==
2766				    IWM_ALIVE_STATUS_OK;
2767		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2768			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2769			    le16toh(palive1->status), palive1->ver_type,
2770			    palive1->ver_subtype, palive1->flags);
2771	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2772		palive2 = (void *)pkt->data;
2773		sc->error_event_table =
2774			le32toh(palive2->error_event_table_ptr);
2775		sc->log_event_table =
2776			le32toh(palive2->log_event_table_ptr);
2777		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2778		sc->umac_error_event_table =
2779			le32toh(palive2->error_info_addr);
2780
2781		alive_data->valid = le16toh(palive2->status) ==
2782				    IWM_ALIVE_STATUS_OK;
2783		if (sc->umac_error_event_table)
2784			sc->support_umac_log = TRUE;
2785
2786		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2787			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2788			    le16toh(palive2->status), palive2->ver_type,
2789			    palive2->ver_subtype, palive2->flags);
2790
2791		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2792			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2793			    palive2->umac_major, palive2->umac_minor);
2794	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2795		palive = (void *)pkt->data;
2796
2797		sc->error_event_table =
2798			le32toh(palive->error_event_table_ptr);
2799		sc->log_event_table =
2800			le32toh(palive->log_event_table_ptr);
2801		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2802		sc->umac_error_event_table =
2803			le32toh(palive->error_info_addr);
2804
2805		alive_data->valid = le16toh(palive->status) ==
2806				    IWM_ALIVE_STATUS_OK;
2807		if (sc->umac_error_event_table)
2808			sc->support_umac_log = TRUE;
2809
2810		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2811			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2812			    le16toh(palive->status), palive->ver_type,
2813			    palive->ver_subtype, palive->flags);
2814
2815		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2816			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2817			    le32toh(palive->umac_major),
2818			    le32toh(palive->umac_minor));
2819	}
2820
2821	return TRUE;
2822}
2823
2824static int
2825iwm_wait_phy_db_entry(struct iwm_softc *sc,
2826	struct iwm_rx_packet *pkt, void *data)
2827{
2828	struct iwm_phy_db *phy_db = data;
2829
2830	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2831		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2832			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2833			    __func__, pkt->hdr.code);
2834		}
2835		return TRUE;
2836	}
2837
2838	if (iwm_phy_db_set_section(phy_db, pkt)) {
2839		device_printf(sc->sc_dev,
2840		    "%s: iwm_phy_db_set_section failed\n", __func__);
2841	}
2842
2843	return FALSE;
2844}
2845
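/*
 * Load the requested uCode image, then block until the ALIVE
 * notification arrives (or the wait times out).  On success, bring up
 * the TX scheduler and, if the image uses paging, configure that as
 * well.  On failure, sc->cur_ucode is restored to the previous image.
 */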
2846static int
2847iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2848	enum iwm_ucode_type ucode_type)
2849{
2850	struct iwm_notification_wait alive_wait;
2851	struct iwm_mvm_alive_data alive_data;
2852	const struct iwm_fw_sects *fw;
2853	enum iwm_ucode_type old_type = sc->cur_ucode;
2854	int error;
2855	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };
2856
2857	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
2858		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
2859			error);
2860		return error;
2861	}
2862	fw = &sc->sc_fw.fw_sects[ucode_type];
2863	sc->cur_ucode = ucode_type;
2864	sc->ucode_loaded = FALSE;
2865
2866	memset(&alive_data, 0, sizeof(alive_data));
2867	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2868				   alive_cmd, nitems(alive_cmd),
2869				   iwm_alive_fn, &alive_data);
2870
2871	error = iwm_start_fw(sc, fw);
2872	if (error) {
2873		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2874		sc->cur_ucode = old_type;
2875		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2876		return error;
2877	}
2878
2879	/*
2880	 * Some things may run in the background now, but we
2881	 * just wait for the ALIVE notification here.
2882	 */
2883	IWM_UNLOCK(sc);
2884	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2885				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
2886	IWM_LOCK(sc);
2887	if (error) {
2888		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
2889			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2890			if (iwm_nic_lock(sc)) {
2891				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2892				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2893				iwm_nic_unlock(sc);
2894			}
2895			device_printf(sc->sc_dev,
2896			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2897			    a, b);
2898		}
2899		sc->cur_ucode = old_type;
2900		return error;
2901	}
2902
2903	if (!alive_data.valid) {
2904		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2905		    __func__);
2906		sc->cur_ucode = old_type;
2907		return EIO;
2908	}
2909
2910	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2911
2912	/*
2913	 * Configure and operate the firmware paging mechanism.
2914	 * The driver configures the paging flow only once; the CPU2 paging
2915	 * image is included in the IWM_UCODE_INIT image.
2916	 */
2917	if (fw->paging_mem_size) {
2918		error = iwm_save_fw_paging(sc, fw);
2919		if (error) {
2920			device_printf(sc->sc_dev,
2921			    "%s: failed to save the FW paging image\n",
2922			    __func__);
2923			return error;
2924		}
2925
2926		error = iwm_send_paging_cmd(sc, fw);
2927		if (error) {
2928			device_printf(sc->sc_dev,
2929			    "%s: failed to send the paging cmd\n", __func__);
2930			iwm_free_fw_paging(sc);
2931			return error;
2932		}
2933	}
2934
2935	if (!error)
2936		sc->ucode_loaded = TRUE;
2937	return error;
2938}
2939
2940/*
2941 * mvm misc bits
2942 */
2943
2944/*
2945 * follows iwlwifi/fw.c
2946 */
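/*
 * Run the INIT firmware image: load it and wait for ALIVE, then
 * either just read the NVM (justnvm != 0), or send the BT coex,
 * Smart FIFO, TX antenna and PHY configuration commands and wait for
 * the calibration results, which iwm_wait_phy_db_entry() stores in
 * the PHY database.
 */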
2947static int
2948iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
2949{
2950	struct iwm_notification_wait calib_wait;
2951	static const uint16_t init_complete[] = {
2952		IWM_INIT_COMPLETE_NOTIF,
2953		IWM_CALIB_RES_NOTIF_PHY_DB
2954	};
2955	int ret;
2956
2957	/* do not operate with rfkill switch turned on */
2958	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2959		device_printf(sc->sc_dev,
2960		    "radio is disabled by hardware switch\n");
2961		return EPERM;
2962	}
2963
2964	iwm_init_notification_wait(sc->sc_notif_wait,
2965				   &calib_wait,
2966				   init_complete,
2967				   nitems(init_complete),
2968				   iwm_wait_phy_db_entry,
2969				   sc->sc_phy_db);
2970
2971	/* Will also start the device */
2972	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2973	if (ret) {
2974		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2975		    ret);
2976		goto error;
2977	}
2978
2979	if (justnvm) {
2980		/* Read nvm */
2981		ret = iwm_nvm_init(sc);
2982		if (ret) {
2983			device_printf(sc->sc_dev, "failed to read nvm\n");
2984			goto error;
2985		}
2986		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2987		goto error;
2988	}
2989
2990	ret = iwm_send_bt_init_conf(sc);
2991	if (ret) {
2992		device_printf(sc->sc_dev,
2993		    "failed to send bt coex configuration: %d\n", ret);
2994		goto error;
2995	}
2996
2997	/* Init Smart FIFO. */
2998	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
2999	if (ret)
3000		goto error;
3001
3002	/* Send TX valid antennas before triggering calibrations */
3003	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
3004	if (ret) {
3005		device_printf(sc->sc_dev,
3006		    "failed to send antennas before calibration: %d\n", ret);
3007		goto error;
3008	}
3009
3010	/*
3011	 * Send the PHY configuration command to the init uCode to start
3012	 * the 16.0 uCode init image's internal calibrations.
3013	 */
3014	ret = iwm_send_phy_cfg_cmd(sc);
3015	if (ret) {
3016		device_printf(sc->sc_dev,
3017		    "%s: Failed to run INIT calibrations: %d\n",
3018		    __func__, ret);
3019		goto error;
3020	}
3021
3022	/*
3023	 * Nothing to do but wait for the init complete notification
3024	 * from the firmware.
3025	 */
3026	IWM_UNLOCK(sc);
3027	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
3028	    IWM_MVM_UCODE_CALIB_TIMEOUT);
3029	IWM_LOCK(sc);
3030
3032	goto out;
3033
3034error:
3035	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
3036out:
3037	return ret;
3038}
3039
3040/*
3041 * receive side
3042 */
3043
3044/* (re)stock rx ring, called at init-time and at runtime */
3045static int
3046iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3047{
3048	struct iwm_rx_ring *ring = &sc->rxq;
3049	struct iwm_rx_data *data = &ring->data[idx];
3050	struct mbuf *m;
3051	bus_dmamap_t dmamap;
3052	bus_dma_segment_t seg;
3053	int nsegs, error;
3054
3055	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3056	if (m == NULL)
3057		return ENOBUFS;
3058
3059	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3060	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3061	    &seg, &nsegs, BUS_DMA_NOWAIT);
3062	if (error != 0) {
3063		device_printf(sc->sc_dev,
3064		    "%s: can't map mbuf, error %d\n", __func__, error);
3065		m_freem(m);
3066		return error;
3067	}
3068
3069	if (data->m != NULL)
3070		bus_dmamap_unload(ring->data_dmat, data->map);
3071
3072	/* Swap ring->spare_map with data->map */
3073	dmamap = data->map;
3074	data->map = ring->spare_map;
3075	ring->spare_map = dmamap;
3076
3077	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3078	data->m = m;
3079
3080	/* Update RX descriptor. */
3081	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3082	ring->desc[idx] = htole32(seg.ds_addr >> 8);
3083	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3084	    BUS_DMASYNC_PREWRITE);
3085
3086	return 0;
3087}
3088
3089/* iwlwifi: mvm/rx.c */
3090/*
3091 * iwm_mvm_get_signal_strength - use the new RX PHY INFO API.
3092 * Values are reported by the firmware as positive and must be negated
3093 * to obtain dBm.  Account for missing antennas by replacing 0 values
3094 * with -256 dBm: practically zero power and not a feasible 8-bit value.
3095 */
3096static int
3097iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3098{
3099	int energy_a, energy_b, energy_c, max_energy;
3100	uint32_t val;
3101
3102	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3103	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3104	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3105	energy_a = energy_a ? -energy_a : -256;
3106	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3107	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3108	energy_b = energy_b ? -energy_b : -256;
3109	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3110	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3111	energy_c = energy_c ? -energy_c : -256;
3112	max_energy = MAX(energy_a, energy_b);
3113	max_energy = MAX(max_energy, energy_c);
3114
3115	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3116	    "energy In A %d B %d C %d , and max %d\n",
3117	    energy_a, energy_b, energy_c, max_energy);
3118
3119	return max_energy;
3120}
3121
3122static void
3123iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3124	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3125{
3126	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3127
3128	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3129	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3130
3131	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3132}
3133
3134/*
3135 * Retrieve the average noise (in dBm) among receivers.
3136 */
3137static int
3138iwm_get_noise(struct iwm_softc *sc,
3139    const struct iwm_mvm_statistics_rx_non_phy *stats)
3140{
3141	int i, total, nbant, noise;
3142
3143	total = nbant = noise = 0;
3144	for (i = 0; i < 3; i++) {
3145		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3146		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3147		    __func__,
3148		    i,
3149		    noise);
3150
3151		if (noise) {
3152			total += noise;
3153			nbant++;
3154		}
3155	}
3156
3157	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3158	    __func__, nbant, total);
3159#if 0
3160	/* There should be at least one antenna but check anyway. */
3161	return (nbant == 0) ? -127 : (total / nbant) - 107;
3162#else
3163	/* For now, just hard-code it to -96 to be safe */
3164	return (-96);
3165#endif
3166}
3167
3168/*
3169 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3170 *
3171 * Handles the actual data of the Rx packet from the fw
3172 */
3173static void
3174iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m)
3175{
3176	struct ieee80211com *ic = &sc->sc_ic;
3177	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3178	struct ieee80211_frame *wh;
3179	struct ieee80211_node *ni;
3180	struct ieee80211_rx_stats rxs;
3181	struct iwm_rx_phy_info *phy_info;
3182	struct iwm_rx_mpdu_res_start *rx_res;
3183	struct iwm_rx_packet *pkt = mtod(m, struct iwm_rx_packet *);
3184	uint32_t len;
3185	uint32_t rx_pkt_status;
3186	int rssi;
3187
3188	phy_info = &sc->sc_last_phy_info;
3189	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3190	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3191	len = le16toh(rx_res->byte_count);
3192	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3193
3194	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3195		device_printf(sc->sc_dev,
3196		    "dsp size out of range [0,20]: %d\n",
3197		    phy_info->cfg_phy_cnt);
3198		goto fail;
3199	}
3200
3201	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3202	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3203		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3204		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3205		goto fail;
3206	}
3207
3208	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3209
3210	/* Map it to a value relative to the noise floor. */
3211	rssi = rssi - sc->sc_noise;
3212
3213	/* replenish ring for the buffer we're going to feed to the sharks */
3214	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3215		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3216		    __func__);
3217		goto fail;
3218	}
3219
3220	m->m_data = pkt->data + sizeof(*rx_res);
3221	m->m_pkthdr.len = m->m_len = len;
3222
3223	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3224	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3225
3226	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3227
3228	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3229	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3230	    __func__,
3231	    le16toh(phy_info->channel),
3232	    le16toh(phy_info->phy_flags));
3233
3234	/*
3235	 * Populate an RX state struct with the provided information.
3236	 */
3237	bzero(&rxs, sizeof(rxs));
3238	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3239	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3240	rxs.c_ieee = le16toh(phy_info->channel);
3241	if (le16toh(phy_info->phy_flags) & IWM_RX_RES_PHY_FLAGS_BAND_24) {
3242		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3243	} else {
3244		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3245	}
3246
3247	/* rssi is in 1/2db units */
3248	rxs.rssi = rssi * 2;
3249	rxs.nf = sc->sc_noise;
3250
3251	if (ieee80211_radiotap_active_vap(vap)) {
3252		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3253
3254		tap->wr_flags = 0;
3255		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3256			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3257		tap->wr_chan_freq = htole16(rxs.c_freq);
3258		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3259		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3260		tap->wr_dbm_antsignal = (int8_t)rssi;
3261		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3262		tap->wr_tsft = phy_info->system_timestamp;
3263		switch (phy_info->rate) {
3264		/* CCK rates. */
3265		case  10: tap->wr_rate =   2; break;
3266		case  20: tap->wr_rate =   4; break;
3267		case  55: tap->wr_rate =  11; break;
3268		case 110: tap->wr_rate =  22; break;
3269		/* OFDM rates. */
3270		case 0xd: tap->wr_rate =  12; break;
3271		case 0xf: tap->wr_rate =  18; break;
3272		case 0x5: tap->wr_rate =  24; break;
3273		case 0x7: tap->wr_rate =  36; break;
3274		case 0x9: tap->wr_rate =  48; break;
3275		case 0xb: tap->wr_rate =  72; break;
3276		case 0x1: tap->wr_rate =  96; break;
3277		case 0x3: tap->wr_rate = 108; break;
3278		/* Unknown rate: should not happen. */
3279		default:  tap->wr_rate =   0;
3280		}
3281	}
3282
3283	IWM_UNLOCK(sc);
3284	if (ni != NULL) {
3285		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3286		ieee80211_input_mimo(ni, m, &rxs);
3287		ieee80211_free_node(ni);
3288	} else {
3289		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3290		ieee80211_input_mimo_all(ic, m, &rxs);
3291	}
3292	IWM_LOCK(sc);
3293
3294	return;
3295
3296fail:	counter_u64_add(ic->ic_ierrors, 1);
3297}
3298
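/*
 * Process the TX response for a single (non-aggregated) frame: feed
 * the retry count to net80211 rate control and return nonzero if the
 * frame was not successfully transmitted.
 */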
3299static int
3300iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3301	struct iwm_node *in)
3302{
3303	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3304	struct ieee80211_node *ni = &in->in_ni;
3305	struct ieee80211vap *vap = ni->ni_vap;
3306	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3307	int failack = tx_resp->failure_frame;
3308
3309	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3310
3311	/* Update rate control statistics. */
3312	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3313	    __func__,
3314	    (int) le16toh(tx_resp->status.status),
3315	    (int) le16toh(tx_resp->status.sequence),
3316	    tx_resp->frame_count,
3317	    tx_resp->bt_kill_count,
3318	    tx_resp->failure_rts,
3319	    tx_resp->failure_frame,
3320	    le32toh(tx_resp->initial_rate),
3321	    (int) le16toh(tx_resp->wireless_media_time));
3322
3323	if (status != IWM_TX_STATUS_SUCCESS &&
3324	    status != IWM_TX_STATUS_DIRECT_DONE) {
3325		ieee80211_ratectl_tx_complete(vap, ni,
3326		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3327		return (1);
3328	} else {
3329		ieee80211_ratectl_tx_complete(vap, ni,
3330		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3331		return (0);
3332	}
3333}
3334
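/*
 * Handle a TX command response: sync and unload the frame's DMA map,
 * complete the mbuf to net80211, and restart transmission if the ring
 * has drained below the low watermark.
 */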
3335static void
3336iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
3337	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3338{
3339	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3340	int idx = cmd_hdr->idx;
3341	int qid = cmd_hdr->qid;
3342	struct iwm_tx_ring *ring = &sc->txq[qid];
3343	struct iwm_tx_data *txd = &ring->data[idx];
3344	struct iwm_node *in = txd->in;
3345	struct mbuf *m = txd->m;
3346	int status;
3347
3348	KASSERT(txd->done == 0, ("txd not done"));
3349	KASSERT(txd->in != NULL, ("txd without node"));
3350	KASSERT(txd->m != NULL, ("txd without mbuf"));
3351
3352	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3353
3354	sc->sc_tx_timer = 0;
3355
3356	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
3357
3358	/* Unmap and free mbuf. */
3359	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3360	bus_dmamap_unload(ring->data_dmat, txd->map);
3361
3362	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3363	    "free txd %p, in %p\n", txd, txd->in);
3364	txd->done = 1;
3365	txd->m = NULL;
3366	txd->in = NULL;
3367
3368	ieee80211_tx_complete(&in->in_ni, m, status);
3369
3370	if (--ring->queued < IWM_TX_RING_LOMARK) {
3371		sc->qfullmsk &= ~(1 << ring->qid);
3372		if (sc->qfullmsk == 0) {
3373			iwm_start(sc);
3374		}
3375	}
3376}
3377
3378/*
3379 * transmit side
3380 */
3381
3382/*
3383 * Process a "command done" firmware notification.  This is where we wakeup
3384 * processes waiting for a synchronous command completion.
3385 * from if_iwn
3386 */
3387static void
3388iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3389{
3390	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3391	struct iwm_tx_data *data;
3392
3393	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3394		return;	/* Not a command ack. */
3395	}
3396
3397	/* XXX wide commands? */
3398	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3399	    "cmd notification type 0x%x qid %d idx %d\n",
3400	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3401
3402	data = &ring->data[pkt->hdr.idx];
3403
3404	/* If the command was mapped in an mbuf, free it. */
3405	if (data->m != NULL) {
3406		bus_dmamap_sync(ring->data_dmat, data->map,
3407		    BUS_DMASYNC_POSTWRITE);
3408		bus_dmamap_unload(ring->data_dmat, data->map);
3409		m_freem(data->m);
3410		data->m = NULL;
3411	}
3412	wakeup(&ring->desc[pkt->hdr.idx]);
3413
3414	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3415		device_printf(sc->sc_dev,
3416		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3417		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
3418		/* XXX call iwm_force_nmi() */
3419	}
3420
3421	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3422	ring->queued--;
3423	if (ring->queued == 0)
3424		iwm_pcie_clear_cmd_in_flight(sc);
3425}
3426
3427#if 0
3428/*
3429 * necessary only for block ack mode
3430 */
3431void
3432iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3433	uint16_t len)
3434{
3435	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3436	uint16_t w_val;
3437
3438	scd_bc_tbl = sc->sched_dma.vaddr;
3439
3440	len += 8; /* magic numbers came naturally from paris */
3441	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
3442		len = roundup(len, 4) / 4;
3443
3444	w_val = htole16(sta_id << 12 | len);
3445
3446	/* Update TX scheduler. */
3447	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3448	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3449	    BUS_DMASYNC_PREWRITE);
3450
3451	/* I really wonder what this is ?!? */
3452	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3453		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3454		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3455		    BUS_DMASYNC_PREWRITE);
3456	}
3457}
3458#endif
3459
3460/*
3461 * Take an 802.11 (non-11n) rate and find the matching rate
3462 * table entry.  Return the index into in_ridx[].
3463 *
3464 * The caller then uses that index back into in_ridx[]
3465 * to figure out the rate index programmed /into/
3466 * the firmware for this given node.
3467 */
3468static int
3469iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3470    uint8_t rate)
3471{
3472	int i;
3473	uint8_t r;
3474
3475	for (i = 0; i < nitems(in->in_ridx); i++) {
3476		r = iwm_rates[in->in_ridx[i]].rate;
3477		if (rate == r)
3478			return (i);
3479	}
3480
3481	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3482	    "%s: couldn't find an entry for rate=%d\n",
3483	    __func__,
3484	    rate);
3485
3486	/* XXX Return the first */
3487	/* XXX TODO: have it return the /lowest/ */
3488	return (0);
3489}
3490
3491static int
3492iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3493{
3494	int i;
3495
3496	for (i = 0; i < nitems(iwm_rates); i++) {
3497		if (iwm_rates[i].rate == rate)
3498			return (i);
3499	}
3500	/* XXX error? */
3501	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3502	    "%s: couldn't find an entry for rate=%d\n",
3503	    __func__,
3504	    rate);
3505	return (0);
3506}
3507
3508/*
3509 * Fill in the rate related information for a transmit command.
3510 */
3511static const struct iwm_rate *
3512iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3513	struct mbuf *m, struct iwm_tx_cmd *tx)
3514{
3515	struct ieee80211_node *ni = &in->in_ni;
3516	struct ieee80211_frame *wh;
3517	const struct ieee80211_txparam *tp = ni->ni_txparms;
3518	const struct iwm_rate *rinfo;
3519	int type;
3520	int ridx, rate_flags;
3521
3522	wh = mtod(m, struct ieee80211_frame *);
3523	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3524
3525	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3526	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3527
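	/*
	 * Rate selection precedence, as implemented below: management
	 * frames use the configured management rate, multicast frames the
	 * multicast rate, a fixed unicast rate is honoured if set, EAPOL
	 * frames fall back to the management rate for reliability, and
	 * ordinary data frames consult the rate control table.
	 */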
3528	if (type == IEEE80211_FC0_TYPE_MGT) {
3529		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3530		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3531		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3532	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3533		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3534		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3535		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3536	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3537		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3538		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3539		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3540	} else if (m->m_flags & M_EAPOL) {
3541		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3542		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3543		    "%s: EAPOL\n", __func__);
3544	} else if (type == IEEE80211_FC0_TYPE_DATA) {
3545		int i;
3546
3547		/* for data frames, use RS table */
3548		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3549		/* XXX pass pktlen */
3550		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3551		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3552		ridx = in->in_ridx[i];
3553
3554		/* This is the index into the programmed table */
3555		tx->initial_rate_index = i;
3556		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3557
3558		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3559		    "%s: start with i=%d, txrate %d\n",
3560		    __func__, i, iwm_rates[ridx].rate);
3561	} else {
3562		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3563		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3564		    __func__, tp->mgmtrate);
3565	}
3566
3567	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3568	    "%s: frame type=%d txrate %d\n",
3569	        __func__, type, iwm_rates[ridx].rate);
3570
3571	rinfo = &iwm_rates[ridx];
3572
3573	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3574	    __func__, ridx,
3575	    rinfo->rate,
3576	    !! (IWM_RIDX_IS_CCK(ridx))
3577	    );
3578
3579	/* XXX TODO: hard-coded TX antenna? */
3580	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3581	if (IWM_RIDX_IS_CCK(ridx))
3582		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3583	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3584
3585	return rinfo;
3586}
3587
3588#define TB0_SIZE 16
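/*
 * Transmit an mbuf.  The TFD built below carries 2 + nsegs buffers:
 * TB0 holds the first TB0_SIZE (16) bytes of the TX command, TB1 the
 * rest of the command plus the (possibly padded) 802.11 header, and
 * TB2..TBn the mbuf payload segments.
 */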
3589static int
3590iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3591{
3592	struct ieee80211com *ic = &sc->sc_ic;
3593	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3594	struct iwm_node *in = IWM_NODE(ni);
3595	struct iwm_tx_ring *ring;
3596	struct iwm_tx_data *data;
3597	struct iwm_tfd *desc;
3598	struct iwm_device_cmd *cmd;
3599	struct iwm_tx_cmd *tx;
3600	struct ieee80211_frame *wh;
3601	struct ieee80211_key *k = NULL;
3602	struct mbuf *m1;
3603	const struct iwm_rate *rinfo;
3604	uint32_t flags;
3605	u_int hdrlen;
3606	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3607	int nsegs;
3608	uint8_t tid, type;
3609	int i, totlen, error, pad;
3610
3611	wh = mtod(m, struct ieee80211_frame *);
3612	hdrlen = ieee80211_anyhdrsize(wh);
3613	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3614	tid = 0;
3615	ring = &sc->txq[ac];
3616	desc = &ring->desc[ring->cur];
3617	memset(desc, 0, sizeof(*desc));
3618	data = &ring->data[ring->cur];
3619
3620	/* Fill out iwm_tx_cmd to send to the firmware */
3621	cmd = &ring->cmd[ring->cur];
3622	cmd->hdr.code = IWM_TX_CMD;
3623	cmd->hdr.flags = 0;
3624	cmd->hdr.qid = ring->qid;
3625	cmd->hdr.idx = ring->cur;
3626
3627	tx = (void *)cmd->data;
3628	memset(tx, 0, sizeof(*tx));
3629
3630	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3631
3632	/* Encrypt the frame if need be. */
3633	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3634		/* Retrieve key for TX && do software encryption. */
3635		k = ieee80211_crypto_encap(ni, m);
3636		if (k == NULL) {
3637			m_freem(m);
3638			return (ENOBUFS);
3639		}
3640		/* 802.11 header may have moved. */
3641		wh = mtod(m, struct ieee80211_frame *);
3642	}
3643
3644	if (ieee80211_radiotap_active_vap(vap)) {
3645		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3646
3647		tap->wt_flags = 0;
3648		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3649		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3650		tap->wt_rate = rinfo->rate;
3651		if (k != NULL)
3652			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3653		ieee80211_radiotap_tx(vap, m);
3654	}
3655
3656
3657	totlen = m->m_pkthdr.len;
3658
3659	flags = 0;
3660	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3661		flags |= IWM_TX_CMD_FLG_ACK;
3662	}
3663
3664	if (type == IEEE80211_FC0_TYPE_DATA
3665	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
3666	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3667		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3668	}
3669
3670	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
3671	    type != IEEE80211_FC0_TYPE_DATA)
3672		tx->sta_id = sc->sc_aux_sta.sta_id;
3673	else
3674		tx->sta_id = IWM_STATION_ID;
3675
3676	if (type == IEEE80211_FC0_TYPE_MGT) {
3677		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3678
3679		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3680		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3681			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3682		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3683			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3684		} else {
3685			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3686		}
3687	} else {
3688		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3689	}
3690
3691	if (hdrlen & 3) {
3692		/* First segment length must be a multiple of 4. */
3693		flags |= IWM_TX_CMD_FLG_MH_PAD;
3694		pad = 4 - (hdrlen & 3);
3695	} else
3696		pad = 0;
3697
3698	tx->driver_txop = 0;
3699	tx->next_frame_len = 0;
3700
3701	tx->len = htole16(totlen);
3702	tx->tid_tspec = tid;
3703	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3704
3705	/* Set physical address of "scratch area". */
3706	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3707	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3708
3709	/* Copy 802.11 header in TX command. */
3710	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
3711
3712	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3713
3714	tx->sec_ctl = 0;
3715	tx->tx_flags |= htole32(flags);
3716
3717	/* Trim 802.11 header. */
3718	m_adj(m, hdrlen);
3719	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3720	    segs, &nsegs, BUS_DMA_NOWAIT);
3721	if (error != 0) {
3722		if (error != EFBIG) {
3723			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3724			    error);
3725			m_freem(m);
3726			return error;
3727		}
3728		/* Too many DMA segments, linearize mbuf. */
3729		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3730		if (m1 == NULL) {
3731			device_printf(sc->sc_dev,
3732			    "%s: could not defrag mbuf\n", __func__);
3733			m_freem(m);
3734			return (ENOBUFS);
3735		}
3736		m = m1;
3737
3738		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3739		    segs, &nsegs, BUS_DMA_NOWAIT);
3740		if (error != 0) {
3741			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3742			    error);
3743			m_freem(m);
3744			return error;
3745		}
3746	}
3747	data->m = m;
3748	data->in = in;
3749	data->done = 0;
3750
3751	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3752	    "sending txd %p, in %p\n", data, data->in);
3753	KASSERT(data->in != NULL, ("node is NULL"));
3754
3755	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3756	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3757	    ring->qid, ring->cur, totlen, nsegs,
3758	    le32toh(tx->tx_flags),
3759	    le32toh(tx->rate_n_flags),
3760	    tx->initial_rate_index
3761	    );
3762
3763	/* Fill TX descriptor. */
3764	desc->num_tbs = 2 + nsegs;
3765
3766	desc->tbs[0].lo = htole32(data->cmd_paddr);
3767	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3768	    (TB0_SIZE << 4);
3769	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3770	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
3771	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
3772	      + hdrlen + pad - TB0_SIZE) << 4);
3773
3774	/* Other DMA segments are for data payload. */
3775	for (i = 0; i < nsegs; i++) {
3776		seg = &segs[i];
3777		desc->tbs[i+2].lo = htole32(seg->ds_addr);
3778		desc->tbs[i+2].hi_n_len =
3779		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
3780		    | ((seg->ds_len) << 4);
3781	}
3782
3783	bus_dmamap_sync(ring->data_dmat, data->map,
3784	    BUS_DMASYNC_PREWRITE);
3785	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3786	    BUS_DMASYNC_PREWRITE);
3787	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3788	    BUS_DMASYNC_PREWRITE);
3789
3790#if 0
3791	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3792#endif
3793
3794	/* Kick TX ring. */
3795	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3796	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3797
3798	/* Mark TX ring as full if we reach a certain threshold. */
3799	if (++ring->queued > IWM_TX_RING_HIMARK) {
3800		sc->qfullmsk |= 1 << ring->qid;
3801	}
3802
3803	return 0;
3804}
3805
3806static int
3807iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3808    const struct ieee80211_bpf_params *params)
3809{
3810	struct ieee80211com *ic = ni->ni_ic;
3811	struct iwm_softc *sc = ic->ic_softc;
3812	int error = 0;
3813
3814	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3815	    "->%s begin\n", __func__);
3816
3817	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3818		m_freem(m);
3819		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3820		    "<-%s not RUNNING\n", __func__);
3821		return (ENETDOWN);
3822	}
3823
3824	IWM_LOCK(sc);
3825	/* XXX fix this: the bpf params are currently ignored */
3826	if (params == NULL) {
3827		error = iwm_tx(sc, m, ni, 0);
3828	} else {
3829		error = iwm_tx(sc, m, ni, 0);
3830	}
3831	sc->sc_tx_timer = 5;
3832	IWM_UNLOCK(sc);
3833
3834	return (error);
3835}
3836
3837/*
3838 * mvm/tx.c
3839 */
3840
3841/*
3842 * Note that there are transports that buffer frames before they reach
3843 * the firmware. This means that after flush_tx_path is called, the
3844 * queue might not be empty. The race-free way to handle this is to:
3845 * 1) set the station as draining
3846 * 2) flush the Tx path
3847 * 3) wait for the transport queues to be empty
3848 */
3849int
3850iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3851{
3852	int ret;
3853	struct iwm_tx_path_flush_cmd flush_cmd = {
3854		.queues_ctl = htole32(tfd_msk),
3855		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3856	};
3857
3858	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3859	    sizeof(flush_cmd), &flush_cmd);
3860	if (ret)
3861		device_printf(sc->sc_dev,
3862		    "Flushing tx queue failed: %d\n", ret);
3863	return ret;
3864}
3865
3866/*
3867 * BEGIN mvm/sta.c
3868 */
3869
3870static int
3871iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3872	struct iwm_mvm_add_sta_cmd *cmd, int *status)
3873{
3874	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3875	    cmd, status);
3876}
3877
3878/* send station add/update command to firmware */
3879static int
3880iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3881{
3882	struct iwm_vap *ivp = IWM_VAP(in->in_ni.ni_vap);
3883	struct iwm_mvm_add_sta_cmd add_sta_cmd;
3884	int ret;
3885	uint32_t status;
3886
3887	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3888
3889	add_sta_cmd.sta_id = IWM_STATION_ID;
3890	add_sta_cmd.mac_id_n_color
3891	    = htole32(IWM_FW_CMD_ID_AND_COLOR(ivp->id, ivp->color));
3892	if (!update) {
3893		int ac;
3894		for (ac = 0; ac < WME_NUM_AC; ac++) {
3895			add_sta_cmd.tfd_queue_msk |=
3896			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3897		}
3898		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3899	}
3900	add_sta_cmd.add_modify = update ? 1 : 0;
3901	add_sta_cmd.station_flags_msk
3902	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3903	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3904	if (update)
3905		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3906
3907	status = IWM_ADD_STA_SUCCESS;
3908	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3909	if (ret)
3910		return ret;
3911
3912	switch (status) {
3913	case IWM_ADD_STA_SUCCESS:
3914		break;
3915	default:
3916		ret = EIO;
3917		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3918		break;
3919	}
3920
3921	return ret;
3922}
3923
3924static int
3925iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
3926{
3927	return iwm_mvm_sta_send_to_fw(sc, in, 0);
3928}
3929
3930static int
3931iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
3932{
3933	return iwm_mvm_sta_send_to_fw(sc, in, 1);
3934}
3935
3936static int
3937iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3938	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3939{
3940	struct iwm_mvm_add_sta_cmd cmd;
3941	int ret;
3942	uint32_t status;
3943
3944	memset(&cmd, 0, sizeof(cmd));
3945	cmd.sta_id = sta->sta_id;
3946	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3947
3948	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3949	cmd.tid_disable_tx = htole16(0xffff);
3950
3951	if (addr)
3952		IEEE80211_ADDR_COPY(cmd.addr, addr);
3953
3954	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3955	if (ret)
3956		return ret;
3957
3958	switch (status) {
3959	case IWM_ADD_STA_SUCCESS:
3960		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3961		    "%s: Internal station added.\n", __func__);
3962		return 0;
3963	default:
3964		device_printf(sc->sc_dev,
3965		    "%s: Add internal station failed, status=0x%x\n",
3966		    __func__, status);
3967		ret = EIO;
3968		break;
3969	}
3970	return ret;
3971}
3972
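/*
 * Add the auxiliary station.  It gets its own TX queue and is used
 * for frames that aren't tied to the BSS station; iwm_tx() above
 * directs multicast and non-data frames to it.
 */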
3973static int
3974iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3975{
3976	int ret;
3977
3978	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3979	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3980
3981	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3982	if (ret)
3983		return ret;
3984
3985	ret = iwm_mvm_add_int_sta_common(sc,
3986	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3987
3988	if (ret)
3989		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3990	return ret;
3991}
3992
3993/*
3994 * END mvm/sta.c
3995 */
3996
3997/*
3998 * BEGIN mvm/quota.c
3999 */
4000
4001static int
4002iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
4003{
4004	struct iwm_time_quota_cmd cmd;
4005	int i, idx, ret, num_active_macs, quota, quota_rem;
4006	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4007	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4008	uint16_t id;
4009
4010	memset(&cmd, 0, sizeof(cmd));
4011
4012	/* currently, PHY ID == binding ID */
4013	if (ivp) {
4014		id = ivp->phy_ctxt->id;
4015		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4016		colors[id] = ivp->phy_ctxt->color;
4017
4018		if (1)
4019			n_ifs[id] = 1;
4020	}
4021
4022	/*
4023	 * The FW's scheduling session consists of
4024	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4025	 * equally between all the bindings that require quota.
4026	 */
4027	num_active_macs = 0;
4028	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4029		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4030		num_active_macs += n_ifs[i];
4031	}
4032
4033	quota = 0;
4034	quota_rem = 0;
4035	if (num_active_macs) {
4036		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4037		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4038	}
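	/*
	 * Worked example (a sketch, assuming IWM_MVM_MAX_QUOTA is 128):
	 * with one active MAC, quota = 128 and quota_rem = 0, so the lone
	 * binding gets the whole session; with three, each gets 42
	 * fragments and the remaining 2 go to the first binding below.
	 */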
4039
4040	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4041		if (colors[i] < 0)
4042			continue;
4043
4044		cmd.quotas[idx].id_and_color =
4045			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4046
4047		if (n_ifs[i] <= 0) {
4048			cmd.quotas[idx].quota = htole32(0);
4049			cmd.quotas[idx].max_duration = htole32(0);
4050		} else {
4051			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4052			cmd.quotas[idx].max_duration = htole32(0);
4053		}
4054		idx++;
4055	}
4056
4057	/* Give the remainder of the session to the first binding */
4058	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4059
4060	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4061	    sizeof(cmd), &cmd);
4062	if (ret)
4063		device_printf(sc->sc_dev,
4064		    "%s: Failed to send quota: %d\n", __func__, ret);
4065	return ret;
4066}
4067
4068/*
4069 * END mvm/quota.c
4070 */
4071
4072/*
4073 * ieee80211 routines
4074 */
4075
4076/*
4077 * Change to AUTH state in 80211 state machine.  Roughly matches what
4078 * Linux does in bss_info_changed().
4079 */
4080static int
4081iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4082{
4083	struct ieee80211_node *ni;
4084	struct iwm_node *in;
4085	struct iwm_vap *iv = IWM_VAP(vap);
4086	uint32_t duration;
4087	int error;
4088
4089	/*
4090	 * XXX I have a feeling that the vap node is being
4091	 * freed from underneath us. Grr.
4092	 */
4093	ni = ieee80211_ref_node(vap->iv_bss);
4094	in = IWM_NODE(ni);
4095	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4096	    "%s: called; vap=%p, bss ni=%p\n",
4097	    __func__,
4098	    vap,
4099	    ni);
4100
4101	in->in_assoc = 0;
4102
4103	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4104	if (error != 0)
4105		return error;
4106
4107	error = iwm_allow_mcast(vap, sc);
4108	if (error) {
4109		device_printf(sc->sc_dev,
4110		    "%s: failed to set multicast\n", __func__);
4111		goto out;
4112	}
4113
4114	/*
4115	 * This is where it deviates from what Linux does.
4116	 *
4117	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4118	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4119	 * and always does a mac_ctx_changed().
4120	 *
4121	 * The OpenBSD port doesn't attempt to do that - it resets things
4122	 * at odd states and does the add here.
4123	 *
4124	 * So, until the state handling is fixed (ie, we never reset
4125	 * the NIC except for a firmware failure, which should drag
4126	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4127	 * contexts that are required), let's do a dirty hack here.
4128	 */
4129	if (iv->is_uploaded) {
4130		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4131			device_printf(sc->sc_dev,
4132			    "%s: failed to update MAC\n", __func__);
4133			goto out;
4134		}
4135		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4136		    in->in_ni.ni_chan, 1, 1)) != 0) {
4137			device_printf(sc->sc_dev,
4138			    "%s: failed update phy ctxt\n", __func__);
4139			goto out;
4140		}
4141		iv->phy_ctxt = &sc->sc_phyctxt[0];
4142
4143		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4144			device_printf(sc->sc_dev,
4145			    "%s: binding update cmd\n", __func__);
4146			goto out;
4147		}
4148		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4149			device_printf(sc->sc_dev,
4150			    "%s: failed to update sta\n", __func__);
4151			goto out;
4152		}
4153	} else {
4154		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4155			device_printf(sc->sc_dev,
4156			    "%s: failed to add MAC\n", __func__);
4157			goto out;
4158		}
4159		if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4160			device_printf(sc->sc_dev,
4161			    "%s: failed to update power management\n",
4162			    __func__);
4163			goto out;
4164		}
4165		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4166		    in->in_ni.ni_chan, 1, 1)) != 0) {
4167			device_printf(sc->sc_dev,
4168			    "%s: failed add phy ctxt!\n", __func__);
4169			error = ETIMEDOUT;
4170			goto out;
4171		}
4172		iv->phy_ctxt = &sc->sc_phyctxt[0];
4173
4174		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4175			device_printf(sc->sc_dev,
4176			    "%s: binding add cmd\n", __func__);
4177			goto out;
4178		}
4179		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4180			device_printf(sc->sc_dev,
4181			    "%s: failed to add sta\n", __func__);
4182			goto out;
4183		}
4184	}
4185
4186	/*
4187	 * Prevent the FW from wandering off channel during association
4188	 * by "protecting" the session with a time event.
4189	 */
4190	/* XXX duration is in units of TU (1024 usec), not ms */
4191	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4192	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4193	DELAY(100);
4194
4195	error = 0;
4196out:
4197	ieee80211_free_node(ni);
4198	return (error);
4199}
4200
4201static int
4202iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4203{
4204	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4205	int error;
4206
4207	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4208		device_printf(sc->sc_dev,
4209		    "%s: failed to update STA\n", __func__);
4210		return error;
4211	}
4212
4213	in->in_assoc = 1;
4214	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4215		device_printf(sc->sc_dev,
4216		    "%s: failed to update MAC\n", __func__);
4217		return error;
4218	}
4219
4220	return 0;
4221}
4222
4223static int
4224iwm_release(struct iwm_softc *sc, struct iwm_node *in)
4225{
4226	uint32_t tfd_msk;
4227
4228	/*
4229	 * Ok, so *technically* the proper set of calls for going
4230	 * from RUN back to SCAN is:
4231	 *
4232	 * iwm_mvm_power_mac_disable(sc, in);
4233	 * iwm_mvm_mac_ctxt_changed(sc, in);
4234	 * iwm_mvm_rm_sta(sc, in);
4235	 * iwm_mvm_update_quotas(sc, NULL);
4236	 * iwm_mvm_mac_ctxt_changed(sc, in);
4237	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4238	 * iwm_mvm_mac_ctxt_remove(sc, in);
4239	 *
4240	 * However, that freezes the device no matter which permutations
4241	 * and modifications are attempted.  Obviously, this driver is missing
4242	 * something since it works in the Linux driver, but figuring out what
4243	 * is missing is a little more complicated.  Now, since we're going
4244	 * back to nothing anyway, we'll just do a complete device reset.
4245	 * Up yours, device!
4246	 */
4247	/*
4248	 * Just using 0xf for the queues mask is fine as long as we only
4249	 * get here from RUN state.
4250	 */
4251	tfd_msk = 0xf;
4252	mbufq_drain(&sc->sc_snd);
4253	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
4254	/*
4255	 * We seem to get away with just synchronously sending the
4256	 * IWM_TXPATH_FLUSH command.
4257	 */
4258//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
4259	iwm_stop_device(sc);
4260	iwm_init_hw(sc);
4261	if (in)
4262		in->in_assoc = 0;
4263	return 0;
4264
4265#if 0
4266	int error;
4267
4268	iwm_mvm_power_mac_disable(sc, in);
4269
4270	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4271		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
4272		return error;
4273	}
4274
4275	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
4276		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
4277		return error;
4278	}
4279	error = iwm_mvm_rm_sta(sc, in);
4280	in->in_assoc = 0;
4281	iwm_mvm_update_quotas(sc, NULL);
4282	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
4283		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
4284		return error;
4285	}
4286	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
4287
4288	iwm_mvm_mac_ctxt_remove(sc, in);
4289
4290	return error;
4291#endif
4292}
4293
4294static struct ieee80211_node *
4295iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4296{
4297	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4298	    M_NOWAIT | M_ZERO);
4299}
4300
4301uint8_t
4302iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4303{
4304	int i;
4305	uint8_t rval;
4306
4307	for (i = 0; i < rs->rs_nrates; i++) {
4308		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4309		if (rval == iwm_rates[ridx].rate)
4310			return rs->rs_rates[i];
4311	}
4312
4313	return 0;
4314}
4315
4316static void
4317iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4318{
4319	struct ieee80211_node *ni = &in->in_ni;
4320	struct iwm_lq_cmd *lq = &in->in_lq;
4321	int nrates = ni->ni_rates.rs_nrates;
4322	int i, ridx, tab = 0;
4323//	int txant = 0;
4324
4325	if (nrates > nitems(lq->rs_table)) {
4326		device_printf(sc->sc_dev,
4327		    "%s: node supports %d rates, driver handles "
4328		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4329		return;
4330	}
4331	if (nrates == 0) {
4332		device_printf(sc->sc_dev,
4333		    "%s: node supports 0 rates, odd!\n", __func__);
4334		return;
4335	}
4336
4337	/*
4338	 * XXX .. and most of iwm_node is not initialised explicitly;
4339	 * it's all just 0x0 passed to the firmware.
4340	 */
4341
4342	/* first figure out which rates we should support */
4343	/* XXX TODO: this isn't 11n aware /at all/ */
4344	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4345	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4346	    "%s: nrates=%d\n", __func__, nrates);
4347
4348	/*
4349	 * Loop over nrates and populate in_ridx from the highest
4350	 * rate to the lowest rate.  Remember, in_ridx[] has
4351	 * IEEE80211_RATE_MAXSIZE entries!
4352	 */
4353	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4354		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4355
4356		/* Map 802.11 rate to HW rate index. */
4357		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4358			if (iwm_rates[ridx].rate == rate)
4359				break;
4360		if (ridx > IWM_RIDX_MAX) {
4361			device_printf(sc->sc_dev,
4362			    "%s: WARNING: device rate for %d not found!\n",
4363			    __func__, rate);
4364		} else {
4365			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4366			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4367			    __func__,
4368			    i,
4369			    rate,
4370			    ridx);
4371			in->in_ridx[i] = ridx;
4372		}
4373	}
4374
4375	/* then construct a lq_cmd based on those */
4376	memset(lq, 0, sizeof(*lq));
4377	lq->sta_id = IWM_STATION_ID;
4378
4379	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4380	if (ni->ni_flags & IEEE80211_NODE_HT)
4381		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4382
4383	/*
4384	 * Are these used? (We don't do SISO or MIMO.)
4385	 * They need to be set to non-zero, though, or we get an error.
4386	 */
4387	lq->single_stream_ant_msk = 1;
4388	lq->dual_stream_ant_msk = 1;
4389
4390	/*
4391	 * Build the actual rate selection table.
4392	 * The lowest bits are the rates.  Additionally,
4393	 * CCK needs bit 9 to be set.  The rest of the bits
4394	 * we add to the table entry select the TX antenna.
4395	 * Note that we add the rates highest rate first
4396	 * (opposite of ni_rates).
4397	 */
4398	/*
4399	 * XXX TODO: this should be looping over the min of nrates
4400	 * and LQ_MAX_RETRY_NUM.  Sigh.
4401	 */
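	/*
	 * Each table entry below ends up as (sketch):
	 *   tab = plcp | (antenna << IWM_RATE_MCS_ANT_POS)
	 *             | (CCK rate ? IWM_RATE_MCS_CCK_MSK : 0)
	 */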
4402	for (i = 0; i < nrates; i++) {
4403		int nextant;
4404
4405#if 0
4406		if (txant == 0)
4407			txant = iwm_mvm_get_valid_tx_ant(sc);
4408		nextant = 1<<(ffs(txant)-1);
4409		txant &= ~nextant;
4410#else
4411		nextant = iwm_mvm_get_valid_tx_ant(sc);
4412#endif
4413		/*
4414		 * Map the rate id into a rate index into
4415		 * our hardware table containing the
4416		 * configuration to use for this rate.
4417		 */
4418		ridx = in->in_ridx[i];
4419		tab = iwm_rates[ridx].plcp;
4420		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4421		if (IWM_RIDX_IS_CCK(ridx))
4422			tab |= IWM_RATE_MCS_CCK_MSK;
4423		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4424		    "station rate i=%d, rate=%d, hw=%x\n",
4425		    i, iwm_rates[ridx].rate, tab);
4426		lq->rs_table[i] = htole32(tab);
4427	}
4428	/* then fill the rest with the lowest possible rate */
4429	for (i = nrates; i < nitems(lq->rs_table); i++) {
4430		KASSERT(tab != 0, ("invalid tab"));
4431		lq->rs_table[i] = htole32(tab);
4432	}
4433}
4434
4435static int
4436iwm_media_change(struct ifnet *ifp)
4437{
4438	struct ieee80211vap *vap = ifp->if_softc;
4439	struct ieee80211com *ic = vap->iv_ic;
4440	struct iwm_softc *sc = ic->ic_softc;
4441	int error;
4442
4443	error = ieee80211_media_change(ifp);
4444	if (error != ENETRESET)
4445		return error;
4446
4447	IWM_LOCK(sc);
4448	if (ic->ic_nrunning > 0) {
4449		iwm_stop(sc);
4450		iwm_init(sc);
4451	}
4452	IWM_UNLOCK(sc);
4453	return error;
4454}
4455
4456
4457static int
4458iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4459{
4460	struct iwm_vap *ivp = IWM_VAP(vap);
4461	struct ieee80211com *ic = vap->iv_ic;
4462	struct iwm_softc *sc = ic->ic_softc;
4463	struct iwm_node *in;
4464	int error;
4465
4466	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4467	    "switching state %s -> %s\n",
4468	    ieee80211_state_name[vap->iv_state],
4469	    ieee80211_state_name[nstate]);
4470	IEEE80211_UNLOCK(ic);
4471	IWM_LOCK(sc);
4472
4473	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
4474		iwm_led_blink_stop(sc);
4475
4476	/* disable beacon filtering if we're hopping out of RUN */
4477	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
4478		iwm_mvm_disable_beacon_filter(sc);
4479
4480		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4481			in->in_assoc = 0;
4482
4483		if (nstate == IEEE80211_S_INIT) {
4484			IWM_UNLOCK(sc);
4485			IEEE80211_LOCK(ic);
4486			error = ivp->iv_newstate(vap, nstate, arg);
4487			IEEE80211_UNLOCK(ic);
4488			IWM_LOCK(sc);
4489			iwm_release(sc, NULL);
4490			IWM_UNLOCK(sc);
4491			IEEE80211_LOCK(ic);
4492			return error;
4493		}
4494
4495		/*
4496		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
4497		 * above then the card will be completely reinitialized,
4498		 * so the driver must do everything necessary to bring the card
4499		 * from INIT to SCAN.
4500		 *
4501		 * Additionally, upon receiving deauth frame from AP,
4502		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
4503		 * state. This will also fail with this driver, so bring the FSM
4504		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
4505		 *
4506		 * XXX TODO: fix this for FreeBSD!
4507		 */
4508		if (nstate == IEEE80211_S_SCAN ||
4509		    nstate == IEEE80211_S_AUTH ||
4510		    nstate == IEEE80211_S_ASSOC) {
4511			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4512			    "Force transition to INIT; MGT=%d\n", arg);
4513			IWM_UNLOCK(sc);
4514			IEEE80211_LOCK(ic);
4515			/* Always pass arg as -1 since we can't Tx right now. */
4516			/*
4517			 * XXX arg is just ignored anyway when transitioning
4518			 *     to IEEE80211_S_INIT.
4519			 */
4520			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
4521			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4522			    "Going INIT->SCAN\n");
4523			nstate = IEEE80211_S_SCAN;
4524			IEEE80211_UNLOCK(ic);
4525			IWM_LOCK(sc);
4526		}
4527	}
4528
4529	switch (nstate) {
4530	case IEEE80211_S_INIT:
4531		break;
4532
4533	case IEEE80211_S_AUTH:
4534		if ((error = iwm_auth(vap, sc)) != 0) {
4535			device_printf(sc->sc_dev,
4536			    "%s: could not move to auth state: %d\n",
4537			    __func__, error);
4538			break;
4539		}
4540		break;
4541
4542	case IEEE80211_S_ASSOC:
4543		if ((error = iwm_assoc(vap, sc)) != 0) {
4544			device_printf(sc->sc_dev,
4545			    "%s: failed to associate: %d\n", __func__,
4546			    error);
4547			break;
4548		}
4549		break;
4550
4551	case IEEE80211_S_RUN:
4552	{
4553		struct iwm_host_cmd cmd = {
4554			.id = IWM_LQ_CMD,
4555			.len = { sizeof(in->in_lq), },
4556			.flags = IWM_CMD_SYNC,
4557		};
4558
4559		/* Update the association state, now that we have it all. */
4560		/* (e.g., the associd comes in at this point.) */
4561		error = iwm_assoc(vap, sc);
4562		if (error != 0) {
4563			device_printf(sc->sc_dev,
4564			    "%s: failed to update association state: %d\n",
4565			    __func__,
4566			    error);
4567			break;
4568		}
4569
4570		in = IWM_NODE(vap->iv_bss);
4571		iwm_mvm_enable_beacon_filter(sc, in);
4572		iwm_mvm_power_update_mac(sc);
4573		iwm_mvm_update_quotas(sc, ivp);
4574		iwm_setrates(sc, in);
4575
4576		cmd.data[0] = &in->in_lq;
4577		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
4578			device_printf(sc->sc_dev,
4579			    "%s: IWM_LQ_CMD failed\n", __func__);
4580		}
4581
4582		iwm_mvm_led_enable(sc);
4583		break;
4584	}
4585
4586	default:
4587		break;
4588	}
4589	IWM_UNLOCK(sc);
4590	IEEE80211_LOCK(ic);
4591
4592	return (ivp->iv_newstate(vap, nstate, arg));
4593}
4594
4595void
4596iwm_endscan_cb(void *arg, int pending)
4597{
4598	struct iwm_softc *sc = arg;
4599	struct ieee80211com *ic = &sc->sc_ic;
4600
4601	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4602	    "%s: scan ended\n",
4603	    __func__);
4604
4605	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4606}
4607
4608/*
4609 * Aging and idle timeouts for the different possible scenarios
4610 * in default configuration
4611 */
4612static const uint32_t
4613iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4614	{
4615		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4616		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4617	},
4618	{
4619		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4620		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4621	},
4622	{
4623		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4624		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4625	},
4626	{
4627		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4628		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4629	},
4630	{
4631		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4632		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4633	},
4634};
4635
4636/*
4637 * Aging and idle timeouts for the different possible scenarios
4638 * in single BSS MAC configuration.
4639 */
4640static const uint32_t
4641iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4642	{
4643		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4644		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4645	},
4646	{
4647		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4648		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4649	},
4650	{
4651		htole32(IWM_SF_MCAST_AGING_TIMER),
4652		htole32(IWM_SF_MCAST_IDLE_TIMER)
4653	},
4654	{
4655		htole32(IWM_SF_BA_AGING_TIMER),
4656		htole32(IWM_SF_BA_IDLE_TIMER)
4657	},
4658	{
4659		htole32(IWM_SF_TX_RE_AGING_TIMER),
4660		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4661	},
4662};
4663
4664static void
4665iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4666    struct ieee80211_node *ni)
4667{
4668	int i, j, watermark;
4669
4670	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4671
4672	/*
4673	 * If we are in the association flow, check the antenna configuration
4674	 * capabilities of the AP station and choose the watermark accordingly.
4675	 */
4676	if (ni) {
4677		if (ni->ni_flags & IEEE80211_NODE_HT) {
4678#ifdef notyet
4679			if (ni->ni_rxmcs[2] != 0)
4680				watermark = IWM_SF_W_MARK_MIMO3;
4681			else if (ni->ni_rxmcs[1] != 0)
4682				watermark = IWM_SF_W_MARK_MIMO2;
4683			else
4684#endif
4685				watermark = IWM_SF_W_MARK_SISO;
4686		} else {
4687			watermark = IWM_SF_W_MARK_LEGACY;
4688		}
4689	/* default watermark value for unassociated mode. */
4690	} else {
4691		watermark = IWM_SF_W_MARK_MIMO2;
4692	}
4693	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4694
4695	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4696		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4697			sf_cmd->long_delay_timeouts[i][j] =
4698					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4699		}
4700	}
4701
4702	if (ni) {
4703		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4704		       sizeof(iwm_sf_full_timeout));
4705	} else {
4706		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4707		       sizeof(iwm_sf_full_timeout_def));
4708	}
4709}
4710
4711static int
4712iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4713{
4714	struct ieee80211com *ic = &sc->sc_ic;
4715	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4716	struct iwm_sf_cfg_cmd sf_cmd = {
4717		.state = htole32(IWM_SF_FULL_ON),
4718	};
4719	int ret = 0;
4720
4721	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4722		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4723
4724	switch (new_state) {
4725	case IWM_SF_UNINIT:
4726	case IWM_SF_INIT_OFF:
4727		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4728		break;
4729	case IWM_SF_FULL_ON:
4730		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4731		break;
4732	default:
4733		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4734		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4735			  new_state);
4736		return EINVAL;
4737	}
4738
4739	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4740				   sizeof(sf_cmd), &sf_cmd);
4741	return ret;
4742}
4743
4744static int
4745iwm_send_bt_init_conf(struct iwm_softc *sc)
4746{
4747	struct iwm_bt_coex_cmd bt_cmd;
4748
4749	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4750	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4751
4752	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4753	    &bt_cmd);
4754}
4755
4756static int
4757iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4758{
4759	struct iwm_mcc_update_cmd mcc_cmd;
4760	struct iwm_host_cmd hcmd = {
4761		.id = IWM_MCC_UPDATE_CMD,
4762		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4763		.data = { &mcc_cmd },
4764	};
4765	int ret;
4766#ifdef IWM_DEBUG
4767	struct iwm_rx_packet *pkt;
4768	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4769	struct iwm_mcc_update_resp *mcc_resp;
4770	int n_channels;
4771	uint16_t mcc;
4772#endif
4773	int resp_v2 = fw_has_capa(&sc->ucode_capa,
4774	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4775
4776	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4777	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4778	if (fw_has_api(&sc->ucode_capa, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4779	    fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
4780		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
4781	else
4782		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
4783
4784	if (resp_v2)
4785		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4786	else
4787		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4788
4789	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4790	    "send MCC update to FW with '%c%c' src = %d\n",
4791	    alpha2[0], alpha2[1], mcc_cmd.source_id);
4792
4793	ret = iwm_send_cmd(sc, &hcmd);
4794	if (ret)
4795		return ret;
4796
4797#ifdef IWM_DEBUG
4798	pkt = hcmd.resp_pkt;
4799
4800	/* Extract MCC response */
4801	if (resp_v2) {
4802		mcc_resp = (void *)pkt->data;
4803		mcc = mcc_resp->mcc;
4804		n_channels =  le32toh(mcc_resp->n_channels);
4805	} else {
4806		mcc_resp_v1 = (void *)pkt->data;
4807		mcc = mcc_resp_v1->mcc;
4808		n_channels =  le32toh(mcc_resp_v1->n_channels);
4809	}
4810
4811	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4812	if (mcc == 0)
4813		mcc = 0x3030;  /* "00" - world */
4814
4815	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
4816	    "regulatory domain '%c%c' (%d channels available)\n",
4817	    mcc >> 8, mcc & 0xff, n_channels);
4818#endif
4819	iwm_free_resp(sc, &hcmd);
4820
4821	return 0;
4822}
4823
4824static void
4825iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4826{
4827	struct iwm_host_cmd cmd = {
4828		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4829		.len = { sizeof(uint32_t), },
4830		.data = { &backoff, },
4831	};
4832
4833	if (iwm_send_cmd(sc, &cmd) != 0) {
4834		device_printf(sc->sc_dev,
4835		    "failed to change thermal tx backoff\n");
4836	}
4837}
4838
4839static int
4840iwm_init_hw(struct iwm_softc *sc)
4841{
4842	struct ieee80211com *ic = &sc->sc_ic;
4843	int error, i, ac;
4844
4845	if ((error = iwm_start_hw(sc)) != 0) {
4846		printf("iwm_start_hw: failed %d\n", error);
4847		return error;
4848	}
4849
4850	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4851		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4852		return error;
4853	}
4854
4855	/*
4856	 * We should stop and restart the hardware, since the INIT
4857	 * image has just been loaded.
4858	 */
4859	iwm_stop_device(sc);
4860	sc->sc_ps_disabled = FALSE;
4861	if ((error = iwm_start_hw(sc)) != 0) {
4862		device_printf(sc->sc_dev, "could not initialize hardware\n");
4863		return error;
4864	}
4865
4866	/* restart, this time with the regular firmware */
4867	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4868	if (error) {
4869		device_printf(sc->sc_dev, "could not load firmware\n");
4870		goto error;
4871	}
4872
4873	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4874		device_printf(sc->sc_dev, "bt init conf failed\n");
4875		goto error;
4876	}
4877
4878	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4879	if (error != 0) {
4880		device_printf(sc->sc_dev, "antenna config failed\n");
4881		goto error;
4882	}
4883
4884	/* Send phy db control command and then phy db calibration */
4885	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4886		goto error;
4887
4888	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4889		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4890		goto error;
4891	}
4892
4893	/* Add auxiliary station for scanning */
4894	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4895		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4896		goto error;
4897	}
4898
4899	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4900		/*
4901		 * The channel used here isn't relevant as it's
4902		 * going to be overwritten in the other flows.
4903		 * For now use the first channel we have.
4904		 */
4905		if ((error = iwm_mvm_phy_ctxt_add(sc,
4906		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4907			goto error;
4908	}
4909
4910	/* Initialize tx backoffs to the minimum. */
4911	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4912		iwm_mvm_tt_tx_backoff(sc, 0);
4913
4914	error = iwm_mvm_power_update_device(sc);
4915	if (error)
4916		goto error;
4917
4918	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4919		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4920			goto error;
4921	}
4922
4923	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4924		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4925			goto error;
4926	}
4927
4928	/* Enable Tx queues. */
4929	for (ac = 0; ac < WME_NUM_AC; ac++) {
4930		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4931		    iwm_mvm_ac_to_tx_fifo[ac]);
4932		if (error)
4933			goto error;
4934	}
4935
4936	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4937		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4938		goto error;
4939	}
4940
4941	return 0;
4942
4943 error:
4944	iwm_stop_device(sc);
4945	return error;
4946}
4947
4948/* Allow multicast from our BSSID. */
4949static int
4950iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4951{
4952	struct ieee80211_node *ni = vap->iv_bss;
4953	struct iwm_mcast_filter_cmd *cmd;
4954	size_t size;
4955	int error;
4956
4957	size = roundup(sizeof(*cmd), 4);
4958	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4959	if (cmd == NULL)
4960		return ENOMEM;
4961	cmd->filter_own = 1;
4962	cmd->port_id = 0;
4963	cmd->count = 0;
4964	cmd->pass_all = 1;
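	/*
	 * Note: with pass_all set, the firmware presumably accepts all
	 * multicast frames, not only those matching the BSSID below.
	 */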
4965	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4966
4967	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4968	    IWM_CMD_SYNC, size, cmd);
4969	free(cmd, M_DEVBUF);
4970
4971	return (error);
4972}
4973
4974/*
4975 * ifnet interfaces
4976 */
4977
4978static void
4979iwm_init(struct iwm_softc *sc)
4980{
4981	int error;
4982
4983	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4984		return;
4985	}
4986	sc->sc_generation++;
4987	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4988
4989	if ((error = iwm_init_hw(sc)) != 0) {
4990		printf("iwm_init_hw failed %d\n", error);
4991		iwm_stop(sc);
4992		return;
4993	}
4994
4995	/*
4996	 * OK, the firmware is loaded and we are running.
4997	 */
4998	sc->sc_flags |= IWM_FLAG_HW_INITED;
4999	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5000}
5001
5002static int
5003iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5004{
5005	struct iwm_softc *sc;
5006	int error;
5007
5008	sc = ic->ic_softc;
5009
5010	IWM_LOCK(sc);
5011	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5012		IWM_UNLOCK(sc);
5013		return (ENXIO);
5014	}
5015	error = mbufq_enqueue(&sc->sc_snd, m);
5016	if (error) {
5017		IWM_UNLOCK(sc);
5018		return (error);
5019	}
5020	iwm_start(sc);
5021	IWM_UNLOCK(sc);
5022	return (0);
5023}
5024
5025/*
5026 * Dequeue packets from sendq and call send.
5027 */
5028static void
5029iwm_start(struct iwm_softc *sc)
5030{
5031	struct ieee80211_node *ni;
5032	struct mbuf *m;
5033	int ac = 0;
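	/* XXX for now everything is sent on the first (best effort) queue. */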
5034
5035	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5036	while (sc->qfullmsk == 0 &&
5037		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5038		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5039		if (iwm_tx(sc, m, ni, ac) != 0) {
5040			if_inc_counter(ni->ni_vap->iv_ifp,
5041			    IFCOUNTER_OERRORS, 1);
5042			ieee80211_free_node(ni);
5043			continue;
5044		}
5045		sc->sc_tx_timer = 15;
5046	}
5047	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5048}
5049
5050static void
5051iwm_stop(struct iwm_softc *sc)
5052{
5053
5054	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5055	sc->sc_flags |= IWM_FLAG_STOPPED;
5056	sc->sc_generation++;
5057	iwm_led_blink_stop(sc);
5058	sc->sc_tx_timer = 0;
5059	iwm_stop_device(sc);
5060	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5061}
5062
5063static void
5064iwm_watchdog(void *arg)
5065{
5066	struct iwm_softc *sc = arg;
5067	struct ieee80211com *ic = &sc->sc_ic;
5068
5069	if (sc->sc_tx_timer > 0) {
5070		if (--sc->sc_tx_timer == 0) {
5071			device_printf(sc->sc_dev, "device timeout\n");
5072#ifdef IWM_DEBUG
5073			iwm_nic_error(sc);
5074#endif
5075			ieee80211_restart_all(ic);
5076			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5077			return;
5078		}
5079	}
5080	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5081}
5082
5083static void
5084iwm_parent(struct ieee80211com *ic)
5085{
5086	struct iwm_softc *sc = ic->ic_softc;
5087	int startall = 0;
5088
5089	IWM_LOCK(sc);
5090	if (ic->ic_nrunning > 0) {
5091		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5092			iwm_init(sc);
5093			startall = 1;
5094		}
5095	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5096		iwm_stop(sc);
5097	IWM_UNLOCK(sc);
5098	if (startall)
5099		ieee80211_start_all(ic);
5100}
5101
5102/*
5103 * The interrupt side of things
5104 */
5105
5106/*
5107 * error dumping routines are from iwlwifi/mvm/utils.c
5108 */
5109
5110/*
5111 * Note: This structure is read from the device with IO accesses,
5112 * and the reading already does the endian conversion. As it is
5113 * read with uint32_t-sized accesses, any members with a different size
5114 * need to be ordered correctly though!
5115 */
5116struct iwm_error_event_table {
5117	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5118	uint32_t error_id;		/* type of error */
5119	uint32_t trm_hw_status0;	/* TRM HW status */
5120	uint32_t trm_hw_status1;	/* TRM HW status */
5121	uint32_t blink2;		/* branch link */
5122	uint32_t ilink1;		/* interrupt link */
5123	uint32_t ilink2;		/* interrupt link */
5124	uint32_t data1;		/* error-specific data */
5125	uint32_t data2;		/* error-specific data */
5126	uint32_t data3;		/* error-specific data */
5127	uint32_t bcon_time;		/* beacon timer */
5128	uint32_t tsf_low;		/* network timestamp function timer */
5129	uint32_t tsf_hi;		/* network timestamp function timer */
5130	uint32_t gp1;		/* GP1 timer register */
5131	uint32_t gp2;		/* GP2 timer register */
5132	uint32_t fw_rev_type;	/* firmware revision type */
5133	uint32_t major;		/* uCode version major */
5134	uint32_t minor;		/* uCode version minor */
5135	uint32_t hw_ver;		/* HW Silicon version */
5136	uint32_t brd_ver;		/* HW board version */
5137	uint32_t log_pc;		/* log program counter */
5138	uint32_t frame_ptr;		/* frame pointer */
5139	uint32_t stack_ptr;		/* stack pointer */
5140	uint32_t hcmd;		/* last host command header */
5141	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5142				 * rxtx_flag */
5143	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5144				 * host_flag */
5145	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5146				 * enc_flag */
5147	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5148				 * time_flag */
5149	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5150				 * wico interrupt */
5151	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5152	uint32_t wait_event;		/* wait event() caller address */
5153	uint32_t l2p_control;	/* L2pControlField */
5154	uint32_t l2p_duration;	/* L2pDurationField */
5155	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5156	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5157	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5158				 * (LMPM_PMG_SEL) */
5159	uint32_t u_timestamp;	/* date and time of the
5160				 * compilation */
5161	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5162} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5163
5164/*
5165 * UMAC error struct - relevant starting from family 8000 chip.
5166 * Note: This structure is read from the device with IO accesses,
5167 * and the reading already does the endian conversion. As it is
5168 * read with u32-sized accesses, any members with a different size
5169 * need to be ordered correctly though!
5170 */
5171struct iwm_umac_error_event_table {
5172	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5173	uint32_t error_id;	/* type of error */
5174	uint32_t blink1;	/* branch link */
5175	uint32_t blink2;	/* branch link */
5176	uint32_t ilink1;	/* interrupt link */
5177	uint32_t ilink2;	/* interrupt link */
5178	uint32_t data1;		/* error-specific data */
5179	uint32_t data2;		/* error-specific data */
5180	uint32_t data3;		/* error-specific data */
5181	uint32_t umac_major;
5182	uint32_t umac_minor;
5183	uint32_t frame_pointer;	/* core register 27 */
5184	uint32_t stack_pointer;	/* core register 28 */
5185	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5186	uint32_t nic_isr_pref;	/* ISR status register */
5187} __packed;
5188
5189#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5190#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5191
5192#ifdef IWM_DEBUG
5193struct {
5194	const char *name;
5195	uint8_t num;
5196} advanced_lookup[] = {
5197	{ "NMI_INTERRUPT_WDG", 0x34 },
5198	{ "SYSASSERT", 0x35 },
5199	{ "UCODE_VERSION_MISMATCH", 0x37 },
5200	{ "BAD_COMMAND", 0x38 },
5201	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5202	{ "FATAL_ERROR", 0x3D },
5203	{ "NMI_TRM_HW_ERR", 0x46 },
5204	{ "NMI_INTERRUPT_TRM", 0x4C },
5205	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5206	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5207	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5208	{ "NMI_INTERRUPT_HOST", 0x66 },
5209	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
5210	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
5211	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5212	{ "ADVANCED_SYSASSERT", 0 },
5213};
5214
5215static const char *
5216iwm_desc_lookup(uint32_t num)
5217{
5218	int i;
5219
5220	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5221		if (advanced_lookup[i].num == num)
5222			return advanced_lookup[i].name;
5223
5224	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5225	return advanced_lookup[i].name;
5226}
5227
5228static void
5229iwm_nic_umac_error(struct iwm_softc *sc)
5230{
5231	struct iwm_umac_error_event_table table;
5232	uint32_t base;
5233
5234	base = sc->umac_error_event_table;
5235
5236	if (base < 0x800000) {
5237		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5238		    base);
5239		return;
5240	}
5241
5242	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5243		device_printf(sc->sc_dev, "reading errlog failed\n");
5244		return;
5245	}
5246
5247	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5248		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5249		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5250		    sc->sc_flags, table.valid);
5251	}
5252
5253	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5254		iwm_desc_lookup(table.error_id));
5255	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5256	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5257	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5258	    table.ilink1);
5259	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5260	    table.ilink2);
5261	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5262	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5263	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5264	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5265	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5266	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5267	    table.frame_pointer);
5268	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5269	    table.stack_pointer);
5270	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5271	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5272	    table.nic_isr_pref);
5273}
5274
5275/*
5276 * Support for dumping the error log seemed like a good idea ...
5277 * but it's mostly hex junk and the only sensible thing is the
5278 * hw/ucode revision (which we know anyway).  Since it's here,
5279 * I'll just leave it in, just in case e.g. the Intel guys want to
5280 * help us decipher some "ADVANCED_SYSASSERT" later.
5281 */
5282static void
5283iwm_nic_error(struct iwm_softc *sc)
5284{
5285	struct iwm_error_event_table table;
5286	uint32_t base;
5287
5288	device_printf(sc->sc_dev, "dumping device error log\n");
5289	base = sc->error_event_table;
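	/*
	 * The error log lives in device SRAM; a pointer below 0x800000
	 * presumably means the firmware never set it up.
	 */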
5290	if (base < 0x800000) {
5291		device_printf(sc->sc_dev,
5292		    "Invalid error log pointer 0x%08x\n", base);
5293		return;
5294	}
5295
5296	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5297		device_printf(sc->sc_dev, "reading errlog failed\n");
5298		return;
5299	}
5300
5301	if (!table.valid) {
5302		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5303		return;
5304	}
5305
5306	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5307		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5308		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5309		    sc->sc_flags, table.valid);
5310	}
5311
5312	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5313	    iwm_desc_lookup(table.error_id));
5314	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5315	    table.trm_hw_status0);
5316	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5317	    table.trm_hw_status1);
5318	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5319	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5320	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5321	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5322	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5323	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5324	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5325	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5326	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5327	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5328	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5329	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5330	    table.fw_rev_type);
5331	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5332	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5333	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5334	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5335	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5336	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5337	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5338	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5339	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5340	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5341	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5342	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5343	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5344	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5345	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5346	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5347	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5348	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5349	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5350
5351	if (sc->umac_error_event_table)
5352		iwm_nic_umac_error(sc);
5353}
5354#endif
5355
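/*
 * Advance the RX ring read index, wrapping at the end of the ring;
 * e.g. on a 256-entry ring, cur == 255 advances to (255 + 1) % 256 == 0.
 */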
5356#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
5357
5358/*
5359 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5360 * Basic structure from if_iwn
5361 */
5362static void
5363iwm_notif_intr(struct iwm_softc *sc)
5364{
5365	struct ieee80211com *ic = &sc->sc_ic;
5366	uint16_t hw;
5367
5368	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5369	    BUS_DMASYNC_POSTREAD);
5370
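	/*
	 * closed_rb_num is the index of the most recently closed receive
	 * buffer, i.e. the firmware's write pointer.  It is little-endian
	 * and only 12 bits wide, hence the mask.
	 */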
5371	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5372
5373	/*
5374	 * Process responses
5375	 */
5376	while (sc->rxq.cur != hw) {
5377		struct iwm_rx_ring *ring = &sc->rxq;
5378		struct iwm_rx_data *data = &ring->data[ring->cur];
5379		struct iwm_rx_packet *pkt;
5380		struct iwm_cmd_response *cresp;
5381		int qid, idx, code;
5382
5383		bus_dmamap_sync(ring->data_dmat, data->map,
5384		    BUS_DMASYNC_POSTREAD);
5385		pkt = mtod(data->m, struct iwm_rx_packet *);
5386
5387		qid = pkt->hdr.qid & ~0x80;
5388		idx = pkt->hdr.idx;
5389
5390		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5391		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5392		    "rx packet qid=%d idx=%d type=%x %d %d\n",
5393		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);
5394
5395		/*
5396		 * randomly get these from the firmware, no idea why.
5397		 * they at least seem harmless, so just ignore them for now
5398		 */
5399		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
5400		    || pkt->len_n_flags == htole32(0x55550000))) {
5401			ADVANCE_RXQ(sc);
5402			continue;
5403		}
5404
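		/*
		 * Wake up any thread waiting synchronously on this
		 * notification (see the sc_notif_wait infrastructure).
		 */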
5405		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);
5406
5407		switch (code) {
5408		case IWM_REPLY_RX_PHY_CMD:
5409			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
5410			break;
5411
5412		case IWM_REPLY_RX_MPDU_CMD:
5413			iwm_mvm_rx_rx_mpdu(sc, data->m);
5414			break;
5415
5416		case IWM_TX_CMD:
5417			iwm_mvm_rx_tx_cmd(sc, pkt, data);
5418			break;
5419
5420		case IWM_MISSED_BEACONS_NOTIFICATION: {
5421			struct iwm_missed_beacons_notif *resp;
5422			int missed;
5423
5424			/* XXX look at mac_id to determine interface ID */
5425			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5426
5427			resp = (void *)pkt->data;
5428			missed = le32toh(resp->consec_missed_beacons);
5429
5430			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5431			    "%s: MISSED_BEACON: mac_id=%d, "
5432			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5433			    "num_rx=%d\n",
5434			    __func__,
5435			    le32toh(resp->mac_id),
5436			    le32toh(resp->consec_missed_beacons_since_last_rx),
5437			    le32toh(resp->consec_missed_beacons),
5438			    le32toh(resp->num_expected_beacons),
5439			    le32toh(resp->num_recvd_beacons));
5440
5441			/* Be paranoid */
5442			if (vap == NULL)
5443				break;
5444
5445			/* XXX no net80211 locking? */
5446			if (vap->iv_state == IEEE80211_S_RUN &&
5447			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5448				if (missed > vap->iv_bmissthreshold) {
5449					/* XXX bad locking; turn into task */
5450					IWM_UNLOCK(sc);
5451					ieee80211_beacon_miss(ic);
5452					IWM_LOCK(sc);
5453				}
5454			}
5455
5456			break;
5457		}
5458
5459		case IWM_MFUART_LOAD_NOTIFICATION:
5460			break;
5461
5462		case IWM_MVM_ALIVE:
5463			break;
5464
5465		case IWM_CALIB_RES_NOTIF_PHY_DB:
5466			break;
5467
5468		case IWM_STATISTICS_NOTIFICATION: {
5469			struct iwm_notif_statistics *stats;
5470			stats = (void *)pkt->data;
5471			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
5472			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
5473			break;
5474		}
5475
5476		case IWM_NVM_ACCESS_CMD:
5477		case IWM_MCC_UPDATE_CMD:
5478			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5479				memcpy(sc->sc_cmd_resp,
5480				    pkt, sizeof(sc->sc_cmd_resp));
5481			}
5482			break;
5483
5484		case IWM_MCC_CHUB_UPDATE_CMD: {
5485			struct iwm_mcc_chub_notif *notif;
5486			notif = (void *)pkt->data;
5487
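			/*
			 * The 16-bit mcc field packs a two-letter country
			 * code, high byte first; e.g. 0x5553 decodes to
			 * the ASCII string "US".
			 */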
5488			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5489			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5490			sc->sc_fw_mcc[2] = '\0';
5491			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
5492			    "fw source %d sent CC '%s'\n",
5493			    notif->source_id, sc->sc_fw_mcc);
5494			break;
5495		}
5496
5497		case IWM_DTS_MEASUREMENT_NOTIFICATION:
5498		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
5499				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
5500			struct iwm_dts_measurement_notif_v1 *notif;
5501
5502			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5503				device_printf(sc->sc_dev,
5504				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5505				break;
5506			}
5507			notif = (void *)pkt->data;
5508			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5509			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5510			    notif->temp);
5511			break;
5512		}
5513
5514		case IWM_PHY_CONFIGURATION_CMD:
5515		case IWM_TX_ANT_CONFIGURATION_CMD:
5516		case IWM_ADD_STA:
5517		case IWM_MAC_CONTEXT_CMD:
5518		case IWM_REPLY_SF_CFG_CMD:
5519		case IWM_POWER_TABLE_CMD:
5520		case IWM_PHY_CONTEXT_CMD:
5521		case IWM_BINDING_CONTEXT_CMD:
5522		case IWM_TIME_EVENT_CMD:
5523		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
5524		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
5525		case IWM_SCAN_ABORT_UMAC:
5526		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
5527		case IWM_SCAN_OFFLOAD_ABORT_CMD:
5528		case IWM_REPLY_BEACON_FILTERING_CMD:
5529		case IWM_MAC_PM_POWER_TABLE:
5530		case IWM_TIME_QUOTA_CMD:
5531		case IWM_REMOVE_STA:
5532		case IWM_TXPATH_FLUSH:
5533		case IWM_LQ_CMD:
5534		case IWM_FW_PAGING_BLOCK_CMD:
5535		case IWM_BT_CONFIG:
5536		case IWM_REPLY_THERMAL_MNG_BACKOFF:
5537			cresp = (void *)pkt->data;
5538			if (sc->sc_wantresp == ((qid << 16) | idx)) {
5539				memcpy(sc->sc_cmd_resp,
5540				    pkt, sizeof(*pkt)+sizeof(*cresp));
5541			}
5542			break;
5543
5544		/* ignore */
5545		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
5546			break;
5547
5548		case IWM_INIT_COMPLETE_NOTIF:
5549			break;
5550
5551		case IWM_SCAN_OFFLOAD_COMPLETE:
5552			/* Payload (struct iwm_periodic_scan_complete) is unused. */
5554			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5555				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5556				ieee80211_runtask(ic, &sc->sc_es_task);
5557			}
5558			break;
5560
5561		case IWM_SCAN_ITERATION_COMPLETE:
5562			/* Payload (struct iwm_lmac_scan_complete_notif) is unused. */
5564			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5565			break;
5567
5568		case IWM_SCAN_COMPLETE_UMAC: {
5569			struct iwm_umac_scan_complete *notif;
5570			notif = (void *)pkt->data;
5571
5572			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
5573			    "UMAC scan complete, status=0x%x\n",
5574			    notif->status);
5575			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5576				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5577				ieee80211_runtask(ic, &sc->sc_es_task);
5578			}
5579			break;
5580		}
5581
5582		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
5583			struct iwm_umac_scan_iter_complete_notif *notif;
5584			notif = (void *)pkt->data;
5585
5586			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5587			    "complete, status=0x%x, %d channels scanned\n",
5588			    notif->status, notif->scanned_channels);
5589			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
5590			break;
5591		}
5592
5593		case IWM_REPLY_ERROR: {
5594			struct iwm_error_resp *resp;
5595			resp = (void *)pkt->data;
5596
5597			device_printf(sc->sc_dev,
5598			    "firmware error 0x%x, cmd 0x%x\n",
5599			    le32toh(resp->error_type),
5600			    resp->cmd_id);
5601			break;
5602		}
5603
5604		case IWM_TIME_EVENT_NOTIFICATION: {
5605			struct iwm_time_event_notif *notif;
5606			notif = (void *)pkt->data;
5607
5608			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5609			    "TE notif status = 0x%x action = 0x%x\n",
5610			    notif->status, notif->action);
5611			break;
5612		}
5613
5614		case IWM_MCAST_FILTER_CMD:
5615			break;
5616
5617		case IWM_SCD_QUEUE_CFG: {
5618			struct iwm_scd_txq_cfg_rsp *rsp;
5619			rsp = (void *)pkt->data;
5620
5621			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5622			    "queue cfg token=0x%x sta_id=%d "
5623			    "tid=%d scd_queue=%d\n",
5624			    rsp->token, rsp->sta_id, rsp->tid,
5625			    rsp->scd_queue);
5626			break;
5627		}
5628
5629		default:
5630			device_printf(sc->sc_dev,
5631			    "frame %d/%d %x UNHANDLED (this should "
5632			    "not happen)\n", qid, idx,
5633			    pkt->len_n_flags);
5634			break;
5635		}
5636
5637		/*
5638		 * Why test bit 0x80?  The Linux driver:
5639		 *
5640		 * There is one exception:  uCode sets bit 15 when it
5641		 * originates the response/notification, i.e. when the
5642		 * response/notification is not a direct response to a
5643		 * command sent by the driver.  For example, uCode issues
5644		 * IWM_REPLY_RX when it sends a received frame to the driver;
5645		 * it is not a direct response to any driver command.
5646		 *
5647		 * Ok, so since when is 7 == 15?  Well, the Linux driver
5648		 * uses a slightly different format for pkt->hdr, and "qid"
5649		 * is actually the upper byte of a two-byte field.
5650		 */
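		/*
		 * Illustrative sketch (not driver code): in the Linux
		 * layout, idx and qid form one little-endian 16-bit
		 * sequence field, roughly
		 *
		 *	uint16_t seq = (pkt->hdr.qid << 8) | pkt->hdr.idx;
		 *
		 * so bit 7 of our one-byte qid is bit 15 of that field,
		 * the "uCode-originated" flag tested below.
		 */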
5651		if (!(pkt->hdr.qid & (1 << 7))) {
5652			iwm_cmd_done(sc, pkt);
5653		}
5654
5655		ADVANCE_RXQ(sc);
5656	}
5657
5658	/*
5659	 * Tell the firmware what we have processed.
5660	 * The hardware seems to get upset unless the write pointer
5661	 * is aligned to a multiple of 8; round down accordingly.
5662	 */
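	/*
	 * Worked example (ring size assumed for illustration): if the
	 * firmware last closed buffer 0, hw becomes the final ring entry,
	 * e.g. 255 on a 256-entry ring, and 255 & ~7 == 248 is the value
	 * actually written.
	 */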
5663	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5664	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
5665}
5666
5667static void
5668iwm_intr(void *arg)
5669{
5670	struct iwm_softc *sc = arg;
5671	int handled = 0;
5672	int r1, r2, rv = 0;
5673	int isperiodic = 0;
5674
5675	IWM_LOCK(sc);
5676	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
5677
5678	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5679		uint32_t *ict = sc->ict_dma.vaddr;
5680		int tmp;
5681
5682		tmp = le32toh(ict[sc->ict_cur]);	/* table entries are LE */
5683		if (!tmp)
5684			goto out_ena;
5685
5686		/*
5687		 * ok, there was something.  keep plowing until we have all.
5688		 */
5689		r1 = r2 = 0;
5690		while (tmp) {
5691			r1 |= tmp;
5692			ict[sc->ict_cur] = 0;
5693			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5694			tmp = le32toh(ict[sc->ict_cur]);
5695		}
5696
5697		/* this is where the fun begins.  don't ask */
5698		if (r1 == 0xffffffff)
5699			r1 = 0;
5700
5701		/* i am not expected to understand this */
5702		if (r1 & 0xc0000)
5703			r1 |= 0x8000;
5704		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
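		/*
		 * A sketch of my reading of the shuffle above: the ICT
		 * table folds CSR_INT bits 0-7 and 24-31 into a 16-bit
		 * value, so the low byte stays put and the high byte is
		 * shifted back up to bits 24-31.  E.g. an ICT value of
		 * 0x8002 unfolds to 0x80000002.
		 */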
5705	} else {
5706		r1 = IWM_READ(sc, IWM_CSR_INT);
5707		/* "hardware gone" (where, fishing?) */
5708		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5709			goto out;
5710		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5711	}
5712	if (r1 == 0 && r2 == 0) {
5713		goto out_ena;
5714	}
5715
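	/*
	 * Writing the bits back acknowledges them; bits outside of
	 * sc_intmask are acked as well since we will not service them.
	 */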
5716	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5717
5718	/* Safely ignore these bits for debug checks below */
5719	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);
5720
5721	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5722		int i;
5723		struct ieee80211com *ic = &sc->sc_ic;
5724		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5725
5726#ifdef IWM_DEBUG
5727		iwm_nic_error(sc);
5728#endif
5729		/* Dump driver status (TX and RX rings) while we're here. */
5730		device_printf(sc->sc_dev, "driver status:\n");
5731		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
5732			struct iwm_tx_ring *ring = &sc->txq[i];
5733			device_printf(sc->sc_dev,
5734			    "  tx ring %2d: qid=%-2d cur=%-3d "
5735			    "queued=%-3d\n",
5736			    i, ring->qid, ring->cur, ring->queued);
5737		}
5738		device_printf(sc->sc_dev,
5739		    "  rx ring: cur=%d\n", sc->rxq.cur);
5740		device_printf(sc->sc_dev,
5741		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5742
5743		/* Don't stop the device; just do a VAP restart */
5744		IWM_UNLOCK(sc);
5745
5746		if (vap == NULL) {
5747			printf("%s: null vap\n", __func__);
5748			return;
5749		}
5750
5751		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5752		    "restarting\n", __func__, vap->iv_state);
5753
5754		ieee80211_restart_all(ic);
5755		return;
5756	}
5757
5758	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5759		handled |= IWM_CSR_INT_BIT_HW_ERR;
5760		device_printf(sc->sc_dev, "hardware error, stopping device\n");
5761		iwm_stop(sc);
5762		rv = 1;
5763		goto out;
5764	}
5765
5766	/* firmware chunk loaded */
5767	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5768		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
5769		handled |= IWM_CSR_INT_BIT_FH_TX;
5770		sc->sc_fw_chunk_done = 1;
5771		wakeup(&sc->sc_fw);
5772	}
5773
5774	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5775		handled |= IWM_CSR_INT_BIT_RF_KILL;
5776		if (iwm_check_rfkill(sc)) {
5777			device_printf(sc->sc_dev,
5778			    "%s: rfkill switch, disabling interface\n",
5779			    __func__);
5780			iwm_stop(sc);
5781		}
5782	}
5783
5784	/*
5785	 * The Linux driver uses periodic interrupts to avoid races.
5786	 * We cargo-cult like it's going out of fashion.
5787	 */
5788	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5789		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5790		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
5791		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5792			IWM_WRITE_1(sc,
5793			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
5794		isperiodic = 1;
5795	}
5796
5797	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5798		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
5799		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
5800
5801		iwm_notif_intr(sc);
5802
5803		/* enable periodic interrupt, see above */
5804		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5805			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
5806			    IWM_CSR_INT_PERIODIC_ENA);
5807	}
5808
5809	if (__predict_false(r1 & ~handled))
5810		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5811		    "%s: unhandled interrupts: %x\n", __func__, r1);
5812	rv = 1;
5813
5814 out_ena:
5815	iwm_restore_interrupts(sc);
5816 out:
5817	IWM_UNLOCK(sc);
5818	return;
5819}
5820
5821/*
5822 * Autoconf glue-sniffing
5823 */
5824#define	PCI_VENDOR_INTEL		0x8086
5825#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5826#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5827#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5828#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5829#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5830#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5831#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5832#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5833#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5834#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5835
5836static const struct iwm_devices {
5837	uint16_t		device;
5838	const struct iwm_cfg	*cfg;
5839} iwm_devices[] = {
5840	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
5841	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
5842	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
5843	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
5844	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
5845	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
5846	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
5847	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
5848	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
5849	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
5850};
5851
5852static int
5853iwm_probe(device_t dev)
5854{
5855	int i;
5856
5857	for (i = 0; i < nitems(iwm_devices); i++) {
5858		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5859		    pci_get_device(dev) == iwm_devices[i].device) {
5860			device_set_desc(dev, iwm_devices[i].cfg->name);
5861			return (BUS_PROBE_DEFAULT);
5862		}
5863	}
5864
5865	return (ENXIO);
5866}
5867
5868static int
5869iwm_dev_check(device_t dev)
5870{
5871	struct iwm_softc *sc;
5872	uint16_t devid;
5873	int i;
5874
5875	sc = device_get_softc(dev);
5876
5877	devid = pci_get_device(dev);
5878	for (i = 0; i < nitems(iwm_devices); i++) {
5879		if (iwm_devices[i].device == devid) {
5880			sc->cfg = iwm_devices[i].cfg;
5881			return (0);
5882		}
5883	}
5884	device_printf(dev, "unknown adapter type\n");
5885	return ENXIO;
5886}
5887
5888/* PCI registers */
5889#define PCI_CFG_RETRY_TIMEOUT	0x041
5890
5891static int
5892iwm_pci_attach(device_t dev)
5893{
5894	struct iwm_softc *sc;
5895	int count, error, rid;
5896	uint16_t reg;
5897
5898	sc = device_get_softc(dev);
5899
5900	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5901	 * PCI Tx retries from interfering with C3 CPU state */
5902	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5903
5904	/* Enable bus-mastering and hardware bug workaround. */
5905	pci_enable_busmaster(dev);
5906	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5907	/* if !MSI */
5908	if (reg & PCIM_STATUS_INTxSTATE) {
5909		reg &= ~PCIM_STATUS_INTxSTATE;
5910	}
5911	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5912
5913	rid = PCIR_BAR(0);
5914	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5915	    RF_ACTIVE);
5916	if (sc->sc_mem == NULL) {
5917		device_printf(sc->sc_dev, "can't map mem space\n");
5918		return (ENXIO);
5919	}
5920	sc->sc_st = rman_get_bustag(sc->sc_mem);
5921	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5922
5923	/* Install interrupt handler. */
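	/*
	 * Prefer a single MSI vector (resource id 1); fall back to the
	 * shareable legacy INTx line (resource id 0) if MSI allocation
	 * fails.
	 */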
5924	count = 1;
5925	rid = 0;
5926	if (pci_alloc_msi(dev, &count) == 0)
5927		rid = 1;
5928	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5929	    (rid != 0 ? 0 : RF_SHAREABLE));
5930	if (sc->sc_irq == NULL) {
5931		device_printf(dev, "can't map interrupt\n");
5932		return (ENXIO);
5933	}
5934	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5935	    NULL, iwm_intr, sc, &sc->sc_ih);
5936	if (error != 0 || sc->sc_ih == NULL) {
5937		device_printf(dev, "can't establish interrupt\n");
5938		return (ENXIO);
5939	}
5940	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5941
5942	return (0);
5943}
5944
5945static void
5946iwm_pci_detach(device_t dev)
5947{
5948	struct iwm_softc *sc = device_get_softc(dev);
5949
5950	if (sc->sc_irq != NULL) {
5951		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5952		bus_release_resource(dev, SYS_RES_IRQ,
5953		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5954		pci_release_msi(dev);
5955	}
5956	if (sc->sc_mem != NULL)
5957		bus_release_resource(dev, SYS_RES_MEMORY,
5958		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5959}
5960
5963static int
5964iwm_attach(device_t dev)
5965{
5966	struct iwm_softc *sc = device_get_softc(dev);
5967	struct ieee80211com *ic = &sc->sc_ic;
5968	int error;
5969	int txq_i, i;
5970
5971	sc->sc_dev = dev;
5972	sc->sc_attached = 1;
5973	IWM_LOCK_INIT(sc);
5974	mbufq_init(&sc->sc_snd, ifqmaxlen);
5975	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5976	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5977	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5978
5979	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5980	if (sc->sc_notif_wait == NULL) {
5981		device_printf(dev, "failed to init notification wait struct\n");
5982		goto fail;
5983	}
5984
5985	/* Init phy db */
5986	sc->sc_phy_db = iwm_phy_db_init(sc);
5987	if (!sc->sc_phy_db) {
5988		device_printf(dev, "Cannot init phy_db\n");
5989		goto fail;
5990	}
5991
5992	/* PCI attach */
5993	error = iwm_pci_attach(dev);
5994	if (error != 0)
5995		goto fail;
5996
5997	sc->sc_wantresp = -1;
5998
5999	/* Check device type */
6000	error = iwm_dev_check(dev);
6001	if (error != 0)
6002		goto fail;
6003
6004	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6005	/*
6006	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
6007	 * changed, and now the revision step also includes bit 0-1 (no more
6008	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
6009	 * in the old format.
6010	 */
6011	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
6012		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
6013				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
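	/*
	 * Worked example (assuming IWM_CSR_HW_REV_STEP() extracts bits
	 * 2-3): the 8000 family reports the step in bits 0-1, so the
	 * value is shifted left by 2 before extraction and the extracted
	 * step shifted back into bits 2-3, where the old "dash" format
	 * kept it.  A raw low nibble of 0x1 (step 1) is thus stored as
	 * 0x4.
	 */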
6014
6015	if (iwm_prepare_card_hw(sc) != 0) {
6016		device_printf(dev, "could not initialize hardware\n");
6017		goto fail;
6018	}
6019
6020	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
6021		int ret;
6022		uint32_t hw_step;
6023
6024		/*
6025		 * In order to recognize C step the driver should read the
6026		 * chip version id located at the AUX bus MISC address.
6027		 */
6028		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6029			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6030		DELAY(2);
6031
6032		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6033				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6034				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6035				   25000);
6036		if (!ret) {
6037			device_printf(sc->sc_dev,
6038			    "Failed to wake up the nic\n");
6039			goto fail;
6040		}
6041
6042		if (iwm_nic_lock(sc)) {
6043			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6044			hw_step |= IWM_ENABLE_WFPM;
6045			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6046			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6047			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6048			if (hw_step == 0x3)
6049				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6050						(IWM_SILICON_C_STEP << 2);
6051			iwm_nic_unlock(sc);
6052		} else {
6053			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6054			goto fail;
6055		}
6056	}
6057
6058	/* special-case 7265D, it has the same PCI IDs. */
6059	if (sc->cfg == &iwm7265_cfg &&
6060	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6061		sc->cfg = &iwm7265d_cfg;
6062	}
6063
6064	/* Allocate DMA memory for firmware transfers. */
6065	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6066		device_printf(dev, "could not allocate memory for firmware\n");
6067		goto fail;
6068	}
6069
6070	/* Allocate "Keep Warm" page. */
6071	if ((error = iwm_alloc_kw(sc)) != 0) {
6072		device_printf(dev, "could not allocate keep warm page\n");
6073		goto fail;
6074	}
6075
6076	/* We use ICT interrupts */
6077	if ((error = iwm_alloc_ict(sc)) != 0) {
6078		device_printf(dev, "could not allocate ICT table\n");
6079		goto fail;
6080	}
6081
6082	/* Allocate TX scheduler "rings". */
6083	if ((error = iwm_alloc_sched(sc)) != 0) {
6084		device_printf(dev, "could not allocate TX scheduler rings\n");
6085		goto fail;
6086	}
6087
6088	/* Allocate TX rings */
6089	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6090		if ((error = iwm_alloc_tx_ring(sc,
6091		    &sc->txq[txq_i], txq_i)) != 0) {
6092			device_printf(dev,
6093			    "could not allocate TX ring %d\n",
6094			    txq_i);
6095			goto fail;
6096		}
6097	}
6098
6099	/* Allocate RX ring. */
6100	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6101		device_printf(dev, "could not allocate RX ring\n");
6102		goto fail;
6103	}
6104
6105	/* Clear pending interrupts. */
6106	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6107
6108	ic->ic_softc = sc;
6109	ic->ic_name = device_get_nameunit(sc->sc_dev);
6110	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6111	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6112
6113	/* Set device capabilities. */
6114	ic->ic_caps =
6115	    IEEE80211_C_STA |
6116	    IEEE80211_C_WPA |		/* WPA/RSN */
6117	    IEEE80211_C_WME |
6118	    IEEE80211_C_PMGT |
6119	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6120	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6121//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6122	    ;
6123	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6124		sc->sc_phyctxt[i].id = i;
6125		sc->sc_phyctxt[i].color = 0;
6126		sc->sc_phyctxt[i].ref = 0;
6127		sc->sc_phyctxt[i].channel = NULL;
6128	}
6129
6130	/* Default noise floor */
6131	sc->sc_noise = -96;
6132
6133	/* Max RSSI */
6134	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6135
6136	sc->sc_preinit_hook.ich_func = iwm_preinit;
6137	sc->sc_preinit_hook.ich_arg = sc;
6138	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6139		device_printf(dev, "config_intrhook_establish failed\n");
6140		goto fail;
6141	}
6142
6143#ifdef IWM_DEBUG
6144	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6145	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6146	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6147#endif
6148
6149	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6150	    "<-%s\n", __func__);
6151
6152	return 0;
6153
6154	/* Free allocated memory if something failed during attachment. */
6155fail:
6156	iwm_detach_local(sc, 0);
6157
6158	return ENXIO;
6159}
6160
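/*
 * An address is usable only if it is neither all-zero nor has the
 * IEEE 802 group/multicast bit (bit 0 of the first octet) set.
 */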
6161static int
6162iwm_is_valid_ether_addr(uint8_t *addr)
6163{
6164	static const char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6165
6166	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6167		return (FALSE);
6168
6169	return (TRUE);
6170}
6171
6172static int
6173iwm_update_edca(struct ieee80211com *ic)
6174{
6175	struct iwm_softc *sc = ic->ic_softc;
6176
6177	device_printf(sc->sc_dev, "%s: called\n", __func__);
6178	return (0);
6179}
6180
6181static void
6182iwm_preinit(void *arg)
6183{
6184	struct iwm_softc *sc = arg;
6185	device_t dev = sc->sc_dev;
6186	struct ieee80211com *ic = &sc->sc_ic;
6187	int error;
6188
6189	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6190	    "->%s\n", __func__);
6191
6192	IWM_LOCK(sc);
6193	if ((error = iwm_start_hw(sc)) != 0) {
6194		device_printf(dev, "could not initialize hardware\n");
6195		IWM_UNLOCK(sc);
6196		goto fail;
6197	}
6198
6199	error = iwm_run_init_mvm_ucode(sc, 1);
6200	iwm_stop_device(sc);
6201	if (error) {
6202		IWM_UNLOCK(sc);
6203		goto fail;
6204	}
6205	device_printf(dev,
6206	    "hw rev 0x%x, fw ver %s, address %s\n",
6207	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6208	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6209
6210	/* not all hardware can do 5GHz band */
6211	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6212		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6213		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6214	IWM_UNLOCK(sc);
6215
6216	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6217	    ic->ic_channels);
6218
6219	/*
6220	 * At this point we've committed - if we fail to do setup,
6221	 * we now also have to tear down the net80211 state.
6222	 */
6223	ieee80211_ifattach(ic);
6224	ic->ic_vap_create = iwm_vap_create;
6225	ic->ic_vap_delete = iwm_vap_delete;
6226	ic->ic_raw_xmit = iwm_raw_xmit;
6227	ic->ic_node_alloc = iwm_node_alloc;
6228	ic->ic_scan_start = iwm_scan_start;
6229	ic->ic_scan_end = iwm_scan_end;
6230	ic->ic_update_mcast = iwm_update_mcast;
6231	ic->ic_getradiocaps = iwm_init_channel_map;
6232	ic->ic_set_channel = iwm_set_channel;
6233	ic->ic_scan_curchan = iwm_scan_curchan;
6234	ic->ic_scan_mindwell = iwm_scan_mindwell;
6235	ic->ic_wme.wme_update = iwm_update_edca;
6236	ic->ic_parent = iwm_parent;
6237	ic->ic_transmit = iwm_transmit;
6238	iwm_radiotap_attach(sc);
6239	if (bootverbose)
6240		ieee80211_announce(ic);
6241
6242	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6243	    "<-%s\n", __func__);
6244	config_intrhook_disestablish(&sc->sc_preinit_hook);
6245
6246	return;
6247fail:
6248	config_intrhook_disestablish(&sc->sc_preinit_hook);
6249	iwm_detach_local(sc, 0);
6250}
6251
6252/*
6253 * Attach the interface to 802.11 radiotap.
6254 */
6255static void
6256iwm_radiotap_attach(struct iwm_softc *sc)
6257{
6258	struct ieee80211com *ic = &sc->sc_ic;
6259
6260	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6261	    "->%s begin\n", __func__);
6262	ieee80211_radiotap_attach(ic,
6263	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6264	    IWM_TX_RADIOTAP_PRESENT,
6265	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6266	    IWM_RX_RADIOTAP_PRESENT);
6267	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6268	    "->%s end\n", __func__);
6269}
6270
6271static struct ieee80211vap *
6272iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6273    enum ieee80211_opmode opmode, int flags,
6274    const uint8_t bssid[IEEE80211_ADDR_LEN],
6275    const uint8_t mac[IEEE80211_ADDR_LEN])
6276{
6277	struct iwm_vap *ivp;
6278	struct ieee80211vap *vap;
6279
6280	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6281		return NULL;
6282	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6283	vap = &ivp->iv_vap;
6284	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6285	vap->iv_bmissthreshold = 10;            /* override default */
6286	/* Override with driver methods. */
6287	ivp->iv_newstate = vap->iv_newstate;
6288	vap->iv_newstate = iwm_newstate;
6289
6290	ivp->id = IWM_DEFAULT_MACID;
6291	ivp->color = IWM_DEFAULT_COLOR;
6292
6293	ieee80211_ratectl_init(vap);
6294	/* Complete setup. */
6295	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6296	    mac);
6297	ic->ic_opmode = opmode;
6298
6299	return vap;
6300}
6301
6302static void
6303iwm_vap_delete(struct ieee80211vap *vap)
6304{
6305	struct iwm_vap *ivp = IWM_VAP(vap);
6306
6307	ieee80211_ratectl_deinit(vap);
6308	ieee80211_vap_detach(vap);
6309	free(ivp, M_80211_VAP);
6310}
6311
6312static void
6313iwm_scan_start(struct ieee80211com *ic)
6314{
6315	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6316	struct iwm_softc *sc = ic->ic_softc;
6317	int error;
6318
6319	IWM_LOCK(sc);
6320	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6321		/* This should not be possible */
6322		device_printf(sc->sc_dev,
6323		    "%s: Previous scan not completed yet\n", __func__);
6324	}
6325	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6326		error = iwm_mvm_umac_scan(sc);
6327	else
6328		error = iwm_mvm_lmac_scan(sc);
6329	if (error != 0) {
6330		device_printf(sc->sc_dev, "could not initiate scan\n");
6331		IWM_UNLOCK(sc);
6332		ieee80211_cancel_scan(vap);
6333	} else {
6334		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6335		iwm_led_blink_start(sc);
6336		IWM_UNLOCK(sc);
6337	}
6338}
6339
6340static void
6341iwm_scan_end(struct ieee80211com *ic)
6342{
6343	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6344	struct iwm_softc *sc = ic->ic_softc;
6345
6346	IWM_LOCK(sc);
6347	iwm_led_blink_stop(sc);
6348	if (vap->iv_state == IEEE80211_S_RUN)
6349		iwm_mvm_led_enable(sc);
6350	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6351		/*
6352		 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6353		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6354		 * taskqueue.
6355		 */
6356		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6357		iwm_mvm_scan_stop_wait(sc);
6358	}
6359	IWM_UNLOCK(sc);
6360
6361	/*
6362	 * Make sure we don't race if sc_es_task is still enqueued here.
6363	 * This is to make sure that it won't call ieee80211_scan_done
6364	 * when we have already started the next scan.
6365	 */
6366	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6367}
6368
6369static void
6370iwm_update_mcast(struct ieee80211com *ic)
6371{
6372}
6373
6374static void
6375iwm_set_channel(struct ieee80211com *ic)
6376{
6377}
6378
6379static void
6380iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6381{
6382}
6383
6384static void
6385iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6386{
6388}
6389
6390void
6391iwm_init_task(void *arg1)
6392{
6393	struct iwm_softc *sc = arg1;
6394
6395	IWM_LOCK(sc);
6396	while (sc->sc_flags & IWM_FLAG_BUSY)
6397		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6398	sc->sc_flags |= IWM_FLAG_BUSY;
6399	iwm_stop(sc);
6400	if (sc->sc_ic.ic_nrunning > 0)
6401		iwm_init(sc);
6402	sc->sc_flags &= ~IWM_FLAG_BUSY;
6403	wakeup(&sc->sc_flags);
6404	IWM_UNLOCK(sc);
6405}
6406
6407static int
6408iwm_resume(device_t dev)
6409{
6410	struct iwm_softc *sc = device_get_softc(dev);
6411	int do_reinit = 0;
6412
6413	/*
6414	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6415	 * PCI Tx retries from interfering with C3 CPU state.
6416	 */
6417	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6418	iwm_init_task(sc);
6419
6420	IWM_LOCK(sc);
6421	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6422		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6423		do_reinit = 1;
6424	}
6425	IWM_UNLOCK(sc);
6426
6427	if (do_reinit)
6428		ieee80211_resume_all(&sc->sc_ic);
6429
6430	return 0;
6431}
6432
6433static int
6434iwm_suspend(device_t dev)
6435{
6436	int do_stop = 0;
6437	struct iwm_softc *sc = device_get_softc(dev);
6438
6439	do_stop = (sc->sc_ic.ic_nrunning > 0);
6440
6441	ieee80211_suspend_all(&sc->sc_ic);
6442
6443	if (do_stop) {
6444		IWM_LOCK(sc);
6445		iwm_stop(sc);
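		/*
		 * Leave IWM_FLAG_SCANNING set so that iwm_resume()
		 * notices the interface was running and reinitializes
		 * it via ieee80211_resume_all().
		 */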
6446		sc->sc_flags |= IWM_FLAG_SCANNING;
6447		IWM_UNLOCK(sc);
6448	}
6449
6450	return (0);
6451}
6452
6453static int
6454iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6455{
6456	struct iwm_fw_info *fw = &sc->sc_fw;
6457	device_t dev = sc->sc_dev;
6458	int i;
6459
6460	if (!sc->sc_attached)
6461		return 0;
6462	sc->sc_attached = 0;
6463
6464	if (do_net80211)
6465		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6466
6467	callout_drain(&sc->sc_led_blink_to);
6468	callout_drain(&sc->sc_watchdog_to);
6469	iwm_stop_device(sc);
6470	if (do_net80211) {
6471		ieee80211_ifdetach(&sc->sc_ic);
6472	}
6473
6474	iwm_phy_db_free(sc->sc_phy_db);
6475	sc->sc_phy_db = NULL;
6476
6477	iwm_free_nvm_data(sc->nvm_data);
6478
6479	/* Free descriptor rings */
6480	iwm_free_rx_ring(sc, &sc->rxq);
6481	for (i = 0; i < nitems(sc->txq); i++)
6482		iwm_free_tx_ring(sc, &sc->txq[i]);
6483
6484	/* Free firmware */
6485	if (fw->fw_fp != NULL)
6486		iwm_fw_info_free(fw);
6487
6488	/* Free scheduler */
6489	iwm_dma_contig_free(&sc->sched_dma);
6490	iwm_dma_contig_free(&sc->ict_dma);
6491	iwm_dma_contig_free(&sc->kw_dma);
6492	iwm_dma_contig_free(&sc->fw_dma);
6493
6494	iwm_free_fw_paging(sc);
6495
6496	/* Finished with the hardware - detach things */
6497	iwm_pci_detach(dev);
6498
6499	if (sc->sc_notif_wait != NULL) {
6500		iwm_notification_wait_free(sc->sc_notif_wait);
6501		sc->sc_notif_wait = NULL;
6502	}
6503
6504	mbufq_drain(&sc->sc_snd);
6505	IWM_LOCK_DESTROY(sc);
6506
6507	return (0);
6508}
6509
6510static int
6511iwm_detach(device_t dev)
6512{
6513	struct iwm_softc *sc = device_get_softc(dev);
6514
6515	return (iwm_detach_local(sc, 1));
6516}
6517
6518static device_method_t iwm_pci_methods[] = {
6519	/* Device interface */
6520	DEVMETHOD(device_probe,		iwm_probe),
6521	DEVMETHOD(device_attach,	iwm_attach),
6522	DEVMETHOD(device_detach,	iwm_detach),
6523	DEVMETHOD(device_suspend,	iwm_suspend),
6524	DEVMETHOD(device_resume,	iwm_resume),
6525
6526	DEVMETHOD_END
6527};
6528
6529static driver_t iwm_pci_driver = {
6530	"iwm",
6531	iwm_pci_methods,
6532	sizeof(struct iwm_softc)
6533};
6534
6535static devclass_t iwm_devclass;
6536
6537DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6538MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6539MODULE_DEPEND(iwm, pci, 1, 1, 1);
6540MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6541