/* if_iwm.c revision 330194 */
1/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2
3/*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
23 *
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26 *
27 ***********************************************************************
28 *
29 * This file is provided under a dual BSD/GPLv2 license.  When using or
30 * redistributing this file, you may do so under either license.
31 *
32 * GPL LICENSE SUMMARY
33 *
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43 * General Public License for more details.
44 *
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
49 *
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
52 *
53 * Contact Information:
54 *  Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56 *
57 *
58 * BSD LICENSE
59 *
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 *
67 *  * Redistributions of source code must retain the above copyright
68 *    notice, this list of conditions and the following disclaimer.
69 *  * Redistributions in binary form must reproduce the above copyright
70 *    notice, this list of conditions and the following disclaimer in
71 *    the documentation and/or other materials provided with the
72 *    distribution.
73 *  * Neither the name Intel Corporation nor the names of its
74 *    contributors may be used to endorse or promote products derived
75 *    from this software without specific prior written permission.
76 *
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88 */
89
90/*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92 *
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
96 *
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104 */
105#include <sys/cdefs.h>
106__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 330194 2018-03-01 06:25:03Z eadler $");
107
108#include "opt_wlan.h"
109
110#include <sys/param.h>
111#include <sys/bus.h>
112#include <sys/conf.h>
113#include <sys/endian.h>
114#include <sys/firmware.h>
115#include <sys/kernel.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/module.h>
120#include <sys/proc.h>
121#include <sys/rman.h>
122#include <sys/socket.h>
123#include <sys/sockio.h>
124#include <sys/sysctl.h>
125#include <sys/linker.h>
126
127#include <machine/bus.h>
128#include <machine/endian.h>
129#include <machine/resource.h>
130
131#include <dev/pci/pcivar.h>
132#include <dev/pci/pcireg.h>
133
134#include <net/bpf.h>
135
136#include <net/if.h>
137#include <net/if_var.h>
138#include <net/if_arp.h>
139#include <net/if_dl.h>
140#include <net/if_media.h>
141#include <net/if_types.h>
142
143#include <netinet/in.h>
144#include <netinet/in_systm.h>
145#include <netinet/if_ether.h>
146#include <netinet/ip.h>
147
148#include <net80211/ieee80211_var.h>
149#include <net80211/ieee80211_regdomain.h>
150#include <net80211/ieee80211_ratectl.h>
151#include <net80211/ieee80211_radiotap.h>
152
153#include <dev/iwm/if_iwmreg.h>
154#include <dev/iwm/if_iwmvar.h>
155#include <dev/iwm/if_iwm_config.h>
156#include <dev/iwm/if_iwm_debug.h>
157#include <dev/iwm/if_iwm_notif_wait.h>
158#include <dev/iwm/if_iwm_util.h>
159#include <dev/iwm/if_iwm_binding.h>
160#include <dev/iwm/if_iwm_phy_db.h>
161#include <dev/iwm/if_iwm_mac_ctxt.h>
162#include <dev/iwm/if_iwm_phy_ctxt.h>
163#include <dev/iwm/if_iwm_time_event.h>
164#include <dev/iwm/if_iwm_power.h>
165#include <dev/iwm/if_iwm_scan.h>
166
167#include <dev/iwm/if_iwm_pcie_trans.h>
168#include <dev/iwm/if_iwm_led.h>
169#include <dev/iwm/if_iwm_fw.h>
170
/*
 * Channel numbers covered by the NVM of 7000-family devices; the bound
 * below keeps this table within IWM_NUM_CHANNELS.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
181
/*
 * Channel numbers covered by the NVM of 8000-family devices (a superset
 * of the 7000-family list above); bounded by IWM_NUM_CHANNELS_8000.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
192
193#define IWM_NUM_2GHZ_CHANNELS	14
194#define IWM_N_HW_ADDR_MASK	0xF
195
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 *
 * 'rate' is the 802.11 rate in 500 kbit/s units (so 2 = 1 Mbit/s,
 * 108 = 54 Mbit/s); 'plcp' is the matching hardware PLCP code.
 * The first four entries are CCK rates, the rest OFDM — see the
 * IWM_RIDX_* macros below.
 */
const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index of the first CCK / first OFDM entry in iwm_rates[]. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
222
/*
 * One NVM section image; 'data' holds 'length' valid bytes
 * (see iwm_nvm_read_section()).
 */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

/* Timeouts (in ticks) waiting for firmware ALIVE / calibration events. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* Result of waiting for the firmware's ALIVE notification. */
struct iwm_mvm_alive_data {
	int valid;
	uint32_t scd_base_addr;
};
235
236static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
237static int	iwm_firmware_store_section(struct iwm_softc *,
238                                           enum iwm_ucode_type,
239                                           const uint8_t *, size_t);
240static int	iwm_set_default_calib(struct iwm_softc *, const void *);
241static void	iwm_fw_info_free(struct iwm_fw_info *);
242static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
243static int	iwm_alloc_fwmem(struct iwm_softc *);
244static int	iwm_alloc_sched(struct iwm_softc *);
245static int	iwm_alloc_kw(struct iwm_softc *);
246static int	iwm_alloc_ict(struct iwm_softc *);
247static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
248static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
249static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
250static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
251                                  int);
252static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
253static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
254static void	iwm_enable_interrupts(struct iwm_softc *);
255static void	iwm_restore_interrupts(struct iwm_softc *);
256static void	iwm_disable_interrupts(struct iwm_softc *);
257static void	iwm_ict_reset(struct iwm_softc *);
258static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
259static void	iwm_stop_device(struct iwm_softc *);
260static void	iwm_mvm_nic_config(struct iwm_softc *);
261static int	iwm_nic_rx_init(struct iwm_softc *);
262static int	iwm_nic_tx_init(struct iwm_softc *);
263static int	iwm_nic_init(struct iwm_softc *);
264static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
265static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
266static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
267                                   uint16_t, uint8_t *, uint16_t *);
268static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
269				     uint16_t *, uint32_t);
270static uint32_t	iwm_eeprom_channel_flags(uint16_t);
271static void	iwm_add_channel_band(struct iwm_softc *,
272		    struct ieee80211_channel[], int, int *, int, size_t,
273		    const uint8_t[]);
274static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
275		    struct ieee80211_channel[]);
276static struct iwm_nvm_data *
277	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
278			   const uint16_t *, const uint16_t *,
279			   const uint16_t *, const uint16_t *,
280			   const uint16_t *);
281static void	iwm_free_nvm_data(struct iwm_nvm_data *);
282static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
283					       struct iwm_nvm_data *,
284					       const uint16_t *,
285					       const uint16_t *);
286static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
287			    const uint16_t *);
288static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
289static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
290				  const uint16_t *);
291static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
292				   const uint16_t *);
293static void	iwm_set_radio_cfg(const struct iwm_softc *,
294				  struct iwm_nvm_data *, uint32_t);
295static struct iwm_nvm_data *
296	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
297static int	iwm_nvm_init(struct iwm_softc *);
298static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
299				      const struct iwm_fw_desc *);
300static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
301					     bus_addr_t, uint32_t);
302static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
303						const struct iwm_fw_sects *,
304						int, int *);
305static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
306					   const struct iwm_fw_sects *,
307					   int, int *);
308static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
309					       const struct iwm_fw_sects *);
310static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
311					  const struct iwm_fw_sects *);
312static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
313static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
314static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
315static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
316                                              enum iwm_ucode_type);
317static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
318static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
319static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
320static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
321					    struct iwm_rx_phy_info *);
322static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
323                                      struct iwm_rx_packet *,
324                                      struct iwm_rx_data *);
325static int	iwm_get_noise(struct iwm_softc *sc,
326		    const struct iwm_mvm_statistics_rx_non_phy *);
327static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *);
328static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
329                                         struct iwm_rx_packet *,
330				         struct iwm_node *);
331static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
332                                  struct iwm_rx_data *);
333static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
334#if 0
335static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
336                                 uint16_t);
337#endif
338static const struct iwm_rate *
339	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
340			struct mbuf *, struct iwm_tx_cmd *);
341static int	iwm_tx(struct iwm_softc *, struct mbuf *,
342                       struct ieee80211_node *, int);
343static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
344			     const struct ieee80211_bpf_params *);
345static int	iwm_mvm_flush_tx_path(struct iwm_softc *sc,
346				      uint32_t tfd_msk, uint32_t flags);
347static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
348					        struct iwm_mvm_add_sta_cmd_v7 *,
349                                                int *);
350static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
351                                       int);
352static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
353static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
354static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
355                                           struct iwm_int_sta *,
356				           const uint8_t *, uint16_t, uint16_t);
357static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
358static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
359static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
360static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
361static int	iwm_release(struct iwm_softc *, struct iwm_node *);
362static struct ieee80211_node *
363		iwm_node_alloc(struct ieee80211vap *,
364		               const uint8_t[IEEE80211_ADDR_LEN]);
365static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
366static int	iwm_media_change(struct ifnet *);
367static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
368static void	iwm_endscan_cb(void *, int);
369static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
370					struct iwm_sf_cfg_cmd *,
371					struct ieee80211_node *);
372static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
373static int	iwm_send_bt_init_conf(struct iwm_softc *);
374static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
375static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
376static int	iwm_init_hw(struct iwm_softc *);
377static void	iwm_init(struct iwm_softc *);
378static void	iwm_start(struct iwm_softc *);
379static void	iwm_stop(struct iwm_softc *);
380static void	iwm_watchdog(void *);
381static void	iwm_parent(struct ieee80211com *);
382#ifdef IWM_DEBUG
383static const char *
384		iwm_desc_lookup(uint32_t);
385static void	iwm_nic_error(struct iwm_softc *);
386static void	iwm_nic_umac_error(struct iwm_softc *);
387#endif
388static void	iwm_notif_intr(struct iwm_softc *);
389static void	iwm_intr(void *);
390static int	iwm_attach(device_t);
391static int	iwm_is_valid_ether_addr(uint8_t *);
392static void	iwm_preinit(void *);
393static int	iwm_detach_local(struct iwm_softc *sc, int);
394static void	iwm_init_task(void *);
395static void	iwm_radiotap_attach(struct iwm_softc *);
396static struct ieee80211vap *
397		iwm_vap_create(struct ieee80211com *,
398		               const char [IFNAMSIZ], int,
399		               enum ieee80211_opmode, int,
400		               const uint8_t [IEEE80211_ADDR_LEN],
401		               const uint8_t [IEEE80211_ADDR_LEN]);
402static void	iwm_vap_delete(struct ieee80211vap *);
403static void	iwm_scan_start(struct ieee80211com *);
404static void	iwm_scan_end(struct ieee80211com *);
405static void	iwm_update_mcast(struct ieee80211com *);
406static void	iwm_set_channel(struct ieee80211com *);
407static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
408static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
409static int	iwm_detach(device_t);
410
411/*
412 * Firmware parser.
413 */
414
415static int
416iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
417{
418	const struct iwm_fw_cscheme_list *l = (const void *)data;
419
420	if (dlen < sizeof(*l) ||
421	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
422		return EINVAL;
423
424	/* we don't actually store anything for now, always use s/w crypto */
425
426	return 0;
427}
428
429static int
430iwm_firmware_store_section(struct iwm_softc *sc,
431    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
432{
433	struct iwm_fw_sects *fws;
434	struct iwm_fw_desc *fwone;
435
436	if (type >= IWM_UCODE_TYPE_MAX)
437		return EINVAL;
438	if (dlen < sizeof(uint32_t))
439		return EINVAL;
440
441	fws = &sc->sc_fw.fw_sects[type];
442	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
443		return EINVAL;
444
445	fwone = &fws->fw_sect[fws->fw_count];
446
447	/* first 32bit are device load offset */
448	memcpy(&fwone->offset, data, sizeof(uint32_t));
449
450	/* rest is data */
451	fwone->data = data + sizeof(uint32_t);
452	fwone->len = dlen - sizeof(uint32_t);
453
454	fws->fw_count++;
455
456	return 0;
457}
458
/* Scan-channel count assumed until the firmware TLVs say otherwise. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* On-wire layout of an IWM_UCODE_TLV_DEF_CALIB TLV payload. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
466
467static int
468iwm_set_default_calib(struct iwm_softc *sc, const void *data)
469{
470	const struct iwm_tlv_calib_data *def_calib = data;
471	uint32_t ucode_type = le32toh(def_calib->ucode_type);
472
473	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
474		device_printf(sc->sc_dev,
475		    "Wrong ucode_type %u for default "
476		    "calibration.\n", ucode_type);
477		return EINVAL;
478	}
479
480	sc->sc_default_calib[ucode_type].flow_trigger =
481	    def_calib->calib.flow_trigger;
482	sc->sc_default_calib[ucode_type].event_trigger =
483	    def_calib->calib.event_trigger;
484
485	return 0;
486}
487
/*
 * Release the firmware image held by 'fw' and clear all parsed section
 * descriptors.  fw_status is deliberately left alone so the load/parse
 * state machine in iwm_read_firmware() stays consistent.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
496
497static int
498iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
499{
500	struct iwm_fw_info *fw = &sc->sc_fw;
501	const struct iwm_tlv_ucode_header *uhdr;
502	struct iwm_ucode_tlv tlv;
503	enum iwm_ucode_tlv_type tlv_type;
504	const struct firmware *fwp;
505	const uint8_t *data;
506	uint32_t usniffer_img;
507	uint32_t paging_mem_size;
508	int num_of_cpus;
509	int error = 0;
510	size_t len;
511
512	if (fw->fw_status == IWM_FW_STATUS_DONE &&
513	    ucode_type != IWM_UCODE_INIT)
514		return 0;
515
516	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
517		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
518	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
519
520	if (fw->fw_fp != NULL)
521		iwm_fw_info_free(fw);
522
523	/*
524	 * Load firmware into driver memory.
525	 * fw_fp will be set.
526	 */
527	IWM_UNLOCK(sc);
528	fwp = firmware_get(sc->cfg->fw_name);
529	IWM_LOCK(sc);
530	if (fwp == NULL) {
531		device_printf(sc->sc_dev,
532		    "could not read firmware %s (error %d)\n",
533		    sc->cfg->fw_name, error);
534		goto out;
535	}
536	fw->fw_fp = fwp;
537
538	/* (Re-)Initialize default values. */
539	sc->sc_capaflags = 0;
540	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
541	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
542	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
543
544	/*
545	 * Parse firmware contents
546	 */
547
548	uhdr = (const void *)fw->fw_fp->data;
549	if (*(const uint32_t *)fw->fw_fp->data != 0
550	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
551		device_printf(sc->sc_dev, "invalid firmware %s\n",
552		    sc->cfg->fw_name);
553		error = EINVAL;
554		goto out;
555	}
556
557	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
558	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
559	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
560	    IWM_UCODE_API(le32toh(uhdr->ver)));
561	data = uhdr->data;
562	len = fw->fw_fp->datasize - sizeof(*uhdr);
563
564	while (len >= sizeof(tlv)) {
565		size_t tlv_len;
566		const void *tlv_data;
567
568		memcpy(&tlv, data, sizeof(tlv));
569		tlv_len = le32toh(tlv.length);
570		tlv_type = le32toh(tlv.type);
571
572		len -= sizeof(tlv);
573		data += sizeof(tlv);
574		tlv_data = data;
575
576		if (len < tlv_len) {
577			device_printf(sc->sc_dev,
578			    "firmware too short: %zu bytes\n",
579			    len);
580			error = EINVAL;
581			goto parse_out;
582		}
583
584		switch ((int)tlv_type) {
585		case IWM_UCODE_TLV_PROBE_MAX_LEN:
586			if (tlv_len < sizeof(uint32_t)) {
587				device_printf(sc->sc_dev,
588				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
589				    __func__,
590				    (int) tlv_len);
591				error = EINVAL;
592				goto parse_out;
593			}
594			sc->sc_capa_max_probe_len
595			    = le32toh(*(const uint32_t *)tlv_data);
596			/* limit it to something sensible */
597			if (sc->sc_capa_max_probe_len >
598			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
599				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
600				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
601				    "ridiculous\n", __func__);
602				error = EINVAL;
603				goto parse_out;
604			}
605			break;
606		case IWM_UCODE_TLV_PAN:
607			if (tlv_len) {
608				device_printf(sc->sc_dev,
609				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
610				    __func__,
611				    (int) tlv_len);
612				error = EINVAL;
613				goto parse_out;
614			}
615			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
616			break;
617		case IWM_UCODE_TLV_FLAGS:
618			if (tlv_len < sizeof(uint32_t)) {
619				device_printf(sc->sc_dev,
620				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
621				    __func__,
622				    (int) tlv_len);
623				error = EINVAL;
624				goto parse_out;
625			}
626			/*
627			 * Apparently there can be many flags, but Linux driver
628			 * parses only the first one, and so do we.
629			 *
630			 * XXX: why does this override IWM_UCODE_TLV_PAN?
631			 * Intentional or a bug?  Observations from
632			 * current firmware file:
633			 *  1) TLV_PAN is parsed first
634			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
635			 * ==> this resets TLV_PAN to itself... hnnnk
636			 */
637			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
638			break;
639		case IWM_UCODE_TLV_CSCHEME:
640			if ((error = iwm_store_cscheme(sc,
641			    tlv_data, tlv_len)) != 0) {
642				device_printf(sc->sc_dev,
643				    "%s: iwm_store_cscheme(): returned %d\n",
644				    __func__,
645				    error);
646				goto parse_out;
647			}
648			break;
649		case IWM_UCODE_TLV_NUM_OF_CPU:
650			if (tlv_len != sizeof(uint32_t)) {
651				device_printf(sc->sc_dev,
652				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
653				    __func__,
654				    (int) tlv_len);
655				error = EINVAL;
656				goto parse_out;
657			}
658			num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
659			if (num_of_cpus == 2) {
660				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
661					TRUE;
662				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
663					TRUE;
664				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
665					TRUE;
666			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
667				device_printf(sc->sc_dev,
668				    "%s: Driver supports only 1 or 2 CPUs\n",
669				    __func__);
670				error = EINVAL;
671				goto parse_out;
672			}
673			break;
674		case IWM_UCODE_TLV_SEC_RT:
675			if ((error = iwm_firmware_store_section(sc,
676			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
677				device_printf(sc->sc_dev,
678				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
679				    __func__,
680				    error);
681				goto parse_out;
682			}
683			break;
684		case IWM_UCODE_TLV_SEC_INIT:
685			if ((error = iwm_firmware_store_section(sc,
686			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
687				device_printf(sc->sc_dev,
688				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
689				    __func__,
690				    error);
691				goto parse_out;
692			}
693			break;
694		case IWM_UCODE_TLV_SEC_WOWLAN:
695			if ((error = iwm_firmware_store_section(sc,
696			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
697				device_printf(sc->sc_dev,
698				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
699				    __func__,
700				    error);
701				goto parse_out;
702			}
703			break;
704		case IWM_UCODE_TLV_DEF_CALIB:
705			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
706				device_printf(sc->sc_dev,
707				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
708				    __func__,
709				    (int) tlv_len,
710				    (int) sizeof(struct iwm_tlv_calib_data));
711				error = EINVAL;
712				goto parse_out;
713			}
714			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
715				device_printf(sc->sc_dev,
716				    "%s: iwm_set_default_calib() failed: %d\n",
717				    __func__,
718				    error);
719				goto parse_out;
720			}
721			break;
722		case IWM_UCODE_TLV_PHY_SKU:
723			if (tlv_len != sizeof(uint32_t)) {
724				error = EINVAL;
725				device_printf(sc->sc_dev,
726				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
727				    __func__,
728				    (int) tlv_len);
729				goto parse_out;
730			}
731			sc->sc_fw.phy_config =
732			    le32toh(*(const uint32_t *)tlv_data);
733			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
734						  IWM_FW_PHY_CFG_TX_CHAIN) >>
735						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
736			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
737						  IWM_FW_PHY_CFG_RX_CHAIN) >>
738						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
739			break;
740
741		case IWM_UCODE_TLV_API_CHANGES_SET: {
742			const struct iwm_ucode_api *api;
743			if (tlv_len != sizeof(*api)) {
744				error = EINVAL;
745				goto parse_out;
746			}
747			api = (const struct iwm_ucode_api *)tlv_data;
748			/* Flags may exceed 32 bits in future firmware. */
749			if (le32toh(api->api_index) > 0) {
750				device_printf(sc->sc_dev,
751				    "unsupported API index %d\n",
752				    le32toh(api->api_index));
753				goto parse_out;
754			}
755			sc->sc_ucode_api = le32toh(api->api_flags);
756			break;
757		}
758
759		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
760			const struct iwm_ucode_capa *capa;
761			int idx, i;
762			if (tlv_len != sizeof(*capa)) {
763				error = EINVAL;
764				goto parse_out;
765			}
766			capa = (const struct iwm_ucode_capa *)tlv_data;
767			idx = le32toh(capa->api_index);
768			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
769				device_printf(sc->sc_dev,
770				    "unsupported API index %d\n", idx);
771				goto parse_out;
772			}
773			for (i = 0; i < 32; i++) {
774				if ((le32toh(capa->api_capa) & (1U << i)) == 0)
775					continue;
776				setbit(sc->sc_enabled_capa, i + (32 * idx));
777			}
778			break;
779		}
780
781		case 48: /* undocumented TLV */
782		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
783		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
784			/* ignore, not used by current driver */
785			break;
786
787		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
788			if ((error = iwm_firmware_store_section(sc,
789			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
790			    tlv_len)) != 0)
791				goto parse_out;
792			break;
793
794		case IWM_UCODE_TLV_PAGING:
795			if (tlv_len != sizeof(uint32_t)) {
796				error = EINVAL;
797				goto parse_out;
798			}
799			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
800
801			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
802			    "%s: Paging: paging enabled (size = %u bytes)\n",
803			    __func__, paging_mem_size);
804			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
805				device_printf(sc->sc_dev,
806					"%s: Paging: driver supports up to %u bytes for paging image\n",
807					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
808				error = EINVAL;
809				goto out;
810			}
811			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
812				device_printf(sc->sc_dev,
813				    "%s: Paging: image isn't multiple %u\n",
814				    __func__, IWM_FW_PAGING_SIZE);
815				error = EINVAL;
816				goto out;
817			}
818
819			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
820			    paging_mem_size;
821			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
822			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
823			    paging_mem_size;
824			break;
825
826		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
827			if (tlv_len != sizeof(uint32_t)) {
828				error = EINVAL;
829				goto parse_out;
830			}
831			sc->sc_capa_n_scan_channels =
832			  le32toh(*(const uint32_t *)tlv_data);
833			break;
834
835		case IWM_UCODE_TLV_FW_VERSION:
836			if (tlv_len != sizeof(uint32_t) * 3) {
837				error = EINVAL;
838				goto parse_out;
839			}
840			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
841			    "%d.%d.%d",
842			    le32toh(((const uint32_t *)tlv_data)[0]),
843			    le32toh(((const uint32_t *)tlv_data)[1]),
844			    le32toh(((const uint32_t *)tlv_data)[2]));
845			break;
846
847		case IWM_UCODE_TLV_FW_MEM_SEG:
848			break;
849
850		default:
851			device_printf(sc->sc_dev,
852			    "%s: unknown firmware section %d, abort\n",
853			    __func__, tlv_type);
854			error = EINVAL;
855			goto parse_out;
856		}
857
858		len -= roundup(tlv_len, 4);
859		data += roundup(tlv_len, 4);
860	}
861
862	KASSERT(error == 0, ("unhandled error"));
863
864 parse_out:
865	if (error) {
866		device_printf(sc->sc_dev, "firmware parse error %d, "
867		    "section type %d\n", error, tlv_type);
868	}
869
870	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
871		device_printf(sc->sc_dev,
872		    "device uses unsupported power ops\n");
873		error = ENOTSUP;
874	}
875
876 out:
877	if (error) {
878		fw->fw_status = IWM_FW_STATUS_NONE;
879		if (fw->fw_fp != NULL)
880			iwm_fw_info_free(fw);
881	} else
882		fw->fw_status = IWM_FW_STATUS_DONE;
883	wakeup(&sc->sc_fw);
884
885	return error;
886}
887
888/*
889 * DMA resource routines
890 */
891
892/* fwmem is used to load firmware onto the card */
893static int
894iwm_alloc_fwmem(struct iwm_softc *sc)
895{
896	/* Must be aligned on a 16-byte boundary. */
897	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
898	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
899}
900
901/* tx scheduler rings.  not used? */
902static int
903iwm_alloc_sched(struct iwm_softc *sc)
904{
905	/* TX scheduler rings must be aligned on a 1KB boundary. */
906	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
907	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
908}
909
910/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
911static int
912iwm_alloc_kw(struct iwm_softc *sc)
913{
914	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
915}
916
917/* interrupt cause table */
918static int
919iwm_alloc_ict(struct iwm_softc *sc)
920{
921	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
922	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
923}
924
925static int
926iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
927{
928	bus_size_t size;
929	int i, error;
930
931	ring->cur = 0;
932
933	/* Allocate RX descriptors (256-byte aligned). */
934	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
935	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
936	if (error != 0) {
937		device_printf(sc->sc_dev,
938		    "could not allocate RX ring DMA memory\n");
939		goto fail;
940	}
941	ring->desc = ring->desc_dma.vaddr;
942
943	/* Allocate RX status area (16-byte aligned). */
944	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
945	    sizeof(*ring->stat), 16);
946	if (error != 0) {
947		device_printf(sc->sc_dev,
948		    "could not allocate RX status DMA memory\n");
949		goto fail;
950	}
951	ring->stat = ring->stat_dma.vaddr;
952
953        /* Create RX buffer DMA tag. */
954        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
955            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
956            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
957        if (error != 0) {
958                device_printf(sc->sc_dev,
959                    "%s: could not create RX buf DMA tag, error %d\n",
960                    __func__, error);
961                goto fail;
962        }
963
964	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
965	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
966	if (error != 0) {
967		device_printf(sc->sc_dev,
968		    "%s: could not create RX buf DMA map, error %d\n",
969		    __func__, error);
970		goto fail;
971	}
972	/*
973	 * Allocate and map RX buffers.
974	 */
975	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
976		struct iwm_rx_data *data = &ring->data[i];
977		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
978		if (error != 0) {
979			device_printf(sc->sc_dev,
980			    "%s: could not create RX buf DMA map, error %d\n",
981			    __func__, error);
982			goto fail;
983		}
984		data->m = NULL;
985
986		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
987			goto fail;
988		}
989	}
990	return 0;
991
992fail:	iwm_free_rx_ring(sc, ring);
993	return error;
994}
995
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	/* Reset the ring state */
	ring->cur = 0;

	/*
	 * The hw rx ring index in shared memory must also be cleared,
	 * otherwise the discrepancy can cause reprocessing chaos.
	 *
	 * NOTE(review): this clears sc->rxq.stat rather than ring->stat.
	 * Harmless while the driver has a single RX ring, but the 'ring'
	 * parameter is effectively ignored here — confirm if more rings
	 * are ever added.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}
1008
1009static void
1010iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1011{
1012	int i;
1013
1014	iwm_dma_contig_free(&ring->desc_dma);
1015	iwm_dma_contig_free(&ring->stat_dma);
1016
1017	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1018		struct iwm_rx_data *data = &ring->data[i];
1019
1020		if (data->m != NULL) {
1021			bus_dmamap_sync(ring->data_dmat, data->map,
1022			    BUS_DMASYNC_POSTREAD);
1023			bus_dmamap_unload(ring->data_dmat, data->map);
1024			m_freem(data->m);
1025			data->m = NULL;
1026		}
1027		if (data->map != NULL) {
1028			bus_dmamap_destroy(ring->data_dmat, data->map);
1029			data->map = NULL;
1030		}
1031	}
1032	if (ring->spare_map != NULL) {
1033		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1034		ring->spare_map = NULL;
1035	}
1036	if (ring->data_dmat != NULL) {
1037		bus_dma_tag_destroy(ring->data_dmat);
1038		ring->data_dmat = NULL;
1039	}
1040}
1041
1042static int
1043iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1044{
1045	bus_addr_t paddr;
1046	bus_size_t size;
1047	size_t maxsize;
1048	int nsegments;
1049	int i, error;
1050
1051	ring->qid = qid;
1052	ring->queued = 0;
1053	ring->cur = 0;
1054
1055	/* Allocate TX descriptors (256-byte aligned). */
1056	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1057	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1058	if (error != 0) {
1059		device_printf(sc->sc_dev,
1060		    "could not allocate TX ring DMA memory\n");
1061		goto fail;
1062	}
1063	ring->desc = ring->desc_dma.vaddr;
1064
1065	/*
1066	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1067	 * to allocate commands space for other rings.
1068	 */
1069	if (qid > IWM_MVM_CMD_QUEUE)
1070		return 0;
1071
1072	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1073	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1074	if (error != 0) {
1075		device_printf(sc->sc_dev,
1076		    "could not allocate TX cmd DMA memory\n");
1077		goto fail;
1078	}
1079	ring->cmd = ring->cmd_dma.vaddr;
1080
1081	/* FW commands may require more mapped space than packets. */
1082	if (qid == IWM_MVM_CMD_QUEUE) {
1083		maxsize = IWM_RBUF_SIZE;
1084		nsegments = 1;
1085	} else {
1086		maxsize = MCLBYTES;
1087		nsegments = IWM_MAX_SCATTER - 2;
1088	}
1089
1090	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1091	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1092            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1093	if (error != 0) {
1094		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1095		goto fail;
1096	}
1097
1098	paddr = ring->cmd_dma.paddr;
1099	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1100		struct iwm_tx_data *data = &ring->data[i];
1101
1102		data->cmd_paddr = paddr;
1103		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1104		    + offsetof(struct iwm_tx_cmd, scratch);
1105		paddr += sizeof(struct iwm_device_cmd);
1106
1107		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1108		if (error != 0) {
1109			device_printf(sc->sc_dev,
1110			    "could not create TX buf DMA map\n");
1111			goto fail;
1112		}
1113	}
1114	KASSERT(paddr == ring->cmd_dma.paddr + size,
1115	    ("invalid physical address"));
1116	return 0;
1117
1118fail:	iwm_free_tx_ring(sc, ring);
1119	return error;
1120}
1121
1122static void
1123iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1124{
1125	int i;
1126
1127	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1128		struct iwm_tx_data *data = &ring->data[i];
1129
1130		if (data->m != NULL) {
1131			bus_dmamap_sync(ring->data_dmat, data->map,
1132			    BUS_DMASYNC_POSTWRITE);
1133			bus_dmamap_unload(ring->data_dmat, data->map);
1134			m_freem(data->m);
1135			data->m = NULL;
1136		}
1137	}
1138	/* Clear TX descriptors. */
1139	memset(ring->desc, 0, ring->desc_dma.size);
1140	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1141	    BUS_DMASYNC_PREWRITE);
1142	sc->qfullmsk &= ~(1 << ring->qid);
1143	ring->queued = 0;
1144	ring->cur = 0;
1145
1146	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1147		iwm_pcie_clear_cmd_in_flight(sc);
1148}
1149
1150static void
1151iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1152{
1153	int i;
1154
1155	iwm_dma_contig_free(&ring->desc_dma);
1156	iwm_dma_contig_free(&ring->cmd_dma);
1157
1158	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1159		struct iwm_tx_data *data = &ring->data[i];
1160
1161		if (data->m != NULL) {
1162			bus_dmamap_sync(ring->data_dmat, data->map,
1163			    BUS_DMASYNC_POSTWRITE);
1164			bus_dmamap_unload(ring->data_dmat, data->map);
1165			m_freem(data->m);
1166			data->m = NULL;
1167		}
1168		if (data->map != NULL) {
1169			bus_dmamap_destroy(ring->data_dmat, data->map);
1170			data->map = NULL;
1171		}
1172	}
1173	if (ring->data_dmat != NULL) {
1174		bus_dma_tag_destroy(ring->data_dmat);
1175		ring->data_dmat = NULL;
1176	}
1177}
1178
1179/*
1180 * High-level hardware frobbing routines
1181 */
1182
/* Enable the full "init" interrupt set and remember it in sc_intmask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1189
/* Re-arm whatever interrupt mask was last saved in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1195
/* Mask all interrupt sources and ack anything pending (CSR and FH). */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1206
/*
 * Clear and re-program the interrupt cause table (ICT), then switch
 * the driver into ICT interrupt mode and re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	/* Interrupts must be off while the table is rewritten. */
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1230
1231/* iwlwifi pcie/trans.c */
1232
1233/*
1234 * Since this .. hard-resets things, it's time to actually
1235 * mark the first vap (if any) as having no mac context.
1236 * It's annoying, but since the driver is potentially being
1237 * stop/start'ed whilst active (thanks openbsd port!) we
1238 * have to correctly track this.
1239 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	/* Deactivate the TX scheduler first. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	if (iwm_nic_lock(sc)) {
		/*
		 * Stop each Tx DMA channel; 'mask' accumulates the idle
		 * bits of every channel so they can all be polled at once.
		 */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1322
1323/* iwlwifi: mvm/ops.c */
/*
 * Program the hardware interface configuration register from the MAC
 * revision and the firmware-provided PHY configuration word.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	/* Unpack radio type/step/dash fields from the PHY config word. */
	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1366
/*
 * Program the RX DMA engine: reset pointers, install the ring and
 * status-area physical addresses, and enable the RX channel.
 * Returns 0, or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1423
/*
 * Program the TX side: deactivate the scheduler, install the keep-warm
 * page and every TX ring's descriptor base address.
 * Returns 0, or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1458
1459static int
1460iwm_nic_init(struct iwm_softc *sc)
1461{
1462	int error;
1463
1464	iwm_apm_init(sc);
1465	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1466		iwm_set_pwr(sc);
1467
1468	iwm_mvm_nic_config(sc);
1469
1470	if ((error = iwm_nic_rx_init(sc)) != 0)
1471		return error;
1472
1473	/*
1474	 * Ditto for TX, from iwn
1475	 */
1476	if ((error = iwm_nic_tx_init(sc)) != 0)
1477		return error;
1478
1479	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1480	    "%s: shadow registers enabled\n", __func__);
1481	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1482
1483	return 0;
1484}
1485
/*
 * Access-category to device TX FIFO mapping.
 * NOTE(review): the index order (VO first) matches Linux's
 * IEEE80211_AC_* numbering rather than FreeBSD's WME_AC_* (where BE
 * is 0) — confirm that callers index this table accordingly.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1492
1493static int
1494iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1495{
1496	if (!iwm_nic_lock(sc)) {
1497		device_printf(sc->sc_dev,
1498		    "%s: cannot enable txq %d\n",
1499		    __func__,
1500		    qid);
1501		return EBUSY;
1502	}
1503
1504	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1505
1506	if (qid == IWM_MVM_CMD_QUEUE) {
1507		/* unactivate before configuration */
1508		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1509		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1510		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1511
1512		iwm_nic_unlock(sc);
1513
1514		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1515
1516		if (!iwm_nic_lock(sc)) {
1517			device_printf(sc->sc_dev,
1518			    "%s: cannot enable txq %d\n", __func__, qid);
1519			return EBUSY;
1520		}
1521		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1522		iwm_nic_unlock(sc);
1523
1524		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1525		/* Set scheduler window size and frame limit. */
1526		iwm_write_mem32(sc,
1527		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1528		    sizeof(uint32_t),
1529		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1530		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1531		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1532		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1533
1534		if (!iwm_nic_lock(sc)) {
1535			device_printf(sc->sc_dev,
1536			    "%s: cannot enable txq %d\n", __func__, qid);
1537			return EBUSY;
1538		}
1539		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1540		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1541		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1542		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1543		    IWM_SCD_QUEUE_STTS_REG_MSK);
1544	} else {
1545		struct iwm_scd_txq_cfg_cmd cmd;
1546		int error;
1547
1548		iwm_nic_unlock(sc);
1549
1550		memset(&cmd, 0, sizeof(cmd));
1551		cmd.scd_queue = qid;
1552		cmd.enable = 1;
1553		cmd.sta_id = sta_id;
1554		cmd.tx_fifo = fifo;
1555		cmd.aggregate = 0;
1556		cmd.window = IWM_FRAME_LIMIT;
1557
1558		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1559		    sizeof(cmd), &cmd);
1560		if (error) {
1561			device_printf(sc->sc_dev,
1562			    "cannot enable txq %d\n", qid);
1563			return error;
1564		}
1565
1566		if (!iwm_nic_lock(sc))
1567			return EBUSY;
1568	}
1569
1570	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1571	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1572
1573	iwm_nic_unlock(sc);
1574
1575	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1576	    __func__, qid, fifo);
1577
1578	return 0;
1579}
1580
/*
 * Post-"alive" PCIe/scheduler bring-up: reset the ICT table, clear the
 * scheduler context area in SRAM, program the scheduler DRAM base,
 * enable the command queue and all TX DMA channels.
 * Returns 0 on success or an errno.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	iwm_nic_unlock(sc);

	/* Cross-check the scheduler base address reported by firmware. */
	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	/* reset context data, TX status and translation data */
	/* NOTE(review): iwm_write_mem's error code is flattened to EBUSY. */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	/* 'error' is 0 here: success. */
	return error;
}
1651
1652/*
1653 * NVM read access and content parsing.  We do not support
1654 * external NVM or writing NVM.
1655 * iwlwifi/mvm/nvm.c
1656 */
1657
/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

/* NVM access opcodes (op_code field of struct iwm_nvm_access_cmd). */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (status field of the NVM_ACCESS reply) */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1669
/*
 * Read one chunk of an NVM section via the firmware's NVM_ACCESS
 * command.  On success, *len is the number of bytes copied into
 * 'data' at 'offset' (a short or zero *len means the section is
 * exhausted).  Returns 0 on success or an errno.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Sanity-check the response against what was requested. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1756
1757/*
1758 * Reads an NVM section completely.
1759 * NICs prior to 7000 family don't have a real NVM, but just read
1760 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1761 * by uCode, we need to manually check in this case that we don't
1762 * overflow and try to read more than the EEPROM size.
1763 * For 7000 family NICs, we supply the maximal size we can read, and
1764 * the uCode fills the response with as much data as we can,
1765 * without overflowing, so no check is needed.
1766 */
1767static int
1768iwm_nvm_read_section(struct iwm_softc *sc,
1769	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1770{
1771	uint16_t seglen, length, offset = 0;
1772	int ret;
1773
1774	/* Set nvm section read length */
1775	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1776
1777	seglen = length;
1778
1779	/* Read the NVM until exhausted (reading less than requested) */
1780	while (seglen == length) {
1781		/* Check no memory assumptions fail and cause an overflow */
1782		if ((size_read + offset + length) >
1783		    sc->cfg->eeprom_size) {
1784			device_printf(sc->sc_dev,
1785			    "EEPROM size is too small for NVM\n");
1786			return ENOBUFS;
1787		}
1788
1789		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1790		if (ret) {
1791			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1792				    "Cannot read NVM from section %d offset %d, length %d\n",
1793				    section, offset, length);
1794			return ret;
1795		}
1796		offset += seglen;
1797	}
1798
1799	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1800		    "NVM section %d read completed\n", section);
1801	*len = offset;
1802	return 0;
1803}
1804
1805/*
1806 * BEGIN IWM_NVM_PARSE
1807 */
1808
1809/* iwlwifi/iwl-nvm-parse.c */
1810
/* NVM offsets (in words) definitions */
/*
 * Offsets inside the SW and calibration sections are relative to the
 * start of their section — hence the subtractions below.
 */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1828
/* NVM offsets (in words) for the 8000 series; the layout differs from
 * the 7000-family values in enum iwm_nvm_offsets above. */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1854
/* SKU Capabilities (actual values from NVM definition) */
/* Bits of the SKU capability word (IWM_SKU / IWM_SKU_8000 above). */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
1862
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* The 8000 series uses a wider radio-config word with different fields. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)

/* Fallback TX power limit; presumably dBm — TODO confirm against callers. */
#define DEFAULT_MAX_TX_POWER 16
1879
1880/**
1881 * enum iwm_nvm_channel_flags - channel flags in NVM
1882 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1883 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1884 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1885 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1886 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1887 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1888 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1889 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1890 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1891 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1892 */
1893enum iwm_nvm_channel_flags {
1894	IWM_NVM_CHANNEL_VALID = (1 << 0),
1895	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1896	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1897	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1898	IWM_NVM_CHANNEL_DFS = (1 << 7),
1899	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1900	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1901	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1902	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1903};
1904
1905/*
1906 * Translate EEPROM flags to net80211.
1907 */
1908static uint32_t
1909iwm_eeprom_channel_flags(uint16_t ch_flags)
1910{
1911	uint32_t nflags;
1912
1913	nflags = 0;
1914	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1915		nflags |= IEEE80211_CHAN_PASSIVE;
1916	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1917		nflags |= IEEE80211_CHAN_NOADHOC;
1918	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1919		nflags |= IEEE80211_CHAN_DFS;
1920		/* Just in case. */
1921		nflags |= IEEE80211_CHAN_NOADHOC;
1922	}
1923
1924	return (nflags);
1925}
1926
/*
 * Append net80211 channel entries for the NVM channel slots in
 * [ch_idx, ch_num) to chans[].  Channel numbers come from the
 * family-specific NVM channel table; per-channel flag words come from
 * the parsed NVM data.  Stops early if ieee80211_add_channel() fails
 * (e.g. when chans[] is full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		/* Flag words are stored little-endian in the NVM. */
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* Map the table slot to an IEEE channel number. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		/* Channels not marked VALID for this SKU are skipped. */
		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
1967
/*
 * net80211 ic_getradiocaps-style callback: build the channel list from
 * the parsed NVM channel flags.  2 GHz channels are always populated;
 * 5 GHz channels only when the SKU enables the 5.2 GHz band.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[IEEE80211_MODE_BYTES];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	if (data->sku_cap_band_52GHz_enable) {
		/* 7000 and 8000 families carry different channel tables. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2000
/*
 * Derive the MAC address on family-8000 devices.  Preference order:
 * the MAC_OVERRIDE NVM section (if present and sane), then the
 * address exposed through the WFMP PRPH registers.  If both fail, the
 * address is zeroed and an error is printed; the caller detects this
 * via iwm_is_valid_ether_addr().
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address found in otherwise-unset NVMs. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 * (Accept only a non-reserved, non-broadcast, valid unicast
		 * address.)
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Registers hold the bytes reversed; undo that here. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2059
2060static int
2061iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2062	    const uint16_t *phy_sku)
2063{
2064	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2065		return le16_to_cpup(nvm_sw + IWM_SKU);
2066
2067	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2068}
2069
2070static int
2071iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2072{
2073	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2074		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2075	else
2076		return le32_to_cpup((const uint32_t *)(nvm_sw +
2077						IWM_NVM_VERSION_8000));
2078}
2079
2080static int
2081iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2082		  const uint16_t *phy_sku)
2083{
2084        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2085                return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2086
2087        return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2088}
2089
2090static int
2091iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2092{
2093	int n_hw_addr;
2094
2095	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2096		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2097
2098	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2099
2100        return n_hw_addr & IWM_N_HW_ADDR_MASK;
2101}
2102
2103static void
2104iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2105		  uint32_t radio_cfg)
2106{
2107	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2108		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2109		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2110		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2111		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2112		return;
2113	}
2114
2115	/* set the radio configuration for family 8000 */
2116	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2117	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2118	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2119	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2120	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2121	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2122}
2123
/*
 * Fill in data->hw_addr from the NVM.  Pre-8000 devices store the
 * address as little-endian 16-bit words in the HW section (byte order
 * 214365); the 8000 family uses the MAC_OVERRIDE section or WFMP
 * registers instead.  Returns 0 on success, EINVAL if no valid
 * unicast address could be found.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2154
2155static struct iwm_nvm_data *
2156iwm_parse_nvm_data(struct iwm_softc *sc,
2157		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2158		   const uint16_t *nvm_calib, const uint16_t *mac_override,
2159		   const uint16_t *phy_sku, const uint16_t *regulatory)
2160{
2161	struct iwm_nvm_data *data;
2162	uint32_t sku, radio_cfg;
2163
2164	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2165		data = malloc(sizeof(*data) +
2166		    IWM_NUM_CHANNELS * sizeof(uint16_t),
2167		    M_DEVBUF, M_NOWAIT | M_ZERO);
2168	} else {
2169		data = malloc(sizeof(*data) +
2170		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2171		    M_DEVBUF, M_NOWAIT | M_ZERO);
2172	}
2173	if (!data)
2174		return NULL;
2175
2176	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2177
2178	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2179	iwm_set_radio_cfg(sc, data, radio_cfg);
2180
2181	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2182	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2183	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2184	data->sku_cap_11n_enable = 0;
2185
2186	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2187
2188	/* If no valid mac address was found - bail out */
2189	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2190		free(data, M_DEVBUF);
2191		return NULL;
2192	}
2193
2194	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2195		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
2196		    IWM_NUM_CHANNELS * sizeof(uint16_t));
2197	} else {
2198		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2199		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2200	}
2201
2202	return data;
2203}
2204
2205static void
2206iwm_free_nvm_data(struct iwm_nvm_data *data)
2207{
2208	if (data != NULL)
2209		free(data, M_DEVBUF);
2210}
2211
/*
 * Validate that the NVM sections mandatory for this device family are
 * present, then hand them all to iwm_parse_nvm_data().  Returns the
 * parsed data (freed by the caller via iwm_free_nvm_data()) or NULL
 * when a required section is missing.  Panics on an unknown family.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	/* Sections not checked above may legitimately be NULL here. */
	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2264
2265static int
2266iwm_nvm_init(struct iwm_softc *sc)
2267{
2268	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2269	int i, ret, section;
2270	uint32_t size_read = 0;
2271	uint8_t *nvm_buffer, *temp;
2272	uint16_t len;
2273
2274	memset(nvm_sections, 0, sizeof(nvm_sections));
2275
2276	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2277		return EINVAL;
2278
2279	/* load NVM values from nic */
2280	/* Read From FW NVM */
2281	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2282
2283	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2284	if (!nvm_buffer)
2285		return ENOMEM;
2286	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2287		/* we override the constness for initial read */
2288		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2289					   &len, size_read);
2290		if (ret)
2291			continue;
2292		size_read += len;
2293		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2294		if (!temp) {
2295			ret = ENOMEM;
2296			break;
2297		}
2298		memcpy(temp, nvm_buffer, len);
2299
2300		nvm_sections[section].data = temp;
2301		nvm_sections[section].length = len;
2302	}
2303	if (!size_read)
2304		device_printf(sc->sc_dev, "OTP is blank\n");
2305	free(nvm_buffer, M_DEVBUF);
2306
2307	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2308	if (!sc->nvm_data)
2309		return EINVAL;
2310	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2311		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2312
2313	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2314		if (nvm_sections[i].data != NULL)
2315			free(nvm_sections[i].data, M_DEVBUF);
2316	}
2317
2318	return 0;
2319}
2320
/*
 * DMA one firmware section into device SRAM, in chunks of at most
 * IWM_FH_MEM_TB_MAX_LENGTH bytes staged through the shared fw_dma
 * bounce buffer.  Destinations inside the extended SRAM window
 * additionally require toggling IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE
 * around each transfer.  Returns 0 or the errno from
 * iwm_pcie_load_firmware_chunk().
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		/* The final chunk may be shorter than chunk_sz. */
		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the DMA-safe buffer, then kick HW. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2373
2374/*
2375 * ucode
2376 */
/*
 * Program the FH service DMA channel to copy one firmware chunk from
 * host memory (phy_addr) to device SRAM (dst_addr), then sleep until
 * sc_fw_chunk_done is set (presumably by the FH_TX interrupt path —
 * see iwm_enable_fw_load_int()) or the wait times out.  Returns 0,
 * EBUSY if the NIC could not be locked, or ETIMEDOUT.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
			     bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while the descriptor is set up. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source address: low bits, then high bits combined with length. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Go: enable the channel and interrupt at end of transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2430
/*
 * Load the firmware sections belonging to one CPU on family-8000
 * devices, acking each section to the ucode through the
 * IWM_FH_UCODE_LOAD_STATUS register (sec_num grows a bitmask; CPU2's
 * acks occupy the upper 16 bits via shift_param).  *first_ucode_section
 * is left at the separator where this CPU's sections ended so the CPU2
 * pass can resume after it.  Returns 0 or an errno from the section
 * loader.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	if (iwm_nic_lock(sc)) {
		/* Report "all sections for this CPU loaded" to the ucode. */
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2492
/*
 * Load the firmware sections belonging to one CPU (non-secure flow).
 * Sections are streamed until an empty slot or a separator marker;
 * afterwards, on family-8000 parts only, the loading-completed bits
 * are reported through IWM_CSR_UCODE_LOAD_STATUS_ADDR (CPU2's bits in
 * the upper half via shift_param).  *first_ucode_section is left at
 * the separator for the CPU2 pass.  Returns 0 or an errno from the
 * section loader.
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		iwm_set_bits_prph(sc,
				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;

}
2545
/*
 * Top-level firmware download (non-secure/pre-8000 flow): load CPU1's
 * sections, then, for dual-CPU images, point CPU2 at its header and
 * load its sections too.  Finally enable interrupts and release the
 * CPU from reset so the ucode starts executing.  Returns 0 or an
 * errno.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
                iwm_write_prph(sc,
			       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
2581
/*
 * Top-level firmware download for the 8000 family (secure flow): the
 * CPU is released from reset first so the ucode is ready to receive
 * the secured image, then the CPU1 and CPU2 section groups are
 * loaded.  Returns 0 or an errno from the section loader.
 */
int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		    image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
	    &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
	    &first_ucode_section);
}
2606
/* XXX Get rid of this definition */
/*
 * Mask all interrupt sources except FH_TX, which signals completion
 * of firmware-chunk DMA during download (see iwm_start_fw() and
 * iwm_pcie_load_firmware_chunk()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2615
/* XXX Add proper rfkill support code */
/*
 * Take ownership of the hardware, clear interrupt and rfkill
 * handshake state, initialize the NIC and download the given firmware
 * image.  Interrupts are narrowed to FH_TX for the duration of the
 * download (see iwm_enable_fw_load_int()).  Returns 0 or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Ack/clear any pending interrupt causes. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2674
2675static int
2676iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2677{
2678	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2679		.valid = htole32(valid_tx_ant),
2680	};
2681
2682	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2683	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2684}
2685
2686/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the default calibration triggers for
 * the currently running ucode image (IWM_PHY_CONFIGURATION_CMD,
 * synchronous).  NOTE(review): phy_cfg_cmd is not zero-initialized —
 * assumes the struct has exactly the three fields set below; confirm
 * against the command definition.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2705
/*
 * Notification-wait callback for the IWM_MVM_ALIVE message.  Three
 * response layouts exist (v1/v2/v3), distinguished purely by payload
 * size.  Extracts the error/log event table pointers, the scheduler
 * base address and the OK/alive status into *data (a struct
 * iwm_mvm_alive_data).  Always returns TRUE so the waiter is woken.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		/* v1 layout: no UMAC error-log support. */
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		/* v2 layout adds the UMAC error-info address. */
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		/* v3 layout: 32-bit UMAC version fields. */
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2782
2783static int
2784iwm_wait_phy_db_entry(struct iwm_softc *sc,
2785	struct iwm_rx_packet *pkt, void *data)
2786{
2787	struct iwm_phy_db *phy_db = data;
2788
2789	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2790		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2791			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2792			    __func__, pkt->hdr.code);
2793		}
2794		return TRUE;
2795	}
2796
2797	if (iwm_phy_db_set_section(phy_db, pkt)) {
2798		device_printf(sc->sc_dev,
2799		    "%s: iwm_phy_db_set_section failed\n", __func__);
2800	}
2801
2802	return FALSE;
2803}
2804
/*
 * Load a ucode image of the given type and block (dropping the lock)
 * until the firmware's ALIVE notification arrives or times out.  On
 * success the PCIe transport is told the scheduler base address and,
 * if the image uses paging, the paging machinery is configured.  On
 * failure sc->cur_ucode is restored to the previously selected image
 * type.  Returns 0 or an errno.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register for the ALIVE notification before starting the fw. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* Secure-boot status aids debugging on the 8000 family. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2893
2894/*
2895 * mvm misc bits
2896 */
2897
2898/*
2899 * follows iwlwifi/fw.c
2900 */
/*
 * Boot the INIT firmware image and wait for its calibration results.
 *
 * If 'justnvm' is non-zero, only load the INIT image far enough to read
 * the NVM contents (MAC address etc.) and then bail out without running
 * calibrations.  Otherwise, configure BT coex, Smart FIFO, TX antennas
 * and PHY, then block until the firmware posts IWM_INIT_COMPLETE_NOTIF /
 * IWM_CALIB_RES_NOTIF_PHY_DB.
 *
 * Called with the IWM lock held; the lock is dropped around the final
 * notification wait.  Returns 0 on success or an errno.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/*
	 * Register interest in the calibration notifications before the
	 * firmware is started, so none can be missed.
	 */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/*
		 * NVM-only path: jump to 'error' with ret == 0 so the
		 * never-to-be-delivered calibration wait is removed.
		 */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.  iwm_wait_notification() removes the
	 * wait entry itself, so the success path skips 'error'.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
2993
2994/*
2995 * receive side
2996 */
2997
2998/* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	/* Allocate a jumbo cluster large enough for any RX frame. */
	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load into the spare map first, so that on failure the slot's
	 * existing mapping (and mbuf) remain intact and usable.
	 */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The hardware is handed addr >> 8, so buffers must be 256-aligned. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
3044
3045/* iwlwifi: mvm/rx.c */
3046#define IWM_RSSI_OFFSET 50
3047static int
3048iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3049{
3050	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3051	uint32_t agc_a, agc_b;
3052	uint32_t val;
3053
3054	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3055	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3056	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3057
3058	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3059	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3060	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3061
3062	/*
3063	 * dBm = rssi dB - agc dB - constant.
3064	 * Higher AGC (higher radio gain) means lower signal.
3065	 */
3066	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3067	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3068	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3069
3070	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3071	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3072	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3073
3074	return max_rssi_dbm;
3075}
3076
3077/* iwlwifi: mvm/rx.c */
3078/*
3079 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3080 * values are reported by the fw as positive values - need to negate
3081 * to obtain their dBM.  Account for missing antennas by replacing 0
3082 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3083 */
3084static int
3085iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3086{
3087	int energy_a, energy_b, energy_c, max_energy;
3088	uint32_t val;
3089
3090	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3091	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3092	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3093	energy_a = energy_a ? -energy_a : -256;
3094	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3095	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3096	energy_b = energy_b ? -energy_b : -256;
3097	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3098	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3099	energy_c = energy_c ? -energy_c : -256;
3100	max_energy = MAX(energy_a, energy_b);
3101	max_energy = MAX(max_energy, energy_c);
3102
3103	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3104	    "energy In A %d B %d C %d , and max %d\n",
3105	    energy_a, energy_b, energy_c, max_energy);
3106
3107	return max_energy;
3108}
3109
3110static void
3111iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3112	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3113{
3114	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3115
3116	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3117	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3118
3119	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3120}
3121
3122/*
3123 * Retrieve the average noise (in dBm) among receivers.
3124 */
3125static int
3126iwm_get_noise(struct iwm_softc *sc,
3127    const struct iwm_mvm_statistics_rx_non_phy *stats)
3128{
3129	int i, total, nbant, noise;
3130
3131	total = nbant = noise = 0;
3132	for (i = 0; i < 3; i++) {
3133		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3134		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3135		    __func__,
3136		    i,
3137		    noise);
3138
3139		if (noise) {
3140			total += noise;
3141			nbant++;
3142		}
3143	}
3144
3145	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3146	    __func__, nbant, total);
3147#if 0
3148	/* There should be at least one antenna but check anyway. */
3149	return (nbant == 0) ? -127 : (total / nbant) - 107;
3150#else
3151	/* For now, just hard-code it to -96 to be safe */
3152	return (-96);
3153#endif
3154}
3155
3156/*
3157 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3158 *
3159 * Handles the actual data of the Rx packet from the fw
3160 */
3161static void
3162iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m)
3163{
3164	struct ieee80211com *ic = &sc->sc_ic;
3165	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3166	struct ieee80211_frame *wh;
3167	struct ieee80211_node *ni;
3168	struct ieee80211_rx_stats rxs;
3169	struct iwm_rx_phy_info *phy_info;
3170	struct iwm_rx_mpdu_res_start *rx_res;
3171	struct iwm_rx_packet *pkt = mtod(m, struct iwm_rx_packet *);
3172	uint32_t len;
3173	uint32_t rx_pkt_status;
3174	int rssi;
3175
3176	phy_info = &sc->sc_last_phy_info;
3177	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3178	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3179	len = le16toh(rx_res->byte_count);
3180	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3181
3182	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3183		device_printf(sc->sc_dev,
3184		    "dsp size out of range [0,20]: %d\n",
3185		    phy_info->cfg_phy_cnt);
3186		goto fail;
3187	}
3188
3189	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3190	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3191		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3192		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3193		goto fail;
3194	}
3195
3196	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3197		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3198	} else {
3199		rssi = iwm_mvm_calc_rssi(sc, phy_info);
3200	}
3201
3202	/* Note: RSSI is absolute (ie a -ve value) */
3203	if (rssi < IWM_MIN_DBM)
3204		rssi = IWM_MIN_DBM;
3205	else if (rssi > IWM_MAX_DBM)
3206		rssi = IWM_MAX_DBM;
3207
3208	/* Map it to relative value */
3209	rssi = rssi - sc->sc_noise;
3210
3211	/* replenish ring for the buffer we're going to feed to the sharks */
3212	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3213		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3214		    __func__);
3215		goto fail;
3216	}
3217
3218	m->m_data = pkt->data + sizeof(*rx_res);
3219	m->m_pkthdr.len = m->m_len = len;
3220
3221	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3222	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3223
3224	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3225
3226	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3227	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3228	    __func__,
3229	    le16toh(phy_info->channel),
3230	    le16toh(phy_info->phy_flags));
3231
3232	/*
3233	 * Populate an RX state struct with the provided information.
3234	 */
3235	bzero(&rxs, sizeof(rxs));
3236	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3237	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3238	rxs.c_ieee = le16toh(phy_info->channel);
3239	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3240		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3241	} else {
3242		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3243	}
3244
3245	/* rssi is in 1/2db units */
3246	rxs.rssi = rssi * 2;
3247	rxs.nf = sc->sc_noise;
3248
3249	if (ieee80211_radiotap_active_vap(vap)) {
3250		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3251
3252		tap->wr_flags = 0;
3253		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3254			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3255		tap->wr_chan_freq = htole16(rxs.c_freq);
3256		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3257		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3258		tap->wr_dbm_antsignal = (int8_t)rssi;
3259		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3260		tap->wr_tsft = phy_info->system_timestamp;
3261		switch (phy_info->rate) {
3262		/* CCK rates. */
3263		case  10: tap->wr_rate =   2; break;
3264		case  20: tap->wr_rate =   4; break;
3265		case  55: tap->wr_rate =  11; break;
3266		case 110: tap->wr_rate =  22; break;
3267		/* OFDM rates. */
3268		case 0xd: tap->wr_rate =  12; break;
3269		case 0xf: tap->wr_rate =  18; break;
3270		case 0x5: tap->wr_rate =  24; break;
3271		case 0x7: tap->wr_rate =  36; break;
3272		case 0x9: tap->wr_rate =  48; break;
3273		case 0xb: tap->wr_rate =  72; break;
3274		case 0x1: tap->wr_rate =  96; break;
3275		case 0x3: tap->wr_rate = 108; break;
3276		/* Unknown rate: should not happen. */
3277		default:  tap->wr_rate =   0;
3278		}
3279	}
3280
3281	IWM_UNLOCK(sc);
3282	if (ni != NULL) {
3283		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3284		ieee80211_input_mimo(ni, m, &rxs);
3285		ieee80211_free_node(ni);
3286	} else {
3287		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3288		ieee80211_input_mimo_all(ic, m, &rxs);
3289	}
3290	IWM_LOCK(sc);
3291
3292	return;
3293
3294fail:	counter_u64_add(ic->ic_ierrors, 1);
3295}
3296
3297static int
3298iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3299	struct iwm_node *in)
3300{
3301	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3302	struct ieee80211_node *ni = &in->in_ni;
3303	struct ieee80211vap *vap = ni->ni_vap;
3304	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3305	int failack = tx_resp->failure_frame;
3306
3307	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3308
3309	/* Update rate control statistics. */
3310	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3311	    __func__,
3312	    (int) le16toh(tx_resp->status.status),
3313	    (int) le16toh(tx_resp->status.sequence),
3314	    tx_resp->frame_count,
3315	    tx_resp->bt_kill_count,
3316	    tx_resp->failure_rts,
3317	    tx_resp->failure_frame,
3318	    le32toh(tx_resp->initial_rate),
3319	    (int) le16toh(tx_resp->wireless_media_time));
3320
3321	if (status != IWM_TX_STATUS_SUCCESS &&
3322	    status != IWM_TX_STATUS_DIRECT_DONE) {
3323		ieee80211_ratectl_tx_complete(vap, ni,
3324		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3325		return (1);
3326	} else {
3327		ieee80211_ratectl_tx_complete(vap, ni,
3328		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3329		return (0);
3330	}
3331}
3332
/*
 * Handle a TX-done notification: release the DMA mapping and mbuf of
 * the completed frame, feed the status to the rate-control code via
 * iwm_mvm_rx_tx_cmd_single(), and restart transmission if the queue
 * was marked full and has now drained below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Progress was made; reset the watchdog. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before handing the mbuf/node back to net80211. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Kick TX again once this queue drains below the low watermark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3375
3376/*
3377 * transmit side
3378 */
3379
3380/*
3381 * Process a "command done" firmware notification.  This is where we wakeup
3382 * processes waiting for a synchronous command completion.
3383 * from if_iwn
3384 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake the thread sleeping in iwm_send_cmd() on this slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/*
	 * Commands should complete in order: the acked slot plus the
	 * number still queued must land back on the ring's cur index.
	 */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3424
#if 0
/*
 * necessary only for block ack mode
 */
/*
 * Mirror a frame's byte count into the TX scheduler's byte-count table
 * so the hardware scheduler knows how much data is pending on the queue.
 * Currently compiled out; only needed once block-ack mode is supported.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* Low slots are duplicated past the ring end (hardware wrap-around). */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3457
3458/*
3459 * Take an 802.11 (non-n) rate, find the relevant rate
3460 * table entry.  return the index into in_ridx[].
3461 *
3462 * The caller then uses that index back into in_ridx
3463 * to figure out the rate index programmed /into/
3464 * the firmware for this given node.
3465 */
3466static int
3467iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3468    uint8_t rate)
3469{
3470	int i;
3471	uint8_t r;
3472
3473	for (i = 0; i < nitems(in->in_ridx); i++) {
3474		r = iwm_rates[in->in_ridx[i]].rate;
3475		if (rate == r)
3476			return (i);
3477	}
3478
3479	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3480	    "%s: couldn't find an entry for rate=%d\n",
3481	    __func__,
3482	    rate);
3483
3484	/* XXX Return the first */
3485	/* XXX TODO: have it return the /lowest/ */
3486	return (0);
3487}
3488
3489static int
3490iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3491{
3492	int i;
3493
3494	for (i = 0; i < nitems(iwm_rates); i++) {
3495		if (iwm_rates[i].rate == rate)
3496			return (i);
3497	}
3498	/* XXX error? */
3499	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3500	    "%s: couldn't find an entry for rate=%d\n",
3501	    __func__,
3502	    rate);
3503	return (0);
3504}
3505
3506/*
3507 * Fill in the rate related information for a transmit command.
3508 */
/*
 * Pick the transmit rate for 'm' and fill the rate-related fields of the
 * TX command: retry limits, initial_rate_index/STA_RATE flag (data frames
 * only) and rate_n_flags.  Management, multicast, fixed-rate and EAPOL
 * frames use the vap txparams; other data frames consult rate control.
 * Returns the chosen iwm_rates[] entry.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type;
	int ridx, rate_flags;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		/* EAPOL frames go out at the (robust) management rate. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL\n", __func__);
	} else if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;

		/* for data frames, use RS table */
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);

		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
		    __func__, tp->mgmtrate);
	}

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: frame type=%d txrate %d\n",
	        __func__, type, iwm_rates[ridx].rate);

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3585
/* Bytes of the TX command carried by the first (inline) TFD buffer. */
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on AC 'ac': build the TX command
 * (rate, station, flags), optionally encrypt, DMA-map the payload and
 * fill the TFD with the command header plus payload segments, then
 * advance the hardware write pointer.  Consumes 'm' on error.
 * Returns 0 on success or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection above the vap's RTS threshold. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header travels inside the TX command; only payload is mapped. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0/TB1 carry the command + copied 802.11 header, split at TB0_SIZE. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3803
3804static int
3805iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3806    const struct ieee80211_bpf_params *params)
3807{
3808	struct ieee80211com *ic = ni->ni_ic;
3809	struct iwm_softc *sc = ic->ic_softc;
3810	int error = 0;
3811
3812	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3813	    "->%s begin\n", __func__);
3814
3815	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3816		m_freem(m);
3817		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3818		    "<-%s not RUNNING\n", __func__);
3819		return (ENETDOWN);
3820        }
3821
3822	IWM_LOCK(sc);
3823	/* XXX fix this */
3824        if (params == NULL) {
3825		error = iwm_tx(sc, m, ni, 0);
3826	} else {
3827		error = iwm_tx(sc, m, ni, 0);
3828	}
3829	sc->sc_tx_timer = 5;
3830	IWM_UNLOCK(sc);
3831
3832        return (error);
3833}
3834
3835/*
3836 * mvm/tx.c
3837 */
3838
3839/*
3840 * Note that there are transports that buffer frames before they reach
3841 * the firmware. This means that after flush_tx_path is called, the
3842 * queue might not be empty. The race-free way to handle this is to:
3843 * 1) set the station as draining
3844 * 2) flush the Tx path
3845 * 3) wait for the transport queues to be empty
3846 */
3847int
3848iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3849{
3850	int ret;
3851	struct iwm_tx_path_flush_cmd flush_cmd = {
3852		.queues_ctl = htole32(tfd_msk),
3853		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3854	};
3855
3856	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3857	    sizeof(flush_cmd), &flush_cmd);
3858	if (ret)
3859                device_printf(sc->sc_dev,
3860		    "Flushing tx queue failed: %d\n", ret);
3861	return ret;
3862}
3863
3864/*
3865 * BEGIN mvm/sta.c
3866 */
3867
3868static int
3869iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
3870	struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
3871{
3872	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
3873	    cmd, status);
3874}
3875
3876/* send station add/update command to firmware */
3877static int
3878iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3879{
3880	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3881	int ret;
3882	uint32_t status;
3883
3884	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3885
3886	add_sta_cmd.sta_id = IWM_STATION_ID;
3887	add_sta_cmd.mac_id_n_color
3888	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3889	        IWM_DEFAULT_COLOR));
3890	if (!update) {
3891		int ac;
3892		for (ac = 0; ac < WME_NUM_AC; ac++) {
3893			add_sta_cmd.tfd_queue_msk |=
3894			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3895		}
3896		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3897	}
3898	add_sta_cmd.add_modify = update ? 1 : 0;
3899	add_sta_cmd.station_flags_msk
3900	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3901	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3902	if (update)
3903		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3904
3905	status = IWM_ADD_STA_SUCCESS;
3906	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3907	if (ret)
3908		return ret;
3909
3910	switch (status) {
3911	case IWM_ADD_STA_SUCCESS:
3912		break;
3913	default:
3914		ret = EIO;
3915		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3916		break;
3917	}
3918
3919	return ret;
3920}
3921
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* Create the station in firmware (update == 0). */
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3927
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* Modify an already-added firmware station (update == 1). */
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
3933
3934static int
3935iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3936	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3937{
3938	struct iwm_mvm_add_sta_cmd_v7 cmd;
3939	int ret;
3940	uint32_t status;
3941
3942	memset(&cmd, 0, sizeof(cmd));
3943	cmd.sta_id = sta->sta_id;
3944	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
3945
3946	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
3947	cmd.tid_disable_tx = htole16(0xffff);
3948
3949	if (addr)
3950		IEEE80211_ADDR_COPY(cmd.addr, addr);
3951
3952	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
3953	if (ret)
3954		return ret;
3955
3956	switch (status) {
3957	case IWM_ADD_STA_SUCCESS:
3958		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
3959		    "%s: Internal station added.\n", __func__);
3960		return 0;
3961	default:
3962		device_printf(sc->sc_dev,
3963		    "%s: Add internal station failed, status=0x%x\n",
3964		    __func__, status);
3965		ret = EIO;
3966		break;
3967	}
3968	return ret;
3969}
3970
3971static int
3972iwm_mvm_add_aux_sta(struct iwm_softc *sc)
3973{
3974	int ret;
3975
3976	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
3977	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
3978
3979	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
3980	if (ret)
3981		return ret;
3982
3983	ret = iwm_mvm_add_int_sta_common(sc,
3984	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
3985
3986	if (ret)
3987		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
3988	return ret;
3989}
3990
3991/*
3992 * END mvm/sta.c
3993 */
3994
3995/*
3996 * BEGIN mvm/quota.c
3997 */
3998
3999static int
4000iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4001{
4002	struct iwm_time_quota_cmd cmd;
4003	int i, idx, ret, num_active_macs, quota, quota_rem;
4004	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4005	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4006	uint16_t id;
4007
4008	memset(&cmd, 0, sizeof(cmd));
4009
4010	/* currently, PHY ID == binding ID */
4011	if (in) {
4012		id = in->in_phyctxt->id;
4013		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4014		colors[id] = in->in_phyctxt->color;
4015
4016		if (1)
4017			n_ifs[id] = 1;
4018	}
4019
4020	/*
4021	 * The FW's scheduling session consists of
4022	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4023	 * equally between all the bindings that require quota
4024	 */
4025	num_active_macs = 0;
4026	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4027		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4028		num_active_macs += n_ifs[i];
4029	}
4030
4031	quota = 0;
4032	quota_rem = 0;
4033	if (num_active_macs) {
4034		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4035		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4036	}
4037
4038	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4039		if (colors[i] < 0)
4040			continue;
4041
4042		cmd.quotas[idx].id_and_color =
4043			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4044
4045		if (n_ifs[i] <= 0) {
4046			cmd.quotas[idx].quota = htole32(0);
4047			cmd.quotas[idx].max_duration = htole32(0);
4048		} else {
4049			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4050			cmd.quotas[idx].max_duration = htole32(0);
4051		}
4052		idx++;
4053	}
4054
4055	/* Give the remainder of the session to the first binding */
4056	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4057
4058	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4059	    sizeof(cmd), &cmd);
4060	if (ret)
4061		device_printf(sc->sc_dev,
4062		    "%s: Failed to send quota: %d\n", __func__, ret);
4063	return ret;
4064}
4065
4066/*
4067 * END mvm/quota.c
4068 */
4069
4070/*
4071 * ieee80211 routines
4072 */
4073
4074/*
4075 * Change to AUTH state in 80211 state machine.  Roughly matches what
4076 * Linux does in bss_info_changed().
4077 */
4078static int
4079iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4080{
4081	struct ieee80211_node *ni;
4082	struct iwm_node *in;
4083	struct iwm_vap *iv = IWM_VAP(vap);
4084	uint32_t duration;
4085	int error;
4086
4087	/*
4088	 * XXX i have a feeling that the vap node is being
4089	 * freed from underneath us. Grr.
4090	 */
4091	ni = ieee80211_ref_node(vap->iv_bss);
4092	in = IWM_NODE(ni);
4093	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4094	    "%s: called; vap=%p, bss ni=%p\n",
4095	    __func__,
4096	    vap,
4097	    ni);
4098
4099	in->in_assoc = 0;
4100
4101	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4102	if (error != 0)
4103		return error;
4104
4105	error = iwm_allow_mcast(vap, sc);
4106	if (error) {
4107		device_printf(sc->sc_dev,
4108		    "%s: failed to set multicast\n", __func__);
4109		goto out;
4110	}
4111
4112	/*
4113	 * This is where it deviates from what Linux does.
4114	 *
4115	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4116	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4117	 * and always does a mac_ctx_changed().
4118	 *
4119	 * The openbsd port doesn't attempt to do that - it reset things
4120	 * at odd states and does the add here.
4121	 *
4122	 * So, until the state handling is fixed (ie, we never reset
4123	 * the NIC except for a firmware failure, which should drag
4124	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4125	 * contexts that are required), let's do a dirty hack here.
4126	 */
4127	if (iv->is_uploaded) {
4128		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4129			device_printf(sc->sc_dev,
4130			    "%s: failed to update MAC\n", __func__);
4131			goto out;
4132		}
4133		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4134		    in->in_ni.ni_chan, 1, 1)) != 0) {
4135			device_printf(sc->sc_dev,
4136			    "%s: failed update phy ctxt\n", __func__);
4137			goto out;
4138		}
4139		in->in_phyctxt = &sc->sc_phyctxt[0];
4140
4141		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4142			device_printf(sc->sc_dev,
4143			    "%s: binding update cmd\n", __func__);
4144			goto out;
4145		}
4146		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4147			device_printf(sc->sc_dev,
4148			    "%s: failed to update sta\n", __func__);
4149			goto out;
4150		}
4151	} else {
4152		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4153			device_printf(sc->sc_dev,
4154			    "%s: failed to add MAC\n", __func__);
4155			goto out;
4156		}
4157		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4158		    in->in_ni.ni_chan, 1, 1)) != 0) {
4159			device_printf(sc->sc_dev,
4160			    "%s: failed add phy ctxt!\n", __func__);
4161			error = ETIMEDOUT;
4162			goto out;
4163		}
4164		in->in_phyctxt = &sc->sc_phyctxt[0];
4165
4166		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4167			device_printf(sc->sc_dev,
4168			    "%s: binding add cmd\n", __func__);
4169			goto out;
4170		}
4171		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4172			device_printf(sc->sc_dev,
4173			    "%s: failed to add sta\n", __func__);
4174			goto out;
4175		}
4176	}
4177
4178	/*
4179	 * Prevent the FW from wandering off channel during association
4180	 * by "protecting" the session with a time event.
4181	 */
4182	/* XXX duration is in units of TU, not MS */
4183	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4184	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4185	DELAY(100);
4186
4187	error = 0;
4188out:
4189	ieee80211_free_node(ni);
4190	return (error);
4191}
4192
4193static int
4194iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4195{
4196	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4197	int error;
4198
4199	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4200		device_printf(sc->sc_dev,
4201		    "%s: failed to update STA\n", __func__);
4202		return error;
4203	}
4204
4205	in->in_assoc = 1;
4206	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4207		device_printf(sc->sc_dev,
4208		    "%s: failed to update MAC\n", __func__);
4209		return error;
4210	}
4211
4212	return 0;
4213}
4214
/*
 * Tear down the RUN-state association by flushing tx, fully resetting
 * the device and re-running iwm_init_hw().  See the comment below for
 * why this is a reset instead of the orderly teardown sequence.
 * Always returns 0 on the live path.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device not matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	/* Drop any not-yet-submitted frames before flushing hardware. */
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	/* Full reset: stop, then bring the hardware all the way back up. */
	/* NOTE(review): iwm_init_hw() return value is ignored here. */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/*
	 * Dead code: the orderly teardown that freezes the device (see
	 * the comment above).  NOTE(review): it also calls
	 * iwm_mvm_rm_sta() twice in a row.
	 */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4285
4286static struct ieee80211_node *
4287iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4288{
4289	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4290	    M_NOWAIT | M_ZERO);
4291}
4292
/*
 * Build the link-quality (rate selection) command for the given node:
 * map the node's negotiated legacy rates to hardware rate indices and
 * fill in->in_lq's rs_table from highest to lowest rate.  The command
 * itself is sent by the caller (iwm_newstate() RUN case).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/*
			 * NOTE(review): in_ridx[i] is left at -1 here and
			 * is used as an array index below — confirm this
			 * cannot happen for rates net80211 negotiates.
			 */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4411
4412static int
4413iwm_media_change(struct ifnet *ifp)
4414{
4415	struct ieee80211vap *vap = ifp->if_softc;
4416	struct ieee80211com *ic = vap->iv_ic;
4417	struct iwm_softc *sc = ic->ic_softc;
4418	int error;
4419
4420	error = ieee80211_media_change(ifp);
4421	if (error != ENETRESET)
4422		return error;
4423
4424	IWM_LOCK(sc);
4425	if (ic->ic_nrunning > 0) {
4426		iwm_stop(sc);
4427		iwm_init(sc);
4428	}
4429	IWM_UNLOCK(sc);
4430	return error;
4431}
4432
4433
/*
 * net80211 state-change handler.  Drops the com lock and takes the
 * driver lock for the duration, forces illegal RUN->{SCAN,AUTH,ASSOC}
 * transitions through INIT, and programs the firmware for the AUTH,
 * ASSOC and RUN states before chaining to the saved net80211 handler.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	/* Swap lock order: com lock released while the driver works. */
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * Run the net80211 handler first (under the com
			 * lock), then reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		/* sizeof(in->in_lq) is unevaluated; in is set below. */
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		/* Build in->in_lq, then send it as the LQ command payload. */
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the original net80211 state handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4571
/*
 * Taskqueue callback run when a firmware scan completes: notify
 * net80211 that the scan on the first vap is done.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
}
4584
4585/*
4586 * Aging and idle timeouts for the different possible scenarios
4587 * in default configuration
4588 */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		/* single unicast: { aging timer, idle timer } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		/* tx re */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4612
4613/*
4614 * Aging and idle timeouts for the different possible scenarios
4615 * in single BSS MAC configuration.
4616 */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		/* single unicast: { aging timer, idle timer } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		/* tx re */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4640
4641static void
4642iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4643    struct ieee80211_node *ni)
4644{
4645	int i, j, watermark;
4646
4647	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4648
4649	/*
4650	 * If we are in association flow - check antenna configuration
4651	 * capabilities of the AP station, and choose the watermark accordingly.
4652	 */
4653	if (ni) {
4654		if (ni->ni_flags & IEEE80211_NODE_HT) {
4655#ifdef notyet
4656			if (ni->ni_rxmcs[2] != 0)
4657				watermark = IWM_SF_W_MARK_MIMO3;
4658			else if (ni->ni_rxmcs[1] != 0)
4659				watermark = IWM_SF_W_MARK_MIMO2;
4660			else
4661#endif
4662				watermark = IWM_SF_W_MARK_SISO;
4663		} else {
4664			watermark = IWM_SF_W_MARK_LEGACY;
4665		}
4666	/* default watermark value for unassociated mode. */
4667	} else {
4668		watermark = IWM_SF_W_MARK_MIMO2;
4669	}
4670	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4671
4672	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4673		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4674			sf_cmd->long_delay_timeouts[i][j] =
4675					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4676		}
4677	}
4678
4679	if (ni) {
4680		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4681		       sizeof(iwm_sf_full_timeout));
4682	} else {
4683		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4684		       sizeof(iwm_sf_full_timeout_def));
4685	}
4686}
4687
/*
 * Send a Smart Fifo configuration command for the requested state.
 * The command always carries state IWM_SF_FULL_ON; new_state only
 * selects whether the timeouts are filled for the associated BSS
 * (IWM_SF_FULL_ON) or the unassociated defaults.  Returns EINVAL for
 * unknown states, otherwise the async command result.
 */
static int
iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_sf_cfg_cmd sf_cmd = {
		.state = htole32(IWM_SF_FULL_ON),
	};
	int ret = 0;

	/* 8000-family firmware wants the dummy-notification flag off. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);

	switch (new_state) {
	case IWM_SF_UNINIT:
	case IWM_SF_INIT_OFF:
		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
		break;
	case IWM_SF_FULL_ON:
		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
		break;
	default:
		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
		    "Invalid state: %d. not sending Smart Fifo cmd\n",
			  new_state);
		return EINVAL;
	}

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
				   sizeof(sf_cmd), &sf_cmd);
	return ret;
}
4720
4721static int
4722iwm_send_bt_init_conf(struct iwm_softc *sc)
4723{
4724	struct iwm_bt_coex_cmd bt_cmd;
4725
4726	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4727	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4728
4729	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4730	    &bt_cmd);
4731}
4732
/*
 * Send an MCC (mobile country code) update for the given two-letter
 * alpha2 country string and, under IWM_DEBUG, report the regulatory
 * domain the firmware answered with.  Returns 0 on success or the
 * command error.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Firmware capability decides which response layout we get. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two ASCII letters into a big-endian-style 16-bit MCC. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* v1 commands are shorter; send only the v1-sized prefix. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		/* NOTE(review): no le16toh() on mcc here — confirm
		 * whether the response field needs byte-swapping. */
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* Release the response buffer requested via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4800
4801static void
4802iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4803{
4804	struct iwm_host_cmd cmd = {
4805		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4806		.len = { sizeof(uint32_t), },
4807		.data = { &backoff, },
4808	};
4809
4810	if (iwm_send_cmd(sc, &cmd) != 0) {
4811		device_printf(sc->sc_dev,
4812		    "failed to change thermal tx backoff\n");
4813	}
4814}
4815
4816static int
4817iwm_init_hw(struct iwm_softc *sc)
4818{
4819	struct ieee80211com *ic = &sc->sc_ic;
4820	int error, i, ac;
4821
4822	if ((error = iwm_start_hw(sc)) != 0) {
4823		printf("iwm_start_hw: failed %d\n", error);
4824		return error;
4825	}
4826
4827	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4828		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4829		return error;
4830	}
4831
4832	/*
4833	 * should stop and start HW since that INIT
4834	 * image just loaded
4835	 */
4836	iwm_stop_device(sc);
4837	if ((error = iwm_start_hw(sc)) != 0) {
4838		device_printf(sc->sc_dev, "could not initialize hardware\n");
4839		return error;
4840	}
4841
4842	/* omstart, this time with the regular firmware */
4843	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4844	if (error) {
4845		device_printf(sc->sc_dev, "could not load firmware\n");
4846		goto error;
4847	}
4848
4849	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4850		device_printf(sc->sc_dev, "bt init conf failed\n");
4851		goto error;
4852	}
4853
4854	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4855	if (error != 0) {
4856		device_printf(sc->sc_dev, "antenna config failed\n");
4857		goto error;
4858	}
4859
4860	/* Send phy db control command and then phy db calibration */
4861	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4862		goto error;
4863
4864	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4865		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4866		goto error;
4867	}
4868
4869	/* Add auxiliary station for scanning */
4870	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4871		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4872		goto error;
4873	}
4874
4875	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4876		/*
4877		 * The channel used here isn't relevant as it's
4878		 * going to be overwritten in the other flows.
4879		 * For now use the first channel we have.
4880		 */
4881		if ((error = iwm_mvm_phy_ctxt_add(sc,
4882		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4883			goto error;
4884	}
4885
4886	/* Initialize tx backoffs to the minimum. */
4887	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4888		iwm_mvm_tt_tx_backoff(sc, 0);
4889
4890	error = iwm_mvm_power_update_device(sc);
4891	if (error)
4892		goto error;
4893
4894	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
4895		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4896			goto error;
4897	}
4898
4899	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4900		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4901			goto error;
4902	}
4903
4904	/* Enable Tx queues. */
4905	for (ac = 0; ac < WME_NUM_AC; ac++) {
4906		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4907		    iwm_mvm_ac_to_tx_fifo[ac]);
4908		if (error)
4909			goto error;
4910	}
4911
4912	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4913		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4914		goto error;
4915	}
4916
4917	return 0;
4918
4919 error:
4920	iwm_stop_device(sc);
4921	return error;
4922}
4923
4924/* Allow multicast from our BSSID. */
4925static int
4926iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4927{
4928	struct ieee80211_node *ni = vap->iv_bss;
4929	struct iwm_mcast_filter_cmd *cmd;
4930	size_t size;
4931	int error;
4932
4933	size = roundup(sizeof(*cmd), 4);
4934	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4935	if (cmd == NULL)
4936		return ENOMEM;
4937	cmd->filter_own = 1;
4938	cmd->port_id = 0;
4939	cmd->count = 0;
4940	cmd->pass_all = 1;
4941	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4942
4943	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4944	    IWM_CMD_SYNC, size, cmd);
4945	free(cmd, M_DEVBUF);
4946
4947	return (error);
4948}
4949
4950/*
4951 * ifnet interfaces
4952 */
4953
4954static void
4955iwm_init(struct iwm_softc *sc)
4956{
4957	int error;
4958
4959	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4960		return;
4961	}
4962	sc->sc_generation++;
4963	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4964
4965	if ((error = iwm_init_hw(sc)) != 0) {
4966		printf("iwm_init_hw failed %d\n", error);
4967		iwm_stop(sc);
4968		return;
4969	}
4970
4971	/*
4972	 * Ok, firmware loaded and we are jogging
4973	 */
4974	sc->sc_flags |= IWM_FLAG_HW_INITED;
4975	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4976}
4977
4978static int
4979iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4980{
4981	struct iwm_softc *sc;
4982	int error;
4983
4984	sc = ic->ic_softc;
4985
4986	IWM_LOCK(sc);
4987	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4988		IWM_UNLOCK(sc);
4989		return (ENXIO);
4990	}
4991	error = mbufq_enqueue(&sc->sc_snd, m);
4992	if (error) {
4993		IWM_UNLOCK(sc);
4994		return (error);
4995	}
4996	iwm_start(sc);
4997	IWM_UNLOCK(sc);
4998	return (0);
4999}
5000
5001/*
5002 * Dequeue packets from sendq and call send.
5003 */
5004static void
5005iwm_start(struct iwm_softc *sc)
5006{
5007	struct ieee80211_node *ni;
5008	struct mbuf *m;
5009	int ac = 0;
5010
5011	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5012	while (sc->qfullmsk == 0 &&
5013		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5014		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5015		if (iwm_tx(sc, m, ni, ac) != 0) {
5016			if_inc_counter(ni->ni_vap->iv_ifp,
5017			    IFCOUNTER_OERRORS, 1);
5018			ieee80211_free_node(ni);
5019			continue;
5020		}
5021		sc->sc_tx_timer = 15;
5022	}
5023	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5024}
5025
/*
 * Bring the interface down: clear run-state flags, stop the LED blink
 * and tx watchdog, and power the device off.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5038
5039static void
5040iwm_watchdog(void *arg)
5041{
5042	struct iwm_softc *sc = arg;
5043	struct ieee80211com *ic = &sc->sc_ic;
5044
5045	if (sc->sc_tx_timer > 0) {
5046		if (--sc->sc_tx_timer == 0) {
5047			device_printf(sc->sc_dev, "device timeout\n");
5048#ifdef IWM_DEBUG
5049			iwm_nic_error(sc);
5050#endif
5051			ieee80211_restart_all(ic);
5052			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5053			return;
5054		}
5055	}
5056	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5057}
5058
5059static void
5060iwm_parent(struct ieee80211com *ic)
5061{
5062	struct iwm_softc *sc = ic->ic_softc;
5063	int startall = 0;
5064
5065	IWM_LOCK(sc);
5066	if (ic->ic_nrunning > 0) {
5067		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5068			iwm_init(sc);
5069			startall = 1;
5070		}
5071	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5072		iwm_stop(sc);
5073	IWM_UNLOCK(sc);
5074	if (startall)
5075		ieee80211_start_all(ic);
5076}
5077
5078/*
5079 * The interrupt side of things
5080 */
5081
5082/*
5083 * error dumping routines are from iwlwifi/mvm/utils.c
5084 */
5085
/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 *
 * The field order mirrors the firmware's LOG_ERROR_TABLE_API_S_VER_3
 * layout (see the tag comment below); do not reorder or insert fields.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5139
/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 *
 * Dumped by iwm_nic_umac_error() when the firmware publishes a
 * UMAC error table pointer (sc->umac_error_event_table).
 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC firmware version, major */
	uint32_t umac_minor;	/* UMAC firmware version, minor */
	uint32_t frame_pointer;	/* core register 27*/
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5164
5165#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
5166#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5167
5168#ifdef IWM_DEBUG
/*
 * Firmware error-id -> symbolic-name lookup table.  The final
 * "ADVANCED_SYSASSERT" entry is the catch-all; iwm_desc_lookup()
 * relies on it being last.  The table is read-only and private to
 * this file, hence static const (the original definition leaked a
 * mutable symbol into the kernel's global namespace).
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5190
5191static const char *
5192iwm_desc_lookup(uint32_t num)
5193{
5194	int i;
5195
5196	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5197		if (advanced_lookup[i].num == num)
5198			return advanced_lookup[i].name;
5199
5200	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5201	return advanced_lookup[i].name;
5202}
5203
/*
 * Dump the UMAC (family 8000+) error event table to the console.
 * Caller (iwm_nic_error) only invokes this when the firmware has
 * published a table pointer in sc->umac_error_event_table.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Sanity check: presumably an SRAM offset; low values mean the
	 * firmware never initialized the pointer — TODO confirm bound. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() length appears to be in dwords, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	/*
	 * With ERROR_START_OFFSET == 4 and ERROR_ELEM_SIZE == 28, this
	 * condition is equivalent to "table.valid != 0" (form inherited
	 * from iwlwifi).
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5250
/*
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway).  Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
 *
 * Reads the LMAC error event table from device memory and prints it;
 * chains to iwm_nic_umac_error() when a UMAC table is also present.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Sanity check on the firmware-provided pointer (see umac variant). */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() length appears to be in dwords, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/*
	 * Given ERROR_START_OFFSET (4) and ERROR_ELEM_SIZE (28), this is
	 * just "table.valid != 0" — already guaranteed by the check above,
	 * so the banner always prints; form kept from iwlwifi.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	/* Family 8000+ also publishes a UMAC error table; dump it too. */
	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5330#endif
5331
/*
 * Advance the RX ring consumer index, wrapping at IWM_RX_RING_COUNT.
 * The argument is parenthesized and the expansion carries no trailing
 * semicolon, so "ADVANCE_RXQ(sc);" behaves like an ordinary statement
 * even inside unbraced if/else bodies (the original macro baked in a
 * ';', yielding a stray empty statement at every call site).
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
5333
/*
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
 * Basic structure from if_iwn
 *
 * Walks the RX ring from our read index up to the firmware's write
 * index (closed_rb_num), dispatching each packet by its (group, opcode)
 * wide id, then writes the updated read pointer back to the device.
 * Called from iwm_intr() with IWM_LOCK held.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Firmware's ring write index; the field is 12 bits wide. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx, code;

		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid flags firmware-originated packets; see below. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		/* Wake any synchronous waiter registered for this code. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, data->m);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break;
		}

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			stats = (void *)pkt->data;
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
			break;
		}

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* Stash the full response for the waiting command issuer. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Record the two-letter country code sent by firmware. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_ABORT_UMAC:
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_FW_PAGING_BLOCK_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			/* Generic command completions: copy header + status word. */
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
5642
/*
 * Main interrupt handler.  Reads the interrupt cause either from the
 * DMA'd ICT table (when IWM_FLAG_USE_ICT is set) or directly from the
 * IWM_CSR_INT / FH_INT_STATUS registers, acknowledges the bits, and
 * dispatches: firmware error dump + restart, hardware error, firmware
 * chunk-load wakeup, rfkill, and RX notification processing.
 * Interrupts are masked on entry and restored via out_ena.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* NOTE(review): htole32 on a value read from memory looks
		 * like it should be le32toh; identical on little-endian —
		 * confirm before changing. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the consumed slot and advance around the table. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the interrupt causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake the firmware-load path sleeping on sc_fw. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5797
5798/*
5799 * Autoconf glue-sniffing
5800 */
5801#define	PCI_VENDOR_INTEL		0x8086
5802#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
5803#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
5804#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
5805#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
5806#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
5807#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
5808#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
5809#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
5810#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
5811#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5812
/*
 * Supported PCI device IDs and their per-chip configuration.
 * Consulted by iwm_probe() (device description) and iwm_dev_check()
 * (hooking sc->cfg up to the matching configuration).
 */
static const struct iwm_devices {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5828
5829static int
5830iwm_probe(device_t dev)
5831{
5832	int i;
5833
5834	for (i = 0; i < nitems(iwm_devices); i++) {
5835		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5836		    pci_get_device(dev) == iwm_devices[i].device) {
5837			device_set_desc(dev, iwm_devices[i].cfg->name);
5838			return (BUS_PROBE_DEFAULT);
5839		}
5840	}
5841
5842	return (ENXIO);
5843}
5844
5845static int
5846iwm_dev_check(device_t dev)
5847{
5848	struct iwm_softc *sc;
5849	uint16_t devid;
5850	int i;
5851
5852	sc = device_get_softc(dev);
5853
5854	devid = pci_get_device(dev);
5855	for (i = 0; i < nitems(iwm_devices); i++) {
5856		if (iwm_devices[i].device == devid) {
5857			sc->cfg = iwm_devices[i].cfg;
5858			return (0);
5859		}
5860	}
5861	device_printf(dev, "unknown adapter type\n");
5862	return ENXIO;
5863}
5864
5865/* PCI registers */
5866#define PCI_CFG_RETRY_TIMEOUT	0x041
5867
5868static int
5869iwm_pci_attach(device_t dev)
5870{
5871	struct iwm_softc *sc;
5872	int count, error, rid;
5873	uint16_t reg;
5874
5875	sc = device_get_softc(dev);
5876
5877	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5878	 * PCI Tx retries from interfering with C3 CPU state */
5879	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5880
5881	/* Enable bus-mastering and hardware bug workaround. */
5882	pci_enable_busmaster(dev);
5883	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5884	/* if !MSI */
5885	if (reg & PCIM_STATUS_INTxSTATE) {
5886		reg &= ~PCIM_STATUS_INTxSTATE;
5887	}
5888	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5889
5890	rid = PCIR_BAR(0);
5891	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5892	    RF_ACTIVE);
5893	if (sc->sc_mem == NULL) {
5894		device_printf(sc->sc_dev, "can't map mem space\n");
5895		return (ENXIO);
5896	}
5897	sc->sc_st = rman_get_bustag(sc->sc_mem);
5898	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5899
5900	/* Install interrupt handler. */
5901	count = 1;
5902	rid = 0;
5903	if (pci_alloc_msi(dev, &count) == 0)
5904		rid = 1;
5905	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5906	    (rid != 0 ? 0 : RF_SHAREABLE));
5907	if (sc->sc_irq == NULL) {
5908		device_printf(dev, "can't map interrupt\n");
5909			return (ENXIO);
5910	}
5911	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5912	    NULL, iwm_intr, sc, &sc->sc_ih);
5913	if (sc->sc_ih == NULL) {
5914		device_printf(dev, "can't establish interrupt");
5915			return (ENXIO);
5916	}
5917	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5918
5919	return (0);
5920}
5921
5922static void
5923iwm_pci_detach(device_t dev)
5924{
5925	struct iwm_softc *sc = device_get_softc(dev);
5926
5927	if (sc->sc_irq != NULL) {
5928		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5929		bus_release_resource(dev, SYS_RES_IRQ,
5930		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5931		pci_release_msi(dev);
5932        }
5933	if (sc->sc_mem != NULL)
5934		bus_release_resource(dev, SYS_RES_MEMORY,
5935		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5936}
5937
5938
5939
5940static int
5941iwm_attach(device_t dev)
5942{
5943	struct iwm_softc *sc = device_get_softc(dev);
5944	struct ieee80211com *ic = &sc->sc_ic;
5945	int error;
5946	int txq_i, i;
5947
5948	sc->sc_dev = dev;
5949	sc->sc_attached = 1;
5950	IWM_LOCK_INIT(sc);
5951	mbufq_init(&sc->sc_snd, ifqmaxlen);
5952	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5953	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5954	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5955
5956	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5957	if (sc->sc_notif_wait == NULL) {
5958		device_printf(dev, "failed to init notification wait struct\n");
5959		goto fail;
5960	}
5961
5962	/* Init phy db */
5963	sc->sc_phy_db = iwm_phy_db_init(sc);
5964	if (!sc->sc_phy_db) {
5965		device_printf(dev, "Cannot init phy_db\n");
5966		goto fail;
5967	}
5968
5969	/* PCI attach */
5970	error = iwm_pci_attach(dev);
5971	if (error != 0)
5972		goto fail;
5973
5974	sc->sc_wantresp = -1;
5975
5976	/* Check device type */
5977	error = iwm_dev_check(dev);
5978	if (error != 0)
5979		goto fail;
5980
5981	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5982	/*
5983	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5984	 * changed, and now the revision step also includes bit 0-1 (no more
5985	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5986	 * in the old format.
5987	 */
5988	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
5989		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5990				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5991
5992	if (iwm_prepare_card_hw(sc) != 0) {
5993		device_printf(dev, "could not initialize hardware\n");
5994		goto fail;
5995	}
5996
5997	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5998		int ret;
5999		uint32_t hw_step;
6000
6001		/*
6002		 * In order to recognize C step the driver should read the
6003		 * chip version id located at the AUX bus MISC address.
6004		 */
6005		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
6006			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
6007		DELAY(2);
6008
6009		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
6010				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6011				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
6012				   25000);
6013		if (!ret) {
6014			device_printf(sc->sc_dev,
6015			    "Failed to wake up the nic\n");
6016			goto fail;
6017		}
6018
6019		if (iwm_nic_lock(sc)) {
6020			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6021			hw_step |= IWM_ENABLE_WFPM;
6022			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6023			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6024			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6025			if (hw_step == 0x3)
6026				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6027						(IWM_SILICON_C_STEP << 2);
6028			iwm_nic_unlock(sc);
6029		} else {
6030			device_printf(sc->sc_dev, "Failed to lock the nic\n");
6031			goto fail;
6032		}
6033	}
6034
6035	/* special-case 7265D, it has the same PCI IDs. */
6036	if (sc->cfg == &iwm7265_cfg &&
6037	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6038		sc->cfg = &iwm7265d_cfg;
6039	}
6040
6041	/* Allocate DMA memory for firmware transfers. */
6042	if ((error = iwm_alloc_fwmem(sc)) != 0) {
6043		device_printf(dev, "could not allocate memory for firmware\n");
6044		goto fail;
6045	}
6046
6047	/* Allocate "Keep Warm" page. */
6048	if ((error = iwm_alloc_kw(sc)) != 0) {
6049		device_printf(dev, "could not allocate keep warm page\n");
6050		goto fail;
6051	}
6052
6053	/* We use ICT interrupts */
6054	if ((error = iwm_alloc_ict(sc)) != 0) {
6055		device_printf(dev, "could not allocate ICT table\n");
6056		goto fail;
6057	}
6058
6059	/* Allocate TX scheduler "rings". */
6060	if ((error = iwm_alloc_sched(sc)) != 0) {
6061		device_printf(dev, "could not allocate TX scheduler rings\n");
6062		goto fail;
6063	}
6064
6065	/* Allocate TX rings */
6066	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6067		if ((error = iwm_alloc_tx_ring(sc,
6068		    &sc->txq[txq_i], txq_i)) != 0) {
6069			device_printf(dev,
6070			    "could not allocate TX ring %d\n",
6071			    txq_i);
6072			goto fail;
6073		}
6074	}
6075
6076	/* Allocate RX ring. */
6077	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6078		device_printf(dev, "could not allocate RX ring\n");
6079		goto fail;
6080	}
6081
6082	/* Clear pending interrupts. */
6083	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6084
6085	ic->ic_softc = sc;
6086	ic->ic_name = device_get_nameunit(sc->sc_dev);
6087	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
6088	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
6089
6090	/* Set device capabilities. */
6091	ic->ic_caps =
6092	    IEEE80211_C_STA |
6093	    IEEE80211_C_WPA |		/* WPA/RSN */
6094	    IEEE80211_C_WME |
6095	    IEEE80211_C_SHSLOT |	/* short slot time supported */
6096	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
6097//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
6098	    ;
6099	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6100		sc->sc_phyctxt[i].id = i;
6101		sc->sc_phyctxt[i].color = 0;
6102		sc->sc_phyctxt[i].ref = 0;
6103		sc->sc_phyctxt[i].channel = NULL;
6104	}
6105
6106	/* Default noise floor */
6107	sc->sc_noise = -96;
6108
6109	/* Max RSSI */
6110	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6111
6112	sc->sc_preinit_hook.ich_func = iwm_preinit;
6113	sc->sc_preinit_hook.ich_arg = sc;
6114	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6115		device_printf(dev, "config_intrhook_establish failed\n");
6116		goto fail;
6117	}
6118
6119#ifdef IWM_DEBUG
6120	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6121	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6122	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6123#endif
6124
6125	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6126	    "<-%s\n", __func__);
6127
6128	return 0;
6129
6130	/* Free allocated memory if something failed during attachment. */
6131fail:
6132	iwm_detach_local(sc, 0);
6133
6134	return ENXIO;
6135}
6136
6137static int
6138iwm_is_valid_ether_addr(uint8_t *addr)
6139{
6140	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6141
6142	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6143		return (FALSE);
6144
6145	return (TRUE);
6146}
6147
6148static int
6149iwm_update_edca(struct ieee80211com *ic)
6150{
6151	struct iwm_softc *sc = ic->ic_softc;
6152
6153	device_printf(sc->sc_dev, "%s: called\n", __func__);
6154	return (0);
6155}
6156
/*
 * Deferred attach, run as a config_intrhook callback (installed in
 * iwm_attach via sc_preinit_hook) once interrupts are available.
 * Boots the init firmware to read the NVM, then attaches net80211.
 * On any failure the whole device is torn down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init firmware once (the "1" selects the init ucode) to
	 * populate NVM data, then stop the device again; the run-time
	 * firmware is loaded later when the interface is brought up.
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 method overrides. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* Attach complete; release the boot-time hold on the hook. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	/* The hook must be disestablished on the error path as well. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6227
6228/*
6229 * Attach the interface to 802.11 radiotap.
6230 */
6231static void
6232iwm_radiotap_attach(struct iwm_softc *sc)
6233{
6234        struct ieee80211com *ic = &sc->sc_ic;
6235
6236	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6237	    "->%s begin\n", __func__);
6238        ieee80211_radiotap_attach(ic,
6239            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6240                IWM_TX_RADIOTAP_PRESENT,
6241            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6242                IWM_RX_RADIOTAP_PRESENT);
6243	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6244	    "->%s end\n", __func__);
6245}
6246
6247static struct ieee80211vap *
6248iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6249    enum ieee80211_opmode opmode, int flags,
6250    const uint8_t bssid[IEEE80211_ADDR_LEN],
6251    const uint8_t mac[IEEE80211_ADDR_LEN])
6252{
6253	struct iwm_vap *ivp;
6254	struct ieee80211vap *vap;
6255
6256	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6257		return NULL;
6258	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6259	vap = &ivp->iv_vap;
6260	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6261	vap->iv_bmissthreshold = 10;            /* override default */
6262	/* Override with driver methods. */
6263	ivp->iv_newstate = vap->iv_newstate;
6264	vap->iv_newstate = iwm_newstate;
6265
6266	ieee80211_ratectl_init(vap);
6267	/* Complete setup. */
6268	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6269	    mac);
6270	ic->ic_opmode = opmode;
6271
6272	return vap;
6273}
6274
6275static void
6276iwm_vap_delete(struct ieee80211vap *vap)
6277{
6278	struct iwm_vap *ivp = IWM_VAP(vap);
6279
6280	ieee80211_ratectl_deinit(vap);
6281	ieee80211_vap_detach(vap);
6282	free(ivp, M_80211_VAP);
6283}
6284
6285static void
6286iwm_scan_start(struct ieee80211com *ic)
6287{
6288	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6289	struct iwm_softc *sc = ic->ic_softc;
6290	int error;
6291
6292	IWM_LOCK(sc);
6293	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6294		/* This should not be possible */
6295		device_printf(sc->sc_dev,
6296		    "%s: Previous scan not completed yet\n", __func__);
6297	}
6298	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6299		error = iwm_mvm_umac_scan(sc);
6300	else
6301		error = iwm_mvm_lmac_scan(sc);
6302	if (error != 0) {
6303		device_printf(sc->sc_dev, "could not initiate scan\n");
6304		IWM_UNLOCK(sc);
6305		ieee80211_cancel_scan(vap);
6306	} else {
6307		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6308		iwm_led_blink_start(sc);
6309		IWM_UNLOCK(sc);
6310	}
6311}
6312
/*
 * net80211 scan-end callback: stop the scan LED blink, restore the
 * LED when associated, and tell the firmware to stop scanning if a
 * scan is still marked as running.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	/* Re-enable the steady LED if we are associated. */
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6341
/*
 * net80211 multicast-filter update callback; intentionally a no-op
 * in this driver.
 */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6346
/*
 * net80211 set-channel callback; intentionally a no-op — channel
 * changes are handled through firmware commands elsewhere.
 */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6351
/*
 * net80211 per-channel scan callback; intentionally a no-op — the
 * firmware drives the scan (see iwm_scan_start).
 */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6356
/*
 * net80211 minimum-dwell callback; intentionally a no-op — the
 * firmware drives the scan (see iwm_scan_start).  The redundant bare
 * "return;" at the end of the void body was removed.
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6362
/*
 * Restart the interface: stop it, and bring it back up if net80211
 * still has it marked running.  Serialized against other init/stop
 * paths via the IWM_FLAG_BUSY flag with sleep/wakeup on sc->sc_flags.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait until no other thread holds the BUSY interlock. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	/* Only restart if the interface is still administratively up. */
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	/* Wake any thread blocked in the msleep loop above. */
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6379
6380static int
6381iwm_resume(device_t dev)
6382{
6383	struct iwm_softc *sc = device_get_softc(dev);
6384	int do_reinit = 0;
6385
6386	/*
6387	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6388	 * PCI Tx retries from interfering with C3 CPU state.
6389	 */
6390	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6391	iwm_init_task(device_get_softc(dev));
6392
6393	IWM_LOCK(sc);
6394	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6395		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6396		do_reinit = 1;
6397	}
6398	IWM_UNLOCK(sc);
6399
6400	if (do_reinit)
6401		ieee80211_resume_all(&sc->sc_ic);
6402
6403	return 0;
6404}
6405
6406static int
6407iwm_suspend(device_t dev)
6408{
6409	int do_stop = 0;
6410	struct iwm_softc *sc = device_get_softc(dev);
6411
6412	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6413
6414	ieee80211_suspend_all(&sc->sc_ic);
6415
6416	if (do_stop) {
6417		IWM_LOCK(sc);
6418		iwm_stop(sc);
6419		sc->sc_flags |= IWM_FLAG_SCANNING;
6420		IWM_UNLOCK(sc);
6421	}
6422
6423	return (0);
6424}
6425
/*
 * Common teardown path, used both by device_detach and by the attach
 * failure paths.  @do_net80211 selects whether net80211 state is also
 * torn down (attach-time failures pass 0 since ieee80211_ifattach may
 * not have run yet).  Idempotent via the sc_attached guard.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Nothing to do if attach never completed or we already ran. */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Stop deferred work before tearing the hardware down. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Last: drop queued mbufs and destroy the softc mutex. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6482
6483static int
6484iwm_detach(device_t dev)
6485{
6486	struct iwm_softc *sc = device_get_softc(dev);
6487
6488	return (iwm_detach_local(sc, 1));
6489}
6490
6491static device_method_t iwm_pci_methods[] = {
6492        /* Device interface */
6493        DEVMETHOD(device_probe,         iwm_probe),
6494        DEVMETHOD(device_attach,        iwm_attach),
6495        DEVMETHOD(device_detach,        iwm_detach),
6496        DEVMETHOD(device_suspend,       iwm_suspend),
6497        DEVMETHOD(device_resume,        iwm_resume),
6498
6499        DEVMETHOD_END
6500};
6501
6502static driver_t iwm_pci_driver = {
6503        "iwm",
6504        iwm_pci_methods,
6505        sizeof (struct iwm_softc)
6506};
6507
static devclass_t iwm_devclass;

/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6514