/* if_iwm.c revision 330188 */
1/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2
3/*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
23 *
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26 *
27 ***********************************************************************
28 *
29 * This file is provided under a dual BSD/GPLv2 license.  When using or
30 * redistributing this file, you may do so under either license.
31 *
32 * GPL LICENSE SUMMARY
33 *
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43 * General Public License for more details.
44 *
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
49 *
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
52 *
53 * Contact Information:
54 *  Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56 *
57 *
58 * BSD LICENSE
59 *
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 *
67 *  * Redistributions of source code must retain the above copyright
68 *    notice, this list of conditions and the following disclaimer.
69 *  * Redistributions in binary form must reproduce the above copyright
70 *    notice, this list of conditions and the following disclaimer in
71 *    the documentation and/or other materials provided with the
72 *    distribution.
73 *  * Neither the name Intel Corporation nor the names of its
74 *    contributors may be used to endorse or promote products derived
75 *    from this software without specific prior written permission.
76 *
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88 */
89
90/*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92 *
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
96 *
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104 */
105#include <sys/cdefs.h>
106__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 330188 2018-03-01 06:18:27Z eadler $");
107
108#include "opt_wlan.h"
109
110#include <sys/param.h>
111#include <sys/bus.h>
112#include <sys/conf.h>
113#include <sys/endian.h>
114#include <sys/firmware.h>
115#include <sys/kernel.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/module.h>
120#include <sys/proc.h>
121#include <sys/rman.h>
122#include <sys/socket.h>
123#include <sys/sockio.h>
124#include <sys/sysctl.h>
125#include <sys/linker.h>
126
127#include <machine/bus.h>
128#include <machine/endian.h>
129#include <machine/resource.h>
130
131#include <dev/pci/pcivar.h>
132#include <dev/pci/pcireg.h>
133
134#include <net/bpf.h>
135
136#include <net/if.h>
137#include <net/if_var.h>
138#include <net/if_arp.h>
139#include <net/if_dl.h>
140#include <net/if_media.h>
141#include <net/if_types.h>
142
143#include <netinet/in.h>
144#include <netinet/in_systm.h>
145#include <netinet/if_ether.h>
146#include <netinet/ip.h>
147
148#include <net80211/ieee80211_var.h>
149#include <net80211/ieee80211_regdomain.h>
150#include <net80211/ieee80211_ratectl.h>
151#include <net80211/ieee80211_radiotap.h>
152
153#include <dev/iwm/if_iwmreg.h>
154#include <dev/iwm/if_iwmvar.h>
155#include <dev/iwm/if_iwm_config.h>
156#include <dev/iwm/if_iwm_debug.h>
157#include <dev/iwm/if_iwm_notif_wait.h>
158#include <dev/iwm/if_iwm_util.h>
159#include <dev/iwm/if_iwm_binding.h>
160#include <dev/iwm/if_iwm_phy_db.h>
161#include <dev/iwm/if_iwm_mac_ctxt.h>
162#include <dev/iwm/if_iwm_phy_ctxt.h>
163#include <dev/iwm/if_iwm_time_event.h>
164#include <dev/iwm/if_iwm_power.h>
165#include <dev/iwm/if_iwm_scan.h>
166
167#include <dev/iwm/if_iwm_pcie_trans.h>
168#include <dev/iwm/if_iwm_led.h>
169
/*
 * Channel numbers advertised by the NVM on pre-8000-series devices.
 * Order matters: entries correspond 1:1 with the per-channel flag words
 * read from the NVM, so this table must not be reordered.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Flag storage elsewhere is sized IWM_NUM_CHANNELS; the table must fit. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
180
/*
 * Channel numbers advertised by the NVM on 8000-series devices.
 * As above, order maps 1:1 onto the NVM channel-flag words.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* Flag storage elsewhere is sized IWM_NUM_CHANNELS_8000; must fit. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
191
/* Number of 2.4 GHz entries at the head of the channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
/* Mask for the low 4 bits of the NVM hardware-address count field. */
#define IWM_N_HW_ADDR_MASK	0xF
194
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kb/s units (2 => 1 Mb/s, 108 => 54 Mb/s) */
	uint8_t plcp;	/* matching PLCP signal value for the firmware */
} iwm_rates[] = {
	/* CCK rates: indices IWM_RIDX_CCK .. IWM_RIDX_OFDM - 1 */
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	/* OFDM rates: indices IWM_RIDX_OFDM .. IWM_RIDX_MAX */
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index helpers into iwm_rates[]. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
221
/* One NVM section as read from the device: byte length plus payload. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};
226
/* How long to wait for the firmware's "alive" notification (1 second). */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
/* How long to wait for calibration completion (2 seconds). */
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* Result of waiting for the firmware alive notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a valid alive notification arrived */
	uint32_t scd_base_addr;	/* scheduler base address; presumably as reported by fw */
};
234
235static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
236static int	iwm_firmware_store_section(struct iwm_softc *,
237                                           enum iwm_ucode_type,
238                                           const uint8_t *, size_t);
239static int	iwm_set_default_calib(struct iwm_softc *, const void *);
240static void	iwm_fw_info_free(struct iwm_fw_info *);
241static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
242static void	iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
243static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
244                                     bus_size_t, bus_size_t);
245static void	iwm_dma_contig_free(struct iwm_dma_info *);
246static int	iwm_alloc_fwmem(struct iwm_softc *);
247static int	iwm_alloc_sched(struct iwm_softc *);
248static int	iwm_alloc_kw(struct iwm_softc *);
249static int	iwm_alloc_ict(struct iwm_softc *);
250static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
251static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
254                                  int);
255static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
256static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
257static void	iwm_enable_interrupts(struct iwm_softc *);
258static void	iwm_restore_interrupts(struct iwm_softc *);
259static void	iwm_disable_interrupts(struct iwm_softc *);
260static void	iwm_ict_reset(struct iwm_softc *);
261static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
262static void	iwm_stop_device(struct iwm_softc *);
263static void	iwm_mvm_nic_config(struct iwm_softc *);
264static int	iwm_nic_rx_init(struct iwm_softc *);
265static int	iwm_nic_tx_init(struct iwm_softc *);
266static int	iwm_nic_init(struct iwm_softc *);
267static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
268static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
269static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
270                                   uint16_t, uint8_t *, uint16_t *);
271static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
272				     uint16_t *, uint32_t);
273static uint32_t	iwm_eeprom_channel_flags(uint16_t);
274static void	iwm_add_channel_band(struct iwm_softc *,
275		    struct ieee80211_channel[], int, int *, int, size_t,
276		    const uint8_t[]);
277static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
278		    struct ieee80211_channel[]);
279static struct iwm_nvm_data *
280	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281			   const uint16_t *, const uint16_t *,
282			   const uint16_t *, const uint16_t *,
283			   const uint16_t *);
284static void	iwm_free_nvm_data(struct iwm_nvm_data *);
285static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
286					       struct iwm_nvm_data *,
287					       const uint16_t *,
288					       const uint16_t *);
289static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
290			    const uint16_t *);
291static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
292static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
293				  const uint16_t *);
294static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
295				   const uint16_t *);
296static void	iwm_set_radio_cfg(const struct iwm_softc *,
297				  struct iwm_nvm_data *, uint32_t);
298static struct iwm_nvm_data *
299	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
300static int	iwm_nvm_init(struct iwm_softc *);
301static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
302				      const struct iwm_fw_desc *);
303static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
304					     bus_addr_t, uint32_t);
305static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
306						const struct iwm_fw_sects *,
307						int, int *);
308static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
309					   const struct iwm_fw_sects *,
310					   int, int *);
311static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
312					       const struct iwm_fw_sects *);
313static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
314					  const struct iwm_fw_sects *);
315static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
316static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
317static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
318static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
319                                              enum iwm_ucode_type);
320static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
321static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
322static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
323static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
324					    struct iwm_rx_phy_info *);
325static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
326                                      struct iwm_rx_packet *,
327                                      struct iwm_rx_data *);
328static int	iwm_get_noise(struct iwm_softc *sc,
329		    const struct iwm_mvm_statistics_rx_non_phy *);
330static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
331                                   struct iwm_rx_data *);
332static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
333                                         struct iwm_rx_packet *,
334				         struct iwm_node *);
335static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
336                                  struct iwm_rx_data *);
337static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
338#if 0
339static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
340                                 uint16_t);
341#endif
342static const struct iwm_rate *
343	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
344			struct mbuf *, struct iwm_tx_cmd *);
345static int	iwm_tx(struct iwm_softc *, struct mbuf *,
346                       struct ieee80211_node *, int);
347static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
348			     const struct ieee80211_bpf_params *);
349static int	iwm_mvm_flush_tx_path(struct iwm_softc *sc,
350				      uint32_t tfd_msk, uint32_t flags);
351static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
352					        struct iwm_mvm_add_sta_cmd_v7 *,
353                                                int *);
354static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
355                                       int);
356static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
357static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
358static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
359                                           struct iwm_int_sta *,
360				           const uint8_t *, uint16_t, uint16_t);
361static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
362static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
363static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
364static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
365static int	iwm_release(struct iwm_softc *, struct iwm_node *);
366static struct ieee80211_node *
367		iwm_node_alloc(struct ieee80211vap *,
368		               const uint8_t[IEEE80211_ADDR_LEN]);
369static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
370static int	iwm_media_change(struct ifnet *);
371static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
372static void	iwm_endscan_cb(void *, int);
373static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
374					struct iwm_sf_cfg_cmd *,
375					struct ieee80211_node *);
376static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
377static int	iwm_send_bt_init_conf(struct iwm_softc *);
378static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
379static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
380static int	iwm_init_hw(struct iwm_softc *);
381static void	iwm_init(struct iwm_softc *);
382static void	iwm_start(struct iwm_softc *);
383static void	iwm_stop(struct iwm_softc *);
384static void	iwm_watchdog(void *);
385static void	iwm_parent(struct ieee80211com *);
386#ifdef IWM_DEBUG
387static const char *
388		iwm_desc_lookup(uint32_t);
389static void	iwm_nic_error(struct iwm_softc *);
390static void	iwm_nic_umac_error(struct iwm_softc *);
391#endif
392static void	iwm_notif_intr(struct iwm_softc *);
393static void	iwm_intr(void *);
394static int	iwm_attach(device_t);
395static int	iwm_is_valid_ether_addr(uint8_t *);
396static void	iwm_preinit(void *);
397static int	iwm_detach_local(struct iwm_softc *sc, int);
398static void	iwm_init_task(void *);
399static void	iwm_radiotap_attach(struct iwm_softc *);
400static struct ieee80211vap *
401		iwm_vap_create(struct ieee80211com *,
402		               const char [IFNAMSIZ], int,
403		               enum ieee80211_opmode, int,
404		               const uint8_t [IEEE80211_ADDR_LEN],
405		               const uint8_t [IEEE80211_ADDR_LEN]);
406static void	iwm_vap_delete(struct ieee80211vap *);
407static void	iwm_scan_start(struct ieee80211com *);
408static void	iwm_scan_end(struct ieee80211com *);
409static void	iwm_update_mcast(struct ieee80211com *);
410static void	iwm_set_channel(struct ieee80211com *);
411static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
412static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
413static int	iwm_detach(device_t);
414
415/*
416 * Firmware parser.
417 */
418
419static int
420iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
421{
422	const struct iwm_fw_cscheme_list *l = (const void *)data;
423
424	if (dlen < sizeof(*l) ||
425	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
426		return EINVAL;
427
428	/* we don't actually store anything for now, always use s/w crypto */
429
430	return 0;
431}
432
433static int
434iwm_firmware_store_section(struct iwm_softc *sc,
435    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
436{
437	struct iwm_fw_sects *fws;
438	struct iwm_fw_desc *fwone;
439
440	if (type >= IWM_UCODE_TYPE_MAX)
441		return EINVAL;
442	if (dlen < sizeof(uint32_t))
443		return EINVAL;
444
445	fws = &sc->sc_fw.fw_sects[type];
446	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
447		return EINVAL;
448
449	fwone = &fws->fw_sect[fws->fw_count];
450
451	/* first 32bit are device load offset */
452	memcpy(&fwone->offset, data, sizeof(uint32_t));
453
454	/* rest is data */
455	fwone->data = data + sizeof(uint32_t);
456	fwone->len = dlen - sizeof(uint32_t);
457
458	fws->fw_count++;
459
460	return 0;
461}
462
/* Scan-channel count assumed unless IWM_UCODE_TLV_N_SCAN_CHANNELS says otherwise. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* On-disk payload of an IWM_UCODE_TLV_DEF_CALIB TLV (little-endian). */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
470
471static int
472iwm_set_default_calib(struct iwm_softc *sc, const void *data)
473{
474	const struct iwm_tlv_calib_data *def_calib = data;
475	uint32_t ucode_type = le32toh(def_calib->ucode_type);
476
477	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
478		device_printf(sc->sc_dev,
479		    "Wrong ucode_type %u for default "
480		    "calibration.\n", ucode_type);
481		return EINVAL;
482	}
483
484	sc->sc_default_calib[ucode_type].flow_trigger =
485	    def_calib->calib.flow_trigger;
486	sc->sc_default_calib[ucode_type].event_trigger =
487	    def_calib->calib.event_trigger;
488
489	return 0;
490}
491
492static void
493iwm_fw_info_free(struct iwm_fw_info *fw)
494{
495	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
496	fw->fw_fp = NULL;
497	/* don't touch fw->fw_status */
498	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
499}
500
501static int
502iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
503{
504	struct iwm_fw_info *fw = &sc->sc_fw;
505	const struct iwm_tlv_ucode_header *uhdr;
506	struct iwm_ucode_tlv tlv;
507	enum iwm_ucode_tlv_type tlv_type;
508	const struct firmware *fwp;
509	const uint8_t *data;
510	uint32_t usniffer_img;
511	uint32_t paging_mem_size;
512	int num_of_cpus;
513	int error = 0;
514	size_t len;
515
516	if (fw->fw_status == IWM_FW_STATUS_DONE &&
517	    ucode_type != IWM_UCODE_INIT)
518		return 0;
519
520	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
521		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
522	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
523
524	if (fw->fw_fp != NULL)
525		iwm_fw_info_free(fw);
526
527	/*
528	 * Load firmware into driver memory.
529	 * fw_fp will be set.
530	 */
531	IWM_UNLOCK(sc);
532	fwp = firmware_get(sc->cfg->fw_name);
533	IWM_LOCK(sc);
534	if (fwp == NULL) {
535		device_printf(sc->sc_dev,
536		    "could not read firmware %s (error %d)\n",
537		    sc->cfg->fw_name, error);
538		goto out;
539	}
540	fw->fw_fp = fwp;
541
542	/* (Re-)Initialize default values. */
543	sc->sc_capaflags = 0;
544	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
545	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
546	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
547
548	/*
549	 * Parse firmware contents
550	 */
551
552	uhdr = (const void *)fw->fw_fp->data;
553	if (*(const uint32_t *)fw->fw_fp->data != 0
554	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
555		device_printf(sc->sc_dev, "invalid firmware %s\n",
556		    sc->cfg->fw_name);
557		error = EINVAL;
558		goto out;
559	}
560
561	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
562	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
563	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
564	    IWM_UCODE_API(le32toh(uhdr->ver)));
565	data = uhdr->data;
566	len = fw->fw_fp->datasize - sizeof(*uhdr);
567
568	while (len >= sizeof(tlv)) {
569		size_t tlv_len;
570		const void *tlv_data;
571
572		memcpy(&tlv, data, sizeof(tlv));
573		tlv_len = le32toh(tlv.length);
574		tlv_type = le32toh(tlv.type);
575
576		len -= sizeof(tlv);
577		data += sizeof(tlv);
578		tlv_data = data;
579
580		if (len < tlv_len) {
581			device_printf(sc->sc_dev,
582			    "firmware too short: %zu bytes\n",
583			    len);
584			error = EINVAL;
585			goto parse_out;
586		}
587
588		switch ((int)tlv_type) {
589		case IWM_UCODE_TLV_PROBE_MAX_LEN:
590			if (tlv_len < sizeof(uint32_t)) {
591				device_printf(sc->sc_dev,
592				    "%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
593				    __func__,
594				    (int) tlv_len);
595				error = EINVAL;
596				goto parse_out;
597			}
598			sc->sc_capa_max_probe_len
599			    = le32toh(*(const uint32_t *)tlv_data);
600			/* limit it to something sensible */
601			if (sc->sc_capa_max_probe_len >
602			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
603				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
604				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
605				    "ridiculous\n", __func__);
606				error = EINVAL;
607				goto parse_out;
608			}
609			break;
610		case IWM_UCODE_TLV_PAN:
611			if (tlv_len) {
612				device_printf(sc->sc_dev,
613				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
614				    __func__,
615				    (int) tlv_len);
616				error = EINVAL;
617				goto parse_out;
618			}
619			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
620			break;
621		case IWM_UCODE_TLV_FLAGS:
622			if (tlv_len < sizeof(uint32_t)) {
623				device_printf(sc->sc_dev,
624				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
625				    __func__,
626				    (int) tlv_len);
627				error = EINVAL;
628				goto parse_out;
629			}
630			/*
631			 * Apparently there can be many flags, but Linux driver
632			 * parses only the first one, and so do we.
633			 *
634			 * XXX: why does this override IWM_UCODE_TLV_PAN?
635			 * Intentional or a bug?  Observations from
636			 * current firmware file:
637			 *  1) TLV_PAN is parsed first
638			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
639			 * ==> this resets TLV_PAN to itself... hnnnk
640			 */
641			sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
642			break;
643		case IWM_UCODE_TLV_CSCHEME:
644			if ((error = iwm_store_cscheme(sc,
645			    tlv_data, tlv_len)) != 0) {
646				device_printf(sc->sc_dev,
647				    "%s: iwm_store_cscheme(): returned %d\n",
648				    __func__,
649				    error);
650				goto parse_out;
651			}
652			break;
653		case IWM_UCODE_TLV_NUM_OF_CPU:
654			if (tlv_len != sizeof(uint32_t)) {
655				device_printf(sc->sc_dev,
656				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
657				    __func__,
658				    (int) tlv_len);
659				error = EINVAL;
660				goto parse_out;
661			}
662			num_of_cpus = le32toh(*(const uint32_t *)tlv_data);
663			if (num_of_cpus == 2) {
664				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
665					TRUE;
666				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
667					TRUE;
668				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
669					TRUE;
670			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
671				device_printf(sc->sc_dev,
672				    "%s: Driver supports only 1 or 2 CPUs\n",
673				    __func__);
674				error = EINVAL;
675				goto parse_out;
676			}
677			break;
678		case IWM_UCODE_TLV_SEC_RT:
679			if ((error = iwm_firmware_store_section(sc,
680			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
681				device_printf(sc->sc_dev,
682				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
683				    __func__,
684				    error);
685				goto parse_out;
686			}
687			break;
688		case IWM_UCODE_TLV_SEC_INIT:
689			if ((error = iwm_firmware_store_section(sc,
690			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
691				device_printf(sc->sc_dev,
692				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
693				    __func__,
694				    error);
695				goto parse_out;
696			}
697			break;
698		case IWM_UCODE_TLV_SEC_WOWLAN:
699			if ((error = iwm_firmware_store_section(sc,
700			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
701				device_printf(sc->sc_dev,
702				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
703				    __func__,
704				    error);
705				goto parse_out;
706			}
707			break;
708		case IWM_UCODE_TLV_DEF_CALIB:
709			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
710				device_printf(sc->sc_dev,
711				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
712				    __func__,
713				    (int) tlv_len,
714				    (int) sizeof(struct iwm_tlv_calib_data));
715				error = EINVAL;
716				goto parse_out;
717			}
718			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
719				device_printf(sc->sc_dev,
720				    "%s: iwm_set_default_calib() failed: %d\n",
721				    __func__,
722				    error);
723				goto parse_out;
724			}
725			break;
726		case IWM_UCODE_TLV_PHY_SKU:
727			if (tlv_len != sizeof(uint32_t)) {
728				error = EINVAL;
729				device_printf(sc->sc_dev,
730				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
731				    __func__,
732				    (int) tlv_len);
733				goto parse_out;
734			}
735			sc->sc_fw.phy_config =
736			    le32toh(*(const uint32_t *)tlv_data);
737			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
738						  IWM_FW_PHY_CFG_TX_CHAIN) >>
739						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
740			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
741						  IWM_FW_PHY_CFG_RX_CHAIN) >>
742						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
743			break;
744
745		case IWM_UCODE_TLV_API_CHANGES_SET: {
746			const struct iwm_ucode_api *api;
747			if (tlv_len != sizeof(*api)) {
748				error = EINVAL;
749				goto parse_out;
750			}
751			api = (const struct iwm_ucode_api *)tlv_data;
752			/* Flags may exceed 32 bits in future firmware. */
753			if (le32toh(api->api_index) > 0) {
754				device_printf(sc->sc_dev,
755				    "unsupported API index %d\n",
756				    le32toh(api->api_index));
757				goto parse_out;
758			}
759			sc->sc_ucode_api = le32toh(api->api_flags);
760			break;
761		}
762
763		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
764			const struct iwm_ucode_capa *capa;
765			int idx, i;
766			if (tlv_len != sizeof(*capa)) {
767				error = EINVAL;
768				goto parse_out;
769			}
770			capa = (const struct iwm_ucode_capa *)tlv_data;
771			idx = le32toh(capa->api_index);
772			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
773				device_printf(sc->sc_dev,
774				    "unsupported API index %d\n", idx);
775				goto parse_out;
776			}
777			for (i = 0; i < 32; i++) {
778				if ((le32toh(capa->api_capa) & (1U << i)) == 0)
779					continue;
780				setbit(sc->sc_enabled_capa, i + (32 * idx));
781			}
782			break;
783		}
784
785		case 48: /* undocumented TLV */
786		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
787		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
788			/* ignore, not used by current driver */
789			break;
790
791		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
792			if ((error = iwm_firmware_store_section(sc,
793			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
794			    tlv_len)) != 0)
795				goto parse_out;
796			break;
797
798		case IWM_UCODE_TLV_PAGING:
799			if (tlv_len != sizeof(uint32_t)) {
800				error = EINVAL;
801				goto parse_out;
802			}
803			paging_mem_size = le32toh(*(const uint32_t *)tlv_data);
804
805			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
806			    "%s: Paging: paging enabled (size = %u bytes)\n",
807			    __func__, paging_mem_size);
808			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
809				device_printf(sc->sc_dev,
810					"%s: Paging: driver supports up to %u bytes for paging image\n",
811					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
812				error = EINVAL;
813				goto out;
814			}
815			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
816				device_printf(sc->sc_dev,
817				    "%s: Paging: image isn't multiple %u\n",
818				    __func__, IWM_FW_PAGING_SIZE);
819				error = EINVAL;
820				goto out;
821			}
822
823			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
824			    paging_mem_size;
825			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
826			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
827			    paging_mem_size;
828			break;
829
830		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
831			if (tlv_len != sizeof(uint32_t)) {
832				error = EINVAL;
833				goto parse_out;
834			}
835			sc->sc_capa_n_scan_channels =
836			  le32toh(*(const uint32_t *)tlv_data);
837			break;
838
839		case IWM_UCODE_TLV_FW_VERSION:
840			if (tlv_len != sizeof(uint32_t) * 3) {
841				error = EINVAL;
842				goto parse_out;
843			}
844			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
845			    "%d.%d.%d",
846			    le32toh(((const uint32_t *)tlv_data)[0]),
847			    le32toh(((const uint32_t *)tlv_data)[1]),
848			    le32toh(((const uint32_t *)tlv_data)[2]));
849			break;
850
851		case IWM_UCODE_TLV_FW_MEM_SEG:
852			break;
853
854		default:
855			device_printf(sc->sc_dev,
856			    "%s: unknown firmware section %d, abort\n",
857			    __func__, tlv_type);
858			error = EINVAL;
859			goto parse_out;
860		}
861
862		len -= roundup(tlv_len, 4);
863		data += roundup(tlv_len, 4);
864	}
865
866	KASSERT(error == 0, ("unhandled error"));
867
868 parse_out:
869	if (error) {
870		device_printf(sc->sc_dev, "firmware parse error %d, "
871		    "section type %d\n", error, tlv_type);
872	}
873
874	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
875		device_printf(sc->sc_dev,
876		    "device uses unsupported power ops\n");
877		error = ENOTSUP;
878	}
879
880 out:
881	if (error) {
882		fw->fw_status = IWM_FW_STATUS_NONE;
883		if (fw->fw_fp != NULL)
884			iwm_fw_info_free(fw);
885	} else
886		fw->fw_status = IWM_FW_STATUS_DONE;
887	wakeup(&sc->sc_fw);
888
889	return error;
890}
891
892/*
893 * DMA resource routines
894 */
895
896static void
897iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
898{
899        if (error != 0)
900                return;
901	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
902	*(bus_addr_t *)arg = segs[0].ds_addr;
903}
904
/*
 * Allocate a physically contiguous, zeroed, coherent DMA buffer of 'size'
 * bytes with the given alignment, below 4GB.  On success fills in dma->tag,
 * dma->map, dma->vaddr and dma->paddr and returns 0; on failure everything
 * is torn down via iwm_dma_contig_free() and the bus_dma error is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int error;

	/* Start from a clean state so iwm_dma_contig_free() is safe on any
	 * partially-completed allocation. */
	dma->tag = NULL;
	dma->map = NULL;
	dma->size = size;
	dma->vaddr = NULL;

	/* Single segment, 32-bit addressable (device limitation). */
	error = bus_dma_tag_create(tag, alignment,
            0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
            1, size, 0, NULL, NULL, &dma->tag);
        if (error != 0)
                goto fail;

        error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
        if (error != 0)
                goto fail;

	/* iwm_dma_map_addr() stores the segment address into dma->paddr. */
        error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
            iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
        if (error != 0) {
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
		goto fail;
	}

	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	iwm_dma_contig_free(dma);

	return error;
}
944
/*
 * Release a buffer obtained from iwm_dma_contig_alloc().  Safe to call on
 * a partially-initialized or already-freed iwm_dma_info (fields are NULLed
 * as they are released).
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->vaddr != NULL) {
		bus_dmamap_sync(dma->tag, dma->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->tag, dma->map);
		bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
		dma->vaddr = NULL;
	}
	if (dma->tag != NULL) {
		bus_dma_tag_destroy(dma->tag);
		dma->tag = NULL;
	}
}
960
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
}
969
/*
 * TX scheduler byte-count tables, one per TX queue.
 * (Original note said "not used?" — the base address is in fact programmed
 * into IWM_SCD_DRAM_BASE_ADDR by iwm_trans_pcie_fw_alive().)
 */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
	/* TX scheduler rings must be aligned on a 1KB boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
}
978
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One 4KB page, 4KB-aligned; address is written to IWM_FH_KW_MEM_ADDR_REG. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
985
/* interrupt cause table (ICT mode; see iwm_ict_reset()) */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Alignment must match the shift used to program IWM_CSR_DRAM_INT_TBL_REG. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
993
/*
 * Allocate all resources for one RX ring: the descriptor ring, the RX
 * status area, a DMA tag plus one map per ring entry (and a spare map for
 * iwm_rx_addbuf()), and an initial receive mbuf per slot.  Returns 0 or a
 * bus_dma/allocation error; on failure everything already allocated is
 * released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* Each descriptor is one 32-bit word per ring slot. */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

        /* Create RX buffer DMA tag (single segment of IWM_RBUF_SIZE bytes). */
        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: could not create RX buf DMA tag, error %d\n",
                    __func__, error);
                goto fail;
        }

	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: could not create RX buf DMA map, error %d\n",
		    __func__, error);
		goto fail;
	}
	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];
		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not create RX buf DMA map, error %d\n",
			    __func__, error);
			goto fail;
		}
		data->m = NULL;

		/* iwm_rx_addbuf() attaches an mbuf to slot i. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1064
1065static void
1066iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1067{
1068	/* Reset the ring state */
1069	ring->cur = 0;
1070
1071	/*
1072	 * The hw rx ring index in shared memory must also be cleared,
1073	 * otherwise the discrepancy can cause reprocessing chaos.
1074	 */
1075	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1076}
1077
/*
 * Release everything allocated by iwm_alloc_rx_ring().  Tolerates a
 * partially-constructed ring: each resource is checked for NULL before
 * being destroyed, so this doubles as the error-unwind path.
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		/* Unload and free any mbuf still attached to this slot. */
		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	if (ring->spare_map != NULL) {
		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
		ring->spare_map = NULL;
	}
	/* Tag last: all maps created from it must already be destroyed. */
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1110
/*
 * Allocate resources for one TX ring: the TFD descriptor array and, for
 * rings up to and including the command queue, the per-entry command
 * buffers, a data DMA tag and one map per slot.  Pre-computes each slot's
 * command/scratch physical addresses.  Returns 0 or an allocation error;
 * on failure the ring is torn down via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	size_t maxsize;
	int nsegments;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* FW commands may require more mapped space than packets. */
	if (qid == IWM_MVM_CMD_QUEUE) {
		maxsize = IWM_RBUF_SIZE;
		nsegments = 1;
	} else {
		maxsize = MCLBYTES;
		nsegments = IWM_MAX_SCATTER - 2;
	}

	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
		goto fail;
	}

	/*
	 * Walk the command buffer array, recording each slot's physical
	 * address and the address of its scratch area within the TX command.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size,
	    ("invalid physical address"));
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1190
/*
 * Drop all pending frames on a TX ring and reset it to the empty state
 * without freeing its DMA resources (contrast with iwm_free_tx_ring()).
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);
	/* This ring can no longer be "full". */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	/* Resetting the command queue drops any in-flight host command. */
	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
		iwm_pcie_clear_cmd_in_flight(sc);
}
1218
/*
 * Release everything allocated by iwm_alloc_tx_ring().  Like the RX
 * counterpart, this tolerates a partially-constructed ring and is used as
 * the error-unwind path.
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dmat, data->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(ring->data_dmat, data->map);
			data->map = NULL;
		}
	}
	/* Tag last: all maps created from it must already be destroyed. */
	if (ring->data_dmat != NULL) {
		bus_dma_tag_destroy(ring->data_dmat);
		ring->data_dmat = NULL;
	}
}
1247
1248/*
1249 * High-level hardware frobbing routines
1250 */
1251
/* Enable the default interrupt set and remember it for iwm_restore_interrupts(). */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1258
/* Re-program the last interrupt mask saved in sc->sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1264
/* Mask all interrupts and acknowledge anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
1275
/*
 * Clear the interrupt cause table, point the device at it and switch the
 * driver into ICT interrupt mode.  Interrupts are disabled for the
 * duration and re-enabled on exit.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1299
1300/* iwlwifi pcie/trans.c */
1301
1302/*
1303 * Since this .. hard-resets things, it's time to actually
1304 * mark the first vap (if any) as having no mac context.
1305 * It's annoying, but since the driver is potentially being
1306 * stop/start'ed whilst active (thanks openbsd port!) we
1307 * have to correctly track this.
1308 */
1309static void
1310iwm_stop_device(struct iwm_softc *sc)
1311{
1312	struct ieee80211com *ic = &sc->sc_ic;
1313	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1314	int chnl, qid;
1315	uint32_t mask = 0;
1316
1317	/* tell the device to stop sending interrupts */
1318	iwm_disable_interrupts(sc);
1319
1320	/*
1321	 * FreeBSD-local: mark the first vap as not-uploaded,
1322	 * so the next transition through auth/assoc
1323	 * will correctly populate the MAC context.
1324	 */
1325	if (vap) {
1326		struct iwm_vap *iv = IWM_VAP(vap);
1327		iv->is_uploaded = 0;
1328	}
1329
1330	/* device going down, Stop using ICT table */
1331	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1332
1333	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1334
1335	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1336
1337	if (iwm_nic_lock(sc)) {
1338		/* Stop each Tx DMA channel */
1339		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1340			IWM_WRITE(sc,
1341			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1342			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1343		}
1344
1345		/* Wait for DMA channels to be idle */
1346		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1347		    5000)) {
1348			device_printf(sc->sc_dev,
1349			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1350			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1351		}
1352		iwm_nic_unlock(sc);
1353	}
1354	iwm_pcie_rx_stop(sc);
1355
1356	/* Stop RX ring. */
1357	iwm_reset_rx_ring(sc, &sc->rxq);
1358
1359	/* Reset all TX rings. */
1360	for (qid = 0; qid < nitems(sc->txq); qid++)
1361		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1362
1363	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1364		/* Power-down device's busmaster DMA clocks */
1365		iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1366		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1367		DELAY(5);
1368	}
1369
1370	/* Make sure (redundant) we've released our request to stay awake */
1371	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1372	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1373
1374	/* Stop the device, and put it in low power state */
1375	iwm_apm_stop(sc);
1376
1377	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
1378	 * Clean again the interrupt here
1379	 */
1380	iwm_disable_interrupts(sc);
1381	/* stop and reset the on-board processor */
1382	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1383
1384	/*
1385	 * Even if we stop the HW, we still want the RF kill
1386	 * interrupt
1387	 */
1388	iwm_enable_rfkill_int(sc);
1389	iwm_check_rfkill(sc);
1390}
1391
/* iwlwifi: mvm/ops.c */
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG with the MAC step/dash from the HW
 * revision and the radio type/step/dash decoded from the firmware PHY
 * configuration.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	/* Decode the radio configuration fields from phy_config. */
	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1435
/*
 * Program the RX DMA engine: reset pointers, set the descriptor ring and
 * status area addresses, configure channel 0 and set the initial write
 * pointer.  Returns 0 on success or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1492
/*
 * Program the TX side: deactivate the scheduler, set the keep-warm page
 * address, point the hardware at each ring's descriptor array and enable
 * the scheduler's auto-active mode.  Returns 0 on success or EBUSY if the
 * NIC lock cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1527
/*
 * Bring the NIC up far enough to load firmware: APM init, power and PHY
 * configuration, then RX and TX engine initialization.  Returns 0 or the
 * first sub-step's error.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "%s: shadow registers enabled\n", __func__);
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
1554
/*
 * Access-category -> hardware TX FIFO mapping.
 * NOTE(review): index order (VO, VI, BE, BK) mirrors the iwlwifi table;
 * confirm against the caller's AC numbering.
 */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
1561
1562static int
1563iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1564{
1565	if (!iwm_nic_lock(sc)) {
1566		device_printf(sc->sc_dev,
1567		    "%s: cannot enable txq %d\n",
1568		    __func__,
1569		    qid);
1570		return EBUSY;
1571	}
1572
1573	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1574
1575	if (qid == IWM_MVM_CMD_QUEUE) {
1576		/* unactivate before configuration */
1577		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1578		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1579		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1580
1581		iwm_nic_unlock(sc);
1582
1583		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1584
1585		if (!iwm_nic_lock(sc)) {
1586			device_printf(sc->sc_dev,
1587			    "%s: cannot enable txq %d\n", __func__, qid);
1588			return EBUSY;
1589		}
1590		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1591		iwm_nic_unlock(sc);
1592
1593		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1594		/* Set scheduler window size and frame limit. */
1595		iwm_write_mem32(sc,
1596		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1597		    sizeof(uint32_t),
1598		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1599		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1600		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1601		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1602
1603		if (!iwm_nic_lock(sc)) {
1604			device_printf(sc->sc_dev,
1605			    "%s: cannot enable txq %d\n", __func__, qid);
1606			return EBUSY;
1607		}
1608		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1609		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1610		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1611		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1612		    IWM_SCD_QUEUE_STTS_REG_MSK);
1613	} else {
1614		struct iwm_scd_txq_cfg_cmd cmd;
1615		int error;
1616
1617		iwm_nic_unlock(sc);
1618
1619		memset(&cmd, 0, sizeof(cmd));
1620		cmd.scd_queue = qid;
1621		cmd.enable = 1;
1622		cmd.sta_id = sta_id;
1623		cmd.tx_fifo = fifo;
1624		cmd.aggregate = 0;
1625		cmd.window = IWM_FRAME_LIMIT;
1626
1627		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1628		    sizeof(cmd), &cmd);
1629		if (error) {
1630			device_printf(sc->sc_dev,
1631			    "cannot enable txq %d\n", qid);
1632			return error;
1633		}
1634
1635		if (!iwm_nic_lock(sc))
1636			return EBUSY;
1637	}
1638
1639	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1640	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1641
1642	iwm_nic_unlock(sc);
1643
1644	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1645	    __func__, qid, fifo);
1646
1647	return 0;
1648}
1649
/*
 * Post-"alive" transport setup: reset the ICT table, clear the scheduler
 * context/byte-count SRAM, program the scheduler DRAM base, enable the
 * command queue and all TX DMA channels.  'scd_base_addr' is the scheduler
 * base the firmware reported (0 to skip the cross-check).  Returns 0 or
 * EBUSY/an iwm_enable_txq() error.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	iwm_nic_unlock(sc);

	/* Cross-check the firmware-reported scheduler base against PRPH. */
	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate the TX scheduler for all FIFOs. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}
1720
1721/*
1722 * NVM read access and content parsing.  We do not support
1723 * external NVM or writing NVM.
1724 * iwlwifi/mvm/nvm.c
1725 */
1726
1727/* Default NVM size to read */
1728#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1729
1730#define IWM_NVM_WRITE_OPCODE 1
1731#define IWM_NVM_READ_OPCODE 0
1732
1733/* load nvm chunk response */
1734enum {
1735	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1736	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1737};
1738
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD firmware
 * command.  Up to 'length' bytes starting at word/byte 'offset' of
 * 'section' are copied into data + offset; *len is set to the number of
 * bytes actually read (0 when the firmware reports the benign
 * NOT_VALID_ADDRESS condition past the end of a 2K-multiple section).
 * Returns 0 on success, EIO/EINVAL on firmware or response errors, or the
 * iwm_send_cmd() error.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Sanity-check the response against what we asked for. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	/* Always release the response buffer held by IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &cmd);
	return ret;
}
1825
1826/*
1827 * Reads an NVM section completely.
1828 * NICs prior to 7000 family don't have a real NVM, but just read
1829 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1830 * by uCode, we need to manually check in this case that we don't
1831 * overflow and try to read more than the EEPROM size.
1832 * For 7000 family NICs, we supply the maximal size we can read, and
1833 * the uCode fills the response with as much data as we can,
1834 * without overflowing, so no check is needed.
1835 */
1836static int
1837iwm_nvm_read_section(struct iwm_softc *sc,
1838	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1839{
1840	uint16_t seglen, length, offset = 0;
1841	int ret;
1842
1843	/* Set nvm section read length */
1844	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1845
1846	seglen = length;
1847
1848	/* Read the NVM until exhausted (reading less than requested) */
1849	while (seglen == length) {
1850		/* Check no memory assumptions fail and cause an overflow */
1851		if ((size_read + offset + length) >
1852		    sc->cfg->eeprom_size) {
1853			device_printf(sc->sc_dev,
1854			    "EEPROM size is too small for NVM\n");
1855			return ENOBUFS;
1856		}
1857
1858		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1859		if (ret) {
1860			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1861				    "Cannot read NVM from section %d offset %d, length %d\n",
1862				    section, offset, length);
1863			return ret;
1864		}
1865		offset += seglen;
1866	}
1867
1868	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1869		    "NVM section %d read completed\n", section);
1870	*len = offset;
1871	return 0;
1872}
1873
1874/*
1875 * BEGIN IWM_NVM_PARSE
1876 */
1877
1878/* iwlwifi/iwl-nvm-parse.c */
1879
/*
 * NVM offsets (in words) definitions.
 * SW/calibration-section constants are relative to the start of their
 * respective section (note the "- IWM_NVM_SW_SECTION" style expressions).
 */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};

/* Same as above, but for the 8000-series NVM layout. */
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};

/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* 8000-series radio config uses wider fields at different positions. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)

/* Fallback TX power (dBm) when the NVM does not provide one. */
#define DEFAULT_MAX_TX_POWER 16
1948
1949/**
1950 * enum iwm_nvm_channel_flags - channel flags in NVM
1951 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1952 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1953 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1954 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1955 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1956 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1957 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1958 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1959 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1960 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1961 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	/* bit 2 is not defined here */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	/* bits 5-6 are not defined here */
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1973
1974/*
1975 * Translate EEPROM flags to net80211.
1976 */
1977static uint32_t
1978iwm_eeprom_channel_flags(uint16_t ch_flags)
1979{
1980	uint32_t nflags;
1981
1982	nflags = 0;
1983	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1984		nflags |= IEEE80211_CHAN_PASSIVE;
1985	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1986		nflags |= IEEE80211_CHAN_NOADHOC;
1987	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1988		nflags |= IEEE80211_CHAN_DFS;
1989		/* Just in case. */
1990		nflags |= IEEE80211_CHAN_NOADHOC;
1991	}
1992
1993	return (nflags);
1994}
1995
1996static void
1997iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1998    int maxchans, int *nchans, int ch_idx, size_t ch_num,
1999    const uint8_t bands[])
2000{
2001	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
2002	uint32_t nflags;
2003	uint16_t ch_flags;
2004	uint8_t ieee;
2005	int error;
2006
2007	for (; ch_idx < ch_num; ch_idx++) {
2008		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2009		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2010			ieee = iwm_nvm_channels[ch_idx];
2011		else
2012			ieee = iwm_nvm_channels_8000[ch_idx];
2013
2014		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2015			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2016			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
2017			    ieee, ch_flags,
2018			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2019			    "5.2" : "2.4");
2020			continue;
2021		}
2022
2023		nflags = iwm_eeprom_channel_flags(ch_flags);
2024		error = ieee80211_add_channel(chans, maxchans, nchans,
2025		    ieee, 0, 0, nflags, bands);
2026		if (error != 0)
2027			break;
2028
2029		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
2030		    "Ch. %d Flags %x [%sGHz] - Added\n",
2031		    ieee, ch_flags,
2032		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2033		    "5.2" : "2.4");
2034	}
2035}
2036
2037static void
2038iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2039    struct ieee80211_channel chans[])
2040{
2041	struct iwm_softc *sc = ic->ic_softc;
2042	struct iwm_nvm_data *data = sc->nvm_data;
2043	uint8_t bands[IEEE80211_MODE_BYTES];
2044	size_t ch_num;
2045
2046	memset(bands, 0, sizeof(bands));
2047	/* 1-13: 11b/g channels. */
2048	setbit(bands, IEEE80211_MODE_11B);
2049	setbit(bands, IEEE80211_MODE_11G);
2050	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2051	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2052
2053	/* 14: 11b channel only. */
2054	clrbit(bands, IEEE80211_MODE_11G);
2055	iwm_add_channel_band(sc, chans, maxchans, nchans,
2056	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2057
2058	if (data->sku_cap_band_52GHz_enable) {
2059		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2060			ch_num = nitems(iwm_nvm_channels);
2061		else
2062			ch_num = nitems(iwm_nvm_channels_8000);
2063		memset(bands, 0, sizeof(bands));
2064		setbit(bands, IEEE80211_MODE_11A);
2065		iwm_add_channel_band(sc, chans, maxchans, nchans,
2066		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2067	}
2068}
2069
/*
 * Family-8000 MAC address resolution.  Preference order:
 *   1. MAC_OVERRIDE NVM section (if present and not reserved/invalid);
 *   2. WFMP PRPH registers (when the HW NVM section exists);
 *   3. zeroed address (error case).
 * The result is stored in data->hw_addr.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address some NVM images ship with. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Registers hold the address byte-reversed; undo that. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	/* No source available: leave an all-zero (invalid) address. */
	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2128
2129static int
2130iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2131	    const uint16_t *phy_sku)
2132{
2133	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2134		return le16_to_cpup(nvm_sw + IWM_SKU);
2135
2136	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2137}
2138
2139static int
2140iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2141{
2142	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2143		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2144	else
2145		return le32_to_cpup((const uint32_t *)(nvm_sw +
2146						IWM_NVM_VERSION_8000));
2147}
2148
2149static int
2150iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2151		  const uint16_t *phy_sku)
2152{
2153        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2154                return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2155
2156        return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2157}
2158
2159static int
2160iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2161{
2162	int n_hw_addr;
2163
2164	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2165		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2166
2167	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2168
2169        return n_hw_addr & IWM_N_HW_ADDR_MASK;
2170}
2171
2172static void
2173iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2174		  uint32_t radio_cfg)
2175{
2176	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2177		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2178		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2179		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2180		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2181		return;
2182	}
2183
2184	/* set the radio configuration for family 8000 */
2185	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2186	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2187	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2188	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2189	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2190	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2191}
2192
/*
 * Fill data->hw_addr with the device MAC address.  Pre-8000 devices
 * read it directly from the HW NVM section (stored as little-endian
 * 16-bit words); family 8000 goes through the MAC-override/WFMP path.
 * Returns EINVAL when no valid address could be determined.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2223
/*
 * Build a struct iwm_nvm_data from the raw NVM sections.  The structure
 * is allocated with enough trailing space for the per-family channel
 * flags array, which is copied in at the end.  Returns NULL on
 * allocation failure or when no valid MAC address was found; on success
 * the caller owns the result (release with iwm_free_nvm_data()).
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;

	/* Allocation includes room for the trailing nvm_ch_flags array. */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	} else {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	}
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is force-disabled here regardless of the SKU bits. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		free(data, M_DEVBUF);
		return NULL;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2273
2274static void
2275iwm_free_nvm_data(struct iwm_nvm_data *data)
2276{
2277	if (data != NULL)
2278		free(data, M_DEVBUF);
2279}
2280
/*
 * Verify that the NVM sections mandatory for this device family are
 * present, then hand the section pointers to iwm_parse_nvm_data().
 * Returns NULL (with a console message) when a required section is
 * missing.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	/* Optional sections may legitimately be NULL here. */
	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2333
2334static int
2335iwm_nvm_init(struct iwm_softc *sc)
2336{
2337	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2338	int i, ret, section;
2339	uint32_t size_read = 0;
2340	uint8_t *nvm_buffer, *temp;
2341	uint16_t len;
2342
2343	memset(nvm_sections, 0, sizeof(nvm_sections));
2344
2345	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2346		return EINVAL;
2347
2348	/* load NVM values from nic */
2349	/* Read From FW NVM */
2350	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2351
2352	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2353	if (!nvm_buffer)
2354		return ENOMEM;
2355	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2356		/* we override the constness for initial read */
2357		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2358					   &len, size_read);
2359		if (ret)
2360			continue;
2361		size_read += len;
2362		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2363		if (!temp) {
2364			ret = ENOMEM;
2365			break;
2366		}
2367		memcpy(temp, nvm_buffer, len);
2368
2369		nvm_sections[section].data = temp;
2370		nvm_sections[section].length = len;
2371	}
2372	if (!size_read)
2373		device_printf(sc->sc_dev, "OTP is blank\n");
2374	free(nvm_buffer, M_DEVBUF);
2375
2376	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2377	if (!sc->nvm_data)
2378		return EINVAL;
2379	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2380		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2381
2382	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2383		if (nvm_sections[i].data != NULL)
2384			free(nvm_sections[i].data, M_DEVBUF);
2385	}
2386
2387	return 0;
2388}
2389
/*
 * Upload one firmware section to device memory, bouncing it through the
 * pre-allocated fw_dma buffer in chunks of at most
 * IWM_FH_MEM_TB_MAX_LENGTH bytes.  Chunks destined for the extended
 * SRAM window are bracketed by toggling
 * IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE.  Returns 0 or the error from
 * iwm_pcie_load_firmware_chunk().
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		/* Does this chunk target the extended SRAM window? */
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the DMA bounce buffer, then push it. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2442
2443/*
2444 * ucode
2445 */
/*
 * Push one staged firmware chunk to device SRAM via the FH service
 * channel DMA engine, then sleep until the completion interrupt sets
 * sc_fw_chunk_done.  Returns 0, EBUSY if the NIC could not be locked,
 * or ETIMEDOUT if the chunk never completed.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
			     bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel, program source/destination, then enable. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/*
	 * Wait for sc_fw_chunk_done; each msleep() gives up after hz
	 * ticks (~1s).  NOTE(review): an earlier comment claimed 5s,
	 * but the loop exits on the first msleep() timeout.
	 */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2499
/*
 * Family-8000 variant: load the ucode sections belonging to one CPU,
 * reporting each loaded section back to the device through
 * IWM_FH_UCODE_LOAD_STATUS.  CPU1 uses the low 16 bits of that
 * register, CPU2 the high 16 (shift_param).  *first_ucode_section is
 * both input (resume point) and output (index where loading stopped).
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Running ones-mask: 1, 3, 7, ... per section. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Mark this CPU's half of the load-status register complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2561
/*
 * Load the ucode sections belonging to one CPU (pre-secure-boot path).
 * *first_ucode_section is both input (resume point) and output (index
 * where loading stopped).  On family 8000, the completion bits are set
 * in IWM_CSR_UCODE_LOAD_STATUS_ADDR, shifted into this CPU's half.
 */
static int
iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	uint32_t last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		iwm_set_bits_prph(sc,
				  IWM_CSR_UCODE_LOAD_STATUS_ADDR,
				  (IWM_LMPM_CPU_UCODE_LOADING_COMPLETED |
				   IWM_LMPM_CPU_HDRS_LOADING_COMPLETED |
				   IWM_LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;

}
2614
/*
 * Load a complete firmware image (pre-8000 path): CPU1 sections, then
 * CPU2 sections for dual-CPU images, then release the CPU from reset
 * so the firmware starts executing.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
                iwm_write_prph(sc,
			       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
2650
/*
 * Load a complete firmware image on family-8000 devices (secure-boot
 * path): release the CPU from reset first, then feed the secured CPU1
 * and CPU2 sections through iwm_pcie_load_cpu_sections_8000().
 */
int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		    image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
	    &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
	    &first_ucode_section);
}
2675
2676/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which is needed to observe
 * firmware-chunk DMA completions while the image is being loaded.
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2684
2685/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware and upload the given firmware image.
 * Sequence: take ownership of the device, quiesce interrupts, clear
 * the rfkill handshake bits, init the NIC, restrict interrupts to
 * FH_TX for the duration of the load, then push the image.
 * Returns 0 or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Acknowledge any pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2743
2744static int
2745iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2746{
2747	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2748		.valid = htole32(valid_tx_ant),
2749	};
2750
2751	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2752	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2753}
2754
2755/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the default calibration triggers for
 * the currently running ucode type to the firmware.
 *
 * NOTE(review): phy_cfg_cmd is stack-allocated and only phy_cfg and
 * calib_control are assigned; confirm the struct has no further
 * members, otherwise uninitialized bytes are sent to the firmware.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2774
2775static int
2776iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2777{
2778	struct iwm_mvm_alive_data *alive_data = data;
2779	struct iwm_mvm_alive_resp_ver1 *palive1;
2780	struct iwm_mvm_alive_resp_ver2 *palive2;
2781	struct iwm_mvm_alive_resp *palive;
2782
2783	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
2784		palive1 = (void *)pkt->data;
2785
2786		sc->support_umac_log = FALSE;
2787                sc->error_event_table =
2788                        le32toh(palive1->error_event_table_ptr);
2789                sc->log_event_table =
2790                        le32toh(palive1->log_event_table_ptr);
2791                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);
2792
2793                alive_data->valid = le16toh(palive1->status) ==
2794                                    IWM_ALIVE_STATUS_OK;
2795                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2796			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2797			     le16toh(palive1->status), palive1->ver_type,
2798                             palive1->ver_subtype, palive1->flags);
2799	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
2800		palive2 = (void *)pkt->data;
2801		sc->error_event_table =
2802			le32toh(palive2->error_event_table_ptr);
2803		sc->log_event_table =
2804			le32toh(palive2->log_event_table_ptr);
2805		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
2806		sc->umac_error_event_table =
2807                        le32toh(palive2->error_info_addr);
2808
2809		alive_data->valid = le16toh(palive2->status) ==
2810				    IWM_ALIVE_STATUS_OK;
2811		if (sc->umac_error_event_table)
2812			sc->support_umac_log = TRUE;
2813
2814		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2815			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2816			    le16toh(palive2->status), palive2->ver_type,
2817			    palive2->ver_subtype, palive2->flags);
2818
2819		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2820			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2821			    palive2->umac_major, palive2->umac_minor);
2822	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2823		palive = (void *)pkt->data;
2824
2825		sc->error_event_table =
2826			le32toh(palive->error_event_table_ptr);
2827		sc->log_event_table =
2828			le32toh(palive->log_event_table_ptr);
2829		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
2830		sc->umac_error_event_table =
2831			le32toh(palive->error_info_addr);
2832
2833		alive_data->valid = le16toh(palive->status) ==
2834				    IWM_ALIVE_STATUS_OK;
2835		if (sc->umac_error_event_table)
2836			sc->support_umac_log = TRUE;
2837
2838		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2839			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
2840			    le16toh(palive->status), palive->ver_type,
2841			    palive->ver_subtype, palive->flags);
2842
2843		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2844			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2845			    le32toh(palive->umac_major),
2846			    le32toh(palive->umac_minor));
2847	}
2848
2849	return TRUE;
2850}
2851
2852static int
2853iwm_wait_phy_db_entry(struct iwm_softc *sc,
2854	struct iwm_rx_packet *pkt, void *data)
2855{
2856	struct iwm_phy_db *phy_db = data;
2857
2858	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2859		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2860			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2861			    __func__, pkt->hdr.code);
2862		}
2863		return TRUE;
2864	}
2865
2866	if (iwm_phy_db_set_section(phy_db, pkt)) {
2867		device_printf(sc->sc_dev,
2868		    "%s: iwm_phy_db_set_section failed\n", __func__);
2869	}
2870
2871	return FALSE;
2872}
2873
/*
 * Load the requested ucode image and block until the firmware's ALIVE
 * notification arrives (handled by iwm_alive_fn()).  On any failure the
 * previous ucode type is restored in sc->cur_ucode.  Returns 0, an
 * errno from the load/wait path, or EIO if the firmware reported a bad
 * ALIVE status.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Arm the ALIVE waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* On 8000-family parts, dump the secure-boot status. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS),
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/* XXX implement FW paging */
		device_printf(sc->sc_dev,
		    "%s: XXX FW paging not implemented yet\n", __func__);
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2950
2951/*
2952 * mvm misc bits
2953 */
2954
2955/*
2956 * follows iwlwifi/fw.c
2957 */
/*
 * Run the INIT ucode: load it, then either just read the NVM (justnvm != 0)
 * or perform the full init flow (BT coex, Smart FIFO, TX antennas, PHY
 * config) and wait for the calibration-complete notification.
 *
 * Note the control flow: the "error" label is ALSO taken on the successful
 * justnvm path — it merely removes the calib_wait entry; "ret" carries the
 * actual status.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/*
	 * Register the calibration waiter before starting the firmware so
	 * early IWM_CALIB_RES_NOTIF_PHY_DB notifications are not lost.
	 * iwm_wait_phy_db_entry stores each result into sc_phy_db.
	 */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* Success path: jump to "error" only to remove calib_wait. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3050
3051/*
3052 * receive side
3053 */
3054
3055/* (re)stock rx ring, called at init-time and at runtime */
/* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap = NULL;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new mbuf into the spare map first, so the slot's
	 * existing mapping stays intact if the load fails.
	 */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		goto fail;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* Hardware takes the DMA address shifted right by 8 (256B aligned). */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	m_freem(m);
	return error;
}
3101
3102/* iwlwifi: mvm/rx.c */
3103#define IWM_RSSI_OFFSET 50
3104static int
3105iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3106{
3107	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3108	uint32_t agc_a, agc_b;
3109	uint32_t val;
3110
3111	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3112	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3113	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3114
3115	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3116	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3117	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3118
3119	/*
3120	 * dBm = rssi dB - agc dB - constant.
3121	 * Higher AGC (higher radio gain) means lower signal.
3122	 */
3123	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3124	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3125	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3126
3127	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3128	    "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3129	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
3130
3131	return max_rssi_dbm;
3132}
3133
3134/* iwlwifi: mvm/rx.c */
3135/*
3136 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3137 * values are reported by the fw as positive values - need to negate
3138 * to obtain their dBM.  Account for missing antennas by replacing 0
3139 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3140 */
3141static int
3142iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3143{
3144	int energy_a, energy_b, energy_c, max_energy;
3145	uint32_t val;
3146
3147	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3148	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3149	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3150	energy_a = energy_a ? -energy_a : -256;
3151	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3152	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3153	energy_b = energy_b ? -energy_b : -256;
3154	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3155	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3156	energy_c = energy_c ? -energy_c : -256;
3157	max_energy = MAX(energy_a, energy_b);
3158	max_energy = MAX(max_energy, energy_c);
3159
3160	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3161	    "energy In A %d B %d C %d , and max %d\n",
3162	    energy_a, energy_b, energy_c, max_energy);
3163
3164	return max_energy;
3165}
3166
3167static void
3168iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
3169	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3170{
3171	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3172
3173	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3174	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3175
3176	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3177}
3178
3179/*
3180 * Retrieve the average noise (in dBm) among receivers.
3181 */
3182static int
3183iwm_get_noise(struct iwm_softc *sc,
3184    const struct iwm_mvm_statistics_rx_non_phy *stats)
3185{
3186	int i, total, nbant, noise;
3187
3188	total = nbant = noise = 0;
3189	for (i = 0; i < 3; i++) {
3190		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3191		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3192		    __func__,
3193		    i,
3194		    noise);
3195
3196		if (noise) {
3197			total += noise;
3198			nbant++;
3199		}
3200	}
3201
3202	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3203	    __func__, nbant, total);
3204#if 0
3205	/* There should be at least one antenna but check anyway. */
3206	return (nbant == 0) ? -127 : (total / nbant) - 107;
3207#else
3208	/* For now, just hard-code it to -96 to be safe */
3209	return (-96);
3210#endif
3211}
3212
3213/*
3214 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3215 *
3216 * Handles the actual data of the Rx packet from the fw
3217 */
3218static void
3219iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3220	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3221{
3222	struct ieee80211com *ic = &sc->sc_ic;
3223	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3224	struct ieee80211_frame *wh;
3225	struct ieee80211_node *ni;
3226	struct ieee80211_rx_stats rxs;
3227	struct mbuf *m;
3228	struct iwm_rx_phy_info *phy_info;
3229	struct iwm_rx_mpdu_res_start *rx_res;
3230	uint32_t len;
3231	uint32_t rx_pkt_status;
3232	int rssi;
3233
3234	bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
3235
3236	phy_info = &sc->sc_last_phy_info;
3237	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3238	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3239	len = le16toh(rx_res->byte_count);
3240	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3241
3242	m = data->m;
3243	m->m_data = pkt->data + sizeof(*rx_res);
3244	m->m_pkthdr.len = m->m_len = len;
3245
3246	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3247		device_printf(sc->sc_dev,
3248		    "dsp size out of range [0,20]: %d\n",
3249		    phy_info->cfg_phy_cnt);
3250		goto fail;
3251	}
3252
3253	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3254	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3255		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3256		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3257		goto fail;
3258	}
3259
3260	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3261		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3262	} else {
3263		rssi = iwm_mvm_calc_rssi(sc, phy_info);
3264	}
3265
3266	/* Note: RSSI is absolute (ie a -ve value) */
3267	if (rssi < IWM_MIN_DBM)
3268		rssi = IWM_MIN_DBM;
3269	else if (rssi > IWM_MAX_DBM)
3270		rssi = IWM_MAX_DBM;
3271
3272	/* Map it to relative value */
3273	rssi = rssi - sc->sc_noise;
3274
3275	/* replenish ring for the buffer we're going to feed to the sharks */
3276	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3277		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3278		    __func__);
3279		goto fail;
3280	}
3281
3282	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3283	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3284
3285	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3286
3287	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3288	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3289	    __func__,
3290	    le16toh(phy_info->channel),
3291	    le16toh(phy_info->phy_flags));
3292
3293	/*
3294	 * Populate an RX state struct with the provided information.
3295	 */
3296	bzero(&rxs, sizeof(rxs));
3297	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3298	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3299	rxs.c_ieee = le16toh(phy_info->channel);
3300	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3301		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3302	} else {
3303		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3304	}
3305
3306	/* rssi is in 1/2db units */
3307	rxs.rssi = rssi * 2;
3308	rxs.nf = sc->sc_noise;
3309
3310	if (ieee80211_radiotap_active_vap(vap)) {
3311		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3312
3313		tap->wr_flags = 0;
3314		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3315			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3316		tap->wr_chan_freq = htole16(rxs.c_freq);
3317		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3318		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3319		tap->wr_dbm_antsignal = (int8_t)rssi;
3320		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3321		tap->wr_tsft = phy_info->system_timestamp;
3322		switch (phy_info->rate) {
3323		/* CCK rates. */
3324		case  10: tap->wr_rate =   2; break;
3325		case  20: tap->wr_rate =   4; break;
3326		case  55: tap->wr_rate =  11; break;
3327		case 110: tap->wr_rate =  22; break;
3328		/* OFDM rates. */
3329		case 0xd: tap->wr_rate =  12; break;
3330		case 0xf: tap->wr_rate =  18; break;
3331		case 0x5: tap->wr_rate =  24; break;
3332		case 0x7: tap->wr_rate =  36; break;
3333		case 0x9: tap->wr_rate =  48; break;
3334		case 0xb: tap->wr_rate =  72; break;
3335		case 0x1: tap->wr_rate =  96; break;
3336		case 0x3: tap->wr_rate = 108; break;
3337		/* Unknown rate: should not happen. */
3338		default:  tap->wr_rate =   0;
3339		}
3340	}
3341
3342	IWM_UNLOCK(sc);
3343	if (ni != NULL) {
3344		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3345		ieee80211_input_mimo(ni, m, &rxs);
3346		ieee80211_free_node(ni);
3347	} else {
3348		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3349		ieee80211_input_mimo_all(ic, m, &rxs);
3350	}
3351	IWM_LOCK(sc);
3352
3353	return;
3354
3355fail:	counter_u64_add(ic->ic_ierrors, 1);
3356}
3357
3358static int
3359iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3360	struct iwm_node *in)
3361{
3362	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3363	struct ieee80211_node *ni = &in->in_ni;
3364	struct ieee80211vap *vap = ni->ni_vap;
3365	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3366	int failack = tx_resp->failure_frame;
3367
3368	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3369
3370	/* Update rate control statistics. */
3371	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3372	    __func__,
3373	    (int) le16toh(tx_resp->status.status),
3374	    (int) le16toh(tx_resp->status.sequence),
3375	    tx_resp->frame_count,
3376	    tx_resp->bt_kill_count,
3377	    tx_resp->failure_rts,
3378	    tx_resp->failure_frame,
3379	    le32toh(tx_resp->initial_rate),
3380	    (int) le16toh(tx_resp->wireless_media_time));
3381
3382	if (status != IWM_TX_STATUS_SUCCESS &&
3383	    status != IWM_TX_STATUS_DIRECT_DONE) {
3384		ieee80211_ratectl_tx_complete(vap, ni,
3385		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3386		return (1);
3387	} else {
3388		ieee80211_ratectl_tx_complete(vap, ni,
3389		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3390		return (0);
3391	}
3392}
3393
/*
 * TX completion handler: reclaim the descriptor named by the packet
 * header's (qid, idx), report status to rate control / net80211, and
 * restart transmission if the queue drains below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);

	/* Firmware responded; reset the watchdog. */
	sc->sc_tx_timer = 0;

	/* status != 0 means the frame was not ACKed. */
	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Mark the slot free before handing the mbuf up. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Un-stall the queue once it drops below the low watermark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3436
3437/*
3438 * transmit side
3439 */
3440
3441/*
3442 * Process a "command done" firmware notification.  This is where we wakeup
3443 * processes waiting for a synchronous command completion.
3444 * from if_iwn
3445 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake anyone sleeping on this command's ring slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/* Sanity check: completions should arrive in ring order. */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: allow the device to sleep again. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3485
3486#if 0
3487/*
3488 * necessary only for block ack mode
3489 */
/*
 * Write a frame's byte count into the TX scheduler's byte-count table
 * (currently compiled out; needed only for block-ack mode).
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	/* NOTE(review): origin of the +8 is unclear — kept from iwlwifi. */
	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Entry packs the station id in the top nibble, length below. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Duplicate low entries past the end of the table — presumably the
	 * hardware reads a wrapped window; confirm against iwlwifi.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
3517#endif
3518
3519/*
3520 * Take an 802.11 (non-n) rate, find the relevant rate
3521 * table entry.  return the index into in_ridx[].
3522 *
3523 * The caller then uses that index back into in_ridx
3524 * to figure out the rate index programmed /into/
3525 * the firmware for this given node.
3526 */
3527static int
3528iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3529    uint8_t rate)
3530{
3531	int i;
3532	uint8_t r;
3533
3534	for (i = 0; i < nitems(in->in_ridx); i++) {
3535		r = iwm_rates[in->in_ridx[i]].rate;
3536		if (rate == r)
3537			return (i);
3538	}
3539
3540	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3541	    "%s: couldn't find an entry for rate=%d\n",
3542	    __func__,
3543	    rate);
3544
3545	/* XXX Return the first */
3546	/* XXX TODO: have it return the /lowest/ */
3547	return (0);
3548}
3549
3550static int
3551iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3552{
3553	int i;
3554
3555	for (i = 0; i < nitems(iwm_rates); i++) {
3556		if (iwm_rates[i].rate == rate)
3557			return (i);
3558	}
3559	/* XXX error? */
3560	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3561	    "%s: couldn't find an entry for rate=%d\n",
3562	    __func__,
3563	    rate);
3564	return (0);
3565}
3566
3567/*
3568 * Fill in the rate related information for a transmit command.
3569 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type;
	int ridx, rate_flags;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * Pick the rate index: fixed rates (mgmt/mcast/ucast-fixed/EAPOL)
	 * come straight from the global table; normal data frames consult
	 * rate control and use the node's programmed rate set.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		/* EAPOL goes out at the (robust) management rate. */
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL\n", __func__);
	} else if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;

		/* for data frames, use RS table */
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Let the firmware's rate scaling take over from here. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);

		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
		    __func__, tp->mgmtrate);
	}

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: frame type=%d txrate %d\n",
	        __func__, type, iwm_rates[ridx].rate);

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3646
3647#define TB0_SIZE 16
/*
 * Queue one frame for transmission on AC queue 'ac'.
 *
 * Builds an IWM_TX_CMD in the ring's command buffer (header copied into
 * the command, then trimmed from the mbuf), maps the payload for DMA,
 * fills the TFD with command + payload segments, and kicks the ring's
 * write pointer.  Consumes 'm' on every path (including errors).
 * Must be called with the softc lock held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Rate selection; also sets initial_rate_index/STA_RATE in *tx. */
	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for large unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Power-save frame timeout depends on the mgmt subtype. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TBs 0+1 carry the command (split at TB0_SIZE); the rest are data. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3864
3865static int
3866iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3867    const struct ieee80211_bpf_params *params)
3868{
3869	struct ieee80211com *ic = ni->ni_ic;
3870	struct iwm_softc *sc = ic->ic_softc;
3871	int error = 0;
3872
3873	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3874	    "->%s begin\n", __func__);
3875
3876	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3877		m_freem(m);
3878		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3879		    "<-%s not RUNNING\n", __func__);
3880		return (ENETDOWN);
3881        }
3882
3883	IWM_LOCK(sc);
3884	/* XXX fix this */
3885        if (params == NULL) {
3886		error = iwm_tx(sc, m, ni, 0);
3887	} else {
3888		error = iwm_tx(sc, m, ni, 0);
3889	}
3890	sc->sc_tx_timer = 5;
3891	IWM_UNLOCK(sc);
3892
3893        return (error);
3894}
3895
3896/*
3897 * mvm/tx.c
3898 */
3899
3900/*
3901 * Note that there are transports that buffer frames before they reach
3902 * the firmware. This means that after flush_tx_path is called, the
3903 * queue might not be empty. The race-free way to handle this is to:
3904 * 1) set the station as draining
3905 * 2) flush the Tx path
3906 * 3) wait for the transport queues to be empty
3907 */
3908int
3909iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3910{
3911	int ret;
3912	struct iwm_tx_path_flush_cmd flush_cmd = {
3913		.queues_ctl = htole32(tfd_msk),
3914		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3915	};
3916
3917	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3918	    sizeof(flush_cmd), &flush_cmd);
3919	if (ret)
3920                device_printf(sc->sc_dev,
3921		    "Flushing tx queue failed: %d\n", ret);
3922	return ret;
3923}
3924
3925/*
3926 * BEGIN mvm/sta.c
3927 */
3928
/*
 * Send an IWM_ADD_STA command to the firmware and wait for its
 * completion; the firmware's status word is returned via *status.
 */
static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
	struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
{
	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
	    cmd, status);
}
3936
3937/* send station add/update command to firmware */
3938static int
3939iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
3940{
3941	struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
3942	int ret;
3943	uint32_t status;
3944
3945	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
3946
3947	add_sta_cmd.sta_id = IWM_STATION_ID;
3948	add_sta_cmd.mac_id_n_color
3949	    = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
3950	        IWM_DEFAULT_COLOR));
3951	if (!update) {
3952		int ac;
3953		for (ac = 0; ac < WME_NUM_AC; ac++) {
3954			add_sta_cmd.tfd_queue_msk |=
3955			    htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
3956		}
3957		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
3958	}
3959	add_sta_cmd.add_modify = update ? 1 : 0;
3960	add_sta_cmd.station_flags_msk
3961	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
3962	add_sta_cmd.tid_disable_tx = htole16(0xffff);
3963	if (update)
3964		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
3965
3966	status = IWM_ADD_STA_SUCCESS;
3967	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
3968	if (ret)
3969		return ret;
3970
3971	switch (status) {
3972	case IWM_ADD_STA_SUCCESS:
3973		break;
3974	default:
3975		ret = EIO;
3976		device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
3977		break;
3978	}
3979
3980	return ret;
3981}
3982
/* Add the BSS station to the firmware (initial add, not an update). */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
3988
/* Update an already-added station entry in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
3994
3995static int
3996iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
3997	const uint8_t *addr, uint16_t mac_id, uint16_t color)
3998{
3999	struct iwm_mvm_add_sta_cmd_v7 cmd;
4000	int ret;
4001	uint32_t status;
4002
4003	memset(&cmd, 0, sizeof(cmd));
4004	cmd.sta_id = sta->sta_id;
4005	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4006
4007	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4008	cmd.tid_disable_tx = htole16(0xffff);
4009
4010	if (addr)
4011		IEEE80211_ADDR_COPY(cmd.addr, addr);
4012
4013	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4014	if (ret)
4015		return ret;
4016
4017	switch (status) {
4018	case IWM_ADD_STA_SUCCESS:
4019		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
4020		    "%s: Internal station added.\n", __func__);
4021		return 0;
4022	default:
4023		device_printf(sc->sc_dev,
4024		    "%s: Add internal station failed, status=0x%x\n",
4025		    __func__, status);
4026		ret = EIO;
4027		break;
4028	}
4029	return ret;
4030}
4031
4032static int
4033iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4034{
4035	int ret;
4036
4037	sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
4038	sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
4039
4040	ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
4041	if (ret)
4042		return ret;
4043
4044	ret = iwm_mvm_add_int_sta_common(sc,
4045	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4046
4047	if (ret)
4048		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4049	return ret;
4050}
4051
4052/*
4053 * END mvm/sta.c
4054 */
4055
4056/*
4057 * BEGIN mvm/quota.c
4058 */
4059
4060static int
4061iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
4062{
4063	struct iwm_time_quota_cmd cmd;
4064	int i, idx, ret, num_active_macs, quota, quota_rem;
4065	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
4066	int n_ifs[IWM_MAX_BINDINGS] = {0, };
4067	uint16_t id;
4068
4069	memset(&cmd, 0, sizeof(cmd));
4070
4071	/* currently, PHY ID == binding ID */
4072	if (in) {
4073		id = in->in_phyctxt->id;
4074		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
4075		colors[id] = in->in_phyctxt->color;
4076
4077		if (1)
4078			n_ifs[id] = 1;
4079	}
4080
4081	/*
4082	 * The FW's scheduling session consists of
4083	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
4084	 * equally between all the bindings that require quota
4085	 */
4086	num_active_macs = 0;
4087	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4088		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4089		num_active_macs += n_ifs[i];
4090	}
4091
4092	quota = 0;
4093	quota_rem = 0;
4094	if (num_active_macs) {
4095		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
4096		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
4097	}
4098
4099	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4100		if (colors[i] < 0)
4101			continue;
4102
4103		cmd.quotas[idx].id_and_color =
4104			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4105
4106		if (n_ifs[i] <= 0) {
4107			cmd.quotas[idx].quota = htole32(0);
4108			cmd.quotas[idx].max_duration = htole32(0);
4109		} else {
4110			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4111			cmd.quotas[idx].max_duration = htole32(0);
4112		}
4113		idx++;
4114	}
4115
4116	/* Give the remainder of the session to the first binding */
4117	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4118
4119	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4120	    sizeof(cmd), &cmd);
4121	if (ret)
4122		device_printf(sc->sc_dev,
4123		    "%s: Failed to send quota: %d\n", __func__, ret);
4124	return ret;
4125}
4126
4127/*
4128 * END mvm/quota.c
4129 */
4130
4131/*
4132 * ieee80211 routines
4133 */
4134
4135/*
4136 * Change to AUTH state in 80211 state machine.  Roughly matches what
4137 * Linux does in bss_info_changed().
4138 */
4139static int
4140iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4141{
4142	struct ieee80211_node *ni;
4143	struct iwm_node *in;
4144	struct iwm_vap *iv = IWM_VAP(vap);
4145	uint32_t duration;
4146	int error;
4147
4148	/*
4149	 * XXX i have a feeling that the vap node is being
4150	 * freed from underneath us. Grr.
4151	 */
4152	ni = ieee80211_ref_node(vap->iv_bss);
4153	in = IWM_NODE(ni);
4154	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4155	    "%s: called; vap=%p, bss ni=%p\n",
4156	    __func__,
4157	    vap,
4158	    ni);
4159
4160	in->in_assoc = 0;
4161
4162	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
4163	if (error != 0)
4164		return error;
4165
4166	error = iwm_allow_mcast(vap, sc);
4167	if (error) {
4168		device_printf(sc->sc_dev,
4169		    "%s: failed to set multicast\n", __func__);
4170		goto out;
4171	}
4172
4173	/*
4174	 * This is where it deviates from what Linux does.
4175	 *
4176	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4177	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4178	 * and always does a mac_ctx_changed().
4179	 *
4180	 * The openbsd port doesn't attempt to do that - it reset things
4181	 * at odd states and does the add here.
4182	 *
4183	 * So, until the state handling is fixed (ie, we never reset
4184	 * the NIC except for a firmware failure, which should drag
4185	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4186	 * contexts that are required), let's do a dirty hack here.
4187	 */
4188	if (iv->is_uploaded) {
4189		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4190			device_printf(sc->sc_dev,
4191			    "%s: failed to update MAC\n", __func__);
4192			goto out;
4193		}
4194		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4195		    in->in_ni.ni_chan, 1, 1)) != 0) {
4196			device_printf(sc->sc_dev,
4197			    "%s: failed update phy ctxt\n", __func__);
4198			goto out;
4199		}
4200		in->in_phyctxt = &sc->sc_phyctxt[0];
4201
4202		if ((error = iwm_mvm_binding_update(sc, in)) != 0) {
4203			device_printf(sc->sc_dev,
4204			    "%s: binding update cmd\n", __func__);
4205			goto out;
4206		}
4207		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4208			device_printf(sc->sc_dev,
4209			    "%s: failed to update sta\n", __func__);
4210			goto out;
4211		}
4212	} else {
4213		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4214			device_printf(sc->sc_dev,
4215			    "%s: failed to add MAC\n", __func__);
4216			goto out;
4217		}
4218		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4219		    in->in_ni.ni_chan, 1, 1)) != 0) {
4220			device_printf(sc->sc_dev,
4221			    "%s: failed add phy ctxt!\n", __func__);
4222			error = ETIMEDOUT;
4223			goto out;
4224		}
4225		in->in_phyctxt = &sc->sc_phyctxt[0];
4226
4227		if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
4228			device_printf(sc->sc_dev,
4229			    "%s: binding add cmd\n", __func__);
4230			goto out;
4231		}
4232		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4233			device_printf(sc->sc_dev,
4234			    "%s: failed to add sta\n", __func__);
4235			goto out;
4236		}
4237	}
4238
4239	/*
4240	 * Prevent the FW from wandering off channel during association
4241	 * by "protecting" the session with a time event.
4242	 */
4243	/* XXX duration is in units of TU, not MS */
4244	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4245	iwm_mvm_protect_session(sc, in, duration, 500 /* XXX magic number */);
4246	DELAY(100);
4247
4248	error = 0;
4249out:
4250	ieee80211_free_node(ni);
4251	return (error);
4252}
4253
4254static int
4255iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4256{
4257	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4258	int error;
4259
4260	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4261		device_printf(sc->sc_dev,
4262		    "%s: failed to update STA\n", __func__);
4263		return error;
4264	}
4265
4266	in->in_assoc = 1;
4267	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4268		device_printf(sc->sc_dev,
4269		    "%s: failed to update MAC\n", __func__);
4270		return error;
4271	}
4272
4273	return 0;
4274}
4275
/*
 * Tear down the RUN-state connection.  Rather than unwinding the
 * firmware contexts one by one (which hangs the device, see below),
 * this flushes tx, fully resets the NIC and re-runs hardware init.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	mbufq_drain(&sc->sc_snd);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* Dead code below: the "proper" teardown kept for reference. */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4346
4347static struct ieee80211_node *
4348iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4349{
4350	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4351	    M_NOWAIT | M_ZERO);
4352}
4353
/*
 * Build the firmware link-quality (rate selection) command for the
 * given node from its negotiated legacy rate set.  Not 11n-aware.
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	/* The firmware table holds a fixed number of entries; refuse more. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/*
			 * NOTE(review): on this path in_ridx[i] is left at
			 * -1 (from the memset above), and the table-build
			 * loop below would then index iwm_rates[-1] out of
			 * bounds — confirm every negotiated rate maps, or
			 * guard the second loop.
			 */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4472
4473static int
4474iwm_media_change(struct ifnet *ifp)
4475{
4476	struct ieee80211vap *vap = ifp->if_softc;
4477	struct ieee80211com *ic = vap->iv_ic;
4478	struct iwm_softc *sc = ic->ic_softc;
4479	int error;
4480
4481	error = ieee80211_media_change(ifp);
4482	if (error != ENETRESET)
4483		return error;
4484
4485	IWM_LOCK(sc);
4486	if (ic->ic_nrunning > 0) {
4487		iwm_stop(sc);
4488		iwm_init(sc);
4489	}
4490	IWM_UNLOCK(sc);
4491	return error;
4492}
4493
4494
/*
 * net80211 state-change handler.  Drops the 802.11 lock and takes the
 * driver lock for the duration; the lock order dance around
 * iv_newstate() calls below is deliberate and order-sensitive.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * RUN -> INIT: run the net80211 transition first,
			 * then reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	/* Now act on the (possibly rewritten) target state. */
	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Power, beacon filtering, quotas, rates, then LEDs. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(sc, in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the net80211 implementation for the rest. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4632
/*
 * Task callback run when a firmware scan has ended; hands the
 * completion to net80211 for the first (only) vap.
 */
void
iwm_endscan_cb(void *arg, int pending)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
	    "%s: scan ended\n",
	    __func__);

	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
}
4645
4646/*
4647 * Aging and idle timeouts for the different possible scenarios
4648 * in default configuration
4649 */
/*
 * Rows are indexed by smart-fifo scenario, columns by timeout type;
 * each row pairs an aging timer with an idle timer, pre-swapped to
 * little-endian for the firmware command.
 */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4673
4674/*
4675 * Aging and idle timeouts for the different possible scenarios
4676 * in single BSS MAC configuration.
4677 */
/*
 * Same layout as iwm_sf_full_timeout_def (scenario rows, aging/idle
 * columns, little-endian values), but with the tighter timers used
 * when a single BSS MAC is configured.
 */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4701
4702static void
4703iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4704    struct ieee80211_node *ni)
4705{
4706	int i, j, watermark;
4707
4708	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4709
4710	/*
4711	 * If we are in association flow - check antenna configuration
4712	 * capabilities of the AP station, and choose the watermark accordingly.
4713	 */
4714	if (ni) {
4715		if (ni->ni_flags & IEEE80211_NODE_HT) {
4716#ifdef notyet
4717			if (ni->ni_rxmcs[2] != 0)
4718				watermark = IWM_SF_W_MARK_MIMO3;
4719			else if (ni->ni_rxmcs[1] != 0)
4720				watermark = IWM_SF_W_MARK_MIMO2;
4721			else
4722#endif
4723				watermark = IWM_SF_W_MARK_SISO;
4724		} else {
4725			watermark = IWM_SF_W_MARK_LEGACY;
4726		}
4727	/* default watermark value for unassociated mode. */
4728	} else {
4729		watermark = IWM_SF_W_MARK_MIMO2;
4730	}
4731	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4732
4733	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4734		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4735			sf_cmd->long_delay_timeouts[i][j] =
4736					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4737		}
4738	}
4739
4740	if (ni) {
4741		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4742		       sizeof(iwm_sf_full_timeout));
4743	} else {
4744		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4745		       sizeof(iwm_sf_full_timeout_def));
4746	}
4747}
4748
4749static int
4750iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4751{
4752	struct ieee80211com *ic = &sc->sc_ic;
4753	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4754	struct iwm_sf_cfg_cmd sf_cmd = {
4755		.state = htole32(IWM_SF_FULL_ON),
4756	};
4757	int ret = 0;
4758
4759	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4760		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4761
4762	switch (new_state) {
4763	case IWM_SF_UNINIT:
4764	case IWM_SF_INIT_OFF:
4765		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4766		break;
4767	case IWM_SF_FULL_ON:
4768		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4769		break;
4770	default:
4771		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4772		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4773			  new_state);
4774		return EINVAL;
4775	}
4776
4777	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4778				   sizeof(sf_cmd), &sf_cmd);
4779	return ret;
4780}
4781
4782static int
4783iwm_send_bt_init_conf(struct iwm_softc *sc)
4784{
4785	struct iwm_bt_coex_cmd bt_cmd;
4786
4787	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4788	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4789
4790	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4791	    &bt_cmd);
4792}
4793
/*
 * Send an MCC (mobile country code) update for regulatory domain
 * 'alpha2' and, under IWM_DEBUG, report what the firmware returned.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Firmware capability decides which response layout we get. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* The v1 command is a prefix of v2, so only the length differs. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		/*
		 * NOTE(review): mcc is read without le16toh() while
		 * n_channels goes through le32toh() — confirm whether
		 * the debug print below is correct on big-endian hosts.
		 */
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_NODE,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4861
4862static void
4863iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4864{
4865	struct iwm_host_cmd cmd = {
4866		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4867		.len = { sizeof(uint32_t), },
4868		.data = { &backoff, },
4869	};
4870
4871	if (iwm_send_cmd(sc, &cmd) != 0) {
4872		device_printf(sc->sc_dev,
4873		    "failed to change thermal tx backoff\n");
4874	}
4875}
4876
/*
 * Full hardware bring-up: run the INIT firmware image, reset, load the
 * regular image, then configure stations, PHY contexts, power, MCC,
 * scan and tx queues.  On any failure the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, ac;

	if ((error = iwm_start_hw(sc)) != 0) {
		printf("iwm_start_hw: failed %d\n", error);
		return error;
	}

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
	if (error) {
		device_printf(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
		device_printf(sc->sc_dev, "bt init conf failed\n");
		goto error;
	}

	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (error != 0) {
		device_printf(sc->sc_dev, "antenna config failed\n");
		goto error;
	}

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
		goto error;
	}

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
		device_printf(sc->sc_dev, "add_aux_sta failed\n");
		goto error;
	}

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	/* Initialize tx backoffs to the minimum. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
		iwm_mvm_tt_tx_backoff(sc, 0);

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* "ZZ" requests the current/default regulatory domain. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
		if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
			goto error;
	}

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
			goto error;
	}

	/* Enable Tx queues. */
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
		    iwm_mvm_ac_to_tx_fifo[ac]);
		if (error)
			goto error;
	}

	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
		goto error;
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
4984
4985/* Allow multicast from our BSSID. */
4986static int
4987iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4988{
4989	struct ieee80211_node *ni = vap->iv_bss;
4990	struct iwm_mcast_filter_cmd *cmd;
4991	size_t size;
4992	int error;
4993
4994	size = roundup(sizeof(*cmd), 4);
4995	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4996	if (cmd == NULL)
4997		return ENOMEM;
4998	cmd->filter_own = 1;
4999	cmd->port_id = 0;
5000	cmd->count = 0;
5001	cmd->pass_all = 1;
5002	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5003
5004	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5005	    IWM_CMD_SYNC, size, cmd);
5006	free(cmd, M_DEVBUF);
5007
5008	return (error);
5009}
5010
5011/*
5012 * ifnet interfaces
5013 */
5014
5015static void
5016iwm_init(struct iwm_softc *sc)
5017{
5018	int error;
5019
5020	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5021		return;
5022	}
5023	sc->sc_generation++;
5024	sc->sc_flags &= ~IWM_FLAG_STOPPED;
5025
5026	if ((error = iwm_init_hw(sc)) != 0) {
5027		printf("iwm_init_hw failed %d\n", error);
5028		iwm_stop(sc);
5029		return;
5030	}
5031
5032	/*
5033	 * Ok, firmware loaded and we are jogging
5034	 */
5035	sc->sc_flags |= IWM_FLAG_HW_INITED;
5036	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5037}
5038
5039static int
5040iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
5041{
5042	struct iwm_softc *sc;
5043	int error;
5044
5045	sc = ic->ic_softc;
5046
5047	IWM_LOCK(sc);
5048	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
5049		IWM_UNLOCK(sc);
5050		return (ENXIO);
5051	}
5052	error = mbufq_enqueue(&sc->sc_snd, m);
5053	if (error) {
5054		IWM_UNLOCK(sc);
5055		return (error);
5056	}
5057	iwm_start(sc);
5058	IWM_UNLOCK(sc);
5059	return (0);
5060}
5061
5062/*
5063 * Dequeue packets from sendq and call send.
5064 */
5065static void
5066iwm_start(struct iwm_softc *sc)
5067{
5068	struct ieee80211_node *ni;
5069	struct mbuf *m;
5070	int ac = 0;
5071
5072	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
5073	while (sc->qfullmsk == 0 &&
5074		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
5075		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
5076		if (iwm_tx(sc, m, ni, ac) != 0) {
5077			if_inc_counter(ni->ni_vap->iv_ifp,
5078			    IFCOUNTER_OERRORS, 1);
5079			ieee80211_free_node(ni);
5080			continue;
5081		}
5082		sc->sc_tx_timer = 15;
5083	}
5084	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5085}
5086
/*
 * Stop the hardware and mark the interface down.
 *
 * Clears the HW-inited flag, bumps the generation counter so stale
 * waiters can notice, stops the LED blink callout and TX watchdog,
 * shuts the device down and clears any in-progress scan state.
 * Expects the IWM lock to be held.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5099
/*
 * Per-second TX watchdog callout.
 *
 * sc_tx_timer is armed (to 15) whenever a frame is queued; when it
 * counts down to zero a transmission has stalled, so dump the firmware
 * error log (IWM_DEBUG builds) and restart all VAPs.  Otherwise the
 * callout re-arms itself for another second.
 */
static void
iwm_watchdog(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
			iwm_nic_error(sc);
#endif
			ieee80211_restart_all(ic);
			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
			/* Don't re-arm; the restart re-initialises us. */
			return;
		}
	}
	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
5119
5120static void
5121iwm_parent(struct ieee80211com *ic)
5122{
5123	struct iwm_softc *sc = ic->ic_softc;
5124	int startall = 0;
5125
5126	IWM_LOCK(sc);
5127	if (ic->ic_nrunning > 0) {
5128		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5129			iwm_init(sc);
5130			startall = 1;
5131		}
5132	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5133		iwm_stop(sc);
5134	IWM_UNLOCK(sc);
5135	if (startall)
5136		ieee80211_start_all(ic);
5137}
5138
5139/*
5140 * The interrupt side of things
5141 */
5142
5143/*
5144 * error dumping routines are from iwlwifi/mvm/utils.c
5145 */
5146
5147/*
5148 * Note: This structure is read from the device with IO accesses,
5149 * and the reading already does the endian conversion. As it is
5150 * read with uint32_t-sized accesses, any members with a different size
5151 * need to be ordered correctly though!
5152 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* timestamp of the compilation date and
				 * time */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5200
5201/*
5202 * UMAC error struct - relevant starting from family 8000 chip.
5203 * Note: This structure is read from the device with IO accesses,
5204 * and the reading already does the endian conversion. As it is
5205 * read with u32-sized accesses, any members with a different size
5206 * need to be ordered correctly though!
5207 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5225
/*
 * Layout constants for the firmware error log: the entries start one
 * uint32_t past the table base and each entry is seven uint32_t's.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5228
5229#ifdef IWM_DEBUG
/*
 * Human-readable names for firmware SYSASSERT error codes.  The final
 * "ADVANCED_SYSASSERT" entry is the catch-all iwm_desc_lookup()
 * returns when no code matches.
 *
 * Declared static const: the table is only consulted by
 * iwm_desc_lookup() in this file and never modified, so keep it out of
 * the kernel's global namespace and let it live in read-only data.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5251
5252static const char *
5253iwm_desc_lookup(uint32_t num)
5254{
5255	int i;
5256
5257	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5258		if (advanced_lookup[i].num == num)
5259			return advanced_lookup[i].name;
5260
5261	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5262	return advanced_lookup[i].name;
5263}
5264
/*
 * Dump the UMAC firmware error log (family-8000 parts and later).
 * Reads a struct iwm_umac_error_event_table from device memory at the
 * address the firmware advertised and prints every field.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Firmware-supplied pointers below 0x800000 are bogus. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a length in dwords, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5311
5312/*
5313 * Support for dumping the error log seemed like a good idea ...
5314 * but it's mostly hex junk and the only sensible thing is the
5315 * hw/ucode revision (which we know anyway).  Since it's here,
5316 * I'll just leave it in, just in case e.g. the Intel guys want to
5317 * help us decipher some "ADVANCED_SYSASSERT" later.
5318 */
/*
 * Dump the LMAC firmware error log: read the error event table from
 * device memory and print every field, then chain to the UMAC dump
 * when a UMAC table address is known.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Firmware-supplied pointers below 0x800000 are bogus. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a length in dwords, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5391#endif
5392
/*
 * Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT.
 * Wrapped in do/while(0) so the macro expands to exactly one statement
 * and the caller's trailing ';' no longer creates a stray empty
 * statement (the original definition carried its own semicolon).
 */
#define ADVANCE_RXQ(sc) do {						\
	(sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT;	\
} while (0)
5394
5395/*
5396 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5397 * Basic structure from if_iwn
5398 */
/*
 * Drain the RX ring: walk from our read index up to the firmware's
 * write index, dispatching each received packet (frames, command
 * responses and firmware notifications), then tell the firmware how
 * far we got.  Called from iwm_intr() with the IWM lock held.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t hw;

	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
	    BUS_DMASYNC_POSTREAD);

	/* Index of the last RX slot the firmware has filled (12 bits). */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;

	/*
	 * Process responses
	 */
	while (sc->rxq.cur != hw) {
		struct iwm_rx_ring *ring = &sc->rxq;
		struct iwm_rx_data *data = &ring->data[ring->cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx, code;

		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid marks firmware-originated packets; see the
		 * comment near the bottom of this loop. */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, ring->cur, hw);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		/* Wake any thread blocked in the notification-wait layer
		 * that is waiting for this response code. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break; }

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			stats = (void *)pkt->data;
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* NOTE(review): copies a fixed sizeof(sc->sc_cmd_resp)
			 * bytes out of pkt regardless of the actual response
			 * length -- presumably the RX buffer is always large
			 * enough, but verify against the buffer sizing. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Extract the two-letter country code chosen by
			 * the firmware. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break; }

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		/* Generic command responses: stash the reply for the
		 * thread waiting in the synchronous-command path. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_ABORT_UMAC:
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
5699
/*
 * Main interrupt handler.  Gathers interrupt causes either from the
 * ICT table (when enabled) or from the INT/FH_INT_STATUS registers,
 * then services firmware errors, hardware errors, rfkill, firmware
 * load completion and RX, re-enabling interrupts on the way out.
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we figure out what fired. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the ICT slot and advance. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Ack the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		/* XXX TODO: turn this into a callout/taskqueue */
		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		/* Wake the firmware-load thread sleeping on sc_fw. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5854
5855/*
5856 * Autoconf glue-sniffing
5857 */
/* PCI device IDs of the supported 3160/3165/7260/7265/8260 adapters. */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4
5869
/* Map each supported PCI device ID to its chip configuration. */
static const struct iwm_devices {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5885
5886static int
5887iwm_probe(device_t dev)
5888{
5889	int i;
5890
5891	for (i = 0; i < nitems(iwm_devices); i++) {
5892		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5893		    pci_get_device(dev) == iwm_devices[i].device) {
5894			device_set_desc(dev, iwm_devices[i].cfg->name);
5895			return (BUS_PROBE_DEFAULT);
5896		}
5897	}
5898
5899	return (ENXIO);
5900}
5901
5902static int
5903iwm_dev_check(device_t dev)
5904{
5905	struct iwm_softc *sc;
5906	uint16_t devid;
5907	int i;
5908
5909	sc = device_get_softc(dev);
5910
5911	devid = pci_get_device(dev);
5912	for (i = 0; i < nitems(iwm_devices); i++) {
5913		if (iwm_devices[i].device == devid) {
5914			sc->cfg = iwm_devices[i].cfg;
5915			return (0);
5916		}
5917	}
5918	device_printf(dev, "unknown adapter type\n");
5919	return ENXIO;
5920}
5921
5922/* PCI registers */
5923#define PCI_CFG_RETRY_TIMEOUT	0x041
5924
5925static int
5926iwm_pci_attach(device_t dev)
5927{
5928	struct iwm_softc *sc;
5929	int count, error, rid;
5930	uint16_t reg;
5931
5932	sc = device_get_softc(dev);
5933
5934	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5935	 * PCI Tx retries from interfering with C3 CPU state */
5936	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5937
5938	/* Enable bus-mastering and hardware bug workaround. */
5939	pci_enable_busmaster(dev);
5940	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5941	/* if !MSI */
5942	if (reg & PCIM_STATUS_INTxSTATE) {
5943		reg &= ~PCIM_STATUS_INTxSTATE;
5944	}
5945	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5946
5947	rid = PCIR_BAR(0);
5948	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5949	    RF_ACTIVE);
5950	if (sc->sc_mem == NULL) {
5951		device_printf(sc->sc_dev, "can't map mem space\n");
5952		return (ENXIO);
5953	}
5954	sc->sc_st = rman_get_bustag(sc->sc_mem);
5955	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5956
5957	/* Install interrupt handler. */
5958	count = 1;
5959	rid = 0;
5960	if (pci_alloc_msi(dev, &count) == 0)
5961		rid = 1;
5962	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5963	    (rid != 0 ? 0 : RF_SHAREABLE));
5964	if (sc->sc_irq == NULL) {
5965		device_printf(dev, "can't map interrupt\n");
5966			return (ENXIO);
5967	}
5968	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5969	    NULL, iwm_intr, sc, &sc->sc_ih);
5970	if (sc->sc_ih == NULL) {
5971		device_printf(dev, "can't establish interrupt");
5972			return (ENXIO);
5973	}
5974	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5975
5976	return (0);
5977}
5978
5979static void
5980iwm_pci_detach(device_t dev)
5981{
5982	struct iwm_softc *sc = device_get_softc(dev);
5983
5984	if (sc->sc_irq != NULL) {
5985		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5986		bus_release_resource(dev, SYS_RES_IRQ,
5987		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5988		pci_release_msi(dev);
5989        }
5990	if (sc->sc_mem != NULL)
5991		bus_release_resource(dev, SYS_RES_MEMORY,
5992		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5993}
5994
5995
5996
/*
 * Device attach: allocate software state, map the device, identify the
 * exact hardware revision, and allocate all DMA rings.  Firmware load
 * and net80211 attach are deferred to iwm_preinit(), which runs from a
 * config_intrhook once interrupts are available.
 *
 * Returns 0 on success, ENXIO on any failure (the partially-initialized
 * state is torn down via iwm_detach_local()).
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	/*
	 * Mark attached before anything can fail so that the fail path's
	 * iwm_detach_local() call actually runs (it bails if !sc_attached).
	 */
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	/* Both callouts are protected by the softc mutex. */
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response outstanding yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		/* Wait (up to 25ms) for the MAC clock before touching PRPH. */
		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			/* Fold the detected C-step into the stored hw_rev. */
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Reset all PHY contexts; channels are assigned later. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/*
	 * Defer firmware load and net80211 attach until interrupts are
	 * enabled; iwm_preinit() will run once that is the case.
	 */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	/* Note: ENXIO is returned regardless of the value of 'error'. */
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6193
6194static int
6195iwm_is_valid_ether_addr(uint8_t *addr)
6196{
6197	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6198
6199	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6200		return (FALSE);
6201
6202	return (TRUE);
6203}
6204
6205static int
6206iwm_update_edca(struct ieee80211com *ic)
6207{
6208	struct iwm_softc *sc = ic->ic_softc;
6209
6210	device_printf(sc->sc_dev, "%s: called\n", __func__);
6211	return (0);
6212}
6213
/*
 * Deferred attach, run from the config_intrhook established in
 * iwm_attach() once interrupts are available.  Brings the hardware up,
 * runs the "init" firmware once (which, presumably, populates
 * sc->nvm_data — confirm against iwm_run_init_mvm_ucode), then completes
 * the net80211 attach.  On failure the whole device is torn down.
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init firmware once, then stop the device again. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	/* Install the driver's net80211 method overrides. */
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_update_edca;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* Let the boot proceed; the hook must be removed on every path. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6284
6285/*
6286 * Attach the interface to 802.11 radiotap.
6287 */
6288static void
6289iwm_radiotap_attach(struct iwm_softc *sc)
6290{
6291        struct ieee80211com *ic = &sc->sc_ic;
6292
6293	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6294	    "->%s begin\n", __func__);
6295        ieee80211_radiotap_attach(ic,
6296            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6297                IWM_TX_RADIOTAP_PRESENT,
6298            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6299                IWM_RX_RADIOTAP_PRESENT);
6300	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6301	    "->%s end\n", __func__);
6302}
6303
/*
 * net80211 vap creation hook.  This driver supports a single vap at a
 * time; additional requests are refused.  M_WAITOK means the allocation
 * cannot fail.
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
		return NULL;
	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;            /* override default */
	/* Override with driver methods. */
	/* Save net80211's newstate so iwm_newstate can chain to it. */
	ivp->iv_newstate = vap->iv_newstate;
	vap->iv_newstate = iwm_newstate;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return vap;
}
6331
6332static void
6333iwm_vap_delete(struct ieee80211vap *vap)
6334{
6335	struct iwm_vap *ivp = IWM_VAP(vap);
6336
6337	ieee80211_ratectl_deinit(vap);
6338	ieee80211_vap_detach(vap);
6339	free(ivp, M_80211_VAP);
6340}
6341
/*
 * net80211 scan-start hook.  Kicks off a UMAC or LMAC firmware scan
 * depending on firmware capability.  The softc lock is released on
 * every path; on failure the scan is cancelled after unlocking.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
	}
	/* UMAC-capable firmware gets the newer scan command. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		IWM_UNLOCK(sc);
		/* Must not hold the softc lock when calling into net80211. */
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}
6369
/*
 * net80211 scan-end hook: stop LED blinking, restore the LED for an
 * associated vap, and make sure any still-running firmware scan is
 * stopped before clearing the running flag.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6398
/* net80211 multicast filter update hook: intentionally a no-op. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6403
/* net80211 set-channel hook: intentionally a no-op for this driver. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6408
/* net80211 per-channel scan hook: no-op, firmware drives the scan. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6413
/* net80211 minimum-dwell hook: intentionally a no-op. */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6419
/*
 * (Re)initialize the device: stop it and, if any interface is running,
 * bring it back up.  The BUSY flag serializes concurrent callers; we
 * sleep on sc_flags until the flag is clear, and wake waiters when done.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6436
6437static int
6438iwm_resume(device_t dev)
6439{
6440	struct iwm_softc *sc = device_get_softc(dev);
6441	int do_reinit = 0;
6442
6443	/*
6444	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6445	 * PCI Tx retries from interfering with C3 CPU state.
6446	 */
6447	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6448	iwm_init_task(device_get_softc(dev));
6449
6450	IWM_LOCK(sc);
6451	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6452		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6453		do_reinit = 1;
6454	}
6455	IWM_UNLOCK(sc);
6456
6457	if (do_reinit)
6458		ieee80211_resume_all(&sc->sc_ic);
6459
6460	return 0;
6461}
6462
6463static int
6464iwm_suspend(device_t dev)
6465{
6466	int do_stop = 0;
6467	struct iwm_softc *sc = device_get_softc(dev);
6468
6469	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6470
6471	ieee80211_suspend_all(&sc->sc_ic);
6472
6473	if (do_stop) {
6474		IWM_LOCK(sc);
6475		iwm_stop(sc);
6476		sc->sc_flags |= IWM_FLAG_SCANNING;
6477		IWM_UNLOCK(sc);
6478	}
6479
6480	return (0);
6481}
6482
/*
 * Common teardown for attach-failure and full detach.  'do_net80211'
 * selects whether net80211 state (tasks, ifdetach) is torn down as
 * well — it is 0 when called from the attach/preinit failure paths,
 * where ieee80211_ifattach() may not have run yet.  Idempotent via the
 * sc_attached flag.  Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Stop timers and the hardware before freeing anything. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Drain any queued frames, then destroy the softc lock last. */
	mbufq_drain(&sc->sc_snd);
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6537
6538static int
6539iwm_detach(device_t dev)
6540{
6541	struct iwm_softc *sc = device_get_softc(dev);
6542
6543	return (iwm_detach_local(sc, 1));
6544}
6545
6546static device_method_t iwm_pci_methods[] = {
6547        /* Device interface */
6548        DEVMETHOD(device_probe,         iwm_probe),
6549        DEVMETHOD(device_attach,        iwm_attach),
6550        DEVMETHOD(device_detach,        iwm_detach),
6551        DEVMETHOD(device_suspend,       iwm_suspend),
6552        DEVMETHOD(device_resume,        iwm_resume),
6553
6554        DEVMETHOD_END
6555};
6556
6557static driver_t iwm_pci_driver = {
6558        "iwm",
6559        iwm_pci_methods,
6560        sizeof (struct iwm_softc)
6561};
6562
static devclass_t iwm_devclass;

/* Register on the PCI bus and declare load-order module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6569