if_iwm.c revision 330224
1/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2
3/*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
23 *
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26 *
27 ***********************************************************************
28 *
29 * This file is provided under a dual BSD/GPLv2 license.  When using or
30 * redistributing this file, you may do so under either license.
31 *
32 * GPL LICENSE SUMMARY
33 *
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43 * General Public License for more details.
44 *
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
49 *
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
52 *
53 * Contact Information:
54 *  Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56 *
57 *
58 * BSD LICENSE
59 *
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 *
67 *  * Redistributions of source code must retain the above copyright
68 *    notice, this list of conditions and the following disclaimer.
69 *  * Redistributions in binary form must reproduce the above copyright
70 *    notice, this list of conditions and the following disclaimer in
71 *    the documentation and/or other materials provided with the
72 *    distribution.
73 *  * Neither the name Intel Corporation nor the names of its
74 *    contributors may be used to endorse or promote products derived
75 *    from this software without specific prior written permission.
76 *
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88 */
89
90/*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92 *
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
96 *
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104 */
105#include <sys/cdefs.h>
106__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 330224 2018-03-01 06:56:10Z eadler $");
107
108#include "opt_wlan.h"
109
110#include <sys/param.h>
111#include <sys/bus.h>
112#include <sys/conf.h>
113#include <sys/endian.h>
114#include <sys/firmware.h>
115#include <sys/kernel.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/module.h>
120#include <sys/proc.h>
121#include <sys/rman.h>
122#include <sys/socket.h>
123#include <sys/sockio.h>
124#include <sys/sysctl.h>
125#include <sys/linker.h>
126
127#include <machine/bus.h>
128#include <machine/endian.h>
129#include <machine/resource.h>
130
131#include <dev/pci/pcivar.h>
132#include <dev/pci/pcireg.h>
133
134#include <net/bpf.h>
135
136#include <net/if.h>
137#include <net/if_var.h>
138#include <net/if_arp.h>
139#include <net/if_dl.h>
140#include <net/if_media.h>
141#include <net/if_types.h>
142
143#include <netinet/in.h>
144#include <netinet/in_systm.h>
145#include <netinet/if_ether.h>
146#include <netinet/ip.h>
147
148#include <net80211/ieee80211_var.h>
149#include <net80211/ieee80211_regdomain.h>
150#include <net80211/ieee80211_ratectl.h>
151#include <net80211/ieee80211_radiotap.h>
152
153#include <dev/iwm/if_iwmreg.h>
154#include <dev/iwm/if_iwmvar.h>
155#include <dev/iwm/if_iwm_config.h>
156#include <dev/iwm/if_iwm_debug.h>
157#include <dev/iwm/if_iwm_notif_wait.h>
158#include <dev/iwm/if_iwm_util.h>
159#include <dev/iwm/if_iwm_binding.h>
160#include <dev/iwm/if_iwm_phy_db.h>
161#include <dev/iwm/if_iwm_mac_ctxt.h>
162#include <dev/iwm/if_iwm_phy_ctxt.h>
163#include <dev/iwm/if_iwm_time_event.h>
164#include <dev/iwm/if_iwm_power.h>
165#include <dev/iwm/if_iwm_scan.h>
166#include <dev/iwm/if_iwm_sta.h>
167
168#include <dev/iwm/if_iwm_pcie_trans.h>
169#include <dev/iwm/if_iwm_led.h>
170#include <dev/iwm/if_iwm_fw.h>
171
172/* From DragonflyBSD */
173#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
174
/*
 * Channel numbers the NVM may enable on pre-8000-series devices
 * (presumably 3160/7260/7265 class — confirm against if_iwm_config.h).
 * Indexed by NVM channel slot; actual availability is gated by the
 * per-channel flags read from the NVM at attach time.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The NVM parsing code assumes at most IWM_NUM_CHANNELS entries. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
185
/*
 * Channel numbers the NVM may enable on 8000-series devices.
 * Superset of iwm_nvm_channels: adds 5 GHz channels 68-96 (partial)
 * and 169-181.
 */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* The NVM parsing code assumes at most IWM_NUM_CHANNELS_8000 entries. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
196
197#define IWM_NUM_2GHZ_CHANNELS	14
198#define IWM_N_HW_ADDR_MASK	0xF
199
200/*
201 * XXX For now, there's simply a fixed set of rate table entries
202 * that are populated.
203 */
const struct iwm_rate {
	uint8_t rate;	/* net80211-style rate in 500 Kb/s units (2 == 1 Mb/s) */
	uint8_t plcp;	/* PLCP signal value the firmware expects for this rate */
} iwm_rates[] = {
	/* CCK (802.11b) rates — indices [0, IWM_RIDX_OFDM). */
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	/* OFDM (802.11a/g) rates — indices [IWM_RIDX_OFDM, IWM_RIDX_MAX]. */
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Index helpers into iwm_rates[]: CCK entries first, then OFDM. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
226
/* One NVM section image as read from the device. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents (owned by the NVM code) */
};
231
/* How long to wait (in ticks) for the firmware ALIVE notification. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz
/* How long to wait (in ticks) for init-ucode calibration to complete. */
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)

/* State captured from the firmware's ALIVE notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a valid ALIVE was received */
	uint32_t scd_base_addr;	/* scheduler base address reported by the fw */
};
239
240static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
241static int	iwm_firmware_store_section(struct iwm_softc *,
242                                           enum iwm_ucode_type,
243                                           const uint8_t *, size_t);
244static int	iwm_set_default_calib(struct iwm_softc *, const void *);
245static void	iwm_fw_info_free(struct iwm_fw_info *);
246static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
247static int	iwm_alloc_fwmem(struct iwm_softc *);
248static int	iwm_alloc_sched(struct iwm_softc *);
249static int	iwm_alloc_kw(struct iwm_softc *);
250static int	iwm_alloc_ict(struct iwm_softc *);
251static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
255                                  int);
256static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
257static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258static void	iwm_enable_interrupts(struct iwm_softc *);
259static void	iwm_restore_interrupts(struct iwm_softc *);
260static void	iwm_disable_interrupts(struct iwm_softc *);
261static void	iwm_ict_reset(struct iwm_softc *);
262static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
263static void	iwm_stop_device(struct iwm_softc *);
264static void	iwm_mvm_nic_config(struct iwm_softc *);
265static int	iwm_nic_rx_init(struct iwm_softc *);
266static int	iwm_nic_tx_init(struct iwm_softc *);
267static int	iwm_nic_init(struct iwm_softc *);
268static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
269static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
270                                   uint16_t, uint8_t *, uint16_t *);
271static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
272				     uint16_t *, uint32_t);
273static uint32_t	iwm_eeprom_channel_flags(uint16_t);
274static void	iwm_add_channel_band(struct iwm_softc *,
275		    struct ieee80211_channel[], int, int *, int, size_t,
276		    const uint8_t[]);
277static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
278		    struct ieee80211_channel[]);
279static struct iwm_nvm_data *
280	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281			   const uint16_t *, const uint16_t *,
282			   const uint16_t *, const uint16_t *,
283			   const uint16_t *);
284static void	iwm_free_nvm_data(struct iwm_nvm_data *);
285static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
286					       struct iwm_nvm_data *,
287					       const uint16_t *,
288					       const uint16_t *);
289static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
290			    const uint16_t *);
291static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
292static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
293				  const uint16_t *);
294static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
295				   const uint16_t *);
296static void	iwm_set_radio_cfg(const struct iwm_softc *,
297				  struct iwm_nvm_data *, uint32_t);
298static struct iwm_nvm_data *
299	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
300static int	iwm_nvm_init(struct iwm_softc *);
301static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
302				      const struct iwm_fw_desc *);
303static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
304					     bus_addr_t, uint32_t);
305static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
306						const struct iwm_fw_sects *,
307						int, int *);
308static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
309					   const struct iwm_fw_sects *,
310					   int, int *);
311static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
312					       const struct iwm_fw_sects *);
313static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
314					  const struct iwm_fw_sects *);
315static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
316static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
317static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
318static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
319                                              enum iwm_ucode_type);
320static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
321static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
322static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
323					    struct iwm_rx_phy_info *);
324static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
325                                      struct iwm_rx_packet *);
326static int	iwm_get_noise(struct iwm_softc *,
327		    const struct iwm_mvm_statistics_rx_non_phy *);
328static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
329		    struct iwm_rx_packet *);
330static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
331				    uint32_t, boolean_t);
332static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
333                                         struct iwm_rx_packet *,
334				         struct iwm_node *);
335static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
336static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
337#if 0
338static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
339                                 uint16_t);
340#endif
341static const struct iwm_rate *
342	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
343			struct mbuf *, struct iwm_tx_cmd *);
344static int	iwm_tx(struct iwm_softc *, struct mbuf *,
345                       struct ieee80211_node *, int);
346static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
347			     const struct ieee80211_bpf_params *);
348static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
349static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
350static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
351static int	iwm_release(struct iwm_softc *, struct iwm_node *);
352static struct ieee80211_node *
353		iwm_node_alloc(struct ieee80211vap *,
354		               const uint8_t[IEEE80211_ADDR_LEN]);
355static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
356static int	iwm_media_change(struct ifnet *);
357static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
358static void	iwm_endscan_cb(void *, int);
359static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
360					struct iwm_sf_cfg_cmd *,
361					struct ieee80211_node *);
362static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
363static int	iwm_send_bt_init_conf(struct iwm_softc *);
364static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
365static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
366static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
367static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
368static int	iwm_init_hw(struct iwm_softc *);
369static void	iwm_init(struct iwm_softc *);
370static void	iwm_start(struct iwm_softc *);
371static void	iwm_stop(struct iwm_softc *);
372static void	iwm_watchdog(void *);
373static void	iwm_parent(struct ieee80211com *);
374#ifdef IWM_DEBUG
375static const char *
376		iwm_desc_lookup(uint32_t);
377static void	iwm_nic_error(struct iwm_softc *);
378static void	iwm_nic_umac_error(struct iwm_softc *);
379#endif
380static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
381static void	iwm_notif_intr(struct iwm_softc *);
382static void	iwm_intr(void *);
383static int	iwm_attach(device_t);
384static int	iwm_is_valid_ether_addr(uint8_t *);
385static void	iwm_preinit(void *);
386static int	iwm_detach_local(struct iwm_softc *sc, int);
387static void	iwm_init_task(void *);
388static void	iwm_radiotap_attach(struct iwm_softc *);
389static struct ieee80211vap *
390		iwm_vap_create(struct ieee80211com *,
391		               const char [IFNAMSIZ], int,
392		               enum ieee80211_opmode, int,
393		               const uint8_t [IEEE80211_ADDR_LEN],
394		               const uint8_t [IEEE80211_ADDR_LEN]);
395static void	iwm_vap_delete(struct ieee80211vap *);
396static void	iwm_xmit_queue_drain(struct iwm_softc *);
397static void	iwm_scan_start(struct ieee80211com *);
398static void	iwm_scan_end(struct ieee80211com *);
399static void	iwm_update_mcast(struct ieee80211com *);
400static void	iwm_set_channel(struct ieee80211com *);
401static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
402static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
403static int	iwm_detach(device_t);
404
405static int	iwm_lar_disable = 0;
406TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
407
408/*
409 * Firmware parser.
410 */
411
412static int
413iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
414{
415	const struct iwm_fw_cscheme_list *l = (const void *)data;
416
417	if (dlen < sizeof(*l) ||
418	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
419		return EINVAL;
420
421	/* we don't actually store anything for now, always use s/w crypto */
422
423	return 0;
424}
425
426static int
427iwm_firmware_store_section(struct iwm_softc *sc,
428    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
429{
430	struct iwm_fw_sects *fws;
431	struct iwm_fw_desc *fwone;
432
433	if (type >= IWM_UCODE_TYPE_MAX)
434		return EINVAL;
435	if (dlen < sizeof(uint32_t))
436		return EINVAL;
437
438	fws = &sc->sc_fw.fw_sects[type];
439	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
440		return EINVAL;
441
442	fwone = &fws->fw_sect[fws->fw_count];
443
444	/* first 32bit are device load offset */
445	memcpy(&fwone->offset, data, sizeof(uint32_t));
446
447	/* rest is data */
448	fwone->data = data + sizeof(uint32_t);
449	fwone->len = dlen - sizeof(uint32_t);
450
451	fws->fw_count++;
452
453	return 0;
454}
455
/* Scan-channel count assumed until the firmware reports its own. */
#define IWM_DEFAULT_SCAN_CHANNELS 40

/* iwlwifi: iwl-drv.c */
/* Wire format of the DEF_CALIB TLV: per-image calibration triggers. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* which ucode image this applies to */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
463
464static int
465iwm_set_default_calib(struct iwm_softc *sc, const void *data)
466{
467	const struct iwm_tlv_calib_data *def_calib = data;
468	uint32_t ucode_type = le32toh(def_calib->ucode_type);
469
470	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
471		device_printf(sc->sc_dev,
472		    "Wrong ucode_type %u for default "
473		    "calibration.\n", ucode_type);
474		return EINVAL;
475	}
476
477	sc->sc_default_calib[ucode_type].flow_trigger =
478	    def_calib->calib.flow_trigger;
479	sc->sc_default_calib[ucode_type].event_trigger =
480	    def_calib->calib.event_trigger;
481
482	return 0;
483}
484
485static int
486iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
487			struct iwm_ucode_capabilities *capa)
488{
489	const struct iwm_ucode_api *ucode_api = (const void *)data;
490	uint32_t api_index = le32toh(ucode_api->api_index);
491	uint32_t api_flags = le32toh(ucode_api->api_flags);
492	int i;
493
494	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
495		device_printf(sc->sc_dev,
496		    "api flags index %d larger than supported by driver\n",
497		    api_index);
498		/* don't return an error so we can load FW that has more bits */
499		return 0;
500	}
501
502	for (i = 0; i < 32; i++) {
503		if (api_flags & (1U << i))
504			setbit(capa->enabled_api, i + 32 * api_index);
505	}
506
507	return 0;
508}
509
510static int
511iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
512			   struct iwm_ucode_capabilities *capa)
513{
514	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
515	uint32_t api_index = le32toh(ucode_capa->api_index);
516	uint32_t api_flags = le32toh(ucode_capa->api_capa);
517	int i;
518
519	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
520		device_printf(sc->sc_dev,
521		    "capa flags index %d larger than supported by driver\n",
522		    api_index);
523		/* don't return an error so we can load FW that has more bits */
524		return 0;
525	}
526
527	for (i = 0; i < 32; i++) {
528		if (api_flags & (1U << i))
529			setbit(capa->enabled_capa, i + 32 * api_index);
530	}
531
532	return 0;
533}
534
535static void
536iwm_fw_info_free(struct iwm_fw_info *fw)
537{
538	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
539	fw->fw_fp = NULL;
540	/* don't touch fw->fw_status */
541	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
542}
543
544static int
545iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
546{
547	struct iwm_fw_info *fw = &sc->sc_fw;
548	const struct iwm_tlv_ucode_header *uhdr;
549	const struct iwm_ucode_tlv *tlv;
550	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
551	enum iwm_ucode_tlv_type tlv_type;
552	const struct firmware *fwp;
553	const uint8_t *data;
554	uint32_t tlv_len;
555	uint32_t usniffer_img;
556	const uint8_t *tlv_data;
557	uint32_t paging_mem_size;
558	int num_of_cpus;
559	int error = 0;
560	size_t len;
561
562	if (fw->fw_status == IWM_FW_STATUS_DONE &&
563	    ucode_type != IWM_UCODE_INIT)
564		return 0;
565
566	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
567		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
568	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
569
570	if (fw->fw_fp != NULL)
571		iwm_fw_info_free(fw);
572
573	/*
574	 * Load firmware into driver memory.
575	 * fw_fp will be set.
576	 */
577	IWM_UNLOCK(sc);
578	fwp = firmware_get(sc->cfg->fw_name);
579	IWM_LOCK(sc);
580	if (fwp == NULL) {
581		device_printf(sc->sc_dev,
582		    "could not read firmware %s (error %d)\n",
583		    sc->cfg->fw_name, error);
584		goto out;
585	}
586	fw->fw_fp = fwp;
587
588	/* (Re-)Initialize default values. */
589	capa->flags = 0;
590	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
591	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
592	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
593	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
594	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
595
596	/*
597	 * Parse firmware contents
598	 */
599
600	uhdr = (const void *)fw->fw_fp->data;
601	if (*(const uint32_t *)fw->fw_fp->data != 0
602	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
603		device_printf(sc->sc_dev, "invalid firmware %s\n",
604		    sc->cfg->fw_name);
605		error = EINVAL;
606		goto out;
607	}
608
609	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
610	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
611	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
612	    IWM_UCODE_API(le32toh(uhdr->ver)));
613	data = uhdr->data;
614	len = fw->fw_fp->datasize - sizeof(*uhdr);
615
616	while (len >= sizeof(*tlv)) {
617		len -= sizeof(*tlv);
618		tlv = (const void *)data;
619
620		tlv_len = le32toh(tlv->length);
621		tlv_type = le32toh(tlv->type);
622		tlv_data = tlv->data;
623
624		if (len < tlv_len) {
625			device_printf(sc->sc_dev,
626			    "firmware too short: %zu bytes\n",
627			    len);
628			error = EINVAL;
629			goto parse_out;
630		}
631		len -= roundup2(tlv_len, 4);
632		data += sizeof(tlv) + roundup2(tlv_len, 4);
633
634		switch ((int)tlv_type) {
635		case IWM_UCODE_TLV_PROBE_MAX_LEN:
636			if (tlv_len != sizeof(uint32_t)) {
637				device_printf(sc->sc_dev,
638				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
639				    __func__,
640				    (int) tlv_len);
641				error = EINVAL;
642				goto parse_out;
643			}
644			capa->max_probe_length =
645			    le32_to_cpup((const uint32_t *)tlv_data);
646			/* limit it to something sensible */
647			if (capa->max_probe_length >
648			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
649				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
650				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
651				    "ridiculous\n", __func__);
652				error = EINVAL;
653				goto parse_out;
654			}
655			break;
656		case IWM_UCODE_TLV_PAN:
657			if (tlv_len) {
658				device_printf(sc->sc_dev,
659				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
660				    __func__,
661				    (int) tlv_len);
662				error = EINVAL;
663				goto parse_out;
664			}
665			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
666			break;
667		case IWM_UCODE_TLV_FLAGS:
668			if (tlv_len < sizeof(uint32_t)) {
669				device_printf(sc->sc_dev,
670				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
671				    __func__,
672				    (int) tlv_len);
673				error = EINVAL;
674				goto parse_out;
675			}
676			if (tlv_len % sizeof(uint32_t)) {
677				device_printf(sc->sc_dev,
678				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
679				    __func__,
680				    (int) tlv_len);
681				error = EINVAL;
682				goto parse_out;
683			}
684			/*
685			 * Apparently there can be many flags, but Linux driver
686			 * parses only the first one, and so do we.
687			 *
688			 * XXX: why does this override IWM_UCODE_TLV_PAN?
689			 * Intentional or a bug?  Observations from
690			 * current firmware file:
691			 *  1) TLV_PAN is parsed first
692			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
693			 * ==> this resets TLV_PAN to itself... hnnnk
694			 */
695			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
696			break;
697		case IWM_UCODE_TLV_CSCHEME:
698			if ((error = iwm_store_cscheme(sc,
699			    tlv_data, tlv_len)) != 0) {
700				device_printf(sc->sc_dev,
701				    "%s: iwm_store_cscheme(): returned %d\n",
702				    __func__,
703				    error);
704				goto parse_out;
705			}
706			break;
707		case IWM_UCODE_TLV_NUM_OF_CPU:
708			if (tlv_len != sizeof(uint32_t)) {
709				device_printf(sc->sc_dev,
710				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
711				    __func__,
712				    (int) tlv_len);
713				error = EINVAL;
714				goto parse_out;
715			}
716			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
717			if (num_of_cpus == 2) {
718				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
719					TRUE;
720				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
721					TRUE;
722				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
723					TRUE;
724			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
725				device_printf(sc->sc_dev,
726				    "%s: Driver supports only 1 or 2 CPUs\n",
727				    __func__);
728				error = EINVAL;
729				goto parse_out;
730			}
731			break;
732		case IWM_UCODE_TLV_SEC_RT:
733			if ((error = iwm_firmware_store_section(sc,
734			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
735				device_printf(sc->sc_dev,
736				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
737				    __func__,
738				    error);
739				goto parse_out;
740			}
741			break;
742		case IWM_UCODE_TLV_SEC_INIT:
743			if ((error = iwm_firmware_store_section(sc,
744			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
745				device_printf(sc->sc_dev,
746				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
747				    __func__,
748				    error);
749				goto parse_out;
750			}
751			break;
752		case IWM_UCODE_TLV_SEC_WOWLAN:
753			if ((error = iwm_firmware_store_section(sc,
754			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
755				device_printf(sc->sc_dev,
756				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
757				    __func__,
758				    error);
759				goto parse_out;
760			}
761			break;
762		case IWM_UCODE_TLV_DEF_CALIB:
763			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
764				device_printf(sc->sc_dev,
765				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
766				    __func__,
767				    (int) tlv_len,
768				    (int) sizeof(struct iwm_tlv_calib_data));
769				error = EINVAL;
770				goto parse_out;
771			}
772			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
773				device_printf(sc->sc_dev,
774				    "%s: iwm_set_default_calib() failed: %d\n",
775				    __func__,
776				    error);
777				goto parse_out;
778			}
779			break;
780		case IWM_UCODE_TLV_PHY_SKU:
781			if (tlv_len != sizeof(uint32_t)) {
782				error = EINVAL;
783				device_printf(sc->sc_dev,
784				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
785				    __func__,
786				    (int) tlv_len);
787				goto parse_out;
788			}
789			sc->sc_fw.phy_config =
790			    le32_to_cpup((const uint32_t *)tlv_data);
791			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
792						  IWM_FW_PHY_CFG_TX_CHAIN) >>
793						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
794			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
795						  IWM_FW_PHY_CFG_RX_CHAIN) >>
796						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
797			break;
798
799		case IWM_UCODE_TLV_API_CHANGES_SET: {
800			if (tlv_len != sizeof(struct iwm_ucode_api)) {
801				error = EINVAL;
802				goto parse_out;
803			}
804			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
805				error = EINVAL;
806				goto parse_out;
807			}
808			break;
809		}
810
811		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
812			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
813				error = EINVAL;
814				goto parse_out;
815			}
816			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
817				error = EINVAL;
818				goto parse_out;
819			}
820			break;
821		}
822
823		case 48: /* undocumented TLV */
824		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
825		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
826			/* ignore, not used by current driver */
827			break;
828
829		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
830			if ((error = iwm_firmware_store_section(sc,
831			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
832			    tlv_len)) != 0)
833				goto parse_out;
834			break;
835
836		case IWM_UCODE_TLV_PAGING:
837			if (tlv_len != sizeof(uint32_t)) {
838				error = EINVAL;
839				goto parse_out;
840			}
841			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
842
843			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
844			    "%s: Paging: paging enabled (size = %u bytes)\n",
845			    __func__, paging_mem_size);
846			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
847				device_printf(sc->sc_dev,
848					"%s: Paging: driver supports up to %u bytes for paging image\n",
849					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
850				error = EINVAL;
851				goto out;
852			}
853			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
854				device_printf(sc->sc_dev,
855				    "%s: Paging: image isn't multiple %u\n",
856				    __func__, IWM_FW_PAGING_SIZE);
857				error = EINVAL;
858				goto out;
859			}
860
861			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
862			    paging_mem_size;
863			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
864			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
865			    paging_mem_size;
866			break;
867
868		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
869			if (tlv_len != sizeof(uint32_t)) {
870				error = EINVAL;
871				goto parse_out;
872			}
873			capa->n_scan_channels =
874			    le32_to_cpup((const uint32_t *)tlv_data);
875			break;
876
877		case IWM_UCODE_TLV_FW_VERSION:
878			if (tlv_len != sizeof(uint32_t) * 3) {
879				error = EINVAL;
880				goto parse_out;
881			}
882			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
883			    "%d.%d.%d",
884			    le32toh(((const uint32_t *)tlv_data)[0]),
885			    le32toh(((const uint32_t *)tlv_data)[1]),
886			    le32toh(((const uint32_t *)tlv_data)[2]));
887			break;
888
889		case IWM_UCODE_TLV_FW_MEM_SEG:
890			break;
891
892		default:
893			device_printf(sc->sc_dev,
894			    "%s: unknown firmware section %d, abort\n",
895			    __func__, tlv_type);
896			error = EINVAL;
897			goto parse_out;
898		}
899	}
900
901	KASSERT(error == 0, ("unhandled error"));
902
903 parse_out:
904	if (error) {
905		device_printf(sc->sc_dev, "firmware parse error %d, "
906		    "section type %d\n", error, tlv_type);
907	}
908
909 out:
910	if (error) {
911		fw->fw_status = IWM_FW_STATUS_NONE;
912		if (fw->fw_fp != NULL)
913			iwm_fw_info_free(fw);
914	} else
915		fw->fw_status = IWM_FW_STATUS_DONE;
916	wakeup(&sc->sc_fw);
917
918	return error;
919}
920
921/*
922 * DMA resource routines
923 */
924
925/* fwmem is used to load firmware onto the card */
926static int
927iwm_alloc_fwmem(struct iwm_softc *sc)
928{
929	/* Must be aligned on a 16-byte boundary. */
930	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
931	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
932}
933
934/* tx scheduler rings.  not used? */
935static int
936iwm_alloc_sched(struct iwm_softc *sc)
937{
938	/* TX scheduler rings must be aligned on a 1KB boundary. */
939	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
940	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
941}
942
943/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
944static int
945iwm_alloc_kw(struct iwm_softc *sc)
946{
947	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
948}
949
950/* interrupt cause table */
951static int
952iwm_alloc_ict(struct iwm_softc *sc)
953{
954	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
955	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
956}
957
958static int
959iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
960{
961	bus_size_t size;
962	int i, error;
963
964	ring->cur = 0;
965
966	/* Allocate RX descriptors (256-byte aligned). */
967	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
968	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
969	if (error != 0) {
970		device_printf(sc->sc_dev,
971		    "could not allocate RX ring DMA memory\n");
972		goto fail;
973	}
974	ring->desc = ring->desc_dma.vaddr;
975
976	/* Allocate RX status area (16-byte aligned). */
977	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
978	    sizeof(*ring->stat), 16);
979	if (error != 0) {
980		device_printf(sc->sc_dev,
981		    "could not allocate RX status DMA memory\n");
982		goto fail;
983	}
984	ring->stat = ring->stat_dma.vaddr;
985
986        /* Create RX buffer DMA tag. */
987        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
988            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
989            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
990        if (error != 0) {
991                device_printf(sc->sc_dev,
992                    "%s: could not create RX buf DMA tag, error %d\n",
993                    __func__, error);
994                goto fail;
995        }
996
997	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
998	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
999	if (error != 0) {
1000		device_printf(sc->sc_dev,
1001		    "%s: could not create RX buf DMA map, error %d\n",
1002		    __func__, error);
1003		goto fail;
1004	}
1005	/*
1006	 * Allocate and map RX buffers.
1007	 */
1008	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1009		struct iwm_rx_data *data = &ring->data[i];
1010		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1011		if (error != 0) {
1012			device_printf(sc->sc_dev,
1013			    "%s: could not create RX buf DMA map, error %d\n",
1014			    __func__, error);
1015			goto fail;
1016		}
1017		data->m = NULL;
1018
1019		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1020			goto fail;
1021		}
1022	}
1023	return 0;
1024
1025fail:	iwm_free_rx_ring(sc, ring);
1026	return error;
1027}
1028
1029static void
1030iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1031{
1032	/* Reset the ring state */
1033	ring->cur = 0;
1034
1035	/*
1036	 * The hw rx ring index in shared memory must also be cleared,
1037	 * otherwise the discrepancy can cause reprocessing chaos.
1038	 */
1039	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1040}
1041
1042static void
1043iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1044{
1045	int i;
1046
1047	iwm_dma_contig_free(&ring->desc_dma);
1048	iwm_dma_contig_free(&ring->stat_dma);
1049
1050	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1051		struct iwm_rx_data *data = &ring->data[i];
1052
1053		if (data->m != NULL) {
1054			bus_dmamap_sync(ring->data_dmat, data->map,
1055			    BUS_DMASYNC_POSTREAD);
1056			bus_dmamap_unload(ring->data_dmat, data->map);
1057			m_freem(data->m);
1058			data->m = NULL;
1059		}
1060		if (data->map != NULL) {
1061			bus_dmamap_destroy(ring->data_dmat, data->map);
1062			data->map = NULL;
1063		}
1064	}
1065	if (ring->spare_map != NULL) {
1066		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1067		ring->spare_map = NULL;
1068	}
1069	if (ring->data_dmat != NULL) {
1070		bus_dma_tag_destroy(ring->data_dmat);
1071		ring->data_dmat = NULL;
1072	}
1073}
1074
1075static int
1076iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1077{
1078	bus_addr_t paddr;
1079	bus_size_t size;
1080	size_t maxsize;
1081	int nsegments;
1082	int i, error;
1083
1084	ring->qid = qid;
1085	ring->queued = 0;
1086	ring->cur = 0;
1087
1088	/* Allocate TX descriptors (256-byte aligned). */
1089	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1090	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1091	if (error != 0) {
1092		device_printf(sc->sc_dev,
1093		    "could not allocate TX ring DMA memory\n");
1094		goto fail;
1095	}
1096	ring->desc = ring->desc_dma.vaddr;
1097
1098	/*
1099	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1100	 * to allocate commands space for other rings.
1101	 */
1102	if (qid > IWM_MVM_CMD_QUEUE)
1103		return 0;
1104
1105	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1106	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1107	if (error != 0) {
1108		device_printf(sc->sc_dev,
1109		    "could not allocate TX cmd DMA memory\n");
1110		goto fail;
1111	}
1112	ring->cmd = ring->cmd_dma.vaddr;
1113
1114	/* FW commands may require more mapped space than packets. */
1115	if (qid == IWM_MVM_CMD_QUEUE) {
1116		maxsize = IWM_RBUF_SIZE;
1117		nsegments = 1;
1118	} else {
1119		maxsize = MCLBYTES;
1120		nsegments = IWM_MAX_SCATTER - 2;
1121	}
1122
1123	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1124	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1125            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1126	if (error != 0) {
1127		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1128		goto fail;
1129	}
1130
1131	paddr = ring->cmd_dma.paddr;
1132	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1133		struct iwm_tx_data *data = &ring->data[i];
1134
1135		data->cmd_paddr = paddr;
1136		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1137		    + offsetof(struct iwm_tx_cmd, scratch);
1138		paddr += sizeof(struct iwm_device_cmd);
1139
1140		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1141		if (error != 0) {
1142			device_printf(sc->sc_dev,
1143			    "could not create TX buf DMA map\n");
1144			goto fail;
1145		}
1146	}
1147	KASSERT(paddr == ring->cmd_dma.paddr + size,
1148	    ("invalid physical address"));
1149	return 0;
1150
1151fail:	iwm_free_tx_ring(sc, ring);
1152	return error;
1153}
1154
1155static void
1156iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1157{
1158	int i;
1159
1160	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1161		struct iwm_tx_data *data = &ring->data[i];
1162
1163		if (data->m != NULL) {
1164			bus_dmamap_sync(ring->data_dmat, data->map,
1165			    BUS_DMASYNC_POSTWRITE);
1166			bus_dmamap_unload(ring->data_dmat, data->map);
1167			m_freem(data->m);
1168			data->m = NULL;
1169		}
1170	}
1171	/* Clear TX descriptors. */
1172	memset(ring->desc, 0, ring->desc_dma.size);
1173	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1174	    BUS_DMASYNC_PREWRITE);
1175	sc->qfullmsk &= ~(1 << ring->qid);
1176	ring->queued = 0;
1177	ring->cur = 0;
1178
1179	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1180		iwm_pcie_clear_cmd_in_flight(sc);
1181}
1182
1183static void
1184iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1185{
1186	int i;
1187
1188	iwm_dma_contig_free(&ring->desc_dma);
1189	iwm_dma_contig_free(&ring->cmd_dma);
1190
1191	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1192		struct iwm_tx_data *data = &ring->data[i];
1193
1194		if (data->m != NULL) {
1195			bus_dmamap_sync(ring->data_dmat, data->map,
1196			    BUS_DMASYNC_POSTWRITE);
1197			bus_dmamap_unload(ring->data_dmat, data->map);
1198			m_freem(data->m);
1199			data->m = NULL;
1200		}
1201		if (data->map != NULL) {
1202			bus_dmamap_destroy(ring->data_dmat, data->map);
1203			data->map = NULL;
1204		}
1205	}
1206	if (ring->data_dmat != NULL) {
1207		bus_dma_tag_destroy(ring->data_dmat);
1208		ring->data_dmat = NULL;
1209	}
1210}
1211
1212/*
1213 * High-level hardware frobbing routines
1214 */
1215
1216static void
1217iwm_enable_interrupts(struct iwm_softc *sc)
1218{
1219	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1220	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1221}
1222
1223static void
1224iwm_restore_interrupts(struct iwm_softc *sc)
1225{
1226	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1227}
1228
1229static void
1230iwm_disable_interrupts(struct iwm_softc *sc)
1231{
1232	/* disable interrupts */
1233	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1234
1235	/* acknowledge all interrupts */
1236	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1237	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1238}
1239
1240static void
1241iwm_ict_reset(struct iwm_softc *sc)
1242{
1243	iwm_disable_interrupts(sc);
1244
1245	/* Reset ICT table. */
1246	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1247	sc->ict_cur = 0;
1248
1249	/* Set physical address of ICT table (4KB aligned). */
1250	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1251	    IWM_CSR_DRAM_INT_TBL_ENABLE
1252	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1253	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1254	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1255
1256	/* Switch to ICT interrupt mode in driver. */
1257	sc->sc_flags |= IWM_FLAG_USE_ICT;
1258
1259	/* Re-enable interrupts. */
1260	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1261	iwm_enable_interrupts(sc);
1262}
1263
1264/* iwlwifi pcie/trans.c */
1265
1266/*
1267 * Since this .. hard-resets things, it's time to actually
1268 * mark the first vap (if any) as having no mac context.
1269 * It's annoying, but since the driver is potentially being
1270 * stop/start'ed whilst active (thanks openbsd port!) we
1271 * have to correctly track this.
1272 */
1273static void
1274iwm_stop_device(struct iwm_softc *sc)
1275{
1276	struct ieee80211com *ic = &sc->sc_ic;
1277	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1278	int chnl, qid;
1279	uint32_t mask = 0;
1280
1281	/* tell the device to stop sending interrupts */
1282	iwm_disable_interrupts(sc);
1283
1284	/*
1285	 * FreeBSD-local: mark the first vap as not-uploaded,
1286	 * so the next transition through auth/assoc
1287	 * will correctly populate the MAC context.
1288	 */
1289	if (vap) {
1290		struct iwm_vap *iv = IWM_VAP(vap);
1291		iv->phy_ctxt = NULL;
1292		iv->is_uploaded = 0;
1293	}
1294
1295	/* device going down, Stop using ICT table */
1296	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1297
1298	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
1299
1300	if (iwm_nic_lock(sc)) {
1301		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1302
1303		/* Stop each Tx DMA channel */
1304		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1305			IWM_WRITE(sc,
1306			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1307			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
1308		}
1309
1310		/* Wait for DMA channels to be idle */
1311		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1312		    5000)) {
1313			device_printf(sc->sc_dev,
1314			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1315			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
1316		}
1317		iwm_nic_unlock(sc);
1318	}
1319	iwm_pcie_rx_stop(sc);
1320
1321	/* Stop RX ring. */
1322	iwm_reset_rx_ring(sc, &sc->rxq);
1323
1324	/* Reset all TX rings. */
1325	for (qid = 0; qid < nitems(sc->txq); qid++)
1326		iwm_reset_tx_ring(sc, &sc->txq[qid]);
1327
1328	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1329		/* Power-down device's busmaster DMA clocks */
1330		if (iwm_nic_lock(sc)) {
1331			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1332			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1333			iwm_nic_unlock(sc);
1334		}
1335		DELAY(5);
1336	}
1337
1338	/* Make sure (redundant) we've released our request to stay awake */
1339	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1340	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1341
1342	/* Stop the device, and put it in low power state */
1343	iwm_apm_stop(sc);
1344
1345	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
1346	 * Clean again the interrupt here
1347	 */
1348	iwm_disable_interrupts(sc);
1349	/* stop and reset the on-board processor */
1350	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1351
1352	/*
1353	 * Even if we stop the HW, we still want the RF kill
1354	 * interrupt
1355	 */
1356	iwm_enable_rfkill_int(sc);
1357	iwm_check_rfkill(sc);
1358}
1359
1360/* iwlwifi: mvm/ops.c */
1361static void
1362iwm_mvm_nic_config(struct iwm_softc *sc)
1363{
1364	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1365	uint32_t reg_val = 0;
1366	uint32_t phy_config = iwm_mvm_get_phy_config(sc);
1367
1368	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1369	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1370	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1371	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
1372	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1373	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
1374
1375	/* SKU control */
1376	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1377	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1378	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1379	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1380
1381	/* radio configuration */
1382	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1383	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1384	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1385
1386	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1387
1388	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1389	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1390	    radio_cfg_step, radio_cfg_dash);
1391
1392	/*
1393	 * W/A : NIC is stuck in a reset state after Early PCIe power off
1394	 * (PCIe power is lost before PERST# is asserted), causing ME FW
1395	 * to lose ownership and not being able to obtain it back.
1396	 */
1397	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
1398		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1399		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1400		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1401	}
1402}
1403
1404static int
1405iwm_nic_rx_init(struct iwm_softc *sc)
1406{
1407	/*
1408	 * Initialize RX ring.  This is from the iwn driver.
1409	 */
1410	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1411
1412	/* Stop Rx DMA */
1413	iwm_pcie_rx_stop(sc);
1414
1415	if (!iwm_nic_lock(sc))
1416		return EBUSY;
1417
1418	/* reset and flush pointers */
1419	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1420	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1421	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1422	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1423
1424	/* Set physical address of RX ring (256-byte aligned). */
1425	IWM_WRITE(sc,
1426	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1427
1428	/* Set physical address of RX status (16-byte aligned). */
1429	IWM_WRITE(sc,
1430	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1431
1432	/* Enable Rx DMA
1433	 * XXX 5000 HW isn't supported by the iwm(4) driver.
1434	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1435	 *      the credit mechanism in 5000 HW RX FIFO
1436	 * Direct rx interrupts to hosts
1437	 * Rx buffer size 4 or 8k or 12k
1438	 * RB timeout 0x10
1439	 * 256 RBDs
1440	 */
1441	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1442	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
1443	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
1444	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
1445	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
1446	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1447	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1448
1449	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1450
1451	/* W/A for interrupt coalescing bug in 7260 and 3160 */
1452	if (sc->cfg->host_interrupt_operation_mode)
1453		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1454
1455	/*
1456	 * Thus sayeth el jefe (iwlwifi) via a comment:
1457	 *
1458	 * This value should initially be 0 (before preparing any
1459	 * RBs), should be 8 after preparing the first 8 RBs (for example)
1460	 */
1461	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1462
1463	iwm_nic_unlock(sc);
1464
1465	return 0;
1466}
1467
1468static int
1469iwm_nic_tx_init(struct iwm_softc *sc)
1470{
1471	int qid;
1472
1473	if (!iwm_nic_lock(sc))
1474		return EBUSY;
1475
1476	/* Deactivate TX scheduler. */
1477	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1478
1479	/* Set physical address of "keep warm" page (16-byte aligned). */
1480	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1481
1482	/* Initialize TX rings. */
1483	for (qid = 0; qid < nitems(sc->txq); qid++) {
1484		struct iwm_tx_ring *txq = &sc->txq[qid];
1485
1486		/* Set physical address of TX ring (256-byte aligned). */
1487		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1488		    txq->desc_dma.paddr >> 8);
1489		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1490		    "%s: loading ring %d descriptors (%p) at %lx\n",
1491		    __func__,
1492		    qid, txq->desc,
1493		    (unsigned long) (txq->desc_dma.paddr >> 8));
1494	}
1495
1496	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1497
1498	iwm_nic_unlock(sc);
1499
1500	return 0;
1501}
1502
1503static int
1504iwm_nic_init(struct iwm_softc *sc)
1505{
1506	int error;
1507
1508	iwm_apm_init(sc);
1509	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1510		iwm_set_pwr(sc);
1511
1512	iwm_mvm_nic_config(sc);
1513
1514	if ((error = iwm_nic_rx_init(sc)) != 0)
1515		return error;
1516
1517	/*
1518	 * Ditto for TX, from iwn
1519	 */
1520	if ((error = iwm_nic_tx_init(sc)) != 0)
1521		return error;
1522
1523	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1524	    "%s: shadow registers enabled\n", __func__);
1525	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1526
1527	return 0;
1528}
1529
1530int
1531iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1532{
1533	if (!iwm_nic_lock(sc)) {
1534		device_printf(sc->sc_dev,
1535		    "%s: cannot enable txq %d\n",
1536		    __func__,
1537		    qid);
1538		return EBUSY;
1539	}
1540
1541	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1542
1543	if (qid == IWM_MVM_CMD_QUEUE) {
1544		/* unactivate before configuration */
1545		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1546		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1547		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1548
1549		iwm_nic_unlock(sc);
1550
1551		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1552
1553		if (!iwm_nic_lock(sc)) {
1554			device_printf(sc->sc_dev,
1555			    "%s: cannot enable txq %d\n", __func__, qid);
1556			return EBUSY;
1557		}
1558		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1559		iwm_nic_unlock(sc);
1560
1561		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1562		/* Set scheduler window size and frame limit. */
1563		iwm_write_mem32(sc,
1564		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1565		    sizeof(uint32_t),
1566		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1567		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1568		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1569		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1570
1571		if (!iwm_nic_lock(sc)) {
1572			device_printf(sc->sc_dev,
1573			    "%s: cannot enable txq %d\n", __func__, qid);
1574			return EBUSY;
1575		}
1576		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1577		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1578		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1579		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1580		    IWM_SCD_QUEUE_STTS_REG_MSK);
1581	} else {
1582		struct iwm_scd_txq_cfg_cmd cmd;
1583		int error;
1584
1585		iwm_nic_unlock(sc);
1586
1587		memset(&cmd, 0, sizeof(cmd));
1588		cmd.scd_queue = qid;
1589		cmd.enable = 1;
1590		cmd.sta_id = sta_id;
1591		cmd.tx_fifo = fifo;
1592		cmd.aggregate = 0;
1593		cmd.window = IWM_FRAME_LIMIT;
1594
1595		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
1596		    sizeof(cmd), &cmd);
1597		if (error) {
1598			device_printf(sc->sc_dev,
1599			    "cannot enable txq %d\n", qid);
1600			return error;
1601		}
1602
1603		if (!iwm_nic_lock(sc))
1604			return EBUSY;
1605	}
1606
1607	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1608	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1609
1610	iwm_nic_unlock(sc);
1611
1612	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1613	    __func__, qid, fifo);
1614
1615	return 0;
1616}
1617
1618static int
1619iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1620{
1621	int error, chnl;
1622
1623	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1624	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1625
1626	if (!iwm_nic_lock(sc))
1627		return EBUSY;
1628
1629	iwm_ict_reset(sc);
1630
1631	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1632	if (scd_base_addr != 0 &&
1633	    scd_base_addr != sc->scd_base_addr) {
1634		device_printf(sc->sc_dev,
1635		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1636		    __func__, sc->scd_base_addr, scd_base_addr);
1637	}
1638
1639	iwm_nic_unlock(sc);
1640
1641	/* reset context data, TX status and translation data */
1642	error = iwm_write_mem(sc,
1643	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1644	    NULL, clear_dwords);
1645	if (error)
1646		return EBUSY;
1647
1648	if (!iwm_nic_lock(sc))
1649		return EBUSY;
1650
1651	/* Set physical address of TX scheduler rings (1KB aligned). */
1652	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1653
1654	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1655
1656	iwm_nic_unlock(sc);
1657
1658	/* enable command channel */
1659	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
1660	if (error)
1661		return error;
1662
1663	if (!iwm_nic_lock(sc))
1664		return EBUSY;
1665
1666	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1667
1668	/* Enable DMA channels. */
1669	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1670		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1671		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1672		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1673	}
1674
1675	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1676	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1677
1678	iwm_nic_unlock(sc);
1679
1680	/* Enable L1-Active */
1681	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
1682		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1683		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1684	}
1685
1686	return error;
1687}
1688
1689/*
1690 * NVM read access and content parsing.  We do not support
1691 * external NVM or writing NVM.
1692 * iwlwifi/mvm/nvm.c
1693 */
1694
1695/* Default NVM size to read */
1696#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
1697
1698#define IWM_NVM_WRITE_OPCODE 1
1699#define IWM_NVM_READ_OPCODE 0
1700
1701/* load nvm chunk response */
1702enum {
1703	IWM_READ_NVM_CHUNK_SUCCEED = 0,
1704	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1705};
1706
1707static int
1708iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1709	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1710{
1711	struct iwm_nvm_access_cmd nvm_access_cmd = {
1712		.offset = htole16(offset),
1713		.length = htole16(length),
1714		.type = htole16(section),
1715		.op_code = IWM_NVM_READ_OPCODE,
1716	};
1717	struct iwm_nvm_access_resp *nvm_resp;
1718	struct iwm_rx_packet *pkt;
1719	struct iwm_host_cmd cmd = {
1720		.id = IWM_NVM_ACCESS_CMD,
1721		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
1722		.data = { &nvm_access_cmd, },
1723	};
1724	int ret, bytes_read, offset_read;
1725	uint8_t *resp_data;
1726
1727	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1728
1729	ret = iwm_send_cmd(sc, &cmd);
1730	if (ret) {
1731		device_printf(sc->sc_dev,
1732		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
1733		return ret;
1734	}
1735
1736	pkt = cmd.resp_pkt;
1737
1738	/* Extract NVM response */
1739	nvm_resp = (void *)pkt->data;
1740	ret = le16toh(nvm_resp->status);
1741	bytes_read = le16toh(nvm_resp->length);
1742	offset_read = le16toh(nvm_resp->offset);
1743	resp_data = nvm_resp->data;
1744	if (ret) {
1745		if ((offset != 0) &&
1746		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1747			/*
1748			 * meaning of NOT_VALID_ADDRESS:
1749			 * driver try to read chunk from address that is
1750			 * multiple of 2K and got an error since addr is empty.
1751			 * meaning of (offset != 0): driver already
1752			 * read valid data from another chunk so this case
1753			 * is not an error.
1754			 */
1755			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1756				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1757				    offset);
1758			*len = 0;
1759			ret = 0;
1760		} else {
1761			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1762				    "NVM access command failed with status %d\n", ret);
1763			ret = EIO;
1764		}
1765		goto exit;
1766	}
1767
1768	if (offset_read != offset) {
1769		device_printf(sc->sc_dev,
1770		    "NVM ACCESS response with invalid offset %d\n",
1771		    offset_read);
1772		ret = EINVAL;
1773		goto exit;
1774	}
1775
1776	if (bytes_read > length) {
1777		device_printf(sc->sc_dev,
1778		    "NVM ACCESS response with too much data "
1779		    "(%d bytes requested, %d bytes received)\n",
1780		    length, bytes_read);
1781		ret = EINVAL;
1782		goto exit;
1783	}
1784
1785	/* Write data to NVM */
1786	memcpy(data + offset, resp_data, bytes_read);
1787	*len = bytes_read;
1788
1789 exit:
1790	iwm_free_resp(sc, &cmd);
1791	return ret;
1792}
1793
1794/*
1795 * Reads an NVM section completely.
1796 * NICs prior to 7000 family don't have a real NVM, but just read
1797 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1798 * by uCode, we need to manually check in this case that we don't
1799 * overflow and try to read more than the EEPROM size.
1800 * For 7000 family NICs, we supply the maximal size we can read, and
1801 * the uCode fills the response with as much data as we can,
1802 * without overflowing, so no check is needed.
1803 */
1804static int
1805iwm_nvm_read_section(struct iwm_softc *sc,
1806	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1807{
1808	uint16_t seglen, length, offset = 0;
1809	int ret;
1810
1811	/* Set nvm section read length */
1812	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1813
1814	seglen = length;
1815
1816	/* Read the NVM until exhausted (reading less than requested) */
1817	while (seglen == length) {
1818		/* Check no memory assumptions fail and cause an overflow */
1819		if ((size_read + offset + length) >
1820		    sc->cfg->eeprom_size) {
1821			device_printf(sc->sc_dev,
1822			    "EEPROM size is too small for NVM\n");
1823			return ENOBUFS;
1824		}
1825
1826		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1827		if (ret) {
1828			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1829				    "Cannot read NVM from section %d offset %d, length %d\n",
1830				    section, offset, length);
1831			return ret;
1832		}
1833		offset += seglen;
1834	}
1835
1836	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1837		    "NVM section %d read completed\n", section);
1838	*len = offset;
1839	return 0;
1840}
1841
1842/*
1843 * BEGIN IWM_NVM_PARSE
1844 */
1845
1846/* iwlwifi/iwl-nvm-parse.c */
1847
1848/* NVM offsets (in words) definitions */
1849enum iwm_nvm_offsets {
1850	/* NVM HW-Section offset (in words) definitions */
1851	IWM_HW_ADDR = 0x15,
1852
1853/* NVM SW-Section offset (in words) definitions */
1854	IWM_NVM_SW_SECTION = 0x1C0,
1855	IWM_NVM_VERSION = 0,
1856	IWM_RADIO_CFG = 1,
1857	IWM_SKU = 2,
1858	IWM_N_HW_ADDRS = 3,
1859	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
1860
1861/* NVM calibration section offset (in words) definitions */
1862	IWM_NVM_CALIB_SECTION = 0x2B8,
1863	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
1864};
1865
1866enum iwm_8000_nvm_offsets {
1867	/* NVM HW-Section offset (in words) definitions */
1868	IWM_HW_ADDR0_WFPM_8000 = 0x12,
1869	IWM_HW_ADDR1_WFPM_8000 = 0x16,
1870	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
1871	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
1872	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
1873
1874	/* NVM SW-Section offset (in words) definitions */
1875	IWM_NVM_SW_SECTION_8000 = 0x1C0,
1876	IWM_NVM_VERSION_8000 = 0,
1877	IWM_RADIO_CFG_8000 = 0,
1878	IWM_SKU_8000 = 2,
1879	IWM_N_HW_ADDRS_8000 = 3,
1880
1881	/* NVM REGULATORY -Section offset (in words) definitions */
1882	IWM_NVM_CHANNELS_8000 = 0,
1883	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
1884	IWM_NVM_LAR_OFFSET_8000 = 0x507,
1885	IWM_NVM_LAR_ENABLED_8000 = 0x7,
1886
1887	/* NVM calibration section offset (in words) definitions */
1888	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
1889	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
1890};
1891
1892/* SKU Capabilities (actual values from NVM definition) */
1893enum nvm_sku_bits {
1894	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
1895	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
1896	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
1897	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
1898};
1899
1900/* radio config bits (actual values from NVM definition) */
1901#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
1902#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
1903#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
1904#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
1905#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
1906#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
1907
1908#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	(x & 0xF)
1909#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		((x >> 4) & 0xF)
1910#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		((x >> 8) & 0xF)
1911#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		((x >> 12) & 0xFFF)
1912#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	((x >> 24) & 0xF)
1913#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	((x >> 28) & 0xF)
1914
1915#define DEFAULT_MAX_TX_POWER 16
1916
1917/**
1918 * enum iwm_nvm_channel_flags - channel flags in NVM
1919 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
1920 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
1921 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
1922 * @IWM_NVM_CHANNEL_RADAR: radar detection required
1923 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
1924 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
1925 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
1926 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
1927 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
1928 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
1929 */
1930enum iwm_nvm_channel_flags {
1931	IWM_NVM_CHANNEL_VALID = (1 << 0),
1932	IWM_NVM_CHANNEL_IBSS = (1 << 1),
1933	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
1934	IWM_NVM_CHANNEL_RADAR = (1 << 4),
1935	IWM_NVM_CHANNEL_DFS = (1 << 7),
1936	IWM_NVM_CHANNEL_WIDE = (1 << 8),
1937	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
1938	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
1939	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
1940};
1941
1942/*
1943 * Translate EEPROM flags to net80211.
1944 */
1945static uint32_t
1946iwm_eeprom_channel_flags(uint16_t ch_flags)
1947{
1948	uint32_t nflags;
1949
1950	nflags = 0;
1951	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1952		nflags |= IEEE80211_CHAN_PASSIVE;
1953	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1954		nflags |= IEEE80211_CHAN_NOADHOC;
1955	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1956		nflags |= IEEE80211_CHAN_DFS;
1957		/* Just in case. */
1958		nflags |= IEEE80211_CHAN_NOADHOC;
1959	}
1960
1961	return (nflags);
1962}
1963
1964static void
1965iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1966    int maxchans, int *nchans, int ch_idx, size_t ch_num,
1967    const uint8_t bands[])
1968{
1969	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1970	uint32_t nflags;
1971	uint16_t ch_flags;
1972	uint8_t ieee;
1973	int error;
1974
1975	for (; ch_idx < ch_num; ch_idx++) {
1976		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1977		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1978			ieee = iwm_nvm_channels[ch_idx];
1979		else
1980			ieee = iwm_nvm_channels_8000[ch_idx];
1981
1982		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1983			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1984			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
1985			    ieee, ch_flags,
1986			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1987			    "5.2" : "2.4");
1988			continue;
1989		}
1990
1991		nflags = iwm_eeprom_channel_flags(ch_flags);
1992		error = ieee80211_add_channel(chans, maxchans, nchans,
1993		    ieee, 0, 0, nflags, bands);
1994		if (error != 0)
1995			break;
1996
1997		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1998		    "Ch. %d Flags %x [%sGHz] - Added\n",
1999		    ieee, ch_flags,
2000		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2001		    "5.2" : "2.4");
2002	}
2003}
2004
2005static void
2006iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
2007    struct ieee80211_channel chans[])
2008{
2009	struct iwm_softc *sc = ic->ic_softc;
2010	struct iwm_nvm_data *data = sc->nvm_data;
2011	uint8_t bands[IEEE80211_MODE_BYTES];
2012	size_t ch_num;
2013
2014	memset(bands, 0, sizeof(bands));
2015	/* 1-13: 11b/g channels. */
2016	setbit(bands, IEEE80211_MODE_11B);
2017	setbit(bands, IEEE80211_MODE_11G);
2018	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
2019	    IWM_NUM_2GHZ_CHANNELS - 1, bands);
2020
2021	/* 14: 11b channel only. */
2022	clrbit(bands, IEEE80211_MODE_11G);
2023	iwm_add_channel_band(sc, chans, maxchans, nchans,
2024	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2025
2026	if (data->sku_cap_band_52GHz_enable) {
2027		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2028			ch_num = nitems(iwm_nvm_channels);
2029		else
2030			ch_num = nitems(iwm_nvm_channels_8000);
2031		memset(bands, 0, sizeof(bands));
2032		setbit(bands, IEEE80211_MODE_11A);
2033		iwm_add_channel_band(sc, chans, maxchans, nchans,
2034		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2035	}
2036}
2037
/*
 * Derive the station MAC address on family 8000 devices.
 *
 * Preference order: the MAC_OVERRIDE NVM section (if it holds a usable
 * address), then the OTP address read from the WFMP PRPH registers.
 * If neither source is available, the address is zeroed and an error is
 * printed; the caller validates the result.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Placeholder address found in NVMs with no real override. */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The register words carry the octets byte-reversed. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		/* Only the low two octets of the second word are used. */
		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2096
2097static int
2098iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2099	    const uint16_t *phy_sku)
2100{
2101	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2102		return le16_to_cpup(nvm_sw + IWM_SKU);
2103
2104	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2105}
2106
2107static int
2108iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2109{
2110	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2111		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2112	else
2113		return le32_to_cpup((const uint32_t *)(nvm_sw +
2114						IWM_NVM_VERSION_8000));
2115}
2116
2117static int
2118iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2119		  const uint16_t *phy_sku)
2120{
2121        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2122                return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2123
2124        return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2125}
2126
2127static int
2128iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2129{
2130	int n_hw_addr;
2131
2132	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2133		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2134
2135	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2136
2137        return n_hw_addr & IWM_N_HW_ADDR_MASK;
2138}
2139
2140static void
2141iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2142		  uint32_t radio_cfg)
2143{
2144	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
2145		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2146		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2147		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2148		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2149		return;
2150	}
2151
2152	/* set the radio configuration for family 8000 */
2153	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2154	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2155	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2156	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
2157	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2158	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2159}
2160
/*
 * Fill in data->hw_addr from the NVM.  Pre-8000 devices keep the MAC
 * in the HW section with 16-bit little-endian word swapping; family
 * 8000 is handled by iwm_set_hw_address_family_8000().  Returns EINVAL
 * if no valid address could be derived.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	/* Catches both the zeroed-out and garbage cases. */
	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2191
/*
 * Assemble a struct iwm_nvm_data from the raw NVM section pointers.
 *
 * The returned structure (including the trailing per-channel flags
 * array sized by device family) is allocated here; ownership passes to
 * the caller, who releases it with iwm_free_nvm_data().  Returns NULL
 * on allocation failure or when no valid MAC address could be derived.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;
	uint16_t lar_config;

	/* The channel-flags array length differs per family. */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	} else {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	}
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n deliberately disabled regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	/* LAR (location-aware regulatory) bits exist only on family 8000. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* The LAR word moved between NVM versions. */
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
				       IWM_NVM_LAR_OFFSET_8000_OLD :
				       IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       IWM_NVM_LAR_ENABLED_8000);
	}

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		free(data, M_DEVBUF);
		return NULL;
	}

	/* Channel flags live in different sections per family. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2252
2253static void
2254iwm_free_nvm_data(struct iwm_nvm_data *data)
2255{
2256	if (data != NULL)
2257		free(data, M_DEVBUF);
2258}
2259
/*
 * Validate that the NVM sections mandatory for this device family are
 * present, then hand the raw section pointers to iwm_parse_nvm_data().
 * Returns NULL (after a diagnostic) when a required section is missing;
 * the returned nvm_data is owned by the caller.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Family 7000 needs only the SW and HW sections. */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	/* Optional sections may be NULL; the parser tolerates that. */
	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2312
2313static int
2314iwm_nvm_init(struct iwm_softc *sc)
2315{
2316	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2317	int i, ret, section;
2318	uint32_t size_read = 0;
2319	uint8_t *nvm_buffer, *temp;
2320	uint16_t len;
2321
2322	memset(nvm_sections, 0, sizeof(nvm_sections));
2323
2324	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2325		return EINVAL;
2326
2327	/* load NVM values from nic */
2328	/* Read From FW NVM */
2329	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2330
2331	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2332	if (!nvm_buffer)
2333		return ENOMEM;
2334	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2335		/* we override the constness for initial read */
2336		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2337					   &len, size_read);
2338		if (ret)
2339			continue;
2340		size_read += len;
2341		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2342		if (!temp) {
2343			ret = ENOMEM;
2344			break;
2345		}
2346		memcpy(temp, nvm_buffer, len);
2347
2348		nvm_sections[section].data = temp;
2349		nvm_sections[section].length = len;
2350	}
2351	if (!size_read)
2352		device_printf(sc->sc_dev, "OTP is blank\n");
2353	free(nvm_buffer, M_DEVBUF);
2354
2355	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2356	if (!sc->nvm_data)
2357		return EINVAL;
2358	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2359		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2360
2361	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2362		if (nvm_sections[i].data != NULL)
2363			free(nvm_sections[i].data, M_DEVBUF);
2364	}
2365
2366	return 0;
2367}
2368
/*
 * Upload one firmware section into device SRAM via the service DMA
 * channel, copying through the pre-allocated fw_dma bounce buffer in
 * chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes.  Destinations in
 * the extended SRAM window need IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE set
 * for the duration of the chunk transfer.  Returns 0 or the errno from
 * iwm_pcie_load_firmware_chunk().
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	/* Bounce buffer: CPU copies in, device then DMAs out of it. */
	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		/* The final chunk may be shorter than chunk_sz. */
		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		/* Flush the chunk to memory before starting the DMA. */
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		/* Always restore normal addressing, even on failure. */
		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2421
2422/*
2423 * ucode
2424 */
/*
 * Program the FH service channel to DMA one chunk from host memory
 * (phy_addr) into device SRAM at dst_addr, then sleep until the
 * interrupt handler sets sc_fw_chunk_done.  Returns 0 on success,
 * EBUSY if the NIC lock could not be taken, or ETIMEDOUT if the chunk
 * never completed.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
			     bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	/* Cleared here; set by the ISR when the transfer completes. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while its descriptor is being set up. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	/* Destination address in device SRAM. */
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	/* Source: low bits of the host DMA address... */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	/* ...plus the high bits, combined with the byte count. */
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	/* One valid transfer buffer descriptor. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Un-pause: start the DMA, interrupt at end of TFD. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		/* msleep returns non-zero on timeout (1s) or signal. */
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2478
/*
 * Load the firmware sections of one CPU on family 8000 devices,
 * acknowledging each loaded section to the ucode through the
 * IWM_FH_UCODE_LOAD_STATUS register (CPU1 uses the low 16 bits, CPU2
 * the high 16 bits).  *first_ucode_section carries the section cursor
 * across the CPU1 and CPU2 invocations.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;	/* CPU1 status: low half-word */
		*first_ucode_section = 0;
	} else {
		shift_param = 16;	/* CPU2 status: high half-word */
		(*first_ucode_section)++;	/* skip the separator entry */
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Accumulate one status bit per loaded section. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Tell the ucode that all of this CPU's sections are in place. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2540
2541static int
2542iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2543	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2544{
2545	int shift_param;
2546	int i, ret = 0;
2547	uint32_t last_read_idx = 0;
2548
2549	if (cpu == 1) {
2550		shift_param = 0;
2551		*first_ucode_section = 0;
2552	} else {
2553		shift_param = 16;
2554		(*first_ucode_section)++;
2555	}
2556
2557	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2558		last_read_idx = i;
2559
2560		/*
2561		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2562		 * CPU1 to CPU2.
2563		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2564		 * CPU2 non paged to CPU2 paging sec.
2565		 */
2566		if (!image->fw_sect[i].data ||
2567		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2568		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2569			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2570				    "Break since Data not valid or Empty section, sec = %d\n",
2571				     i);
2572			break;
2573		}
2574
2575		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2576		if (ret)
2577			return ret;
2578	}
2579
2580	*first_ucode_section = last_read_idx;
2581
2582	return 0;
2583
2584}
2585
/*
 * Upload a (possibly dual-CPU) firmware image on pre-8000 hardware and
 * release the CPU reset so the firmware starts executing.  Returns 0 or
 * the errno from section loading.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc,
				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
			iwm_nic_unlock(sc);
		}

		/* load to FW the binary sections of CPU2 */
		/* first_ucode_section resumes past the CPU1/CPU2 separator */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
2624
/*
 * Family 8000 variant of the image loader.  Unlike the pre-8000 path,
 * the CPU reset is released FIRST so the ucode can acknowledge each
 * section through IWM_FH_UCODE_LOAD_STATUS as it is loaded; then the
 * secured CPU1 and CPU2 sections are uploaded.
 */
int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		    image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
		    IWM_RELEASE_CPU_RESET_BIT);
		iwm_nic_unlock(sc);
	}

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
	    &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
	    &first_ucode_section);
}
2653
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, the only one needed while firmware
 * chunks are being DMA'd to the device (see iwm_start_fw()).
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2662
/* XXX Add proper rfkill support code */
/*
 * Take ownership of the NIC, initialize it, and upload the given
 * firmware image.  While the image is being loaded only the FH_TX
 * interrupt stays enabled; the ALIVE handshake is handled by the
 * caller (iwm_mvm_load_ucode_wait_alive()).  Returns 0 on success,
 * EIO if the hardware never became ready, or an errno from nic init /
 * image loading.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Acknowledge any pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2721
2722static int
2723iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2724{
2725	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2726		.valid = htole32(valid_tx_ant),
2727	};
2728
2729	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2730	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2731}
2732
/* iwlwifi: mvm/fw.c */
/*
 * Send IWM_PHY_CONFIGURATION_CMD carrying the PHY configuration and
 * the default calibration triggers of the currently running ucode.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	/*
	 * NOTE(review): phy_cfg_cmd is not zero-initialized; this assumes
	 * every member of struct iwm_phy_cfg_cmd is assigned below —
	 * confirm against the structure definition.
	 */
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2752
/*
 * Notification-wait callback for the firmware's ALIVE message.  The
 * response format version (v1/v2/v3) is distinguished by payload size;
 * each variant carries the error/log event table pointers, the SCD
 * base address, and the alive status.  Fills in the iwm_mvm_alive_data
 * passed via 'data' and returns TRUE to end the wait.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		/* Version 1: no UMAC, so no UMAC error table. */
		palive1 = (void *)pkt->data;

		sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		/* Version 2: adds the UMAC error-info address. */
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A non-zero UMAC table address implies UMAC logging. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		/* Version 3: same fields, 32-bit UMAC version numbers. */
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	/* An unrecognized payload size leaves alive_data untouched. */
	return TRUE;
}
2829
2830static int
2831iwm_wait_phy_db_entry(struct iwm_softc *sc,
2832	struct iwm_rx_packet *pkt, void *data)
2833{
2834	struct iwm_phy_db *phy_db = data;
2835
2836	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2837		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2838			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2839			    __func__, pkt->hdr.code);
2840		}
2841		return TRUE;
2842	}
2843
2844	if (iwm_phy_db_set_section(phy_db, pkt)) {
2845		device_printf(sc->sc_dev,
2846		    "%s: iwm_phy_db_set_section failed\n", __func__);
2847	}
2848
2849	return FALSE;
2850}
2851
/*
 * Load the requested ucode image and block until the firmware's ALIVE
 * notification arrives (the sc lock is dropped around the wait).  On
 * success the SCD base address is configured and, for images that use
 * paging, the paging mechanism is set up.  On any failure
 * sc->cur_ucode is restored to the previously running image type.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		/* Unregister the waiter we will never satisfy. */
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* Dump the secure-boot CPU status to aid diagnosis. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2945
2946/*
2947 * mvm misc bits
2948 */
2949
2950/*
2951 * follows iwlwifi/fw.c
2952 */
/*
 * Boot the INIT firmware image and run its calibrations; or, when
 * 'justnvm' is set, boot only far enough to read the NVM (MAC address
 * etc.) and return without calibrating.  Returns 0 or an errno.
 * Drops the sc lock while waiting for the calibration notifications.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Register for the calibration notifications before starting the fw. */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/*
		 * Success path too: we never wait for the calibration
		 * notification in justnvm mode, so jump to the 'error'
		 * label purely to remove the wait entry (ret is 0 here).
		 */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);

	/*
	 * NOTE(review): skipping iwm_remove_notification() here assumes
	 * iwm_wait_notification() removes the entry on both completion
	 * and timeout — confirm against its implementation.
	 */
	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3045
3046/*
3047 * receive side
3048 */
3049
/*
 * (re)stock rx ring, called at init-time and at runtime.
 *
 * Allocates a fresh jumbo cluster mbuf, DMA-loads it via the ring's
 * spare map, then swaps the spare map with the slot's map so the old
 * buffer (if any) is unloaded without ever leaving the slot unmapped.
 * NOTE(review): the 'size' parameter is unused — the allocation is
 * always IWM_RBUF_SIZE.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/* Load into the spare map first: on failure the slot is untouched. */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* The hardware takes the buffer address shifted right by 8, so it
	 * must be 256-byte aligned (enforced by the KASSERT). */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3094
3095/* iwlwifi: mvm/rx.c */
3096/*
3097 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3098 * values are reported by the fw as positive values - need to negate
3099 * to obtain their dBM.  Account for missing antennas by replacing 0
3100 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3101 */
3102static int
3103iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3104{
3105	int energy_a, energy_b, energy_c, max_energy;
3106	uint32_t val;
3107
3108	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3109	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3110	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3111	energy_a = energy_a ? -energy_a : -256;
3112	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3113	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3114	energy_b = energy_b ? -energy_b : -256;
3115	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3116	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3117	energy_c = energy_c ? -energy_c : -256;
3118	max_energy = MAX(energy_a, energy_b);
3119	max_energy = MAX(max_energy, energy_c);
3120
3121	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3122	    "energy In A %d B %d C %d , and max %d\n",
3123	    energy_a, energy_b, energy_c, max_energy);
3124
3125	return max_energy;
3126}
3127
3128static void
3129iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3130{
3131	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3132
3133	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3134
3135	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3136}
3137
3138/*
3139 * Retrieve the average noise (in dBm) among receivers.
3140 */
3141static int
3142iwm_get_noise(struct iwm_softc *sc,
3143    const struct iwm_mvm_statistics_rx_non_phy *stats)
3144{
3145	int i, total, nbant, noise;
3146
3147	total = nbant = noise = 0;
3148	for (i = 0; i < 3; i++) {
3149		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3150		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3151		    __func__,
3152		    i,
3153		    noise);
3154
3155		if (noise) {
3156			total += noise;
3157			nbant++;
3158		}
3159	}
3160
3161	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3162	    __func__, nbant, total);
3163#if 0
3164	/* There should be at least one antenna but check anyway. */
3165	return (nbant == 0) ? -127 : (total / nbant) - 107;
3166#else
3167	/* For now, just hard-code it to -96 to be safe */
3168	return (-96);
3169#endif
3170}
3171
3172static void
3173iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3174{
3175	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3176
3177	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3178	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3179}
3180
3181/*
3182 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3183 *
3184 * Handles the actual data of the Rx packet from the fw
3185 */
3186static boolean_t
3187iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3188	boolean_t stolen)
3189{
3190	struct ieee80211com *ic = &sc->sc_ic;
3191	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3192	struct ieee80211_frame *wh;
3193	struct ieee80211_node *ni;
3194	struct ieee80211_rx_stats rxs;
3195	struct iwm_rx_phy_info *phy_info;
3196	struct iwm_rx_mpdu_res_start *rx_res;
3197	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3198	uint32_t len;
3199	uint32_t rx_pkt_status;
3200	int rssi;
3201
3202	phy_info = &sc->sc_last_phy_info;
3203	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3204	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3205	len = le16toh(rx_res->byte_count);
3206	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3207
3208	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3209		device_printf(sc->sc_dev,
3210		    "dsp size out of range [0,20]: %d\n",
3211		    phy_info->cfg_phy_cnt);
3212		goto fail;
3213	}
3214
3215	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3216	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3217		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3219		goto fail;
3220	}
3221
3222	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3223
3224	/* Map it to relative value */
3225	rssi = rssi - sc->sc_noise;
3226
3227	/* replenish ring for the buffer we're going to feed to the sharks */
3228	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3229		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3230		    __func__);
3231		goto fail;
3232	}
3233
3234	m->m_data = pkt->data + sizeof(*rx_res);
3235	m->m_pkthdr.len = m->m_len = len;
3236
3237	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3238	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3239
3240	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3241
3242	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3243	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3244	    __func__,
3245	    le16toh(phy_info->channel),
3246	    le16toh(phy_info->phy_flags));
3247
3248	/*
3249	 * Populate an RX state struct with the provided information.
3250	 */
3251	bzero(&rxs, sizeof(rxs));
3252	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3253	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3254	rxs.c_ieee = le16toh(phy_info->channel);
3255	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3256		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3257	} else {
3258		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3259	}
3260
3261	/* rssi is in 1/2db units */
3262	rxs.rssi = rssi * 2;
3263	rxs.nf = sc->sc_noise;
3264
3265	if (ieee80211_radiotap_active_vap(vap)) {
3266		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3267
3268		tap->wr_flags = 0;
3269		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3270			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3271		tap->wr_chan_freq = htole16(rxs.c_freq);
3272		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3273		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3274		tap->wr_dbm_antsignal = (int8_t)rssi;
3275		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3276		tap->wr_tsft = phy_info->system_timestamp;
3277		switch (phy_info->rate) {
3278		/* CCK rates. */
3279		case  10: tap->wr_rate =   2; break;
3280		case  20: tap->wr_rate =   4; break;
3281		case  55: tap->wr_rate =  11; break;
3282		case 110: tap->wr_rate =  22; break;
3283		/* OFDM rates. */
3284		case 0xd: tap->wr_rate =  12; break;
3285		case 0xf: tap->wr_rate =  18; break;
3286		case 0x5: tap->wr_rate =  24; break;
3287		case 0x7: tap->wr_rate =  36; break;
3288		case 0x9: tap->wr_rate =  48; break;
3289		case 0xb: tap->wr_rate =  72; break;
3290		case 0x1: tap->wr_rate =  96; break;
3291		case 0x3: tap->wr_rate = 108; break;
3292		/* Unknown rate: should not happen. */
3293		default:  tap->wr_rate =   0;
3294		}
3295	}
3296
3297	IWM_UNLOCK(sc);
3298	if (ni != NULL) {
3299		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3300		ieee80211_input_mimo(ni, m, &rxs);
3301		ieee80211_free_node(ni);
3302	} else {
3303		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3304		ieee80211_input_mimo_all(ic, m, &rxs);
3305	}
3306	IWM_LOCK(sc);
3307
3308	return TRUE;
3309
3310fail:	counter_u64_add(ic->ic_ierrors, 1);
3311	return FALSE;
3312}
3313
3314static int
3315iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3316	struct iwm_node *in)
3317{
3318	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3319	struct ieee80211_node *ni = &in->in_ni;
3320	struct ieee80211vap *vap = ni->ni_vap;
3321	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3322	int failack = tx_resp->failure_frame;
3323
3324	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3325
3326	/* Update rate control statistics. */
3327	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3328	    __func__,
3329	    (int) le16toh(tx_resp->status.status),
3330	    (int) le16toh(tx_resp->status.sequence),
3331	    tx_resp->frame_count,
3332	    tx_resp->bt_kill_count,
3333	    tx_resp->failure_rts,
3334	    tx_resp->failure_frame,
3335	    le32toh(tx_resp->initial_rate),
3336	    (int) le16toh(tx_resp->wireless_media_time));
3337
3338	if (status != IWM_TX_STATUS_SUCCESS &&
3339	    status != IWM_TX_STATUS_DIRECT_DONE) {
3340		ieee80211_ratectl_tx_complete(vap, ni,
3341		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3342		return (1);
3343	} else {
3344		ieee80211_ratectl_tx_complete(vap, ni,
3345		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3346		return (0);
3347	}
3348}
3349
/*
 * Handle a TX-command response from the firmware: complete the matching
 * TX ring slot, unload its DMA map, hand the result to net80211 and
 * restart transmission if the queue drops below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A completion came back; clear the watchdog countdown. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before handing m/in off to net80211. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Frees the mbuf and updates counters; status!=0 means TX failed. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	/* Unblock this queue once it drains below the low watermark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3389
3390/*
3391 * transmit side
3392 */
3393
3394/*
3395 * Process a "command done" firmware notification.  This is where we wakeup
3396 * processes waiting for a synchronous command completion.
3397 * from if_iwn
3398 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping on this command's descriptor slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/*
	 * Sanity check: with 'queued' outstanding commands, the slot being
	 * acked plus the backlog should land exactly on the producer index.
	 */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: release the cmd-in-flight hold. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3438
3439#if 0
3440/*
3441 * necessary only for block ack mode
3442 */
/*
 * Update the TX scheduler's byte-count table for one TFD slot.
 * (Currently compiled out; only needed for block-ack mode.)
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	/* NOTE(review): the +8 overhead constant is unexplained upstream;
	 * the table entry is the frame length in 4-byte units. */
	len += 8; /* magic numbers came naturally from paris */
	len = roundup(len, 4) / 4;

	/* Entry encodes the station id in the top nibble. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* Low entries are mirrored past the ring end — presumably so the
	 * hardware can read wrapped slots without modular indexing. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
3469#endif
3470
3471/*
3472 * Take an 802.11 (non-n) rate, find the relevant rate
3473 * table entry.  return the index into in_ridx[].
3474 *
3475 * The caller then uses that index back into in_ridx
3476 * to figure out the rate index programmed /into/
3477 * the firmware for this given node.
3478 */
3479static int
3480iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3481    uint8_t rate)
3482{
3483	int i;
3484	uint8_t r;
3485
3486	for (i = 0; i < nitems(in->in_ridx); i++) {
3487		r = iwm_rates[in->in_ridx[i]].rate;
3488		if (rate == r)
3489			return (i);
3490	}
3491
3492	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3493	    "%s: couldn't find an entry for rate=%d\n",
3494	    __func__,
3495	    rate);
3496
3497	/* XXX Return the first */
3498	/* XXX TODO: have it return the /lowest/ */
3499	return (0);
3500}
3501
3502static int
3503iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3504{
3505	int i;
3506
3507	for (i = 0; i < nitems(iwm_rates); i++) {
3508		if (iwm_rates[i].rate == rate)
3509			return (i);
3510	}
3511	/* XXX error? */
3512	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3513	    "%s: couldn't find an entry for rate=%d\n",
3514	    __func__,
3515	    rate);
3516	return (0);
3517}
3518
3519/*
3520 * Fill in the rate related information for a transmit command.
3521 */
3522static const struct iwm_rate *
3523iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3524	struct mbuf *m, struct iwm_tx_cmd *tx)
3525{
3526	struct ieee80211_node *ni = &in->in_ni;
3527	struct ieee80211_frame *wh;
3528	const struct ieee80211_txparam *tp = ni->ni_txparms;
3529	const struct iwm_rate *rinfo;
3530	int type;
3531	int ridx, rate_flags;
3532
3533	wh = mtod(m, struct ieee80211_frame *);
3534	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3535
3536	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3537	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3538
3539	if (type == IEEE80211_FC0_TYPE_MGT) {
3540		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3541		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3542		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3543	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3544		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3545		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3546		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3547	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3548		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3549		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3550		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3551	} else if (m->m_flags & M_EAPOL) {
3552		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3553		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3554		    "%s: EAPOL\n", __func__);
3555	} else if (type == IEEE80211_FC0_TYPE_DATA) {
3556		int i;
3557
3558		/* for data frames, use RS table */
3559		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3560		/* XXX pass pktlen */
3561		(void) ieee80211_ratectl_rate(ni, NULL, 0);
3562		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
3563		ridx = in->in_ridx[i];
3564
3565		/* This is the index into the programmed table */
3566		tx->initial_rate_index = i;
3567		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3568
3569		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3570		    "%s: start with i=%d, txrate %d\n",
3571		    __func__, i, iwm_rates[ridx].rate);
3572	} else {
3573		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3574		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
3575		    __func__, tp->mgmtrate);
3576	}
3577
3578	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3579	    "%s: frame type=%d txrate %d\n",
3580	        __func__, type, iwm_rates[ridx].rate);
3581
3582	rinfo = &iwm_rates[ridx];
3583
3584	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3585	    __func__, ridx,
3586	    rinfo->rate,
3587	    !! (IWM_RIDX_IS_CCK(ridx))
3588	    );
3589
3590	/* XXX TODO: hard-coded TX antenna? */
3591	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3592	if (IWM_RIDX_IS_CCK(ridx))
3593		rate_flags |= IWM_RATE_MCS_CCK_MSK;
3594	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3595
3596	return rinfo;
3597}
3598
/* Bytes of the TX command that share the first TFD transfer buffer. */
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on ring 'ac': build the firmware TX
 * command (rate, flags, station, power-save timeout), DMA-map the
 * payload, fill the TFD descriptor and kick the ring's write pointer.
 * Consumes the mbuf on both success and failure.  Returns 0 or errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Selects the rate and sets rate_n_flags/retry limits in *tx. */
	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	/* Request an ACK for anything that is not multicast. */
	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for unicast data above the RTS threshold. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/*
	 * Power-save frame timeout: (re)assoc requests get the longest
	 * window, actions none, other management frames the default.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header travels inside the command; only the payload is mapped. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0/TB1 carry the command+header, TB2.. the payload segments. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3816
3817static int
3818iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3819    const struct ieee80211_bpf_params *params)
3820{
3821	struct ieee80211com *ic = ni->ni_ic;
3822	struct iwm_softc *sc = ic->ic_softc;
3823	int error = 0;
3824
3825	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3826	    "->%s begin\n", __func__);
3827
3828	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3829		m_freem(m);
3830		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3831		    "<-%s not RUNNING\n", __func__);
3832		return (ENETDOWN);
3833        }
3834
3835	IWM_LOCK(sc);
3836	/* XXX fix this */
3837        if (params == NULL) {
3838		error = iwm_tx(sc, m, ni, 0);
3839	} else {
3840		error = iwm_tx(sc, m, ni, 0);
3841	}
3842	sc->sc_tx_timer = 5;
3843	IWM_UNLOCK(sc);
3844
3845        return (error);
3846}
3847
3848/*
3849 * mvm/tx.c
3850 */
3851
3852/*
3853 * Note that there are transports that buffer frames before they reach
3854 * the firmware. This means that after flush_tx_path is called, the
3855 * queue might not be empty. The race-free way to handle this is to:
3856 * 1) set the station as draining
3857 * 2) flush the Tx path
3858 * 3) wait for the transport queues to be empty
3859 */
3860int
3861iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3862{
3863	int ret;
3864	struct iwm_tx_path_flush_cmd flush_cmd = {
3865		.queues_ctl = htole32(tfd_msk),
3866		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3867	};
3868
3869	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3870	    sizeof(flush_cmd), &flush_cmd);
3871	if (ret)
3872                device_printf(sc->sc_dev,
3873		    "Flushing tx queue failed: %d\n", ret);
3874	return ret;
3875}
3876
3877/*
3878 * BEGIN mvm/quota.c
3879 */
3880
3881static int
3882iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3883{
3884	struct iwm_time_quota_cmd cmd;
3885	int i, idx, ret, num_active_macs, quota, quota_rem;
3886	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3887	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3888	uint16_t id;
3889
3890	memset(&cmd, 0, sizeof(cmd));
3891
3892	/* currently, PHY ID == binding ID */
3893	if (ivp) {
3894		id = ivp->phy_ctxt->id;
3895		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3896		colors[id] = ivp->phy_ctxt->color;
3897
3898		if (1)
3899			n_ifs[id] = 1;
3900	}
3901
3902	/*
3903	 * The FW's scheduling session consists of
3904	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3905	 * equally between all the bindings that require quota
3906	 */
3907	num_active_macs = 0;
3908	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3909		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3910		num_active_macs += n_ifs[i];
3911	}
3912
3913	quota = 0;
3914	quota_rem = 0;
3915	if (num_active_macs) {
3916		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3917		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3918	}
3919
3920	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3921		if (colors[i] < 0)
3922			continue;
3923
3924		cmd.quotas[idx].id_and_color =
3925			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3926
3927		if (n_ifs[i] <= 0) {
3928			cmd.quotas[idx].quota = htole32(0);
3929			cmd.quotas[idx].max_duration = htole32(0);
3930		} else {
3931			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3932			cmd.quotas[idx].max_duration = htole32(0);
3933		}
3934		idx++;
3935	}
3936
3937	/* Give the remainder of the session to the first binding */
3938	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3939
3940	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3941	    sizeof(cmd), &cmd);
3942	if (ret)
3943		device_printf(sc->sc_dev,
3944		    "%s: Failed to send quota: %d\n", __func__, ret);
3945	return ret;
3946}
3947
3948/*
3949 * END mvm/quota.c
3950 */
3951
3952/*
3953 * ieee80211 routines
3954 */
3955
3956/*
3957 * Change to AUTH state in 80211 state machine.  Roughly matches what
3958 * Linux does in bss_info_changed().
3959 */
3960static int
3961iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3962{
3963	struct ieee80211_node *ni;
3964	struct iwm_node *in;
3965	struct iwm_vap *iv = IWM_VAP(vap);
3966	uint32_t duration;
3967	int error;
3968
3969	/*
3970	 * XXX i have a feeling that the vap node is being
3971	 * freed from underneath us. Grr.
3972	 */
3973	ni = ieee80211_ref_node(vap->iv_bss);
3974	in = IWM_NODE(ni);
3975	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3976	    "%s: called; vap=%p, bss ni=%p\n",
3977	    __func__,
3978	    vap,
3979	    ni);
3980
3981	in->in_assoc = 0;
3982
3983	/*
3984	 * Firmware bug - it'll crash if the beacon interval is less
3985	 * than 16. We can't avoid connecting at all, so refuse the
3986	 * station state change, this will cause net80211 to abandon
3987	 * attempts to connect to this AP, and eventually wpa_s will
3988	 * blacklist the AP...
3989	 */
3990	if (ni->ni_intval < 16) {
3991		device_printf(sc->sc_dev,
3992		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3993		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3994		error = EINVAL;
3995		goto out;
3996	}
3997
3998	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3999	if (error != 0)
4000		return error;
4001
4002	error = iwm_allow_mcast(vap, sc);
4003	if (error) {
4004		device_printf(sc->sc_dev,
4005		    "%s: failed to set multicast\n", __func__);
4006		goto out;
4007	}
4008
4009	/*
4010	 * This is where it deviates from what Linux does.
4011	 *
4012	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4013	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4014	 * and always does a mac_ctx_changed().
4015	 *
4016	 * The openbsd port doesn't attempt to do that - it reset things
4017	 * at odd states and does the add here.
4018	 *
4019	 * So, until the state handling is fixed (ie, we never reset
4020	 * the NIC except for a firmware failure, which should drag
4021	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4022	 * contexts that are required), let's do a dirty hack here.
4023	 */
4024	if (iv->is_uploaded) {
4025		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4026			device_printf(sc->sc_dev,
4027			    "%s: failed to update MAC\n", __func__);
4028			goto out;
4029		}
4030		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4031		    in->in_ni.ni_chan, 1, 1)) != 0) {
4032			device_printf(sc->sc_dev,
4033			    "%s: failed update phy ctxt\n", __func__);
4034			goto out;
4035		}
4036		iv->phy_ctxt = &sc->sc_phyctxt[0];
4037
4038		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4039			device_printf(sc->sc_dev,
4040			    "%s: binding update cmd\n", __func__);
4041			goto out;
4042		}
4043		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4044			device_printf(sc->sc_dev,
4045			    "%s: failed to update sta\n", __func__);
4046			goto out;
4047		}
4048	} else {
4049		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4050			device_printf(sc->sc_dev,
4051			    "%s: failed to add MAC\n", __func__);
4052			goto out;
4053		}
4054		if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4055			device_printf(sc->sc_dev,
4056			    "%s: failed to update power management\n",
4057			    __func__);
4058			goto out;
4059		}
4060		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4061		    in->in_ni.ni_chan, 1, 1)) != 0) {
4062			device_printf(sc->sc_dev,
4063			    "%s: failed add phy ctxt!\n", __func__);
4064			error = ETIMEDOUT;
4065			goto out;
4066		}
4067		iv->phy_ctxt = &sc->sc_phyctxt[0];
4068
4069		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4070			device_printf(sc->sc_dev,
4071			    "%s: binding add cmd\n", __func__);
4072			goto out;
4073		}
4074		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4075			device_printf(sc->sc_dev,
4076			    "%s: failed to add sta\n", __func__);
4077			goto out;
4078		}
4079	}
4080
4081	/*
4082	 * Prevent the FW from wandering off channel during association
4083	 * by "protecting" the session with a time event.
4084	 */
4085	/* XXX duration is in units of TU, not MS */
4086	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4087	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4088	DELAY(100);
4089
4090	error = 0;
4091out:
4092	ieee80211_free_node(ni);
4093	return (error);
4094}
4095
4096static int
4097iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4098{
4099	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4100	int error;
4101
4102	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4103		device_printf(sc->sc_dev,
4104		    "%s: failed to update STA\n", __func__);
4105		return error;
4106	}
4107
4108	in->in_assoc = 1;
4109	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4110		device_printf(sc->sc_dev,
4111		    "%s: failed to update MAC\n", __func__);
4112		return error;
4113	}
4114
4115	return 0;
4116}
4117
/*
 * Tear down association state by fully resetting the device.
 *
 * "in" may be NULL (e.g. from the RUN->INIT path in iwm_newstate());
 * in that case only the flush/reset/re-init is performed.  Always
 * returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, vap);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	/* Drain pending frames, then flush the firmware TX path. */
	iwm_xmit_queue_drain(sc);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	/* Full reset: stop the device and bring the hardware back up. */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Dead code: the "proper" teardown sequence, kept for reference. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4188
4189static struct ieee80211_node *
4190iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4191{
4192	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4193	    M_NOWAIT | M_ZERO);
4194}
4195
4196uint8_t
4197iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4198{
4199	int i;
4200	uint8_t rval;
4201
4202	for (i = 0; i < rs->rs_nrates; i++) {
4203		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4204		if (rval == iwm_rates[ridx].rate)
4205			return rs->rs_rates[i];
4206	}
4207
4208	return 0;
4209}
4210
4211static void
4212iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
4213{
4214	struct ieee80211_node *ni = &in->in_ni;
4215	struct iwm_lq_cmd *lq = &in->in_lq;
4216	int nrates = ni->ni_rates.rs_nrates;
4217	int i, ridx, tab = 0;
4218//	int txant = 0;
4219
4220	if (nrates > nitems(lq->rs_table)) {
4221		device_printf(sc->sc_dev,
4222		    "%s: node supports %d rates, driver handles "
4223		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4224		return;
4225	}
4226	if (nrates == 0) {
4227		device_printf(sc->sc_dev,
4228		    "%s: node supports 0 rates, odd!\n", __func__);
4229		return;
4230	}
4231
4232	/*
4233	 * XXX .. and most of iwm_node is not initialised explicitly;
4234	 * it's all just 0x0 passed to the firmware.
4235	 */
4236
4237	/* first figure out which rates we should support */
4238	/* XXX TODO: this isn't 11n aware /at all/ */
4239	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
4240	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4241	    "%s: nrates=%d\n", __func__, nrates);
4242
4243	/*
4244	 * Loop over nrates and populate in_ridx from the highest
4245	 * rate to the lowest rate.  Remember, in_ridx[] has
4246	 * IEEE80211_RATE_MAXSIZE entries!
4247	 */
4248	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
4249		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;
4250
4251		/* Map 802.11 rate to HW rate index. */
4252		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
4253			if (iwm_rates[ridx].rate == rate)
4254				break;
4255		if (ridx > IWM_RIDX_MAX) {
4256			device_printf(sc->sc_dev,
4257			    "%s: WARNING: device rate for %d not found!\n",
4258			    __func__, rate);
4259		} else {
4260			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4261			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
4262			    __func__,
4263			    i,
4264			    rate,
4265			    ridx);
4266			in->in_ridx[i] = ridx;
4267		}
4268	}
4269
4270	/* then construct a lq_cmd based on those */
4271	memset(lq, 0, sizeof(*lq));
4272	lq->sta_id = IWM_STATION_ID;
4273
4274	/* For HT, always enable RTS/CTS to avoid excessive retries. */
4275	if (ni->ni_flags & IEEE80211_NODE_HT)
4276		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4277
4278	/*
4279	 * are these used? (we don't do SISO or MIMO)
4280	 * need to set them to non-zero, though, or we get an error.
4281	 */
4282	lq->single_stream_ant_msk = 1;
4283	lq->dual_stream_ant_msk = 1;
4284
4285	/*
4286	 * Build the actual rate selection table.
4287	 * The lowest bits are the rates.  Additionally,
4288	 * CCK needs bit 9 to be set.  The rest of the bits
4289	 * we add to the table select the tx antenna
4290	 * Note that we add the rates in the highest rate first
4291	 * (opposite of ni_rates).
4292	 */
4293	/*
4294	 * XXX TODO: this should be looping over the min of nrates
4295	 * and LQ_MAX_RETRY_NUM.  Sigh.
4296	 */
4297	for (i = 0; i < nrates; i++) {
4298		int nextant;
4299
4300#if 0
4301		if (txant == 0)
4302			txant = iwm_mvm_get_valid_tx_ant(sc);
4303		nextant = 1<<(ffs(txant)-1);
4304		txant &= ~nextant;
4305#else
4306		nextant = iwm_mvm_get_valid_tx_ant(sc);
4307#endif
4308		/*
4309		 * Map the rate id into a rate index into
4310		 * our hardware table containing the
4311		 * configuration to use for this rate.
4312		 */
4313		ridx = in->in_ridx[i];
4314		tab = iwm_rates[ridx].plcp;
4315		tab |= nextant << IWM_RATE_MCS_ANT_POS;
4316		if (IWM_RIDX_IS_CCK(ridx))
4317			tab |= IWM_RATE_MCS_CCK_MSK;
4318		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4319		    "station rate i=%d, rate=%d, hw=%x\n",
4320		    i, iwm_rates[ridx].rate, tab);
4321		lq->rs_table[i] = htole32(tab);
4322	}
4323	/* then fill the rest with the lowest possible rate */
4324	for (i = nrates; i < nitems(lq->rs_table); i++) {
4325		KASSERT(tab != 0, ("invalid tab"));
4326		lq->rs_table[i] = htole32(tab);
4327	}
4328}
4329
4330static int
4331iwm_media_change(struct ifnet *ifp)
4332{
4333	struct ieee80211vap *vap = ifp->if_softc;
4334	struct ieee80211com *ic = vap->iv_ic;
4335	struct iwm_softc *sc = ic->ic_softc;
4336	int error;
4337
4338	error = ieee80211_media_change(ifp);
4339	if (error != ENETRESET)
4340		return error;
4341
4342	IWM_LOCK(sc);
4343	if (ic->ic_nrunning > 0) {
4344		iwm_stop(sc);
4345		iwm_init(sc);
4346	}
4347	IWM_UNLOCK(sc);
4348	return error;
4349}
4350
4351
/*
 * net80211 state-machine handler.  Bridges net80211 states onto the
 * firmware's MAC/PHY-context and station model, then chains to the
 * saved net80211 handler (ivp->iv_newstate).
 *
 * Locking: called with the IEEE80211 (ic) lock held; we drop it and
 * take the driver lock, re-acquiring the ic lock whenever we must call
 * back into net80211.  Lock order is always ic lock -> IWM lock.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	/* Leaving SCAN: stop the scan LED blink. */
	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * RUN -> INIT: let net80211 transition first, then
			 * do the full device reset via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			int myerr;
			/*
			 * Backing out of AUTH/ASSOC: let net80211 change
			 * state, then strip the station, MAC context and
			 * binding back out of the firmware.  Firmware
			 * errors here are logged but not propagated; the
			 * net80211 result (myerr) is what we return.
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
                        if (error) {
                                device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to change mac context: %d\n",
                                    __func__, error);
                        }
                        error = iwm_mvm_binding_remove_vif(sc, ivp);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to remove channel ctx: %d\n",
                                    __func__, error);
                        }
			ivp->phy_ctxt = NULL;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
	{
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		/* Full association: enable filtering/power/quota and rates. */
		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		iwm_setrates(sc, in);

		/* Push the rate table built by iwm_setrates() to firmware. */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed\n", __func__);
		}

		iwm_mvm_led_enable(sc);
		break;
	}

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	/* Chain to the saved net80211 state handler. */
	return (ivp->iv_newstate(vap, nstate, arg));
}
4521
4522void
4523iwm_endscan_cb(void *arg, int pending)
4524{
4525	struct iwm_softc *sc = arg;
4526	struct ieee80211com *ic = &sc->sc_ic;
4527
4528	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4529	    "%s: scan ended\n",
4530	    __func__);
4531
4532	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4533}
4534
4535/*
4536 * Aging and idle timeouts for the different possible scenarios
4537 * in default configuration
4538 */
4539static const uint32_t
4540iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4541	{
4542		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
4543		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
4544	},
4545	{
4546		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
4547		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
4548	},
4549	{
4550		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
4551		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
4552	},
4553	{
4554		htole32(IWM_SF_BA_AGING_TIMER_DEF),
4555		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
4556	},
4557	{
4558		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
4559		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
4560	},
4561};
4562
4563/*
4564 * Aging and idle timeouts for the different possible scenarios
4565 * in single BSS MAC configuration.
4566 */
4567static const uint32_t
4568iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
4569	{
4570		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
4571		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
4572	},
4573	{
4574		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
4575		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
4576	},
4577	{
4578		htole32(IWM_SF_MCAST_AGING_TIMER),
4579		htole32(IWM_SF_MCAST_IDLE_TIMER)
4580	},
4581	{
4582		htole32(IWM_SF_BA_AGING_TIMER),
4583		htole32(IWM_SF_BA_IDLE_TIMER)
4584	},
4585	{
4586		htole32(IWM_SF_TX_RE_AGING_TIMER),
4587		htole32(IWM_SF_TX_RE_IDLE_TIMER)
4588	},
4589};
4590
4591static void
4592iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4593    struct ieee80211_node *ni)
4594{
4595	int i, j, watermark;
4596
4597	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4598
4599	/*
4600	 * If we are in association flow - check antenna configuration
4601	 * capabilities of the AP station, and choose the watermark accordingly.
4602	 */
4603	if (ni) {
4604		if (ni->ni_flags & IEEE80211_NODE_HT) {
4605#ifdef notyet
4606			if (ni->ni_rxmcs[2] != 0)
4607				watermark = IWM_SF_W_MARK_MIMO3;
4608			else if (ni->ni_rxmcs[1] != 0)
4609				watermark = IWM_SF_W_MARK_MIMO2;
4610			else
4611#endif
4612				watermark = IWM_SF_W_MARK_SISO;
4613		} else {
4614			watermark = IWM_SF_W_MARK_LEGACY;
4615		}
4616	/* default watermark value for unassociated mode. */
4617	} else {
4618		watermark = IWM_SF_W_MARK_MIMO2;
4619	}
4620	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4621
4622	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4623		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4624			sf_cmd->long_delay_timeouts[i][j] =
4625					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4626		}
4627	}
4628
4629	if (ni) {
4630		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4631		       sizeof(iwm_sf_full_timeout));
4632	} else {
4633		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4634		       sizeof(iwm_sf_full_timeout_def));
4635	}
4636}
4637
4638static int
4639iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4640{
4641	struct ieee80211com *ic = &sc->sc_ic;
4642	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4643	struct iwm_sf_cfg_cmd sf_cmd = {
4644		.state = htole32(IWM_SF_FULL_ON),
4645	};
4646	int ret = 0;
4647
4648	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4649		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4650
4651	switch (new_state) {
4652	case IWM_SF_UNINIT:
4653	case IWM_SF_INIT_OFF:
4654		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4655		break;
4656	case IWM_SF_FULL_ON:
4657		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4658		break;
4659	default:
4660		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4661		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4662			  new_state);
4663		return EINVAL;
4664	}
4665
4666	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4667				   sizeof(sf_cmd), &sf_cmd);
4668	return ret;
4669}
4670
4671static int
4672iwm_send_bt_init_conf(struct iwm_softc *sc)
4673{
4674	struct iwm_bt_coex_cmd bt_cmd;
4675
4676	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4677	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4678
4679	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4680	    &bt_cmd);
4681}
4682
4683static boolean_t
4684iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4685{
4686	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4687	boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4688					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4689
4690	if (iwm_lar_disable)
4691		return FALSE;
4692
4693	/*
4694	 * Enable LAR only if it is supported by the FW (TLV) &&
4695	 * enabled in the NVM
4696	 */
4697	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4698		return nvm_lar && tlv_lar;
4699	else
4700		return tlv_lar;
4701}
4702
4703static boolean_t
4704iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4705{
4706	return fw_has_api(&sc->ucode_capa,
4707			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4708	       fw_has_capa(&sc->ucode_capa,
4709			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4710}
4711
/*
 * Send an MCC (mobile country code) update to the firmware.
 *
 * "alpha2" is the two-character ISO country code ("ZZ" = world).
 * A no-op (returning 0) when LAR is unsupported.  The response is
 * only inspected under IWM_DEBUG; the command result itself is
 * what matters.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* Newer firmware answers with the v2 response layout. */
	int resp_v2 = fw_has_capa(&sc->ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code packed big-endian-style into a 16-bit field. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* v1 firmware expects the shorter command struct. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		/* NOTE(review): mcc is copied without le16toh here and
		 * below — display-only, but wrong on big-endian hosts. */
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	/* IWM_CMD_WANT_SKB means we own the response; release it. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4784
4785static void
4786iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4787{
4788	struct iwm_host_cmd cmd = {
4789		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4790		.len = { sizeof(uint32_t), },
4791		.data = { &backoff, },
4792	};
4793
4794	if (iwm_send_cmd(sc, &cmd) != 0) {
4795		device_printf(sc->sc_dev,
4796		    "failed to change thermal tx backoff\n");
4797	}
4798}
4799
4800static int
4801iwm_init_hw(struct iwm_softc *sc)
4802{
4803	struct ieee80211com *ic = &sc->sc_ic;
4804	int error, i, ac;
4805
4806	if ((error = iwm_start_hw(sc)) != 0) {
4807		printf("iwm_start_hw: failed %d\n", error);
4808		return error;
4809	}
4810
4811	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4812		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4813		return error;
4814	}
4815
4816	/*
4817	 * should stop and start HW since that INIT
4818	 * image just loaded
4819	 */
4820	iwm_stop_device(sc);
4821	sc->sc_ps_disabled = FALSE;
4822	if ((error = iwm_start_hw(sc)) != 0) {
4823		device_printf(sc->sc_dev, "could not initialize hardware\n");
4824		return error;
4825	}
4826
4827	/* omstart, this time with the regular firmware */
4828	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4829	if (error) {
4830		device_printf(sc->sc_dev, "could not load firmware\n");
4831		goto error;
4832	}
4833
4834	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4835		device_printf(sc->sc_dev, "bt init conf failed\n");
4836		goto error;
4837	}
4838
4839	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4840	if (error != 0) {
4841		device_printf(sc->sc_dev, "antenna config failed\n");
4842		goto error;
4843	}
4844
4845	/* Send phy db control command and then phy db calibration */
4846	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4847		goto error;
4848
4849	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4850		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4851		goto error;
4852	}
4853
4854	/* Add auxiliary station for scanning */
4855	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4856		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4857		goto error;
4858	}
4859
4860	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4861		/*
4862		 * The channel used here isn't relevant as it's
4863		 * going to be overwritten in the other flows.
4864		 * For now use the first channel we have.
4865		 */
4866		if ((error = iwm_mvm_phy_ctxt_add(sc,
4867		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4868			goto error;
4869	}
4870
4871	/* Initialize tx backoffs to the minimum. */
4872	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4873		iwm_mvm_tt_tx_backoff(sc, 0);
4874
4875	error = iwm_mvm_power_update_device(sc);
4876	if (error)
4877		goto error;
4878
4879	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4880		goto error;
4881
4882	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4883		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4884			goto error;
4885	}
4886
4887	/* Enable Tx queues. */
4888	for (ac = 0; ac < WME_NUM_AC; ac++) {
4889		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4890		    iwm_mvm_ac_to_tx_fifo[ac]);
4891		if (error)
4892			goto error;
4893	}
4894
4895	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4896		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4897		goto error;
4898	}
4899
4900	return 0;
4901
4902 error:
4903	iwm_stop_device(sc);
4904	return error;
4905}
4906
4907/* Allow multicast from our BSSID. */
4908static int
4909iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4910{
4911	struct ieee80211_node *ni = vap->iv_bss;
4912	struct iwm_mcast_filter_cmd *cmd;
4913	size_t size;
4914	int error;
4915
4916	size = roundup(sizeof(*cmd), 4);
4917	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4918	if (cmd == NULL)
4919		return ENOMEM;
4920	cmd->filter_own = 1;
4921	cmd->port_id = 0;
4922	cmd->count = 0;
4923	cmd->pass_all = 1;
4924	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4925
4926	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4927	    IWM_CMD_SYNC, size, cmd);
4928	free(cmd, M_DEVBUF);
4929
4930	return (error);
4931}
4932
4933/*
4934 * ifnet interfaces
4935 */
4936
4937static void
4938iwm_init(struct iwm_softc *sc)
4939{
4940	int error;
4941
4942	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4943		return;
4944	}
4945	sc->sc_generation++;
4946	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4947
4948	if ((error = iwm_init_hw(sc)) != 0) {
4949		printf("iwm_init_hw failed %d\n", error);
4950		iwm_stop(sc);
4951		return;
4952	}
4953
4954	/*
4955	 * Ok, firmware loaded and we are jogging
4956	 */
4957	sc->sc_flags |= IWM_FLAG_HW_INITED;
4958	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4959}
4960
4961static int
4962iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4963{
4964	struct iwm_softc *sc;
4965	int error;
4966
4967	sc = ic->ic_softc;
4968
4969	IWM_LOCK(sc);
4970	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4971		IWM_UNLOCK(sc);
4972		return (ENXIO);
4973	}
4974	error = mbufq_enqueue(&sc->sc_snd, m);
4975	if (error) {
4976		IWM_UNLOCK(sc);
4977		return (error);
4978	}
4979	iwm_start(sc);
4980	IWM_UNLOCK(sc);
4981	return (0);
4982}
4983
4984/*
4985 * Dequeue packets from sendq and call send.
4986 */
4987static void
4988iwm_start(struct iwm_softc *sc)
4989{
4990	struct ieee80211_node *ni;
4991	struct mbuf *m;
4992	int ac = 0;
4993
4994	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4995	while (sc->qfullmsk == 0 &&
4996		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4997		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4998		if (iwm_tx(sc, m, ni, ac) != 0) {
4999			if_inc_counter(ni->ni_vap->iv_ifp,
5000			    IFCOUNTER_OERRORS, 1);
5001			ieee80211_free_node(ni);
5002			continue;
5003		}
5004		sc->sc_tx_timer = 15;
5005	}
5006	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
5007}
5008
/*
 * Bring the interface down: clear the "up" flag before touching the
 * hardware so concurrent paths see the device as stopped, then halt
 * LED blinking, the TX watchdog and the device itself.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;	/* invalidates in-flight work for this "up" */
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5021
5022static void
5023iwm_watchdog(void *arg)
5024{
5025	struct iwm_softc *sc = arg;
5026	struct ieee80211com *ic = &sc->sc_ic;
5027
5028	if (sc->sc_tx_timer > 0) {
5029		if (--sc->sc_tx_timer == 0) {
5030			device_printf(sc->sc_dev, "device timeout\n");
5031#ifdef IWM_DEBUG
5032			iwm_nic_error(sc);
5033#endif
5034			ieee80211_restart_all(ic);
5035			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5036			return;
5037		}
5038	}
5039	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5040}
5041
5042static void
5043iwm_parent(struct ieee80211com *ic)
5044{
5045	struct iwm_softc *sc = ic->ic_softc;
5046	int startall = 0;
5047
5048	IWM_LOCK(sc);
5049	if (ic->ic_nrunning > 0) {
5050		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5051			iwm_init(sc);
5052			startall = 1;
5053		}
5054	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5055		iwm_stop(sc);
5056	IWM_UNLOCK(sc);
5057	if (startall)
5058		ieee80211_start_all(ic);
5059}
5060
5061/*
5062 * The interrupt side of things
5063 */
5064
5065/*
5066 * error dumping routines are from iwlwifi/mvm/utils.c
5067 */
5068
5069/*
5070 * Note: This structure is read from the device with IO accesses,
5071 * and the reading already does the endian conversion. As it is
5072 * read with uint32_t-sized accesses, any members with a different size
5073 * need to be ordered correctly though!
5074 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5122
5123/*
5124 * UMAC error struct - relevant starting from family 8000 chip.
5125 * Note: This structure is read from the device with IO accesses,
5126 * and the reading already does the endian conversion. As it is
5127 * read with u32-sized accesses, any members with a different size
5128 * need to be ordered correctly though!
5129 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
5147
/*
 * Error-log layout constants: the log entries presumably start one
 * 32-bit word past the table base, seven words per element — mirrors
 * the Linux iwlwifi definitions; used only in the "table.valid" dump
 * heuristic below.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5150
5151#ifdef IWM_DEBUG
/*
 * Mapping from firmware assert/error identifiers to human-readable
 * names, consumed by iwm_desc_lookup().  The final "ADVANCED_SYSASSERT"
 * entry (num == 0) is the catch-all for unknown codes and must stay
 * last.  Made static const: the table is private to this file and is
 * never modified.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5173
5174static const char *
5175iwm_desc_lookup(uint32_t num)
5176{
5177	int i;
5178
5179	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5180		if (advanced_lookup[i].num == num)
5181			return advanced_lookup[i].name;
5182
5183	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5184	return advanced_lookup[i].name;
5185}
5186
/*
 * Dump the UMAC error event table (relevant for family 8000+ parts) to
 * the console.  The table lives in device memory at the address the
 * firmware reported (sc->umac_error_event_table); the caller only
 * invokes us when that address is nonzero.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* Sanity-check the pointer the firmware handed us. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5233
5234/*
5235 * Support for dumping the error log seemed like a good idea ...
5236 * but it's mostly hex junk and the only sensible thing is the
5237 * hw/ucode revision (which we know anyway).  Since it's here,
5238 * I'll just leave it in, just in case e.g. the Intel guys want to
5239 * help us decipher some "ADVANCED_SYSASSERT" later.
5240 */
/*
 * Dump the LMAC error event table from device memory to the console,
 * then chain to the UMAC dump if the firmware advertised one.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	base = sc->error_event_table;
	/* Sanity-check the pointer the firmware handed us. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    table.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    table.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    table.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);

	/* Family 8000+ firmware also keeps a UMAC-side error table. */
	if (sc->umac_error_event_table)
		iwm_nic_umac_error(sc);
}
5313#endif
5314
/*
 * Process one RX buffer.  A single buffer may carry several
 * concatenated firmware responses/notifications, each aligned to
 * IWM_FH_RSCSR_FRAME_ALIGN.  Received 802.11 frames are either copied
 * out with m_copym() (so the ring keeps a valid buffer), or — when the
 * frame is the last packet in the buffer — the mbuf itself may be
 * handed ("stolen") up the stack by iwm_mvm_rx_rx_mpdu().
 * Called with the IWM lock held (from the interrupt path).
 */
static void
iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_cmd_response *cresp;
	struct mbuf *m1;
	uint32_t offset = 0;
	uint32_t maxoff = IWM_RBUF_SIZE;
	uint32_t nextoff;
	boolean_t stolen = FALSE;

/* True while another packet header could still fit at offset (a). */
#define HAVEROOM(a)	\
    ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)

	while (HAVEROOM(offset)) {
		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
		    offset);
		int qid, idx, code, len;

		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
			break;
		}

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x\n",
		    qid & ~0x80, pkt->hdr.idx, code);

		len = iwm_rx_packet_len(pkt);
		len += sizeof(uint32_t); /* account for status word */
		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);

		/* Wake up any thread sleeping on this notification. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
			break;

		case IWM_REPLY_RX_MPDU_CMD: {
			/*
			 * If this is the last frame in the RX buffer, we
			 * can directly feed the mbuf to the sharks here.
			 */
			struct iwm_rx_packet *nextpkt = mtodoff(m,
			    struct iwm_rx_packet *, nextoff);
			if (!HAVEROOM(nextoff) ||
			    (nextpkt->hdr.code == 0 &&
			     (nextpkt->hdr.qid & ~0x80) == 0 &&
			     nextpkt->hdr.idx == 0) ||
			    (nextpkt->len_n_flags ==
			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
					/* rx_mpdu took ownership of m. */
					stolen = FALSE;
					/* Make sure we abort the loop */
					nextoff = maxoff;
				}
				break;
			}

			/*
			 * Use m_copym instead of m_split, because that
			 * makes it easier to keep a valid rx buffer in
			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
			 *
			 * We need to start m_copym() at offset 0, to get the
			 * M_PKTHDR flag preserved.
			 */
			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m1) {
				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
					stolen = TRUE;
				else
					m_freem(m1);
			}
			break;
		}

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break;
		}

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION:
			iwm_mvm_handle_rx_statistics(sc, pkt);
			break;

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* Stash the response for the synchronous waiter. */
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				/*
				 * NOTE(review): copies sizeof(sc->sc_cmd_resp)
				 * bytes regardless of the actual packet length;
				 * verify pkt's buffer is always at least that
				 * large, otherwise this over-reads.
				 */
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Record the two-character country code as a string. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
				 IWM_FW_PAGING_BLOCK_CMD):
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			/* Generic command completions: stash header+status. */
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case IWM_PHY_DB_CMD:
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
 			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWM_DEBUG_LOG_MSG:
			break;

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid & ~0x80, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(qid & (1 << 7)))
			iwm_cmd_done(sc, pkt);

		offset = nextoff;
	}
	/* If a copy of m was stolen by the RX path, free the original here. */
	if (stolen)
		m_freem(m);
#undef HAVEROOM
}
5650
5651/*
5652 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5653 * Basic structure from if_iwn
5654 */
5655static void
5656iwm_notif_intr(struct iwm_softc *sc)
5657{
5658	uint16_t hw;
5659
5660	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5661	    BUS_DMASYNC_POSTREAD);
5662
5663	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5664
5665	/*
5666	 * Process responses
5667	 */
5668	while (sc->rxq.cur != hw) {
5669		struct iwm_rx_ring *ring = &sc->rxq;
5670		struct iwm_rx_data *data = &ring->data[ring->cur];
5671
5672		bus_dmamap_sync(ring->data_dmat, data->map,
5673		    BUS_DMASYNC_POSTREAD);
5674
5675		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5676		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5677		iwm_handle_rxb(sc, data->m);
5678
5679		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5680	}
5681
5682	/*
5683	 * Tell the firmware that it can reuse the ring entries that
5684	 * we have just processed.
5685	 * Seems like the hardware gets upset unless we align
5686	 * the write by 8??
5687	 */
5688	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5689	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5690}
5691
/*
 * Main PCI interrupt handler.  Reads the pending interrupt causes
 * (via the ICT table when IWM_FLAG_USE_ICT is set, otherwise from the
 * CSR_INT / FH_INT_STATUS registers), acks them, and dispatches to the
 * SW-error, HW-error, firmware-load, RF-kill and RX paths.  Interrupts
 * are masked on entry and restored at "out_ena".
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* NOTE(review): htole32() is used here as a byte-swap on
		 * data the device wrote; le32toh() would be the conventional
		 * spelling (the operation is identical). */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* consume the slot */
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Ack the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		/* Wake the firmware-load path sleeping in the DMA upload. */
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5845
5846/*
5847 * Autoconf glue-sniffing
5848 */
/* PCI vendor/device IDs of the parts this driver supports. */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4

/*
 * Device-ID to configuration mapping, used by iwm_probe() and
 * iwm_dev_check().  Note 7265D shares PCI IDs with 7265 and is
 * special-cased later by hardware revision (see iwm_attach()).
 */
static const struct iwm_devices {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5876
5877static int
5878iwm_probe(device_t dev)
5879{
5880	int i;
5881
5882	for (i = 0; i < nitems(iwm_devices); i++) {
5883		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5884		    pci_get_device(dev) == iwm_devices[i].device) {
5885			device_set_desc(dev, iwm_devices[i].cfg->name);
5886			return (BUS_PROBE_DEFAULT);
5887		}
5888	}
5889
5890	return (ENXIO);
5891}
5892
5893static int
5894iwm_dev_check(device_t dev)
5895{
5896	struct iwm_softc *sc;
5897	uint16_t devid;
5898	int i;
5899
5900	sc = device_get_softc(dev);
5901
5902	devid = pci_get_device(dev);
5903	for (i = 0; i < nitems(iwm_devices); i++) {
5904		if (iwm_devices[i].device == devid) {
5905			sc->cfg = iwm_devices[i].cfg;
5906			return (0);
5907		}
5908	}
5909	device_printf(dev, "unknown adapter type\n");
5910	return ENXIO;
5911}
5912
/* PCI configuration-space registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041	/* cleared in iwm_pci_attach() */
5915
5916static int
5917iwm_pci_attach(device_t dev)
5918{
5919	struct iwm_softc *sc;
5920	int count, error, rid;
5921	uint16_t reg;
5922
5923	sc = device_get_softc(dev);
5924
5925	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5926	 * PCI Tx retries from interfering with C3 CPU state */
5927	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5928
5929	/* Enable bus-mastering and hardware bug workaround. */
5930	pci_enable_busmaster(dev);
5931	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5932	/* if !MSI */
5933	if (reg & PCIM_STATUS_INTxSTATE) {
5934		reg &= ~PCIM_STATUS_INTxSTATE;
5935	}
5936	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5937
5938	rid = PCIR_BAR(0);
5939	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5940	    RF_ACTIVE);
5941	if (sc->sc_mem == NULL) {
5942		device_printf(sc->sc_dev, "can't map mem space\n");
5943		return (ENXIO);
5944	}
5945	sc->sc_st = rman_get_bustag(sc->sc_mem);
5946	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5947
5948	/* Install interrupt handler. */
5949	count = 1;
5950	rid = 0;
5951	if (pci_alloc_msi(dev, &count) == 0)
5952		rid = 1;
5953	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5954	    (rid != 0 ? 0 : RF_SHAREABLE));
5955	if (sc->sc_irq == NULL) {
5956		device_printf(dev, "can't map interrupt\n");
5957			return (ENXIO);
5958	}
5959	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5960	    NULL, iwm_intr, sc, &sc->sc_ih);
5961	if (sc->sc_ih == NULL) {
5962		device_printf(dev, "can't establish interrupt");
5963			return (ENXIO);
5964	}
5965	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5966
5967	return (0);
5968}
5969
5970static void
5971iwm_pci_detach(device_t dev)
5972{
5973	struct iwm_softc *sc = device_get_softc(dev);
5974
5975	if (sc->sc_irq != NULL) {
5976		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5977		bus_release_resource(dev, SYS_RES_IRQ,
5978		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5979		pci_release_msi(dev);
5980        }
5981	if (sc->sc_mem != NULL)
5982		bus_release_resource(dev, SYS_RES_MEMORY,
5983		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5984}
5985
5986
5987
/*
 * Device attach routine.
 *
 * Initializes driver-side state (lock, send queue, callouts, end-of-scan
 * task), probes the hardware revision, allocates every DMA resource the
 * device needs (firmware buffer, keep-warm page, ICT table, TX scheduler,
 * TX/RX rings) and registers iwm_preinit() as a config intrhook to finish
 * firmware-dependent setup later.  On any failure the partially built
 * state is torn down via iwm_detach_local() and ENXIO is returned.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	/* Mark attached first so iwm_detach_local() will run on failure. */
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response is pending yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/*
	 * Firmware loading needs working interrupts and a runnable system,
	 * so the remainder of attach is deferred to iwm_preinit().
	 */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
	/* NOTE(review): `error` values are not propagated; attach always
	 * reports ENXIO on failure. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6185
6186static int
6187iwm_is_valid_ether_addr(uint8_t *addr)
6188{
6189	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6190
6191	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6192		return (FALSE);
6193
6194	return (TRUE);
6195}
6196
6197static int
6198iwm_wme_update(struct ieee80211com *ic)
6199{
6200#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6201	struct iwm_softc *sc = ic->ic_softc;
6202	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6203	struct iwm_vap *ivp = IWM_VAP(vap);
6204	struct iwm_node *in;
6205	struct wmeParams tmp[WME_NUM_AC];
6206	int aci, error;
6207
6208	if (vap == NULL)
6209		return (0);
6210
6211	IEEE80211_LOCK(ic);
6212	for (aci = 0; aci < WME_NUM_AC; aci++)
6213		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6214	IEEE80211_UNLOCK(ic);
6215
6216	IWM_LOCK(sc);
6217	for (aci = 0; aci < WME_NUM_AC; aci++) {
6218		const struct wmeParams *ac = &tmp[aci];
6219		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6220		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6221		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6222		ivp->queue_params[aci].edca_txop =
6223		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6224	}
6225	ivp->have_wme = TRUE;
6226	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6227		in = IWM_NODE(vap->iv_bss);
6228		if (in->in_assoc) {
6229			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6230				device_printf(sc->sc_dev,
6231				    "%s: failed to update MAC\n", __func__);
6232			}
6233		}
6234	}
6235	IWM_UNLOCK(sc);
6236
6237	return (0);
6238#undef IWM_EXP2
6239}
6240
/*
 * Deferred attach stage, run as a config intrhook once interrupts work.
 * Starts the hardware, runs the "init" ucode once to read NVM data
 * (firmware version, MAC address, band capabilities), then attaches the
 * net80211 layer and wires up all ic_* driver callbacks.  On failure the
 * intrhook is disestablished and the whole device is torn down.
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/*
	 * Run the init firmware once to populate NVM data, then stop the
	 * device again; the run-time firmware is loaded on first use.
	 */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* The hook must be disestablished on every exit path. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6311
6312/*
6313 * Attach the interface to 802.11 radiotap.
6314 */
6315static void
6316iwm_radiotap_attach(struct iwm_softc *sc)
6317{
6318        struct ieee80211com *ic = &sc->sc_ic;
6319
6320	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6321	    "->%s begin\n", __func__);
6322        ieee80211_radiotap_attach(ic,
6323            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6324                IWM_TX_RADIOTAP_PRESENT,
6325            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6326                IWM_RX_RADIOTAP_PRESENT);
6327	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6328	    "->%s end\n", __func__);
6329}
6330
/*
 * net80211 vap-creation callback.  The driver supports a single vap at a
 * time; the driver's newstate handler is interposed in front of the
 * net80211 default before ieee80211_vap_attach() completes the setup.
 */
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct iwm_vap *ivp;
	struct ieee80211vap *vap;

	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
		return NULL;
	/* M_WAITOK: allocation cannot fail, no NULL check needed. */
	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &ivp->iv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	vap->iv_bmissthreshold = 10;            /* override default */
	/* Override with driver methods. */
	ivp->iv_newstate = vap->iv_newstate;	/* chain to net80211's handler */
	vap->iv_newstate = iwm_newstate;

	ivp->id = IWM_DEFAULT_MACID;
	ivp->color = IWM_DEFAULT_COLOR;

	/* WME parameters arrive later via iwm_wme_update(). */
	ivp->have_wme = FALSE;

	ieee80211_ratectl_init(vap);
	/* Complete setup. */
	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
	    mac);
	ic->ic_opmode = opmode;

	return vap;
}
6363
6364static void
6365iwm_vap_delete(struct ieee80211vap *vap)
6366{
6367	struct iwm_vap *ivp = IWM_VAP(vap);
6368
6369	ieee80211_ratectl_deinit(vap);
6370	ieee80211_vap_detach(vap);
6371	free(ivp, M_80211_VAP);
6372}
6373
6374static void
6375iwm_xmit_queue_drain(struct iwm_softc *sc)
6376{
6377	struct mbuf *m;
6378	struct ieee80211_node *ni;
6379
6380	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6381		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6382		ieee80211_free_node(ni);
6383		m_freem(m);
6384	}
6385}
6386
/*
 * net80211 scan-start callback: kick off a firmware-driven scan, using
 * the UMAC scan API when the ucode advertises it, else the LMAC API.
 * On failure the scan is cancelled back into net80211; on success the
 * SCAN_RUNNING flag is set and the LED blink is started.
 */
static void
iwm_scan_start(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/* This should not be possible */
		device_printf(sc->sc_dev,
		    "%s: Previous scan not completed yet\n", __func__);
		/* NOTE(review): only warns; a new scan is still issued. */
	}
	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
		error = iwm_mvm_umac_scan(sc);
	else
		error = iwm_mvm_lmac_scan(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not initiate scan\n");
		/* Drop the driver lock before calling back into net80211. */
		IWM_UNLOCK(sc);
		ieee80211_cancel_scan(vap);
	} else {
		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
		iwm_led_blink_start(sc);
		IWM_UNLOCK(sc);
	}
}
6414
/*
 * net80211 scan-end callback: stop the LED blink (re-enabling the solid
 * LED if we are associated), tell the firmware to stop scanning if a
 * scan is still marked running, and cancel any queued end-of-scan task
 * so it cannot report completion of a scan we have already superseded.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6443
/* net80211 multicast-filter update callback; intentionally a no-op. */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6448
/* net80211 set-channel callback; intentionally a no-op. */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6453
/* net80211 per-channel scan callback; intentionally a no-op. */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6458
/*
 * net80211 minimum-dwell callback; intentionally a no-op, like the other
 * empty scan callbacks above.  (The original's redundant bare "return;"
 * was dropped for consistency with its siblings.)
 */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6464
/*
 * (Re)initialization task: serializes with other init/stop sequences
 * via the IWM_FLAG_BUSY flag, stops the device, and restarts it only if
 * net80211 still has a running interface.
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Sleep until any concurrent init/stop holder clears BUSY. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	/* Wake anyone waiting on the BUSY flag above. */
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6481
6482static int
6483iwm_resume(device_t dev)
6484{
6485	struct iwm_softc *sc = device_get_softc(dev);
6486	int do_reinit = 0;
6487
6488	/*
6489	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6490	 * PCI Tx retries from interfering with C3 CPU state.
6491	 */
6492	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6493	iwm_init_task(device_get_softc(dev));
6494
6495	IWM_LOCK(sc);
6496	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6497		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6498		do_reinit = 1;
6499	}
6500	IWM_UNLOCK(sc);
6501
6502	if (do_reinit)
6503		ieee80211_resume_all(&sc->sc_ic);
6504
6505	return 0;
6506}
6507
6508static int
6509iwm_suspend(device_t dev)
6510{
6511	int do_stop = 0;
6512	struct iwm_softc *sc = device_get_softc(dev);
6513
6514	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6515
6516	ieee80211_suspend_all(&sc->sc_ic);
6517
6518	if (do_stop) {
6519		IWM_LOCK(sc);
6520		iwm_stop(sc);
6521		sc->sc_flags |= IWM_FLAG_SCANNING;
6522		IWM_UNLOCK(sc);
6523	}
6524
6525	return (0);
6526}
6527
/*
 * Common teardown for both failed attach and real detach.
 *
 * do_net80211 selects whether net80211 state is torn down too: it is 0
 * when called from the iwm_attach()/iwm_preinit() failure paths (before
 * or instead of ieee80211_ifattach) and 1 from iwm_detach().  Safe to
 * call more than once: the sc_attached flag makes repeat calls no-ops.
 * Always returns 0.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Guard against double teardown (attach failure then detach). */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Stop timers and the device before freeing anything they use. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	/* Lock goes last: everything above may still take it. */
	IWM_LOCK_DESTROY(sc);

	return (0);
}
6586
6587static int
6588iwm_detach(device_t dev)
6589{
6590	struct iwm_softc *sc = device_get_softc(dev);
6591
6592	return (iwm_detach_local(sc, 1));
6593}
6594
6595static device_method_t iwm_pci_methods[] = {
6596        /* Device interface */
6597        DEVMETHOD(device_probe,         iwm_probe),
6598        DEVMETHOD(device_attach,        iwm_attach),
6599        DEVMETHOD(device_detach,        iwm_detach),
6600        DEVMETHOD(device_suspend,       iwm_suspend),
6601        DEVMETHOD(device_resume,        iwm_resume),
6602
6603        DEVMETHOD_END
6604};
6605
6606static driver_t iwm_pci_driver = {
6607        "iwm",
6608        iwm_pci_methods,
6609        sizeof (struct iwm_softc)
6610};
6611
6612static devclass_t iwm_devclass;
6613
6614DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6615MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6616MODULE_DEPEND(iwm, pci, 1, 1, 1);
6617MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6618