if_iwm.c revision 330225
1/*	$OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $	*/
2
3/*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
23 *
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26 *
27 ***********************************************************************
28 *
29 * This file is provided under a dual BSD/GPLv2 license.  When using or
30 * redistributing this file, you may do so under either license.
31 *
32 * GPL LICENSE SUMMARY
33 *
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43 * General Public License for more details.
44 *
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
49 *
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
52 *
53 * Contact Information:
54 *  Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56 *
57 *
58 * BSD LICENSE
59 *
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 *
67 *  * Redistributions of source code must retain the above copyright
68 *    notice, this list of conditions and the following disclaimer.
69 *  * Redistributions in binary form must reproduce the above copyright
70 *    notice, this list of conditions and the following disclaimer in
71 *    the documentation and/or other materials provided with the
72 *    distribution.
73 *  * Neither the name Intel Corporation nor the names of its
74 *    contributors may be used to endorse or promote products derived
75 *    from this software without specific prior written permission.
76 *
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88 */
89
90/*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92 *
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
96 *
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104 */
105#include <sys/cdefs.h>
106__FBSDID("$FreeBSD: stable/11/sys/dev/iwm/if_iwm.c 330225 2018-03-01 06:56:34Z eadler $");
107
108#include "opt_wlan.h"
109
110#include <sys/param.h>
111#include <sys/bus.h>
112#include <sys/conf.h>
113#include <sys/endian.h>
114#include <sys/firmware.h>
115#include <sys/kernel.h>
116#include <sys/malloc.h>
117#include <sys/mbuf.h>
118#include <sys/mutex.h>
119#include <sys/module.h>
120#include <sys/proc.h>
121#include <sys/rman.h>
122#include <sys/socket.h>
123#include <sys/sockio.h>
124#include <sys/sysctl.h>
125#include <sys/linker.h>
126
127#include <machine/bus.h>
128#include <machine/endian.h>
129#include <machine/resource.h>
130
131#include <dev/pci/pcivar.h>
132#include <dev/pci/pcireg.h>
133
134#include <net/bpf.h>
135
136#include <net/if.h>
137#include <net/if_var.h>
138#include <net/if_arp.h>
139#include <net/if_dl.h>
140#include <net/if_media.h>
141#include <net/if_types.h>
142
143#include <netinet/in.h>
144#include <netinet/in_systm.h>
145#include <netinet/if_ether.h>
146#include <netinet/ip.h>
147
148#include <net80211/ieee80211_var.h>
149#include <net80211/ieee80211_regdomain.h>
150#include <net80211/ieee80211_ratectl.h>
151#include <net80211/ieee80211_radiotap.h>
152
153#include <dev/iwm/if_iwmreg.h>
154#include <dev/iwm/if_iwmvar.h>
155#include <dev/iwm/if_iwm_config.h>
156#include <dev/iwm/if_iwm_debug.h>
157#include <dev/iwm/if_iwm_notif_wait.h>
158#include <dev/iwm/if_iwm_util.h>
159#include <dev/iwm/if_iwm_binding.h>
160#include <dev/iwm/if_iwm_phy_db.h>
161#include <dev/iwm/if_iwm_mac_ctxt.h>
162#include <dev/iwm/if_iwm_phy_ctxt.h>
163#include <dev/iwm/if_iwm_time_event.h>
164#include <dev/iwm/if_iwm_power.h>
165#include <dev/iwm/if_iwm_scan.h>
166#include <dev/iwm/if_iwm_sta.h>
167
168#include <dev/iwm/if_iwm_pcie_trans.h>
169#include <dev/iwm/if_iwm_led.h>
170#include <dev/iwm/if_iwm_fw.h>
171
172/* From DragonflyBSD */
173#define mtodoff(m, t, off)      ((t)((m)->m_data + (off)))
174
/*
 * Table of channel numbers supported by pre-8000-family devices.
 * The 8000 family uses the larger iwm_nvm_channels_8000 table below.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Compile-time guard: the channel count must fit the driver's limit. */
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
    "IWM_NUM_CHANNELS is too small");
185
/* Table of channel numbers supported by 8000-family devices. */
const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
/* Compile-time guard: the channel count must fit the driver's limit. */
_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
    "IWM_NUM_CHANNELS_8000 is too small");
196
197#define IWM_NUM_2GHZ_CHANNELS	14
198#define IWM_N_HW_ADDR_MASK	0xF
199
/*
 * XXX For now, there's simply a fixed set of rate table entries
 * that are populated.
 */
const struct iwm_rate {
	uint8_t rate;	/* net80211 rate, in units of 500 kbit/s (2 == 1M) */
	uint8_t plcp;	/* matching hardware PLCP signal value */
} iwm_rates[] = {
	/* CCK (802.11b) rates first, then OFDM (802.11a/g). */
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
/* Indices into iwm_rates[]: CCK entries come first, OFDM starts at 4. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
226
/* One NVM section read from the device: raw payload plus its length. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	uint8_t *data;		/* section contents (driver-allocated) */
};
231
/* How long to wait for firmware "alive" and calibration notifications. */
#define IWM_MVM_UCODE_ALIVE_TIMEOUT	hz	/* 1 second */
#define IWM_MVM_UCODE_CALIB_TIMEOUT	(2*hz)	/* 2 seconds */

/* State captured from the firmware's "alive" notification. */
struct iwm_mvm_alive_data {
	int valid;		/* nonzero once a valid alive was seen */
	uint32_t scd_base_addr;	/* scheduler (SCD) base address from fw */
};
239
240static int	iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
241static int	iwm_firmware_store_section(struct iwm_softc *,
242                                           enum iwm_ucode_type,
243                                           const uint8_t *, size_t);
244static int	iwm_set_default_calib(struct iwm_softc *, const void *);
245static void	iwm_fw_info_free(struct iwm_fw_info *);
246static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
247static int	iwm_alloc_fwmem(struct iwm_softc *);
248static int	iwm_alloc_sched(struct iwm_softc *);
249static int	iwm_alloc_kw(struct iwm_softc *);
250static int	iwm_alloc_ict(struct iwm_softc *);
251static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
252static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
253static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
255                                  int);
256static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
257static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
258static void	iwm_enable_interrupts(struct iwm_softc *);
259static void	iwm_restore_interrupts(struct iwm_softc *);
260static void	iwm_disable_interrupts(struct iwm_softc *);
261static void	iwm_ict_reset(struct iwm_softc *);
262static int	iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
263static void	iwm_stop_device(struct iwm_softc *);
264static void	iwm_mvm_nic_config(struct iwm_softc *);
265static int	iwm_nic_rx_init(struct iwm_softc *);
266static int	iwm_nic_tx_init(struct iwm_softc *);
267static int	iwm_nic_init(struct iwm_softc *);
268static int	iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
269static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
270                                   uint16_t, uint8_t *, uint16_t *);
271static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
272				     uint16_t *, uint32_t);
273static uint32_t	iwm_eeprom_channel_flags(uint16_t);
274static void	iwm_add_channel_band(struct iwm_softc *,
275		    struct ieee80211_channel[], int, int *, int, size_t,
276		    const uint8_t[]);
277static void	iwm_init_channel_map(struct ieee80211com *, int, int *,
278		    struct ieee80211_channel[]);
279static struct iwm_nvm_data *
280	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
281			   const uint16_t *, const uint16_t *,
282			   const uint16_t *, const uint16_t *,
283			   const uint16_t *);
284static void	iwm_free_nvm_data(struct iwm_nvm_data *);
285static void	iwm_set_hw_address_family_8000(struct iwm_softc *,
286					       struct iwm_nvm_data *,
287					       const uint16_t *,
288					       const uint16_t *);
289static int	iwm_get_sku(const struct iwm_softc *, const uint16_t *,
290			    const uint16_t *);
291static int	iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
292static int	iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
293				  const uint16_t *);
294static int	iwm_get_n_hw_addrs(const struct iwm_softc *,
295				   const uint16_t *);
296static void	iwm_set_radio_cfg(const struct iwm_softc *,
297				  struct iwm_nvm_data *, uint32_t);
298static struct iwm_nvm_data *
299	iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
300static int	iwm_nvm_init(struct iwm_softc *);
301static int	iwm_pcie_load_section(struct iwm_softc *, uint8_t,
302				      const struct iwm_fw_desc *);
303static int	iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
304					     bus_addr_t, uint32_t);
305static int	iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
306						const struct iwm_fw_sects *,
307						int, int *);
308static int	iwm_pcie_load_cpu_sections(struct iwm_softc *,
309					   const struct iwm_fw_sects *,
310					   int, int *);
311static int	iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
312					       const struct iwm_fw_sects *);
313static int	iwm_pcie_load_given_ucode(struct iwm_softc *,
314					  const struct iwm_fw_sects *);
315static int	iwm_start_fw(struct iwm_softc *, const struct iwm_fw_sects *);
316static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
317static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
318static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
319                                              enum iwm_ucode_type);
320static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
321static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
322static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
323					    struct iwm_rx_phy_info *);
324static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
325                                      struct iwm_rx_packet *);
326static int	iwm_get_noise(struct iwm_softc *,
327		    const struct iwm_mvm_statistics_rx_non_phy *);
328static void	iwm_mvm_handle_rx_statistics(struct iwm_softc *,
329		    struct iwm_rx_packet *);
330static boolean_t iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct mbuf *,
331				    uint32_t, boolean_t);
332static int	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
333                                         struct iwm_rx_packet *,
334				         struct iwm_node *);
335static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
336static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
337#if 0
338static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
339                                 uint16_t);
340#endif
341static const struct iwm_rate *
342	iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
343			struct mbuf *, struct iwm_tx_cmd *);
344static int	iwm_tx(struct iwm_softc *, struct mbuf *,
345                       struct ieee80211_node *, int);
346static int	iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
347			     const struct ieee80211_bpf_params *);
348static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_vap *);
349static int	iwm_auth(struct ieee80211vap *, struct iwm_softc *);
350static int	iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
351static int	iwm_release(struct iwm_softc *, struct iwm_node *);
352static struct ieee80211_node *
353		iwm_node_alloc(struct ieee80211vap *,
354		               const uint8_t[IEEE80211_ADDR_LEN]);
355static void	iwm_setrates(struct iwm_softc *, struct iwm_node *);
356static int	iwm_media_change(struct ifnet *);
357static int	iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
358static void	iwm_endscan_cb(void *, int);
359static void	iwm_mvm_fill_sf_command(struct iwm_softc *,
360					struct iwm_sf_cfg_cmd *,
361					struct ieee80211_node *);
362static int	iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
363static int	iwm_send_bt_init_conf(struct iwm_softc *);
364static boolean_t iwm_mvm_is_lar_supported(struct iwm_softc *);
365static boolean_t iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *);
366static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
367static void	iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
368static int	iwm_init_hw(struct iwm_softc *);
369static void	iwm_init(struct iwm_softc *);
370static void	iwm_start(struct iwm_softc *);
371static void	iwm_stop(struct iwm_softc *);
372static void	iwm_watchdog(void *);
373static void	iwm_parent(struct ieee80211com *);
374#ifdef IWM_DEBUG
375static const char *
376		iwm_desc_lookup(uint32_t);
377static void	iwm_nic_error(struct iwm_softc *);
378static void	iwm_nic_umac_error(struct iwm_softc *);
379#endif
380static void	iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
381static void	iwm_notif_intr(struct iwm_softc *);
382static void	iwm_intr(void *);
383static int	iwm_attach(device_t);
384static int	iwm_is_valid_ether_addr(uint8_t *);
385static void	iwm_preinit(void *);
386static int	iwm_detach_local(struct iwm_softc *sc, int);
387static void	iwm_init_task(void *);
388static void	iwm_radiotap_attach(struct iwm_softc *);
389static struct ieee80211vap *
390		iwm_vap_create(struct ieee80211com *,
391		               const char [IFNAMSIZ], int,
392		               enum ieee80211_opmode, int,
393		               const uint8_t [IEEE80211_ADDR_LEN],
394		               const uint8_t [IEEE80211_ADDR_LEN]);
395static void	iwm_vap_delete(struct ieee80211vap *);
396static void	iwm_xmit_queue_drain(struct iwm_softc *);
397static void	iwm_scan_start(struct ieee80211com *);
398static void	iwm_scan_end(struct ieee80211com *);
399static void	iwm_update_mcast(struct ieee80211com *);
400static void	iwm_set_channel(struct ieee80211com *);
401static void	iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
402static void	iwm_scan_mindwell(struct ieee80211_scan_state *);
403static int	iwm_detach(device_t);
404
405static int	iwm_lar_disable = 0;
406TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
407
408/*
409 * Firmware parser.
410 */
411
412static int
413iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
414{
415	const struct iwm_fw_cscheme_list *l = (const void *)data;
416
417	if (dlen < sizeof(*l) ||
418	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
419		return EINVAL;
420
421	/* we don't actually store anything for now, always use s/w crypto */
422
423	return 0;
424}
425
426static int
427iwm_firmware_store_section(struct iwm_softc *sc,
428    enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
429{
430	struct iwm_fw_sects *fws;
431	struct iwm_fw_desc *fwone;
432
433	if (type >= IWM_UCODE_TYPE_MAX)
434		return EINVAL;
435	if (dlen < sizeof(uint32_t))
436		return EINVAL;
437
438	fws = &sc->sc_fw.fw_sects[type];
439	if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
440		return EINVAL;
441
442	fwone = &fws->fw_sect[fws->fw_count];
443
444	/* first 32bit are device load offset */
445	memcpy(&fwone->offset, data, sizeof(uint32_t));
446
447	/* rest is data */
448	fwone->data = data + sizeof(uint32_t);
449	fwone->len = dlen - sizeof(uint32_t);
450
451	fws->fw_count++;
452
453	return 0;
454}
455
456#define IWM_DEFAULT_SCAN_CHANNELS 40
457
/* iwlwifi: iwl-drv.c */
/* Wire format of an IWM_UCODE_TLV_DEF_CALIB section payload. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* image the calibration applies to */
	struct iwm_tlv_calib_ctrl calib; /* default calibration triggers */
} __packed;
463
464static int
465iwm_set_default_calib(struct iwm_softc *sc, const void *data)
466{
467	const struct iwm_tlv_calib_data *def_calib = data;
468	uint32_t ucode_type = le32toh(def_calib->ucode_type);
469
470	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
471		device_printf(sc->sc_dev,
472		    "Wrong ucode_type %u for default "
473		    "calibration.\n", ucode_type);
474		return EINVAL;
475	}
476
477	sc->sc_default_calib[ucode_type].flow_trigger =
478	    def_calib->calib.flow_trigger;
479	sc->sc_default_calib[ucode_type].event_trigger =
480	    def_calib->calib.event_trigger;
481
482	return 0;
483}
484
485static int
486iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
487			struct iwm_ucode_capabilities *capa)
488{
489	const struct iwm_ucode_api *ucode_api = (const void *)data;
490	uint32_t api_index = le32toh(ucode_api->api_index);
491	uint32_t api_flags = le32toh(ucode_api->api_flags);
492	int i;
493
494	if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
495		device_printf(sc->sc_dev,
496		    "api flags index %d larger than supported by driver\n",
497		    api_index);
498		/* don't return an error so we can load FW that has more bits */
499		return 0;
500	}
501
502	for (i = 0; i < 32; i++) {
503		if (api_flags & (1U << i))
504			setbit(capa->enabled_api, i + 32 * api_index);
505	}
506
507	return 0;
508}
509
510static int
511iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
512			   struct iwm_ucode_capabilities *capa)
513{
514	const struct iwm_ucode_capa *ucode_capa = (const void *)data;
515	uint32_t api_index = le32toh(ucode_capa->api_index);
516	uint32_t api_flags = le32toh(ucode_capa->api_capa);
517	int i;
518
519	if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
520		device_printf(sc->sc_dev,
521		    "capa flags index %d larger than supported by driver\n",
522		    api_index);
523		/* don't return an error so we can load FW that has more bits */
524		return 0;
525	}
526
527	for (i = 0; i < 32; i++) {
528		if (api_flags & (1U << i))
529			setbit(capa->enabled_capa, i + 32 * api_index);
530	}
531
532	return 0;
533}
534
/*
 * Release the firmware(9) image reference and forget all parsed
 * sections.  fw_status is deliberately left untouched so that the
 * load/parse state machine in iwm_read_firmware() stays consistent.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
	fw->fw_fp = NULL;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
543
544static int
545iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
546{
547	struct iwm_fw_info *fw = &sc->sc_fw;
548	const struct iwm_tlv_ucode_header *uhdr;
549	const struct iwm_ucode_tlv *tlv;
550	struct iwm_ucode_capabilities *capa = &sc->ucode_capa;
551	enum iwm_ucode_tlv_type tlv_type;
552	const struct firmware *fwp;
553	const uint8_t *data;
554	uint32_t tlv_len;
555	uint32_t usniffer_img;
556	const uint8_t *tlv_data;
557	uint32_t paging_mem_size;
558	int num_of_cpus;
559	int error = 0;
560	size_t len;
561
562	if (fw->fw_status == IWM_FW_STATUS_DONE &&
563	    ucode_type != IWM_UCODE_INIT)
564		return 0;
565
566	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
567		msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
568	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
569
570	if (fw->fw_fp != NULL)
571		iwm_fw_info_free(fw);
572
573	/*
574	 * Load firmware into driver memory.
575	 * fw_fp will be set.
576	 */
577	IWM_UNLOCK(sc);
578	fwp = firmware_get(sc->cfg->fw_name);
579	IWM_LOCK(sc);
580	if (fwp == NULL) {
581		device_printf(sc->sc_dev,
582		    "could not read firmware %s (error %d)\n",
583		    sc->cfg->fw_name, error);
584		goto out;
585	}
586	fw->fw_fp = fwp;
587
588	/* (Re-)Initialize default values. */
589	capa->flags = 0;
590	capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
591	capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
592	memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
593	memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
594	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
595
596	/*
597	 * Parse firmware contents
598	 */
599
600	uhdr = (const void *)fw->fw_fp->data;
601	if (*(const uint32_t *)fw->fw_fp->data != 0
602	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
603		device_printf(sc->sc_dev, "invalid firmware %s\n",
604		    sc->cfg->fw_name);
605		error = EINVAL;
606		goto out;
607	}
608
609	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
610	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
611	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
612	    IWM_UCODE_API(le32toh(uhdr->ver)));
613	data = uhdr->data;
614	len = fw->fw_fp->datasize - sizeof(*uhdr);
615
616	while (len >= sizeof(*tlv)) {
617		len -= sizeof(*tlv);
618		tlv = (const void *)data;
619
620		tlv_len = le32toh(tlv->length);
621		tlv_type = le32toh(tlv->type);
622		tlv_data = tlv->data;
623
624		if (len < tlv_len) {
625			device_printf(sc->sc_dev,
626			    "firmware too short: %zu bytes\n",
627			    len);
628			error = EINVAL;
629			goto parse_out;
630		}
631		len -= roundup2(tlv_len, 4);
632		data += sizeof(tlv) + roundup2(tlv_len, 4);
633
634		switch ((int)tlv_type) {
635		case IWM_UCODE_TLV_PROBE_MAX_LEN:
636			if (tlv_len != sizeof(uint32_t)) {
637				device_printf(sc->sc_dev,
638				    "%s: PROBE_MAX_LEN (%d) != sizeof(uint32_t)\n",
639				    __func__,
640				    (int) tlv_len);
641				error = EINVAL;
642				goto parse_out;
643			}
644			capa->max_probe_length =
645			    le32_to_cpup((const uint32_t *)tlv_data);
646			/* limit it to something sensible */
647			if (capa->max_probe_length >
648			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
649				IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
650				    "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
651				    "ridiculous\n", __func__);
652				error = EINVAL;
653				goto parse_out;
654			}
655			break;
656		case IWM_UCODE_TLV_PAN:
657			if (tlv_len) {
658				device_printf(sc->sc_dev,
659				    "%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
660				    __func__,
661				    (int) tlv_len);
662				error = EINVAL;
663				goto parse_out;
664			}
665			capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
666			break;
667		case IWM_UCODE_TLV_FLAGS:
668			if (tlv_len < sizeof(uint32_t)) {
669				device_printf(sc->sc_dev,
670				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
671				    __func__,
672				    (int) tlv_len);
673				error = EINVAL;
674				goto parse_out;
675			}
676			if (tlv_len % sizeof(uint32_t)) {
677				device_printf(sc->sc_dev,
678				    "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) %% sizeof(uint32_t)\n",
679				    __func__,
680				    (int) tlv_len);
681				error = EINVAL;
682				goto parse_out;
683			}
684			/*
685			 * Apparently there can be many flags, but Linux driver
686			 * parses only the first one, and so do we.
687			 *
688			 * XXX: why does this override IWM_UCODE_TLV_PAN?
689			 * Intentional or a bug?  Observations from
690			 * current firmware file:
691			 *  1) TLV_PAN is parsed first
692			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
693			 * ==> this resets TLV_PAN to itself... hnnnk
694			 */
695			capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
696			break;
697		case IWM_UCODE_TLV_CSCHEME:
698			if ((error = iwm_store_cscheme(sc,
699			    tlv_data, tlv_len)) != 0) {
700				device_printf(sc->sc_dev,
701				    "%s: iwm_store_cscheme(): returned %d\n",
702				    __func__,
703				    error);
704				goto parse_out;
705			}
706			break;
707		case IWM_UCODE_TLV_NUM_OF_CPU:
708			if (tlv_len != sizeof(uint32_t)) {
709				device_printf(sc->sc_dev,
710				    "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) != sizeof(uint32_t)\n",
711				    __func__,
712				    (int) tlv_len);
713				error = EINVAL;
714				goto parse_out;
715			}
716			num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
717			if (num_of_cpus == 2) {
718				fw->fw_sects[IWM_UCODE_REGULAR].is_dual_cpus =
719					TRUE;
720				fw->fw_sects[IWM_UCODE_INIT].is_dual_cpus =
721					TRUE;
722				fw->fw_sects[IWM_UCODE_WOWLAN].is_dual_cpus =
723					TRUE;
724			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
725				device_printf(sc->sc_dev,
726				    "%s: Driver supports only 1 or 2 CPUs\n",
727				    __func__);
728				error = EINVAL;
729				goto parse_out;
730			}
731			break;
732		case IWM_UCODE_TLV_SEC_RT:
733			if ((error = iwm_firmware_store_section(sc,
734			    IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
735				device_printf(sc->sc_dev,
736				    "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
737				    __func__,
738				    error);
739				goto parse_out;
740			}
741			break;
742		case IWM_UCODE_TLV_SEC_INIT:
743			if ((error = iwm_firmware_store_section(sc,
744			    IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
745				device_printf(sc->sc_dev,
746				    "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
747				    __func__,
748				    error);
749				goto parse_out;
750			}
751			break;
752		case IWM_UCODE_TLV_SEC_WOWLAN:
753			if ((error = iwm_firmware_store_section(sc,
754			    IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
755				device_printf(sc->sc_dev,
756				    "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
757				    __func__,
758				    error);
759				goto parse_out;
760			}
761			break;
762		case IWM_UCODE_TLV_DEF_CALIB:
763			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
764				device_printf(sc->sc_dev,
765				    "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
766				    __func__,
767				    (int) tlv_len,
768				    (int) sizeof(struct iwm_tlv_calib_data));
769				error = EINVAL;
770				goto parse_out;
771			}
772			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
773				device_printf(sc->sc_dev,
774				    "%s: iwm_set_default_calib() failed: %d\n",
775				    __func__,
776				    error);
777				goto parse_out;
778			}
779			break;
780		case IWM_UCODE_TLV_PHY_SKU:
781			if (tlv_len != sizeof(uint32_t)) {
782				error = EINVAL;
783				device_printf(sc->sc_dev,
784				    "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
785				    __func__,
786				    (int) tlv_len);
787				goto parse_out;
788			}
789			sc->sc_fw.phy_config =
790			    le32_to_cpup((const uint32_t *)tlv_data);
791			sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
792						  IWM_FW_PHY_CFG_TX_CHAIN) >>
793						  IWM_FW_PHY_CFG_TX_CHAIN_POS;
794			sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
795						  IWM_FW_PHY_CFG_RX_CHAIN) >>
796						  IWM_FW_PHY_CFG_RX_CHAIN_POS;
797			break;
798
799		case IWM_UCODE_TLV_API_CHANGES_SET: {
800			if (tlv_len != sizeof(struct iwm_ucode_api)) {
801				error = EINVAL;
802				goto parse_out;
803			}
804			if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
805				error = EINVAL;
806				goto parse_out;
807			}
808			break;
809		}
810
811		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
812			if (tlv_len != sizeof(struct iwm_ucode_capa)) {
813				error = EINVAL;
814				goto parse_out;
815			}
816			if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
817				error = EINVAL;
818				goto parse_out;
819			}
820			break;
821		}
822
823		case 48: /* undocumented TLV */
824		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
825		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
826			/* ignore, not used by current driver */
827			break;
828
829		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
830			if ((error = iwm_firmware_store_section(sc,
831			    IWM_UCODE_REGULAR_USNIFFER, tlv_data,
832			    tlv_len)) != 0)
833				goto parse_out;
834			break;
835
836		case IWM_UCODE_TLV_PAGING:
837			if (tlv_len != sizeof(uint32_t)) {
838				error = EINVAL;
839				goto parse_out;
840			}
841			paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
842
843			IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
844			    "%s: Paging: paging enabled (size = %u bytes)\n",
845			    __func__, paging_mem_size);
846			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
847				device_printf(sc->sc_dev,
848					"%s: Paging: driver supports up to %u bytes for paging image\n",
849					__func__, IWM_MAX_PAGING_IMAGE_SIZE);
850				error = EINVAL;
851				goto out;
852			}
853			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
854				device_printf(sc->sc_dev,
855				    "%s: Paging: image isn't multiple %u\n",
856				    __func__, IWM_FW_PAGING_SIZE);
857				error = EINVAL;
858				goto out;
859			}
860
861			sc->sc_fw.fw_sects[IWM_UCODE_REGULAR].paging_mem_size =
862			    paging_mem_size;
863			usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
864			sc->sc_fw.fw_sects[usniffer_img].paging_mem_size =
865			    paging_mem_size;
866			break;
867
868		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
869			if (tlv_len != sizeof(uint32_t)) {
870				error = EINVAL;
871				goto parse_out;
872			}
873			capa->n_scan_channels =
874			    le32_to_cpup((const uint32_t *)tlv_data);
875			break;
876
877		case IWM_UCODE_TLV_FW_VERSION:
878			if (tlv_len != sizeof(uint32_t) * 3) {
879				error = EINVAL;
880				goto parse_out;
881			}
882			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
883			    "%d.%d.%d",
884			    le32toh(((const uint32_t *)tlv_data)[0]),
885			    le32toh(((const uint32_t *)tlv_data)[1]),
886			    le32toh(((const uint32_t *)tlv_data)[2]));
887			break;
888
889		case IWM_UCODE_TLV_FW_MEM_SEG:
890			break;
891
892		default:
893			device_printf(sc->sc_dev,
894			    "%s: unknown firmware section %d, abort\n",
895			    __func__, tlv_type);
896			error = EINVAL;
897			goto parse_out;
898		}
899	}
900
901	KASSERT(error == 0, ("unhandled error"));
902
903 parse_out:
904	if (error) {
905		device_printf(sc->sc_dev, "firmware parse error %d, "
906		    "section type %d\n", error, tlv_type);
907	}
908
909 out:
910	if (error) {
911		fw->fw_status = IWM_FW_STATUS_NONE;
912		if (fw->fw_fp != NULL)
913			iwm_fw_info_free(fw);
914	} else
915		fw->fw_status = IWM_FW_STATUS_DONE;
916	wakeup(&sc->sc_fw);
917
918	return error;
919}
920
921/*
922 * DMA resource routines
923 */
924
925/* fwmem is used to load firmware onto the card */
926static int
927iwm_alloc_fwmem(struct iwm_softc *sc)
928{
929	/* Must be aligned on a 16-byte boundary. */
930	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
931	    IWM_FH_MEM_TB_MAX_LENGTH, 16);
932}
933
934/* tx scheduler rings.  not used? */
935static int
936iwm_alloc_sched(struct iwm_softc *sc)
937{
938	/* TX scheduler rings must be aligned on a 1KB boundary. */
939	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
940	    nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
941}
942
943/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
944static int
945iwm_alloc_kw(struct iwm_softc *sc)
946{
947	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
948}
949
950/* interrupt cause table */
951static int
952iwm_alloc_ict(struct iwm_softc *sc)
953{
954	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
955	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
956}
957
958static int
959iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
960{
961	bus_size_t size;
962	int i, error;
963
964	ring->cur = 0;
965
966	/* Allocate RX descriptors (256-byte aligned). */
967	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
968	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
969	if (error != 0) {
970		device_printf(sc->sc_dev,
971		    "could not allocate RX ring DMA memory\n");
972		goto fail;
973	}
974	ring->desc = ring->desc_dma.vaddr;
975
976	/* Allocate RX status area (16-byte aligned). */
977	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
978	    sizeof(*ring->stat), 16);
979	if (error != 0) {
980		device_printf(sc->sc_dev,
981		    "could not allocate RX status DMA memory\n");
982		goto fail;
983	}
984	ring->stat = ring->stat_dma.vaddr;
985
986        /* Create RX buffer DMA tag. */
987        error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
988            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
989            IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
990        if (error != 0) {
991                device_printf(sc->sc_dev,
992                    "%s: could not create RX buf DMA tag, error %d\n",
993                    __func__, error);
994                goto fail;
995        }
996
997	/* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
998	error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
999	if (error != 0) {
1000		device_printf(sc->sc_dev,
1001		    "%s: could not create RX buf DMA map, error %d\n",
1002		    __func__, error);
1003		goto fail;
1004	}
1005	/*
1006	 * Allocate and map RX buffers.
1007	 */
1008	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1009		struct iwm_rx_data *data = &ring->data[i];
1010		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1011		if (error != 0) {
1012			device_printf(sc->sc_dev,
1013			    "%s: could not create RX buf DMA map, error %d\n",
1014			    __func__, error);
1015			goto fail;
1016		}
1017		data->m = NULL;
1018
1019		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1020			goto fail;
1021		}
1022	}
1023	return 0;
1024
1025fail:	iwm_free_rx_ring(sc, ring);
1026	return error;
1027}
1028
1029static void
1030iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1031{
1032	/* Reset the ring state */
1033	ring->cur = 0;
1034
1035	/*
1036	 * The hw rx ring index in shared memory must also be cleared,
1037	 * otherwise the discrepancy can cause reprocessing chaos.
1038	 */
1039	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1040}
1041
1042static void
1043iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1044{
1045	int i;
1046
1047	iwm_dma_contig_free(&ring->desc_dma);
1048	iwm_dma_contig_free(&ring->stat_dma);
1049
1050	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1051		struct iwm_rx_data *data = &ring->data[i];
1052
1053		if (data->m != NULL) {
1054			bus_dmamap_sync(ring->data_dmat, data->map,
1055			    BUS_DMASYNC_POSTREAD);
1056			bus_dmamap_unload(ring->data_dmat, data->map);
1057			m_freem(data->m);
1058			data->m = NULL;
1059		}
1060		if (data->map != NULL) {
1061			bus_dmamap_destroy(ring->data_dmat, data->map);
1062			data->map = NULL;
1063		}
1064	}
1065	if (ring->spare_map != NULL) {
1066		bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1067		ring->spare_map = NULL;
1068	}
1069	if (ring->data_dmat != NULL) {
1070		bus_dma_tag_destroy(ring->data_dmat);
1071		ring->data_dmat = NULL;
1072	}
1073}
1074
1075static int
1076iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1077{
1078	bus_addr_t paddr;
1079	bus_size_t size;
1080	size_t maxsize;
1081	int nsegments;
1082	int i, error;
1083
1084	ring->qid = qid;
1085	ring->queued = 0;
1086	ring->cur = 0;
1087
1088	/* Allocate TX descriptors (256-byte aligned). */
1089	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1090	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1091	if (error != 0) {
1092		device_printf(sc->sc_dev,
1093		    "could not allocate TX ring DMA memory\n");
1094		goto fail;
1095	}
1096	ring->desc = ring->desc_dma.vaddr;
1097
1098	/*
1099	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1100	 * to allocate commands space for other rings.
1101	 */
1102	if (qid > IWM_MVM_CMD_QUEUE)
1103		return 0;
1104
1105	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1106	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1107	if (error != 0) {
1108		device_printf(sc->sc_dev,
1109		    "could not allocate TX cmd DMA memory\n");
1110		goto fail;
1111	}
1112	ring->cmd = ring->cmd_dma.vaddr;
1113
1114	/* FW commands may require more mapped space than packets. */
1115	if (qid == IWM_MVM_CMD_QUEUE) {
1116		maxsize = IWM_RBUF_SIZE;
1117		nsegments = 1;
1118	} else {
1119		maxsize = MCLBYTES;
1120		nsegments = IWM_MAX_SCATTER - 2;
1121	}
1122
1123	error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1124	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1125            nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1126	if (error != 0) {
1127		device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1128		goto fail;
1129	}
1130
1131	paddr = ring->cmd_dma.paddr;
1132	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1133		struct iwm_tx_data *data = &ring->data[i];
1134
1135		data->cmd_paddr = paddr;
1136		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1137		    + offsetof(struct iwm_tx_cmd, scratch);
1138		paddr += sizeof(struct iwm_device_cmd);
1139
1140		error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1141		if (error != 0) {
1142			device_printf(sc->sc_dev,
1143			    "could not create TX buf DMA map\n");
1144			goto fail;
1145		}
1146	}
1147	KASSERT(paddr == ring->cmd_dma.paddr + size,
1148	    ("invalid physical address"));
1149	return 0;
1150
1151fail:	iwm_free_tx_ring(sc, ring);
1152	return error;
1153}
1154
1155static void
1156iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1157{
1158	int i;
1159
1160	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1161		struct iwm_tx_data *data = &ring->data[i];
1162
1163		if (data->m != NULL) {
1164			bus_dmamap_sync(ring->data_dmat, data->map,
1165			    BUS_DMASYNC_POSTWRITE);
1166			bus_dmamap_unload(ring->data_dmat, data->map);
1167			m_freem(data->m);
1168			data->m = NULL;
1169		}
1170	}
1171	/* Clear TX descriptors. */
1172	memset(ring->desc, 0, ring->desc_dma.size);
1173	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1174	    BUS_DMASYNC_PREWRITE);
1175	sc->qfullmsk &= ~(1 << ring->qid);
1176	ring->queued = 0;
1177	ring->cur = 0;
1178
1179	if (ring->qid == IWM_MVM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1180		iwm_pcie_clear_cmd_in_flight(sc);
1181}
1182
1183static void
1184iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1185{
1186	int i;
1187
1188	iwm_dma_contig_free(&ring->desc_dma);
1189	iwm_dma_contig_free(&ring->cmd_dma);
1190
1191	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1192		struct iwm_tx_data *data = &ring->data[i];
1193
1194		if (data->m != NULL) {
1195			bus_dmamap_sync(ring->data_dmat, data->map,
1196			    BUS_DMASYNC_POSTWRITE);
1197			bus_dmamap_unload(ring->data_dmat, data->map);
1198			m_freem(data->m);
1199			data->m = NULL;
1200		}
1201		if (data->map != NULL) {
1202			bus_dmamap_destroy(ring->data_dmat, data->map);
1203			data->map = NULL;
1204		}
1205	}
1206	if (ring->data_dmat != NULL) {
1207		bus_dma_tag_destroy(ring->data_dmat);
1208		ring->data_dmat = NULL;
1209	}
1210}
1211
1212/*
1213 * High-level hardware frobbing routines
1214 */
1215
1216static void
1217iwm_enable_interrupts(struct iwm_softc *sc)
1218{
1219	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1220	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1221}
1222
1223static void
1224iwm_restore_interrupts(struct iwm_softc *sc)
1225{
1226	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1227}
1228
1229static void
1230iwm_disable_interrupts(struct iwm_softc *sc)
1231{
1232	/* disable interrupts */
1233	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1234
1235	/* acknowledge all interrupts */
1236	IWM_WRITE(sc, IWM_CSR_INT, ~0);
1237	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1238}
1239
/*
 * Reinitialize the interrupt cause table (ICT) and switch the driver
 * into ICT interrupt mode.  Interrupts are masked for the duration of
 * the reset and re-enabled once the table address is programmed.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending from before the reset, then re-enable. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1263
1264/* iwlwifi pcie/trans.c */
1265
1266/*
1267 * Since this .. hard-resets things, it's time to actually
1268 * mark the first vap (if any) as having no mac context.
1269 * It's annoying, but since the driver is potentially being
1270 * stop/start'ed whilst active (thanks openbsd port!) we
1271 * have to correctly track this.
1272 */
/*
 * Bring the device to a full stop: mask interrupts, quiesce the TX
 * scheduler and DMA engines, reset all rings, power down busmaster
 * clocks (7000 family), stop the APM, and finally soft-reset the
 * on-board processor.  RF-kill interrupt delivery is re-armed at the
 * end so kill-switch changes are still observed while stopped.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	int chnl, qid;
	uint32_t mask = 0;	/* accumulated idle bits for each TX DMA channel */

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/*
	 * FreeBSD-local: mark the first vap as not-uploaded,
	 * so the next transition through auth/assoc
	 * will correctly populate the MAC context.
	 */
	if (vap) {
		struct iwm_vap *iv = IWM_VAP(vap);
		iv->phy_ctxt = NULL;
		iv->is_uploaded = 0;
	}

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		/* Stop each Tx DMA channel */
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
		}

		/* Wait for DMA channels to be idle */
		if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
		    5000)) {
			device_printf(sc->sc_dev,
			    "Failing on timeout while stopping DMA channel: [0x%08x]\n",
			    IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
		}
		iwm_nic_unlock(sc);
	}
	iwm_pcie_rx_stop(sc);

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1359
1360/* iwlwifi: mvm/ops.c */
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG with the MAC step/dash taken from
 * the hardware revision and the radio type/step/dash extracted from
 * the firmware's PHY configuration word.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;
	uint32_t phy_config = iwm_mvm_get_phy_config(sc);

	/* Unpack the radio configuration fields from the PHY config word. */
	radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
	    "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
1403
/*
 * Program the RX side of the flow handler: stop RX DMA, reset the
 * channel-0 pointers, hand the hardware the physical addresses of the
 * RX descriptor ring and status area, and re-enable RX DMA with the
 * desired buffer size and interrupt behaviour.  Returns EBUSY if the
 * NIC lock cannot be obtained.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* Stop Rx DMA */
	iwm_pcie_rx_stop(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* reset and flush pointers */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable Rx DMA
	 * XXX 5000 HW isn't supported by the iwm(4) driver.
	 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->cfg->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1467
/*
 * Program the TX side: deactivate the scheduler, hand the hardware the
 * keep-warm page and the physical address of every TX ring, then put
 * the scheduler into auto-active mode.  Returns EBUSY if the NIC lock
 * cannot be obtained.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < nitems(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
		    "%s: loading ring %d descriptors (%p) at %lx\n",
		    __func__,
		    qid, txq->desc,
		    (unsigned long) (txq->desc_dma.paddr >> 8));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1502
1503static int
1504iwm_nic_init(struct iwm_softc *sc)
1505{
1506	int error;
1507
1508	iwm_apm_init(sc);
1509	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1510		iwm_set_pwr(sc);
1511
1512	iwm_mvm_nic_config(sc);
1513
1514	if ((error = iwm_nic_rx_init(sc)) != 0)
1515		return error;
1516
1517	/*
1518	 * Ditto for TX, from iwn
1519	 */
1520	if ((error = iwm_nic_tx_init(sc)) != 0)
1521		return error;
1522
1523	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1524	    "%s: shadow registers enabled\n", __func__);
1525	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1526
1527	return 0;
1528}
1529
/*
 * Activate TX queue 'qid' and attach it to hardware FIFO 'fifo'.  The
 * command queue is configured directly via scheduler PRPH/SRAM writes;
 * all other queues are configured by sending an IWM_SCD_QUEUE_CFG
 * command to the firmware on behalf of station 'sta_id'.  Returns 0 on
 * success, EBUSY if the NIC lock cannot be (re)acquired, or the error
 * from the firmware command.
 */
int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		device_printf(sc->sc_dev,
		    "%s: cannot enable txq %d\n",
		    __func__,
		    qid);
		return EBUSY;
	}

	/* Initialize the queue's write pointer to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_MVM_CMD_QUEUE) {
		/* unactivate before configuration */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_nic_unlock(sc);

		/* Command queue never aggregates. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
		iwm_nic_unlock(sc);

		/* Clear the queue's scheduler context in SRAM. */
		iwm_write_mem32(sc, sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->scd_base_addr + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		if (!iwm_nic_lock(sc)) {
			device_printf(sc->sc_dev,
			    "%s: cannot enable txq %d\n", __func__, qid);
			return EBUSY;
		}
		/* Mark the queue active and bind it to the requested FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int error;

		iwm_nic_unlock(sc);

		/* Let the firmware configure non-command queues. */
		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
		    sizeof(cmd), &cmd);
		if (error) {
			device_printf(sc->sc_dev,
			    "cannot enable txq %d\n", qid);
			return error;
		}

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): ORing in the raw qid looks suspicious — if
	 * IWM_SCD_EN_CTRL is a per-queue enable bitmask this should
	 * probably be (1 << qid); confirm against the iwlwifi scheduler
	 * register definitions before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
	    __func__, qid, fifo);

	return 0;
}
1617
/*
 * Post-"alive" transport setup: reset the ICT, validate the scheduler
 * SRAM base address reported by firmware against the PRPH value, clear
 * the scheduler context/status/translation memory, program the
 * scheduler DRAM base, enable the command queue, and turn on the TX
 * DMA channels.  Returns 0 on success or EBUSY/an iwm_enable_txq()
 * error on failure.
 */
static int
iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
{
	int error, chnl;

	int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	iwm_ict_reset(sc);

	/* Firmware's idea of the scheduler base should match the PRPH. */
	sc->scd_base_addr = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (scd_base_addr != 0 &&
	    scd_base_addr != sc->scd_base_addr) {
		device_printf(sc->sc_dev,
		    "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
		    __func__, sc->scd_base_addr, scd_base_addr);
	}

	iwm_nic_unlock(sc);

	/* reset context data, TX status and translation data */
	error = iwm_write_mem(sc,
	    sc->scd_base_addr + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, clear_dwords);
	if (error)
		return EBUSY;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	iwm_nic_unlock(sc);

	/* enable command channel */
	error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
	if (error)
		return error;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate all TX scheduler FIFOs. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return error;
}
1688
1689/*
1690 * NVM read access and content parsing.  We do not support
1691 * external NVM or writing NVM.
1692 * iwlwifi/mvm/nvm.c
1693 */
1694
/* Default chunk size (in bytes) requested per NVM_ACCESS read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)

/* op_code values for struct iwm_nvm_access_cmd */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0

/* load nvm chunk response (status codes returned in iwm_nvm_access_resp) */
enum {
	IWM_READ_NVM_CHUNK_SUCCEED = 0,
	IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
1706
/*
 * Read one chunk of an NVM section via the IWM_NVM_ACCESS_CMD firmware
 * command.  On success the chunk is copied into data + offset and *len
 * is set to the number of bytes actually returned (possibly fewer than
 * 'length').  A NOT_VALID_ADDRESS response at a non-zero offset is
 * treated as end-of-section (*len = 0, returns 0), not as an error.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret) {
		device_printf(sc->sc_dev,
		    "Could not send NVM_ACCESS command (error=%d)\n", ret);
		return ret;
	}

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * meaning of NOT_VALID_ADDRESS:
			 * driver try to read chunk from address that is
			 * multiple of 2K and got an error since addr is empty.
			 * meaning of (offset != 0): driver already
			 * read valid data from another chunk so this case
			 * is not an error.
			 */
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
				    offset);
			*len = 0;
			ret = 0;
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
				    "NVM access command failed with status %d\n", ret);
			ret = EIO;
		}
		goto exit;
	}

	/* Sanity-check the response against what was requested. */
	if (offset_read != offset) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with invalid offset %d\n",
		    offset_read);
		ret = EINVAL;
		goto exit;
	}

	if (bytes_read > length) {
		device_printf(sc->sc_dev,
		    "NVM ACCESS response with too much data "
		    "(%d bytes requested, %d bytes received)\n",
		    length, bytes_read);
		ret = EINVAL;
		goto exit;
	}

	/* Write data to NVM */
	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
1793
1794/*
1795 * Reads an NVM section completely.
1796 * NICs prior to 7000 family don't have a real NVM, but just read
1797 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1798 * by uCode, we need to manually check in this case that we don't
1799 * overflow and try to read more than the EEPROM size.
1800 * For 7000 family NICs, we supply the maximal size we can read, and
1801 * the uCode fills the response with as much data as we can,
1802 * without overflowing, so no check is needed.
1803 */
1804static int
1805iwm_nvm_read_section(struct iwm_softc *sc,
1806	uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1807{
1808	uint16_t seglen, length, offset = 0;
1809	int ret;
1810
1811	/* Set nvm section read length */
1812	length = IWM_NVM_DEFAULT_CHUNK_SIZE;
1813
1814	seglen = length;
1815
1816	/* Read the NVM until exhausted (reading less than requested) */
1817	while (seglen == length) {
1818		/* Check no memory assumptions fail and cause an overflow */
1819		if ((size_read + offset + length) >
1820		    sc->cfg->eeprom_size) {
1821			device_printf(sc->sc_dev,
1822			    "EEPROM size is too small for NVM\n");
1823			return ENOBUFS;
1824		}
1825
1826		ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1827		if (ret) {
1828			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1829				    "Cannot read NVM from section %d offset %d, length %d\n",
1830				    section, offset, length);
1831			return ret;
1832		}
1833		offset += seglen;
1834	}
1835
1836	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1837		    "NVM section %d read completed\n", section);
1838	*len = offset;
1839	return 0;
1840}
1841
1842/*
1843 * BEGIN IWM_NVM_PARSE
1844 */
1845
1846/* iwlwifi/iwl-nvm-parse.c */
1847
1848/* NVM offsets (in words) definitions */
enum iwm_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/*
 * NVM SW-Section offset (in words) definitions.
 * IWM_NVM_VERSION and friends are relative to the start of the SW
 * section; IWM_NVM_CHANNELS is an absolute word address (0x1E0)
 * rebased onto IWM_NVM_SW_SECTION.
 */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/*
 * NVM calibration section offset (in words) definitions.
 * IWM_XTAL_CALIB is likewise rebased onto IWM_NVM_CALIB_SECTION.
 */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
1865
enum iwm_8000_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR0_WFPM_8000 = 0x12,
	IWM_HW_ADDR1_WFPM_8000 = 0x16,
	IWM_HW_ADDR0_PCIE_8000 = 0x8A,
	IWM_HW_ADDR1_PCIE_8000 = 0x8E,
	IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION_8000 = 0x1C0,
	IWM_NVM_VERSION_8000 = 0,
	IWM_RADIO_CFG_8000 = 0,
	IWM_SKU_8000 = 2,
	IWM_N_HW_ADDRS_8000 = 3,

	/*
	 * NVM REGULATORY -Section offset (in words) definitions.
	 * NOTE(review): the _OLD vs. non-_OLD LAR offsets presumably
	 * select between NVM layout revisions — confirm against iwlwifi.
	 */
	IWM_NVM_CHANNELS_8000 = 0,
	IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
	IWM_NVM_LAR_OFFSET_8000 = 0x507,
	IWM_NVM_LAR_ENABLED_8000 = 0x7,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
	IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
};
1891
1892/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),	/* 2.4 GHz band supported */
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),	/* 5.2 GHz band supported */
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),	/* 802.11n (HT) enabled */
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),	/* 802.11ac (VHT) enabled */
};
1899
1900/* radio config bits (actual values from NVM definition) */
/*
 * Extract radio-configuration fields from an NVM RF config word.
 * Every macro argument is parenthesized so the macros expand correctly
 * when handed compound expressions (e.g. IWM_NVM_RF_CFG_STEP_MSK(a | b));
 * the previous definitions used the argument bare and were vulnerable
 * to operator-precedence surprises.
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)          /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3)  /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3)  /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3)  /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF)  /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF)  /* bits 12-15 */

/* Family 8000 uses a wider RF config field layout. */
#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x)	((x) & 0xF)
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)		(((x) >> 4) & 0xF)
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)		(((x) >> 8) & 0xF)
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)		(((x) >> 12) & 0xFFF)
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)	(((x) >> 24) & 0xF)
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)	(((x) >> 28) & 0xF)

#define DEFAULT_MAX_TX_POWER 16
1916
/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * XXX cannot find this (DFS) flag in iwm-nvm-parse.c
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 *
 * Note: only VALID, IBSS, ACTIVE and RADAR are consumed by
 * iwm_eeprom_channel_flags() in this file; the remaining flags are
 * currently informational.
 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
1941
1942/*
1943 * Translate EEPROM flags to net80211.
1944 */
1945static uint32_t
1946iwm_eeprom_channel_flags(uint16_t ch_flags)
1947{
1948	uint32_t nflags;
1949
1950	nflags = 0;
1951	if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1952		nflags |= IEEE80211_CHAN_PASSIVE;
1953	if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1954		nflags |= IEEE80211_CHAN_NOADHOC;
1955	if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1956		nflags |= IEEE80211_CHAN_DFS;
1957		/* Just in case. */
1958		nflags |= IEEE80211_CHAN_NOADHOC;
1959	}
1960
1961	return (nflags);
1962}
1963
/*
 * Register one band's channels from the NVM channel table into chans[].
 * Indices ch_idx .. ch_num-1 address both the per-channel flag array and
 * the per-family channel-number table; bands[] selects the 802.11 modes.
 * Stops early if ieee80211_add_channel() reports failure (e.g. chans[] full).
 */
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
    int maxchans, int *nchans, int ch_idx, size_t ch_num,
    const uint8_t bands[])
{
	const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
	uint32_t nflags;
	uint16_t ch_flags;
	uint8_t ieee;
	int error;

	for (; ch_idx < ch_num; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		/* 7000- and 8000-family parts use different channel tables. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ieee = iwm_nvm_channels[ch_idx];
		else
			ieee = iwm_nvm_channels_8000[ch_idx];

		/* Channels lacking the VALID bit are unusable on this SKU. */
		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
			    "Ch. %d Flags %x [%sGHz] - No traffic\n",
			    ieee, ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4");
			continue;
		}

		nflags = iwm_eeprom_channel_flags(ch_flags);
		error = ieee80211_add_channel(chans, maxchans, nchans,
		    ieee, 0, 0, nflags, bands);
		if (error != 0)
			break;

		IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
		    "Ch. %d Flags %x [%sGHz] - Added\n",
		    ieee, ch_flags,
		    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
		    "5.2" : "2.4");
	}
}
2004
/*
 * Build the net80211 channel list from the NVM channel tables:
 * 2 GHz channels 1-13 as 11b/g, channel 14 as 11b only, and - when the
 * SKU enables the 5.2 GHz band - the remaining table entries as 11a.
 */
static void
iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
    struct ieee80211_channel chans[])
{
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_nvm_data *data = sc->nvm_data;
	uint8_t bands[IEEE80211_MODE_BYTES];
	size_t ch_num;

	memset(bands, 0, sizeof(bands));
	/* 1-13: 11b/g channels. */
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
	    IWM_NUM_2GHZ_CHANNELS - 1, bands);

	/* 14: 11b channel only. */
	clrbit(bands, IEEE80211_MODE_11G);
	iwm_add_channel_band(sc, chans, maxchans, nchans,
	    IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);

	/* 5 GHz channels, gated on the SKU's 5.2 GHz capability bit. */
	if (data->sku_cap_band_52GHz_enable) {
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
			ch_num = nitems(iwm_nvm_channels);
		else
			ch_num = nitems(iwm_nvm_channels_8000);
		memset(bands, 0, sizeof(bands));
		setbit(bands, IEEE80211_MODE_11A);
		iwm_add_channel_band(sc, chans, maxchans, nchans,
		    IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
	}
}
2037
/*
 * Derive the family-8000 MAC address: prefer the MAC-override NVM
 * section; fall back to the OTP address held in the WFMP registers;
 * zero data->hw_addr if neither source yields a usable address.
 */
static void
iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
	const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	const uint8_t *hw_addr;

	if (mac_override) {
		/* Sentinel address meaning "use the OTP address instead". */
		static const uint8_t reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const uint8_t *)(mac_override +
				 IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
		    !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
		    iwm_is_valid_ether_addr(data->hw_addr) &&
		    !IEEE80211_IS_MULTICAST(data->hw_addr))
			return;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: mac address from nvm override section invalid\n",
		    __func__);
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Assemble the address, reversing the byte order of each word. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2096
2097static int
2098iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2099	    const uint16_t *phy_sku)
2100{
2101	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2102		return le16_to_cpup(nvm_sw + IWM_SKU);
2103
2104	return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2105}
2106
2107static int
2108iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2109{
2110	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2111		return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2112	else
2113		return le32_to_cpup((const uint32_t *)(nvm_sw +
2114						IWM_NVM_VERSION_8000));
2115}
2116
2117static int
2118iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2119		  const uint16_t *phy_sku)
2120{
2121        if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2122                return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2123
2124        return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2125}
2126
2127static int
2128iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2129{
2130	int n_hw_addr;
2131
2132	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000)
2133		return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2134
2135	n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2136
2137        return n_hw_addr & IWM_N_HW_ADDR_MASK;
2138}
2139
/*
 * Decode the radio configuration word into its sub-fields.
 * Pre-8000 parts carry only type/step/dash/pnum; family 8000 also
 * encodes the valid TX/RX antenna masks.
 */
static void
iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
		  uint32_t radio_cfg)
{
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
		return;
	}

	/* set the radio configuration for family 8000 */
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
	/* NOTE(review): pnum is taken from the FLAVOR field here — confirm against iwlwifi. */
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
}
2160
/*
 * Extract the device MAC address into data->hw_addr.
 * Returns 0 on success, EINVAL when no valid address could be derived.
 */
static int
iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
		   const uint16_t *nvm_hw, const uint16_t *mac_override)
{
#ifdef notyet /* for FAMILY 9000 */
	if (cfg->mac_addr_from_csr) {
		iwm_set_hw_address_from_csr(sc, data);
        } else
#endif
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		/* Family 8000 has its own override/OTP fallback logic. */
		iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
	}

	if (!iwm_is_valid_ether_addr(data->hw_addr)) {
		device_printf(sc->sc_dev, "no valid mac address was found\n");
		return EINVAL;
	}

	return 0;
}
2191
/*
 * Parse the raw NVM section contents into a freshly allocated
 * iwm_nvm_data.  Returns NULL on allocation failure or when no valid
 * MAC address was found; on success the caller owns the result and
 * releases it with iwm_free_nvm_data().
 */
static struct iwm_nvm_data *
iwm_parse_nvm_data(struct iwm_softc *sc,
		   const uint16_t *nvm_hw, const uint16_t *nvm_sw,
		   const uint16_t *nvm_calib, const uint16_t *mac_override,
		   const uint16_t *phy_sku, const uint16_t *regulatory)
{
	struct iwm_nvm_data *data;
	uint32_t sku, radio_cfg;
	uint16_t lar_config;

	/* The per-channel flag array trails the struct; size it per family. */
	if (sc->cfg->device_family != IWM_DEVICE_FAMILY_8000) {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	} else {
		data = malloc(sizeof(*data) +
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
	}
	if (!data)
		return NULL;

	data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);

	radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
	iwm_set_radio_cfg(sc, data, radio_cfg);

	sku = iwm_get_sku(sc, nvm_sw, phy_sku);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is force-disabled here regardless of the SKU capability bit. */
	data->sku_cap_11n_enable = 0;

	data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);

	/* Location-aware regulatory (LAR), family 8000 only; the word's
	 * offset moved between NVM versions. */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
				       IWM_NVM_LAR_OFFSET_8000_OLD :
				       IWM_NVM_LAR_OFFSET_8000;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       IWM_NVM_LAR_ENABLED_8000);
	}

	/* If no valid mac address was found - bail out */
	if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
		free(data, M_DEVBUF);
		return NULL;
	}

	/* Stash the per-channel flag words for iwm_add_channel_band(). */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
		    IWM_NUM_CHANNELS * sizeof(uint16_t));
	} else {
		memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
		    IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
	}

	return data;
}
2252
2253static void
2254iwm_free_nvm_data(struct iwm_nvm_data *data)
2255{
2256	if (data != NULL)
2257		free(data, M_DEVBUF);
2258}
2259
/*
 * Validate that the NVM sections mandatory for this device family are
 * present, then hand the section pointers to iwm_parse_nvm_data().
 * Returns NULL (after logging) when a required section is missing.
 */
static struct iwm_nvm_data *
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;

	/* Checking for required sections */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[sc->cfg->nvm_hw_section_num].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			device_printf(sc->sc_dev,
			    "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[sc->cfg->nvm_hw_section_num].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			device_printf(sc->sc_dev,
			    "Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			device_printf(sc->sc_dev,
			    "Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	} else {
		panic("unknown device family %d\n", sc->cfg->device_family);
	}

	/* Optional sections may legitimately be NULL here. */
	hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	regulatory = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
	mac_override = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2312
2313static int
2314iwm_nvm_init(struct iwm_softc *sc)
2315{
2316	struct iwm_nvm_section nvm_sections[IWM_NVM_MAX_NUM_SECTIONS];
2317	int i, ret, section;
2318	uint32_t size_read = 0;
2319	uint8_t *nvm_buffer, *temp;
2320	uint16_t len;
2321
2322	memset(nvm_sections, 0, sizeof(nvm_sections));
2323
2324	if (sc->cfg->nvm_hw_section_num >= IWM_NVM_MAX_NUM_SECTIONS)
2325		return EINVAL;
2326
2327	/* load NVM values from nic */
2328	/* Read From FW NVM */
2329	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2330
2331	nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2332	if (!nvm_buffer)
2333		return ENOMEM;
2334	for (section = 0; section < IWM_NVM_MAX_NUM_SECTIONS; section++) {
2335		/* we override the constness for initial read */
2336		ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2337					   &len, size_read);
2338		if (ret)
2339			continue;
2340		size_read += len;
2341		temp = malloc(len, M_DEVBUF, M_NOWAIT);
2342		if (!temp) {
2343			ret = ENOMEM;
2344			break;
2345		}
2346		memcpy(temp, nvm_buffer, len);
2347
2348		nvm_sections[section].data = temp;
2349		nvm_sections[section].length = len;
2350	}
2351	if (!size_read)
2352		device_printf(sc->sc_dev, "OTP is blank\n");
2353	free(nvm_buffer, M_DEVBUF);
2354
2355	sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2356	if (!sc->nvm_data)
2357		return EINVAL;
2358	IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2359		    "nvm version = %x\n", sc->nvm_data->nvm_version);
2360
2361	for (i = 0; i < IWM_NVM_MAX_NUM_SECTIONS; i++) {
2362		if (nvm_sections[i].data != NULL)
2363			free(nvm_sections[i].data, M_DEVBUF);
2364	}
2365
2366	return 0;
2367}
2368
/*
 * DMA one firmware section to the device, split into chunks of at most
 * IWM_FH_MEM_TB_MAX_LENGTH bytes bounced through the preallocated
 * fw_dma buffer.  Returns 0 on success or an errno from the chunk load.
 */
static int
iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
	const struct iwm_fw_desc *section)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	uint8_t *v_addr;
	bus_addr_t p_addr;
	uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET,
		    "%s: [%d] uCode section being loaded...\n",
		    __func__, section_num);

	v_addr = dma->vaddr;
	p_addr = dma->paddr;

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		uint32_t copy_size, dst_addr;
		int extended_addr = FALSE;

		copy_size = MIN(chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		/* Destinations in the extended range need a chicken bit set. */
		if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWM_FW_MEM_EXTENDED_END)
			extended_addr = TRUE;

		if (extended_addr)
			iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
					  IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		/* Stage the chunk in the bounce buffer, then push it. */
		memcpy(v_addr, (const uint8_t *)section->data + offset,
		    copy_size);
		bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
		ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
					    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			device_printf(sc->sc_dev,
			    "%s: Could not load the [%d] uCode section\n",
			    __func__, section_num);
			break;
		}
	}

	return ret;
}
2421
/*
 * ucode
 */
/*
 * Program the FH service DMA channel to push one firmware chunk to the
 * device, then sleep until the chunk-done wakeup or a timeout.
 * Returns 0, EBUSY if the NIC could not be locked, or ETIMEDOUT.
 */
static int
iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
			     bus_addr_t phy_addr, uint32_t byte_cnt)
{
	int ret;

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Pause the channel while the transfer is programmed. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    phy_addr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(phy_addr)
	     << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	/* Kick off the transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait up to 5s for this segment to load */
	ret = 0;
	while (!sc->sc_fw_chunk_done) {
		/* Woken by the FH_TX interrupt handler; errno means timeout. */
		ret = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz);
		if (ret)
			break;
	}

	if (ret != 0) {
		device_printf(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
		return ETIMEDOUT;
	}

	return 0;
}
2478
/*
 * Load the firmware sections for one CPU on family-8000 parts,
 * reporting each loaded section to the ucode via the
 * FH_UCODE_LOAD_STATUS register.  *first_ucode_section is advanced to
 * the separator so a subsequent CPU2 pass can resume after it.
 */
static int
iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;

	/* CPU1 status lives in the low 16 bits, CPU2 in the high 16. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->fw_sect[i].data ||
		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
				    "Break since Data not valid or Empty section, sec = %d\n",
				    i);
			break;
		}
		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		}
	}

	*first_ucode_section = last_read_idx;

	iwm_enable_interrupts(sc);

	/* Mark this CPU's half of the load-status register as complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
2540
2541static int
2542iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2543	const struct iwm_fw_sects *image, int cpu, int *first_ucode_section)
2544{
2545	int shift_param;
2546	int i, ret = 0;
2547	uint32_t last_read_idx = 0;
2548
2549	if (cpu == 1) {
2550		shift_param = 0;
2551		*first_ucode_section = 0;
2552	} else {
2553		shift_param = 16;
2554		(*first_ucode_section)++;
2555	}
2556
2557	for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2558		last_read_idx = i;
2559
2560		/*
2561		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
2562		 * CPU1 to CPU2.
2563		 * PAGING_SEPARATOR_SECTION delimiter - separate between
2564		 * CPU2 non paged to CPU2 paging sec.
2565		 */
2566		if (!image->fw_sect[i].data ||
2567		    image->fw_sect[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2568		    image->fw_sect[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2569			IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2570				    "Break since Data not valid or Empty section, sec = %d\n",
2571				     i);
2572			break;
2573		}
2574
2575		ret = iwm_pcie_load_section(sc, i, &image->fw_sect[i]);
2576		if (ret)
2577			return ret;
2578	}
2579
2580	*first_ucode_section = last_read_idx;
2581
2582	return 0;
2583
2584}
2585
/*
 * Load a complete (possibly dual-CPU) firmware image on pre-8000
 * devices, then release the CPU reset so the ucode starts executing.
 */
static int
iwm_pcie_load_given_ucode(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc,
				       IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
				       IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
			iwm_nic_unlock(sc);
		}

		/* load to FW the binary sections of CPU2 */
		ret = iwm_pcie_load_cpu_sections(sc, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	iwm_enable_interrupts(sc);

	/* release CPU reset */
	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
2624
/*
 * Load a firmware image on family-8000 devices: release the CPU reset
 * first (the secured boot flow expects it), then feed the CPU1 and CPU2
 * section sets to the ucode.
 */
int
iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
	const struct iwm_fw_sects *image)
{
	int ret = 0;
	int first_ucode_section;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
		    image->is_dual_cpus ? "Dual" : "Single");

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	if (iwm_nic_lock(sc)) {
		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
		    IWM_RELEASE_CPU_RESET_BIT);
		iwm_nic_unlock(sc);
	}

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
	    &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
	    &first_ucode_section);
}
2653
/* XXX Get rid of this definition */
/*
 * Mask all interrupts except FH_TX, which is needed to drive the
 * firmware-load handshake in iwm_pcie_load_firmware_chunk().
 */
static inline void
iwm_enable_fw_load_int(struct iwm_softc *sc)
{
	IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
2662
/* XXX Add proper rfkill support code */
/*
 * Prepare the hardware, initialize the NIC, and load the given firmware
 * image.  Interrupts are narrowed to FH_TX for the duration of the load.
 * Returns 0 on success or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc,
	const struct iwm_fw_sects *fw)
{
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwm_prepare_card_hw(sc)) {
		device_printf(sc->sc_dev,
		    "%s: Exit HW not ready\n", __func__);
		ret = EIO;
		goto out;
	}

	/* Acknowledge any pending interrupts before masking. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	iwm_disable_interrupts(sc);

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);

	ret = iwm_nic_init(sc);
	if (ret) {
		device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwm_enable_fw_load_int(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		ret = iwm_pcie_load_given_ucode_8000(sc, fw);
	else
		ret = iwm_pcie_load_given_ucode(sc, fw);

	/* XXX re-check RF-Kill state */

out:
	return ret;
}
2721
2722static int
2723iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2724{
2725	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2726		.valid = htole32(valid_tx_ant),
2727	};
2728
2729	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2730	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2731}
2732
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration plus the calibration triggers associated
 * with the currently running ucode type to the firmware.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(iwm_mvm_get_phy_config(sc));
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
	    "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2752
/*
 * Notification-wait callback for the ALIVE response.  Distinguishes the
 * three known response layouts by payload size, records the error/log
 * event table pointers and SCD base address in the softc, and sets
 * alive_data->valid when the ucode reports IWM_ALIVE_STATUS_OK.
 * Always returns TRUE (stop waiting) once an ALIVE packet is seen.
 */
static int
iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
{
	struct iwm_mvm_alive_data *alive_data = data;
	struct iwm_mvm_alive_resp_ver1 *palive1;
	struct iwm_mvm_alive_resp_ver2 *palive2;
	struct iwm_mvm_alive_resp *palive;

	if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		/* Version 1 has no UMAC error-log support. */
		sc->support_umac_log = FALSE;
                sc->error_event_table =
                        le32toh(palive1->error_event_table_ptr);
                sc->log_event_table =
                        le32toh(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32toh(palive1->scd_base_ptr);

                alive_data->valid = le16toh(palive1->status) ==
                                    IWM_ALIVE_STATUS_OK;
                IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16toh(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;
		sc->error_event_table =
			le32toh(palive2->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive2->scd_base_ptr);
		sc->umac_error_event_table =
                        le32toh(palive2->error_info_addr);

		alive_data->valid = le16toh(palive2->status) ==
				    IWM_ALIVE_STATUS_OK;
		/* A nonzero UMAC error table address implies UMAC logging. */
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive2->status), palive2->ver_type,
			    palive2->ver_subtype, palive2->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    palive2->umac_major, palive2->umac_minor);
	} else if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		sc->error_event_table =
			le32toh(palive->error_event_table_ptr);
		sc->log_event_table =
			le32toh(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32toh(palive->scd_base_ptr);
		sc->umac_error_event_table =
			le32toh(palive->error_info_addr);

		alive_data->valid = le16toh(palive->status) ==
				    IWM_ALIVE_STATUS_OK;
		if (sc->umac_error_event_table)
			sc->support_umac_log = TRUE;

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			    le16toh(palive->status), palive->ver_type,
			    palive->ver_subtype, palive->flags);

		IWM_DPRINTF(sc, IWM_DEBUG_RESET,
			    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			    le32toh(palive->umac_major),
			    le32toh(palive->umac_minor));
	}

	return TRUE;
}
2829
2830static int
2831iwm_wait_phy_db_entry(struct iwm_softc *sc,
2832	struct iwm_rx_packet *pkt, void *data)
2833{
2834	struct iwm_phy_db *phy_db = data;
2835
2836	if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2837		if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2838			device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2839			    __func__, pkt->hdr.code);
2840		}
2841		return TRUE;
2842	}
2843
2844	if (iwm_phy_db_set_section(phy_db, pkt)) {
2845		device_printf(sc->sc_dev,
2846		    "%s: iwm_phy_db_set_section failed\n", __func__);
2847	}
2848
2849	return FALSE;
2850}
2851
/*
 * Load the requested ucode image, start the firmware, and block (with
 * the softc lock dropped) until the ALIVE notification arrives.  On any
 * failure sc->cur_ucode is restored to its previous value.  Also
 * configures firmware paging when the image provides a paging segment.
 * Returns 0 on success or an errno.
 */
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
	enum iwm_ucode_type ucode_type)
{
	struct iwm_notification_wait alive_wait;
	struct iwm_mvm_alive_data alive_data;
	const struct iwm_fw_sects *fw;
	enum iwm_ucode_type old_type = sc->cur_ucode;
	int error;
	static const uint16_t alive_cmd[] = { IWM_MVM_ALIVE };

	if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
		device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
			error);
		return error;
	}
	fw = &sc->sc_fw.fw_sects[ucode_type];
	sc->cur_ucode = ucode_type;
	sc->ucode_loaded = FALSE;

	/* Register the ALIVE waiter before starting the firmware. */
	memset(&alive_data, 0, sizeof(alive_data));
	iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
				   alive_cmd, nitems(alive_cmd),
				   iwm_alive_fn, &alive_data);

	error = iwm_start_fw(sc, fw);
	if (error) {
		device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
		sc->cur_ucode = old_type;
		iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
		return error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	IWM_UNLOCK(sc);
	error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
				      IWM_MVM_UCODE_ALIVE_TIMEOUT);
	IWM_LOCK(sc);
	if (error) {
		/* Dump the secure-boot CPU status to aid 8000 debugging. */
		if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
			uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
			if (iwm_nic_lock(sc)) {
				a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
				b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
				iwm_nic_unlock(sc);
			}
			device_printf(sc->sc_dev,
			    "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
			    a, b);
		}
		sc->cur_ucode = old_type;
		return error;
	}

	if (!alive_data.valid) {
		device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
		    __func__);
		sc->cur_ucode = old_type;
		return EIO;
	}

	iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);

	/*
	 * configure and operate fw paging mechanism.
	 * driver configures the paging flow only once, CPU2 paging image
	 * included in the IWM_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		error = iwm_save_fw_paging(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to save the FW paging image\n",
			    __func__);
			return error;
		}

		error = iwm_send_paging_cmd(sc, fw);
		if (error) {
			device_printf(sc->sc_dev,
			    "%s: failed to send the paging cmd\n", __func__);
			iwm_free_fw_paging(sc);
			return error;
		}
	}

	if (!error)
		sc->ucode_loaded = TRUE;
	return error;
}
2945
2946/*
2947 * mvm misc bits
2948 */
2949
2950/*
2951 * follows iwlwifi/fw.c
2952 */
/*
 * Run the INIT firmware image and wait for calibration to complete.
 *
 * With justnvm set, only the NVM (EEPROM equivalent) is read and the
 * MAC address extracted; the calibration phase is skipped.  Note that
 * the justnvm success path intentionally jumps to "error" so that the
 * calib_wait notification registered below is always removed.
 *
 * Returns 0 on success or an errno.  Called with the IWM lock held;
 * the lock is dropped while waiting for the calibration notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	struct iwm_notification_wait calib_wait;
	static const uint16_t init_complete[] = {
		IWM_INIT_COMPLETE_NOTIF,
		IWM_CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		device_printf(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/*
	 * Register for both notifications before starting the uCode;
	 * iwm_wait_phy_db_entry() collects calibration results into
	 * the PHY DB as they arrive.
	 */
	iwm_init_notification_wait(sc->sc_notif_wait,
				   &calib_wait,
				   init_complete,
				   nitems(init_complete),
				   iwm_wait_phy_db_entry,
				   sc->sc_phy_db);

	/* Will also start the device */
	ret = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
	if (ret) {
		device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
		    ret);
		goto error;
	}

	if (justnvm) {
		/* Read nvm */
		ret = iwm_nvm_init(sc);
		if (ret) {
			device_printf(sc->sc_dev, "failed to read nvm\n");
			goto error;
		}
		IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
		/* Success path; jump to "error" only to remove calib_wait. */
		goto error;
	}

	ret = iwm_send_bt_init_conf(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send bt coex configuration: %d\n", ret);
		goto error;
	}

	/* Init Smart FIFO. */
	ret = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
	if (ret)
		goto error;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
	if (ret) {
		device_printf(sc->sc_dev,
		    "failed to send antennas before calibration: %d\n", ret);
		goto error;
	}

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwm_send_phy_cfg_cmd(sc);
	if (ret) {
		device_printf(sc->sc_dev,
		    "%s: Failed to run INIT calibrations: %d\n",
		    __func__, ret);
		goto error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.
	 */
	IWM_UNLOCK(sc);
	ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
	    IWM_MVM_UCODE_CALIB_TIMEOUT);
	IWM_LOCK(sc);


	/* A completed (or timed-out) wait removes calib_wait itself. */
	goto out;

error:
	iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
out:
	return ret;
}
3045
3046/*
3047 * receive side
3048 */
3049
3050/* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	bus_dmamap_t dmamap;
	bus_dma_segment_t seg;
	int nsegs, error;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
	if (m == NULL)
		return ENOBUFS;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	/*
	 * Load the new mbuf into the spare map first, so that on failure
	 * the slot's current mbuf/map pairing is left untouched.
	 */
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
	    &seg, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: can't map mbuf, error %d\n", __func__, error);
		m_freem(m);
		return error;
	}

	if (data->m != NULL)
		bus_dmamap_unload(ring->data_dmat, data->map);

	/* Swap ring->spare_map with data->map */
	dmamap = data->map;
	data->map = ring->spare_map;
	ring->spare_map = dmamap;

	bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
	data->m = m;

	/* Update RX descriptor. */
	/* Hardware takes the buffer address shifted right by 8 bits,
	 * hence the 256-byte alignment requirement asserted here. */
	KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
	ring->desc[idx] = htole32(seg.ds_addr >> 8);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
3094
3095/* iwlwifi: mvm/rx.c */
3096/*
3097 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3098 * values are reported by the fw as positive values - need to negate
3099 * to obtain their dBM.  Account for missing antennas by replacing 0
3100 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3101 */
3102static int
3103iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3104{
3105	int energy_a, energy_b, energy_c, max_energy;
3106	uint32_t val;
3107
3108	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3109	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3110	    IWM_RX_INFO_ENERGY_ANT_A_POS;
3111	energy_a = energy_a ? -energy_a : -256;
3112	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3113	    IWM_RX_INFO_ENERGY_ANT_B_POS;
3114	energy_b = energy_b ? -energy_b : -256;
3115	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3116	    IWM_RX_INFO_ENERGY_ANT_C_POS;
3117	energy_c = energy_c ? -energy_c : -256;
3118	max_energy = MAX(energy_a, energy_b);
3119	max_energy = MAX(max_energy, energy_c);
3120
3121	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3122	    "energy In A %d B %d C %d , and max %d\n",
3123	    energy_a, energy_b, energy_c, max_energy);
3124
3125	return max_energy;
3126}
3127
3128static void
3129iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3130{
3131	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3132
3133	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3134
3135	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3136}
3137
3138/*
3139 * Retrieve the average noise (in dBm) among receivers.
3140 */
3141static int
3142iwm_get_noise(struct iwm_softc *sc,
3143    const struct iwm_mvm_statistics_rx_non_phy *stats)
3144{
3145	int i, total, nbant, noise;
3146
3147	total = nbant = noise = 0;
3148	for (i = 0; i < 3; i++) {
3149		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3150		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3151		    __func__,
3152		    i,
3153		    noise);
3154
3155		if (noise) {
3156			total += noise;
3157			nbant++;
3158		}
3159	}
3160
3161	IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3162	    __func__, nbant, total);
3163#if 0
3164	/* There should be at least one antenna but check anyway. */
3165	return (nbant == 0) ? -127 : (total / nbant) - 107;
3166#else
3167	/* For now, just hard-code it to -96 to be safe */
3168	return (-96);
3169#endif
3170}
3171
3172static void
3173iwm_mvm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3174{
3175	struct iwm_notif_statistics_v10 *stats = (void *)&pkt->data;
3176
3177	memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3178	sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3179}
3180
3181/*
3182 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3183 *
3184 * Handles the actual data of the Rx packet from the fw
3185 */
3186static boolean_t
3187iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3188	boolean_t stolen)
3189{
3190	struct ieee80211com *ic = &sc->sc_ic;
3191	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3192	struct ieee80211_frame *wh;
3193	struct ieee80211_node *ni;
3194	struct ieee80211_rx_stats rxs;
3195	struct iwm_rx_phy_info *phy_info;
3196	struct iwm_rx_mpdu_res_start *rx_res;
3197	struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3198	uint32_t len;
3199	uint32_t rx_pkt_status;
3200	int rssi;
3201
3202	phy_info = &sc->sc_last_phy_info;
3203	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3204	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3205	len = le16toh(rx_res->byte_count);
3206	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3207
3208	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3209		device_printf(sc->sc_dev,
3210		    "dsp size out of range [0,20]: %d\n",
3211		    phy_info->cfg_phy_cnt);
3212		goto fail;
3213	}
3214
3215	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3216	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3217		IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3218		    "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3219		goto fail;
3220	}
3221
3222	rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3223
3224	/* Map it to relative value */
3225	rssi = rssi - sc->sc_noise;
3226
3227	/* replenish ring for the buffer we're going to feed to the sharks */
3228	if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3229		device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3230		    __func__);
3231		goto fail;
3232	}
3233
3234	m->m_data = pkt->data + sizeof(*rx_res);
3235	m->m_pkthdr.len = m->m_len = len;
3236
3237	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3238	    "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3239
3240	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3241
3242	IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3243	    "%s: phy_info: channel=%d, flags=0x%08x\n",
3244	    __func__,
3245	    le16toh(phy_info->channel),
3246	    le16toh(phy_info->phy_flags));
3247
3248	/*
3249	 * Populate an RX state struct with the provided information.
3250	 */
3251	bzero(&rxs, sizeof(rxs));
3252	rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3253	rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3254	rxs.c_ieee = le16toh(phy_info->channel);
3255	if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3256		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3257	} else {
3258		rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3259	}
3260
3261	/* rssi is in 1/2db units */
3262	rxs.rssi = rssi * 2;
3263	rxs.nf = sc->sc_noise;
3264
3265	if (ieee80211_radiotap_active_vap(vap)) {
3266		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3267
3268		tap->wr_flags = 0;
3269		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3270			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3271		tap->wr_chan_freq = htole16(rxs.c_freq);
3272		/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3273		tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3274		tap->wr_dbm_antsignal = (int8_t)rssi;
3275		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3276		tap->wr_tsft = phy_info->system_timestamp;
3277		switch (phy_info->rate) {
3278		/* CCK rates. */
3279		case  10: tap->wr_rate =   2; break;
3280		case  20: tap->wr_rate =   4; break;
3281		case  55: tap->wr_rate =  11; break;
3282		case 110: tap->wr_rate =  22; break;
3283		/* OFDM rates. */
3284		case 0xd: tap->wr_rate =  12; break;
3285		case 0xf: tap->wr_rate =  18; break;
3286		case 0x5: tap->wr_rate =  24; break;
3287		case 0x7: tap->wr_rate =  36; break;
3288		case 0x9: tap->wr_rate =  48; break;
3289		case 0xb: tap->wr_rate =  72; break;
3290		case 0x1: tap->wr_rate =  96; break;
3291		case 0x3: tap->wr_rate = 108; break;
3292		/* Unknown rate: should not happen. */
3293		default:  tap->wr_rate =   0;
3294		}
3295	}
3296
3297	IWM_UNLOCK(sc);
3298	if (ni != NULL) {
3299		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3300		ieee80211_input_mimo(ni, m, &rxs);
3301		ieee80211_free_node(ni);
3302	} else {
3303		IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3304		ieee80211_input_mimo_all(ic, m, &rxs);
3305	}
3306	IWM_LOCK(sc);
3307
3308	return TRUE;
3309
3310fail:	counter_u64_add(ic->ic_ierrors, 1);
3311	return FALSE;
3312}
3313
3314static int
3315iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3316	struct iwm_node *in)
3317{
3318	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3319	struct ieee80211_node *ni = &in->in_ni;
3320	struct ieee80211vap *vap = ni->ni_vap;
3321	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3322	int failack = tx_resp->failure_frame;
3323
3324	KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3325
3326	/* Update rate control statistics. */
3327	IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3328	    __func__,
3329	    (int) le16toh(tx_resp->status.status),
3330	    (int) le16toh(tx_resp->status.sequence),
3331	    tx_resp->frame_count,
3332	    tx_resp->bt_kill_count,
3333	    tx_resp->failure_rts,
3334	    tx_resp->failure_frame,
3335	    le32toh(tx_resp->initial_rate),
3336	    (int) le16toh(tx_resp->wireless_media_time));
3337
3338	if (status != IWM_TX_STATUS_SUCCESS &&
3339	    status != IWM_TX_STATUS_DIRECT_DONE) {
3340		ieee80211_ratectl_tx_complete(vap, ni,
3341		    IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
3342		return (1);
3343	} else {
3344		ieee80211_ratectl_tx_complete(vap, ni,
3345		    IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
3346		return (0);
3347	}
3348}
3349
/*
 * Handle a TX-command response from the firmware: release the completed
 * slot's DMA resources and mbuf, notify net80211, and restart transmit
 * if the queue drained below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	struct mbuf *m = txd->m;
	int status;

	KASSERT(txd->done == 0, ("txd not done"));
	KASSERT(txd->in != NULL, ("txd without node"));
	KASSERT(txd->m != NULL, ("txd without mbuf"));

	/* A completion means the hardware is making progress. */
	sc->sc_tx_timer = 0;

	status = iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(ring->data_dmat, txd->map);

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "free txd %p, in %p\n", txd, txd->in);
	/* Clear the slot before handing the mbuf/node refs to net80211. */
	txd->done = 1;
	txd->m = NULL;
	txd->in = NULL;

	/* Consumes the mbuf and the node reference held by the slot. */
	ieee80211_tx_complete(&in->in_ni, m, status);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0) {
			iwm_start(sc);
		}
	}
}
3389
3390/*
3391 * transmit side
3392 */
3393
3394/*
3395 * Process a "command done" firmware notification.  This is where we wakeup
3396 * processes waiting for a synchronous command completion.
3397 * from if_iwn
3398 */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	/* XXX wide commands? */
	IWM_DPRINTF(sc, IWM_DEBUG_CMD,
	    "cmd notification type 0x%x qid %d idx %d\n",
	    pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake any thread sleeping in iwm_send_cmd() on this slot. */
	wakeup(&ring->desc[pkt->hdr.idx]);

	/* Commands should complete in order; warn if one was skipped. */
	if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
		device_printf(sc->sc_dev,
		    "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
		    __func__, pkt->hdr.idx, ring->queued, ring->cur);
		/* XXX call iwm_force_nmi() */
	}

	KASSERT(ring->queued > 0, ("ring->queued is empty?"));
	ring->queued--;
	/* Last outstanding command: allow the NIC to sleep again. */
	if (ring->queued == 0)
		iwm_pcie_clear_cmd_in_flight(sc);
}
3438
#if 0
/*
 * necessary only for block ack mode
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	len = roundup(len, 4) / 4;

	/* Byte-count entry: station id in the top nibble, length below. */
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
	    BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	/* NOTE(review): presumably the hardware reads a wrapped duplicate
	 * of the first entries past the ring end — confirm against the
	 * iwlwifi reference before enabling this code. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
		    BUS_DMASYNC_PREWRITE);
	}
}
#endif
3470
3471/*
3472 * Take an 802.11 (non-n) rate, find the relevant rate
3473 * table entry.  return the index into in_ridx[].
3474 *
3475 * The caller then uses that index back into in_ridx
3476 * to figure out the rate index programmed /into/
3477 * the firmware for this given node.
3478 */
3479static int
3480iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
3481    uint8_t rate)
3482{
3483	int i;
3484	uint8_t r;
3485
3486	for (i = 0; i < nitems(in->in_ridx); i++) {
3487		r = iwm_rates[in->in_ridx[i]].rate;
3488		if (rate == r)
3489			return (i);
3490	}
3491
3492	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3493	    "%s: couldn't find an entry for rate=%d\n",
3494	    __func__,
3495	    rate);
3496
3497	/* XXX Return the first */
3498	/* XXX TODO: have it return the /lowest/ */
3499	return (0);
3500}
3501
3502static int
3503iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3504{
3505	int i;
3506
3507	for (i = 0; i < nitems(iwm_rates); i++) {
3508		if (iwm_rates[i].rate == rate)
3509			return (i);
3510	}
3511	/* XXX error? */
3512	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3513	    "%s: couldn't find an entry for rate=%d\n",
3514	    __func__,
3515	    rate);
3516	return (0);
3517}
3518
3519/*
3520 * Fill in the rate related information for a transmit command.
3521 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct mbuf *m, struct iwm_tx_cmd *tx)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_frame *wh;
	const struct ieee80211_txparam *tp = ni->ni_txparms;
	const struct iwm_rate *rinfo;
	int type;
	int ridx, rate_flags;

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/*
	 * Rate selection policy, in priority order: management frames
	 * use the management rate, multicast the multicast rate, a
	 * user-fixed rate wins for unicast, EAPOL goes out at the robust
	 * management rate, and ordinary data frames ask rate control.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MGT (%d)\n", __func__, tp->mgmtrate);
	} else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: MCAST (%d)\n", __func__, tp->mcastrate);
	} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
	} else if (m->m_flags & M_EAPOL) {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "%s: EAPOL\n", __func__);
	} else if (type == IEEE80211_FC0_TYPE_DATA) {
		int i;

		/* for data frames, use RS table */
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
		/* XXX pass pktlen */
		(void) ieee80211_ratectl_rate(ni, NULL, 0);
		i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
		ridx = in->in_ridx[i];

		/* This is the index into the programmed table */
		tx->initial_rate_index = i;
		/* Let the firmware's rate scaling take over from here. */
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);

		IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
		    "%s: start with i=%d, txrate %d\n",
		    __func__, i, iwm_rates[ridx].rate);
	} else {
		ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DEFAULT (%d)\n",
		    __func__, tp->mgmtrate);
	}

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
	    "%s: frame type=%d txrate %d\n",
	        __func__, type, iwm_rates[ridx].rate);

	rinfo = &iwm_rates[ridx];

	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
	    __func__, ridx,
	    rinfo->rate,
	    !! (IWM_RIDX_IS_CCK(ridx))
	    );

	/* XXX TODO: hard-coded TX antenna? */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3598
/* Size of the first TX buffer: the leading bytes of the command header. */
#define TB0_SIZE 16
/*
 * Queue a frame for transmission on the ring mapped to access class
 * 'ac'.  Builds the firmware TX command (with the 802.11 header copied
 * into it), DMA-maps the payload, fills the TFD descriptor and kicks
 * the ring's write pointer.
 *
 * Returns 0 on success or an errno; on failure the mbuf is freed.
 * Called with the IWM lock held.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_node *in = IWM_NODE(ni);
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
	int nsegs;
	uint8_t tid, type;
	int i, totlen, error, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	tid = 0;
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, m, tx);

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		/* Retrieve key for TX && do software encryption. */
		k = ieee80211_crypto_encap(ni, m);
		if (k == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		/* 802.11 header may have moved. */
		wh = mtod(m, struct ieee80211_frame *);
	}

	if (ieee80211_radiotap_active_vap(vap)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		if (k != NULL)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		ieee80211_radiotap_tx(vap, m);
	}


	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection for long unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go out via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	/*
	 * Tell the firmware how long it may keep retrying this frame
	 * while we are in power-save, based on the frame subtype.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		} else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
		} else {
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
		}
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header travels inside the TX command, so only the payload
	 * is DMA-mapped from the mbuf. */
	m_adj(m, hdrlen);
	error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error != EFBIG) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
		if (m1 == NULL) {
			device_printf(sc->sc_dev,
			    "%s: could not defrag mbuf\n", __func__);
			m_freem(m);
			return (ENOBUFS);
		}
		m = m1;

		error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
		    segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending txd %p, in %p\n", data, data->in);
	KASSERT(data->in != NULL, ("node is NULL"));

	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
	    "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
	    ring->qid, ring->cur, totlen, nsegs,
	    le32toh(tx->tx_flags),
	    le32toh(tx->rate_n_flags),
	    tx->initial_rate_index
	    );

	/* Fill TX descriptor. */
	/* TB0 + TB1 cover the command/header; the rest is payload. */
	desc->num_tbs = 2 + nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	for (i = 0; i < nsegs; i++) {
		seg = &segs[i];
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	bus_dmamap_sync(ring->data_dmat, data->map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
	    BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
3816
3817static int
3818iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3819    const struct ieee80211_bpf_params *params)
3820{
3821	struct ieee80211com *ic = ni->ni_ic;
3822	struct iwm_softc *sc = ic->ic_softc;
3823	int error = 0;
3824
3825	IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3826	    "->%s begin\n", __func__);
3827
3828	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3829		m_freem(m);
3830		IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3831		    "<-%s not RUNNING\n", __func__);
3832		return (ENETDOWN);
3833        }
3834
3835	IWM_LOCK(sc);
3836	/* XXX fix this */
3837        if (params == NULL) {
3838		error = iwm_tx(sc, m, ni, 0);
3839	} else {
3840		error = iwm_tx(sc, m, ni, 0);
3841	}
3842	sc->sc_tx_timer = 5;
3843	IWM_UNLOCK(sc);
3844
3845        return (error);
3846}
3847
3848/*
3849 * mvm/tx.c
3850 */
3851
3852/*
3853 * Note that there are transports that buffer frames before they reach
3854 * the firmware. This means that after flush_tx_path is called, the
3855 * queue might not be empty. The race-free way to handle this is to:
3856 * 1) set the station as draining
3857 * 2) flush the Tx path
3858 * 3) wait for the transport queues to be empty
3859 */
3860int
3861iwm_mvm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3862{
3863	int ret;
3864	struct iwm_tx_path_flush_cmd flush_cmd = {
3865		.queues_ctl = htole32(tfd_msk),
3866		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3867	};
3868
3869	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3870	    sizeof(flush_cmd), &flush_cmd);
3871	if (ret)
3872                device_printf(sc->sc_dev,
3873		    "Flushing tx queue failed: %d\n", ret);
3874	return ret;
3875}
3876
3877/*
3878 * BEGIN mvm/quota.c
3879 */
3880
3881static int
3882iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3883{
3884	struct iwm_time_quota_cmd cmd;
3885	int i, idx, ret, num_active_macs, quota, quota_rem;
3886	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3887	int n_ifs[IWM_MAX_BINDINGS] = {0, };
3888	uint16_t id;
3889
3890	memset(&cmd, 0, sizeof(cmd));
3891
3892	/* currently, PHY ID == binding ID */
3893	if (ivp) {
3894		id = ivp->phy_ctxt->id;
3895		KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3896		colors[id] = ivp->phy_ctxt->color;
3897
3898		if (1)
3899			n_ifs[id] = 1;
3900	}
3901
3902	/*
3903	 * The FW's scheduling session consists of
3904	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
3905	 * equally between all the bindings that require quota
3906	 */
3907	num_active_macs = 0;
3908	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
3909		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
3910		num_active_macs += n_ifs[i];
3911	}
3912
3913	quota = 0;
3914	quota_rem = 0;
3915	if (num_active_macs) {
3916		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
3917		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
3918	}
3919
3920	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
3921		if (colors[i] < 0)
3922			continue;
3923
3924		cmd.quotas[idx].id_and_color =
3925			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
3926
3927		if (n_ifs[i] <= 0) {
3928			cmd.quotas[idx].quota = htole32(0);
3929			cmd.quotas[idx].max_duration = htole32(0);
3930		} else {
3931			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
3932			cmd.quotas[idx].max_duration = htole32(0);
3933		}
3934		idx++;
3935	}
3936
3937	/* Give the remainder of the session to the first binding */
3938	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
3939
3940	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
3941	    sizeof(cmd), &cmd);
3942	if (ret)
3943		device_printf(sc->sc_dev,
3944		    "%s: Failed to send quota: %d\n", __func__, ret);
3945	return ret;
3946}
3947
3948/*
3949 * END mvm/quota.c
3950 */
3951
3952/*
3953 * ieee80211 routines
3954 */
3955
3956/*
3957 * Change to AUTH state in 80211 state machine.  Roughly matches what
3958 * Linux does in bss_info_changed().
3959 */
3960static int
3961iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
3962{
3963	struct ieee80211_node *ni;
3964	struct iwm_node *in;
3965	struct iwm_vap *iv = IWM_VAP(vap);
3966	uint32_t duration;
3967	int error;
3968
3969	/*
3970	 * XXX i have a feeling that the vap node is being
3971	 * freed from underneath us. Grr.
3972	 */
3973	ni = ieee80211_ref_node(vap->iv_bss);
3974	in = IWM_NODE(ni);
3975	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
3976	    "%s: called; vap=%p, bss ni=%p\n",
3977	    __func__,
3978	    vap,
3979	    ni);
3980
3981	in->in_assoc = 0;
3982
3983	/*
3984	 * Firmware bug - it'll crash if the beacon interval is less
3985	 * than 16. We can't avoid connecting at all, so refuse the
3986	 * station state change, this will cause net80211 to abandon
3987	 * attempts to connect to this AP, and eventually wpa_s will
3988	 * blacklist the AP...
3989	 */
3990	if (ni->ni_intval < 16) {
3991		device_printf(sc->sc_dev,
3992		    "AP %s beacon interval is %d, refusing due to firmware bug!\n",
3993		    ether_sprintf(ni->ni_bssid), ni->ni_intval);
3994		error = EINVAL;
3995		goto out;
3996	}
3997
3998	error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
3999	if (error != 0)
4000		return error;
4001
4002	error = iwm_allow_mcast(vap, sc);
4003	if (error) {
4004		device_printf(sc->sc_dev,
4005		    "%s: failed to set multicast\n", __func__);
4006		goto out;
4007	}
4008
4009	/*
4010	 * This is where it deviates from what Linux does.
4011	 *
4012	 * Linux iwlwifi doesn't reset the nic each time, nor does it
4013	 * call ctxt_add() here.  Instead, it adds it during vap creation,
4014	 * and always does a mac_ctx_changed().
4015	 *
4016	 * The openbsd port doesn't attempt to do that - it reset things
4017	 * at odd states and does the add here.
4018	 *
4019	 * So, until the state handling is fixed (ie, we never reset
4020	 * the NIC except for a firmware failure, which should drag
4021	 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4022	 * contexts that are required), let's do a dirty hack here.
4023	 */
4024	if (iv->is_uploaded) {
4025		if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4026			device_printf(sc->sc_dev,
4027			    "%s: failed to update MAC\n", __func__);
4028			goto out;
4029		}
4030		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4031		    in->in_ni.ni_chan, 1, 1)) != 0) {
4032			device_printf(sc->sc_dev,
4033			    "%s: failed update phy ctxt\n", __func__);
4034			goto out;
4035		}
4036		iv->phy_ctxt = &sc->sc_phyctxt[0];
4037
4038		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4039			device_printf(sc->sc_dev,
4040			    "%s: binding update cmd\n", __func__);
4041			goto out;
4042		}
4043		if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4044			device_printf(sc->sc_dev,
4045			    "%s: failed to update sta\n", __func__);
4046			goto out;
4047		}
4048	} else {
4049		if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
4050			device_printf(sc->sc_dev,
4051			    "%s: failed to add MAC\n", __func__);
4052			goto out;
4053		}
4054		if ((error = iwm_mvm_power_update_mac(sc)) != 0) {
4055			device_printf(sc->sc_dev,
4056			    "%s: failed to update power management\n",
4057			    __func__);
4058			goto out;
4059		}
4060		if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4061		    in->in_ni.ni_chan, 1, 1)) != 0) {
4062			device_printf(sc->sc_dev,
4063			    "%s: failed add phy ctxt!\n", __func__);
4064			error = ETIMEDOUT;
4065			goto out;
4066		}
4067		iv->phy_ctxt = &sc->sc_phyctxt[0];
4068
4069		if ((error = iwm_mvm_binding_add_vif(sc, iv)) != 0) {
4070			device_printf(sc->sc_dev,
4071			    "%s: binding add cmd\n", __func__);
4072			goto out;
4073		}
4074		if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
4075			device_printf(sc->sc_dev,
4076			    "%s: failed to add sta\n", __func__);
4077			goto out;
4078		}
4079	}
4080
4081	/*
4082	 * Prevent the FW from wandering off channel during association
4083	 * by "protecting" the session with a time event.
4084	 */
4085	/* XXX duration is in units of TU, not MS */
4086	duration = IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4087	iwm_mvm_protect_session(sc, iv, duration, 500 /* XXX magic number */);
4088	DELAY(100);
4089
4090	error = 0;
4091out:
4092	ieee80211_free_node(ni);
4093	return (error);
4094}
4095
4096static int
4097iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
4098{
4099	struct iwm_node *in = IWM_NODE(vap->iv_bss);
4100	int error;
4101
4102	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
4103		device_printf(sc->sc_dev,
4104		    "%s: failed to update STA\n", __func__);
4105		return error;
4106	}
4107
4108	in->in_assoc = 1;
4109	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
4110		device_printf(sc->sc_dev,
4111		    "%s: failed to update MAC\n", __func__);
4112		return error;
4113	}
4114
4115	return 0;
4116}
4117
/*
 * Tear down the current association by fully resetting the device.
 *
 * 'in' may be NULL; if given, only its in_assoc flag is cleared — no
 * node state is freed here.  Always returns 0.  See the comment below
 * for why this is a hard reset rather than the proper teardown sequence.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	uint32_t tfd_msk;

	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, vap);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device not matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	/*
	 * Just using 0xf for the queues mask is fine as long as we only
	 * get here from RUN state.
	 */
	tfd_msk = 0xf;
	/* Drain pending host tx, then tell firmware to flush its queues. */
	iwm_xmit_queue_drain(sc);
	iwm_mvm_flush_tx_path(sc, tfd_msk, IWM_CMD_SYNC);
	/*
	 * We seem to get away with just synchronously sending the
	 * IWM_TXPATH_FLUSH command.
	 */
//	iwm_trans_wait_tx_queue_empty(sc, tfd_msk);
	/* Full stop/reinit cycle — brings the NIC back to a clean state. */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

	/* Dead code below: the "proper" teardown kept for reference only. */
#if 0
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		device_printf(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
		device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, IWM_VAP(in->in_ni.ni_vap));

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
4188
4189static struct ieee80211_node *
4190iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4191{
4192	return malloc(sizeof (struct iwm_node), M_80211_NODE,
4193	    M_NOWAIT | M_ZERO);
4194}
4195
4196uint8_t
4197iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4198{
4199	int i;
4200	uint8_t rval;
4201
4202	for (i = 0; i < rs->rs_nrates; i++) {
4203		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4204		if (rval == iwm_rates[ridx].rate)
4205			return rs->rs_rates[i];
4206	}
4207
4208	return 0;
4209}
4210
/*
 * Build the firmware link-quality command (in->in_lq) from the node's
 * negotiated legacy rate set.  Rates are written into the LQ table
 * highest-first; unused trailing slots are padded with the lowest
 * selected rate.  NOTE: not 11n aware at all (see XXX below).
 */
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
//	int txant = 0;

	/* Sanity: refuse rate sets the LQ table cannot hold, or empty ones. */
	if (nrates > nitems(lq->rs_table)) {
		device_printf(sc->sc_dev,
		    "%s: node supports %d rates, driver handles "
		    "only %zu\n", __func__, nrates, nitems(lq->rs_table));
		return;
	}
	if (nrates == 0) {
		device_printf(sc->sc_dev,
		    "%s: node supports 0 rates, odd!\n", __func__);
		return;
	}

	/*
	 * XXX .. and most of iwm_node is not initialised explicitly;
	 * it's all just 0x0 passed to the firmware.
	 */

	/* first figure out which rates we should support */
	/* XXX TODO: this isn't 11n aware /at all/ */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
	    "%s: nrates=%d\n", __func__, nrates);

	/*
	 * Loop over nrates and populate in_ridx from the highest
	 * rate to the lowest rate.  Remember, in_ridx[] has
	 * IEEE80211_RATE_MAXSIZE entries!
	 */
	for (i = 0; i < min(nrates, IEEE80211_RATE_MAXSIZE); i++) {
		/* (nrates - 1) - i walks the rate set from highest to lowest. */
		int rate = ni->ni_rates.rs_rates[(nrates - 1) - i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX) {
			/* No hardware entry for this rate: slot stays -1. */
			device_printf(sc->sc_dev,
			    "%s: WARNING: device rate for %d not found!\n",
			    __func__, rate);
		} else {
			IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
			    "%s: rate: i: %d, rate=%d, ridx=%d\n",
			    __func__,
			    i,
			    rate,
			    ridx);
			in->in_ridx[i] = ridx;
		}
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* For HT, always enable RTS/CTS to avoid excessive retries. */
	if (ni->ni_flags & IEEE80211_NODE_HT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	/*
	 * XXX TODO: this should be looping over the min of nrates
	 * and LQ_MAX_RETRY_NUM.  Sigh.
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

#if 0
		if (txant == 0)
			txant = iwm_mvm_get_valid_tx_ant(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;
#else
		nextant = iwm_mvm_get_valid_tx_ant(sc);
#endif
		/*
		 * Map the rate id into a rate index into
		 * our hardware table containing the
		 * configuration to use for this rate.
		 */
		ridx = in->in_ridx[i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
		    "station rate i=%d, rate=%d, hw=%x\n",
		    i, iwm_rates[ridx].rate, tab);
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	/* 'tab' still holds the last (lowest) rate written above. */
	for (i = nrates; i < nitems(lq->rs_table); i++) {
		KASSERT(tab != 0, ("invalid tab"));
		lq->rs_table[i] = htole32(tab);
	}
}
4329
4330static int
4331iwm_media_change(struct ifnet *ifp)
4332{
4333	struct ieee80211vap *vap = ifp->if_softc;
4334	struct ieee80211com *ic = vap->iv_ic;
4335	struct iwm_softc *sc = ic->ic_softc;
4336	int error;
4337
4338	error = ieee80211_media_change(ifp);
4339	if (error != ENETRESET)
4340		return error;
4341
4342	IWM_LOCK(sc);
4343	if (ic->ic_nrunning > 0) {
4344		iwm_stop(sc);
4345		iwm_init(sc);
4346	}
4347	IWM_UNLOCK(sc);
4348	return error;
4349}
4350
4351
/*
 * net80211 state-machine hook.  Runs the driver-side work for each
 * transition, then chains to the saved net80211 handler
 * (ivp->iv_newstate).
 *
 * Locking: called with the IC lock held; the driver drops it and takes
 * the IWM lock, and must re-swap the two around every call back into
 * net80211.  On return the IC lock is held again.
 */
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct iwm_vap *ivp = IWM_VAP(vap);
	struct ieee80211com *ic = vap->iv_ic;
	struct iwm_softc *sc = ic->ic_softc;
	struct iwm_node *in;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_STATE,
	    "switching state %s -> %s\n",
	    ieee80211_state_name[vap->iv_state],
	    ieee80211_state_name[nstate]);
	IEEE80211_UNLOCK(ic);
	IWM_LOCK(sc);

	if (vap->iv_state == IEEE80211_S_SCAN && nstate != vap->iv_state)
		iwm_led_blink_stop(sc);

	/* disable beacon filtering if we're hopping out of RUN */
	if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
		iwm_mvm_disable_beacon_filter(sc);

		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
			in->in_assoc = 0;

		if (nstate == IEEE80211_S_INIT) {
			/*
			 * RUN -> INIT: run net80211's handler first, then
			 * reset the device via iwm_release().
			 */
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			error = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			iwm_release(sc, NULL);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return error;
		}

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 *
		 * XXX TODO: fix this for FreeBSD!
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Force transition to INIT; MGT=%d\n", arg);
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			/* Always pass arg as -1 since we can't Tx right now. */
			/*
			 * XXX arg is just ignored anyway when transitioning
			 *     to IEEE80211_S_INIT.
			 */
			vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
			IWM_DPRINTF(sc, IWM_DEBUG_STATE,
			    "Going INIT->SCAN\n");
			nstate = IEEE80211_S_SCAN;
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
	case IEEE80211_S_SCAN:
		if (vap->iv_state == IEEE80211_S_AUTH ||
		    vap->iv_state == IEEE80211_S_ASSOC) {
			/*
			 * Dropping out of AUTH/ASSOC: remove the station,
			 * MAC context and binding from firmware, then return
			 * the result of net80211's own transition.
			 */
			int myerr;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			myerr = ivp->iv_newstate(vap, nstate, arg);
			IEEE80211_UNLOCK(ic);
			IWM_LOCK(sc);
			error = iwm_mvm_rm_sta(sc, vap, FALSE);
                        if (error) {
                                device_printf(sc->sc_dev,
				    "%s: Failed to remove station: %d\n",
				    __func__, error);
			}
			error = iwm_mvm_mac_ctxt_changed(sc, vap);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to change mac context: %d\n",
                                    __func__, error);
                        }
                        error = iwm_mvm_binding_remove_vif(sc, ivp);
                        if (error) {
                                device_printf(sc->sc_dev,
                                    "%s: Failed to remove channel ctx: %d\n",
                                    __func__, error);
                        }
			ivp->phy_ctxt = NULL;
			IWM_UNLOCK(sc);
			IEEE80211_LOCK(ic);
			return myerr;
		}
		break;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: could not move to auth state: %d\n",
			    __func__, error);
			break;
		}
		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(vap, sc)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to associate: %d\n", __func__,
			    error);
			break;
		}
		break;

	case IEEE80211_S_RUN:
		/* Update the association state, now we have it all */
		/* (eg associd comes in at this point */
		error = iwm_assoc(vap, sc);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: failed to update association state: %d\n",
			    __func__,
			    error);
			break;
		}

		in = IWM_NODE(vap->iv_bss);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_power_update_mac(sc);
		iwm_mvm_update_quotas(sc, ivp);
		iwm_setrates(sc, in);

		if ((error = iwm_mvm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
			device_printf(sc->sc_dev,
			    "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
		}

		iwm_mvm_led_enable(sc);
		break;

	default:
		break;
	}
	IWM_UNLOCK(sc);
	IEEE80211_LOCK(ic);

	return (ivp->iv_newstate(vap, nstate, arg));
}
4512
4513void
4514iwm_endscan_cb(void *arg, int pending)
4515{
4516	struct iwm_softc *sc = arg;
4517	struct ieee80211com *ic = &sc->sc_ic;
4518
4519	IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4520	    "%s: scan ended\n",
4521	    __func__);
4522
4523	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4524}
4525
4526/*
4527 * Aging and idle timeouts for the different possible scenarios
4528 * in default configuration
4529 */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging, idle } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* tx re */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
4553
4554/*
4555 * Aging and idle timeouts for the different possible scenarios
4556 * in single BSS MAC configuration.
4557 */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging, idle } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* tx re */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
4581
4582static void
4583iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
4584    struct ieee80211_node *ni)
4585{
4586	int i, j, watermark;
4587
4588	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
4589
4590	/*
4591	 * If we are in association flow - check antenna configuration
4592	 * capabilities of the AP station, and choose the watermark accordingly.
4593	 */
4594	if (ni) {
4595		if (ni->ni_flags & IEEE80211_NODE_HT) {
4596#ifdef notyet
4597			if (ni->ni_rxmcs[2] != 0)
4598				watermark = IWM_SF_W_MARK_MIMO3;
4599			else if (ni->ni_rxmcs[1] != 0)
4600				watermark = IWM_SF_W_MARK_MIMO2;
4601			else
4602#endif
4603				watermark = IWM_SF_W_MARK_SISO;
4604		} else {
4605			watermark = IWM_SF_W_MARK_LEGACY;
4606		}
4607	/* default watermark value for unassociated mode. */
4608	} else {
4609		watermark = IWM_SF_W_MARK_MIMO2;
4610	}
4611	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
4612
4613	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
4614		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
4615			sf_cmd->long_delay_timeouts[i][j] =
4616					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
4617		}
4618	}
4619
4620	if (ni) {
4621		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
4622		       sizeof(iwm_sf_full_timeout));
4623	} else {
4624		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
4625		       sizeof(iwm_sf_full_timeout_def));
4626	}
4627}
4628
4629static int
4630iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
4631{
4632	struct ieee80211com *ic = &sc->sc_ic;
4633	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
4634	struct iwm_sf_cfg_cmd sf_cmd = {
4635		.state = htole32(IWM_SF_FULL_ON),
4636	};
4637	int ret = 0;
4638
4639	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4640		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
4641
4642	switch (new_state) {
4643	case IWM_SF_UNINIT:
4644	case IWM_SF_INIT_OFF:
4645		iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
4646		break;
4647	case IWM_SF_FULL_ON:
4648		iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
4649		break;
4650	default:
4651		IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
4652		    "Invalid state: %d. not sending Smart Fifo cmd\n",
4653			  new_state);
4654		return EINVAL;
4655	}
4656
4657	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
4658				   sizeof(sf_cmd), &sf_cmd);
4659	return ret;
4660}
4661
4662static int
4663iwm_send_bt_init_conf(struct iwm_softc *sc)
4664{
4665	struct iwm_bt_coex_cmd bt_cmd;
4666
4667	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4668	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
4669
4670	return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4671	    &bt_cmd);
4672}
4673
4674static boolean_t
4675iwm_mvm_is_lar_supported(struct iwm_softc *sc)
4676{
4677	boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4678	boolean_t tlv_lar = fw_has_capa(&sc->ucode_capa,
4679					IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4680
4681	if (iwm_lar_disable)
4682		return FALSE;
4683
4684	/*
4685	 * Enable LAR only if it is supported by the FW (TLV) &&
4686	 * enabled in the NVM
4687	 */
4688	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
4689		return nvm_lar && tlv_lar;
4690	else
4691		return tlv_lar;
4692}
4693
4694static boolean_t
4695iwm_mvm_is_wifi_mcc_supported(struct iwm_softc *sc)
4696{
4697	return fw_has_api(&sc->ucode_capa,
4698			  IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
4699	       fw_has_capa(&sc->ucode_capa,
4700			   IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC);
4701}
4702
/*
 * Push a regulatory country-code (MCC) update to the firmware.
 *
 * 'alpha2' is a two-character ISO country code (e.g. "ZZ" for the
 * world domain).  A no-op returning 0 when LAR is unsupported.  The
 * response packet is only inspected under IWM_DEBUG; it is always
 * freed before returning.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
		.data = { &mcc_cmd },
	};
	int ret;
#ifdef IWM_DEBUG
	struct iwm_rx_packet *pkt;
	struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
	struct iwm_mcc_update_resp *mcc_resp;
	int n_channels;
	uint16_t mcc;
#endif
	/* v2 responses are indicated by the LAR_SUPPORT_V2 capability. */
	int resp_v2 = fw_has_capa(&sc->ucode_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);

	if (!iwm_mvm_is_lar_supported(sc)) {
		IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
		    __func__);
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Pack the two ASCII characters into a big-endian-style uint16. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (iwm_mvm_is_wifi_mcc_supported(sc))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	/* v1 commands are a prefix of the v2 layout; send only that much. */
	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "send MCC update to FW with '%c%c' src = %d\n",
	    alpha2[0], alpha2[1], mcc_cmd.source_id);

	ret = iwm_send_cmd(sc, &hcmd);
	if (ret)
		return ret;

#ifdef IWM_DEBUG
	pkt = hcmd.resp_pkt;

	/* Extract MCC response */
	if (resp_v2) {
		mcc_resp = (void *)pkt->data;
		mcc = mcc_resp->mcc;
		n_channels =  le32toh(mcc_resp->n_channels);
	} else {
		mcc_resp_v1 = (void *)pkt->data;
		mcc = mcc_resp_v1->mcc;
		n_channels =  le32toh(mcc_resp_v1->n_channels);
	}

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0)
		mcc = 0x3030;  /* "00" - world */

	IWM_DPRINTF(sc, IWM_DEBUG_LAR,
	    "regulatory domain '%c%c' (%d channels available)\n",
	    mcc >> 8, mcc & 0xff, n_channels);
#endif
	iwm_free_resp(sc, &hcmd);

	return 0;
}
4775
4776static void
4777iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4778{
4779	struct iwm_host_cmd cmd = {
4780		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
4781		.len = { sizeof(uint32_t), },
4782		.data = { &backoff, },
4783	};
4784
4785	if (iwm_send_cmd(sc, &cmd) != 0) {
4786		device_printf(sc->sc_dev,
4787		    "failed to change thermal tx backoff\n");
4788	}
4789}
4790
4791static int
4792iwm_init_hw(struct iwm_softc *sc)
4793{
4794	struct ieee80211com *ic = &sc->sc_ic;
4795	int error, i, ac;
4796
4797	if ((error = iwm_start_hw(sc)) != 0) {
4798		printf("iwm_start_hw: failed %d\n", error);
4799		return error;
4800	}
4801
4802	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
4803		printf("iwm_run_init_mvm_ucode: failed %d\n", error);
4804		return error;
4805	}
4806
4807	/*
4808	 * should stop and start HW since that INIT
4809	 * image just loaded
4810	 */
4811	iwm_stop_device(sc);
4812	sc->sc_ps_disabled = FALSE;
4813	if ((error = iwm_start_hw(sc)) != 0) {
4814		device_printf(sc->sc_dev, "could not initialize hardware\n");
4815		return error;
4816	}
4817
4818	/* omstart, this time with the regular firmware */
4819	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4820	if (error) {
4821		device_printf(sc->sc_dev, "could not load firmware\n");
4822		goto error;
4823	}
4824
4825	if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4826		device_printf(sc->sc_dev, "bt init conf failed\n");
4827		goto error;
4828	}
4829
4830	error = iwm_send_tx_ant_cfg(sc, iwm_mvm_get_valid_tx_ant(sc));
4831	if (error != 0) {
4832		device_printf(sc->sc_dev, "antenna config failed\n");
4833		goto error;
4834	}
4835
4836	/* Send phy db control command and then phy db calibration */
4837	if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4838		goto error;
4839
4840	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4841		device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4842		goto error;
4843	}
4844
4845	/* Add auxiliary station for scanning */
4846	if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
4847		device_printf(sc->sc_dev, "add_aux_sta failed\n");
4848		goto error;
4849	}
4850
4851	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4852		/*
4853		 * The channel used here isn't relevant as it's
4854		 * going to be overwritten in the other flows.
4855		 * For now use the first channel we have.
4856		 */
4857		if ((error = iwm_mvm_phy_ctxt_add(sc,
4858		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4859			goto error;
4860	}
4861
4862	/* Initialize tx backoffs to the minimum. */
4863	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4864		iwm_mvm_tt_tx_backoff(sc, 0);
4865
4866	error = iwm_mvm_power_update_device(sc);
4867	if (error)
4868		goto error;
4869
4870	if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4871		goto error;
4872
4873	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4874		if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
4875			goto error;
4876	}
4877
4878	/* Enable Tx queues. */
4879	for (ac = 0; ac < WME_NUM_AC; ac++) {
4880		error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4881		    iwm_mvm_ac_to_tx_fifo[ac]);
4882		if (error)
4883			goto error;
4884	}
4885
4886	if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
4887		device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4888		goto error;
4889	}
4890
4891	return 0;
4892
4893 error:
4894	iwm_stop_device(sc);
4895	return error;
4896}
4897
4898/* Allow multicast from our BSSID. */
4899static int
4900iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4901{
4902	struct ieee80211_node *ni = vap->iv_bss;
4903	struct iwm_mcast_filter_cmd *cmd;
4904	size_t size;
4905	int error;
4906
4907	size = roundup(sizeof(*cmd), 4);
4908	cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4909	if (cmd == NULL)
4910		return ENOMEM;
4911	cmd->filter_own = 1;
4912	cmd->port_id = 0;
4913	cmd->count = 0;
4914	cmd->pass_all = 1;
4915	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4916
4917	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4918	    IWM_CMD_SYNC, size, cmd);
4919	free(cmd, M_DEVBUF);
4920
4921	return (error);
4922}
4923
4924/*
4925 * ifnet interfaces
4926 */
4927
4928static void
4929iwm_init(struct iwm_softc *sc)
4930{
4931	int error;
4932
4933	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4934		return;
4935	}
4936	sc->sc_generation++;
4937	sc->sc_flags &= ~IWM_FLAG_STOPPED;
4938
4939	if ((error = iwm_init_hw(sc)) != 0) {
4940		printf("iwm_init_hw failed %d\n", error);
4941		iwm_stop(sc);
4942		return;
4943	}
4944
4945	/*
4946	 * Ok, firmware loaded and we are jogging
4947	 */
4948	sc->sc_flags |= IWM_FLAG_HW_INITED;
4949	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4950}
4951
4952static int
4953iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4954{
4955	struct iwm_softc *sc;
4956	int error;
4957
4958	sc = ic->ic_softc;
4959
4960	IWM_LOCK(sc);
4961	if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4962		IWM_UNLOCK(sc);
4963		return (ENXIO);
4964	}
4965	error = mbufq_enqueue(&sc->sc_snd, m);
4966	if (error) {
4967		IWM_UNLOCK(sc);
4968		return (error);
4969	}
4970	iwm_start(sc);
4971	IWM_UNLOCK(sc);
4972	return (0);
4973}
4974
4975/*
4976 * Dequeue packets from sendq and call send.
4977 */
4978static void
4979iwm_start(struct iwm_softc *sc)
4980{
4981	struct ieee80211_node *ni;
4982	struct mbuf *m;
4983	int ac = 0;
4984
4985	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4986	while (sc->qfullmsk == 0 &&
4987		(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4988		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4989		if (iwm_tx(sc, m, ni, ac) != 0) {
4990			if_inc_counter(ni->ni_vap->iv_ifp,
4991			    IFCOUNTER_OERRORS, 1);
4992			ieee80211_free_node(ni);
4993			continue;
4994		}
4995		sc->sc_tx_timer = 15;
4996	}
4997	IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4998}
4999
/*
 * Bring the interface down: mark it stopped, cancel LED blinking and
 * the tx watchdog, then halt the device and clear the scan flag.
 */
static void
iwm_stop(struct iwm_softc *sc)
{

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	/* Bumping the generation invalidates in-flight work. */
	sc->sc_generation++;
	iwm_led_blink_stop(sc);
	sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
	sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
}
5012
5013static void
5014iwm_watchdog(void *arg)
5015{
5016	struct iwm_softc *sc = arg;
5017	struct ieee80211com *ic = &sc->sc_ic;
5018
5019	if (sc->sc_tx_timer > 0) {
5020		if (--sc->sc_tx_timer == 0) {
5021			device_printf(sc->sc_dev, "device timeout\n");
5022#ifdef IWM_DEBUG
5023			iwm_nic_error(sc);
5024#endif
5025			ieee80211_restart_all(ic);
5026			counter_u64_add(sc->sc_ic.ic_oerrors, 1);
5027			return;
5028		}
5029	}
5030	callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
5031}
5032
5033static void
5034iwm_parent(struct ieee80211com *ic)
5035{
5036	struct iwm_softc *sc = ic->ic_softc;
5037	int startall = 0;
5038
5039	IWM_LOCK(sc);
5040	if (ic->ic_nrunning > 0) {
5041		if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
5042			iwm_init(sc);
5043			startall = 1;
5044		}
5045	} else if (sc->sc_flags & IWM_FLAG_HW_INITED)
5046		iwm_stop(sc);
5047	IWM_UNLOCK(sc);
5048	if (startall)
5049		ieee80211_start_all(ic);
5050}
5051
5052/*
5053 * The interrupt side of things
5054 */
5055
5056/*
5057 * error dumping routines are from iwlwifi/mvm/utils.c
5058 */
5059
5060/*
5061 * Note: This structure is read from the device with IO accesses,
5062 * and the reading already does the endian conversion. As it is
5063 * read with uint32_t-sized accesses, any members with a different size
5064 * need to be ordered correctly though!
5065 */
5066struct iwm_error_event_table {
5067	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5068	uint32_t error_id;		/* type of error */
5069	uint32_t trm_hw_status0;	/* TRM HW status */
5070	uint32_t trm_hw_status1;	/* TRM HW status */
5071	uint32_t blink2;		/* branch link */
5072	uint32_t ilink1;		/* interrupt link */
5073	uint32_t ilink2;		/* interrupt link */
5074	uint32_t data1;		/* error-specific data */
5075	uint32_t data2;		/* error-specific data */
5076	uint32_t data3;		/* error-specific data */
5077	uint32_t bcon_time;		/* beacon timer */
5078	uint32_t tsf_low;		/* network timestamp function timer */
5079	uint32_t tsf_hi;		/* network timestamp function timer */
5080	uint32_t gp1;		/* GP1 timer register */
5081	uint32_t gp2;		/* GP2 timer register */
5082	uint32_t fw_rev_type;	/* firmware revision type */
5083	uint32_t major;		/* uCode version major */
5084	uint32_t minor;		/* uCode version minor */
5085	uint32_t hw_ver;		/* HW Silicon version */
5086	uint32_t brd_ver;		/* HW board version */
5087	uint32_t log_pc;		/* log program counter */
5088	uint32_t frame_ptr;		/* frame pointer */
5089	uint32_t stack_ptr;		/* stack pointer */
5090	uint32_t hcmd;		/* last host command header */
5091	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
5092				 * rxtx_flag */
5093	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
5094				 * host_flag */
5095	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
5096				 * enc_flag */
5097	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
5098				 * time_flag */
5099	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
5100				 * wico interrupt */
5101	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
5102	uint32_t wait_event;		/* wait event() caller address */
5103	uint32_t l2p_control;	/* L2pControlField */
5104	uint32_t l2p_duration;	/* L2pDurationField */
5105	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
5106	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
5107	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
5108				 * (LMPM_PMG_SEL) */
5109	uint32_t u_timestamp;	/* indicate when the date and time of the
5110				 * compilation */
5111	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
5112} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5113
5114/*
5115 * UMAC error struct - relevant starting from family 8000 chip.
5116 * Note: This structure is read from the device with IO accesses,
5117 * and the reading already does the endian conversion. As it is
5118 * read with u32-sized accesses, any members with a different size
5119 * need to be ordered correctly though!
5120 */
5121struct iwm_umac_error_event_table {
5122	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5123	uint32_t error_id;	/* type of error */
5124	uint32_t blink1;	/* branch link */
5125	uint32_t blink2;	/* branch link */
5126	uint32_t ilink1;	/* interrupt link */
5127	uint32_t ilink2;	/* interrupt link */
5128	uint32_t data1;		/* error-specific data */
5129	uint32_t data2;		/* error-specific data */
5130	uint32_t data3;		/* error-specific data */
5131	uint32_t umac_major;
5132	uint32_t umac_minor;
5133	uint32_t frame_pointer;	/* core register 27*/
5134	uint32_t stack_pointer;	/* core register 28 */
5135	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
5136	uint32_t nic_isr_pref;	/* ISR status register */
5137} __packed;
5138
/*
 * Sizes used when sanity-checking the firmware error log before
 * dumping it (see iwm_nic_error()/iwm_nic_umac_error()).
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5141
5142#ifdef IWM_DEBUG
/*
 * Map of firmware error IDs to human-readable names.  The final
 * entry ("ADVANCED_SYSASSERT", num 0) is a catch-all returned by
 * iwm_desc_lookup() when no other entry matches.
 */
struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5164
5165static const char *
5166iwm_desc_lookup(uint32_t num)
5167{
5168	int i;
5169
5170	for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5171		if (advanced_lookup[i].num == num)
5172			return advanced_lookup[i].name;
5173
5174	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5175	return advanced_lookup[i].name;
5176}
5177
/*
 * Dump the UMAC error event table (family 8000+ firmware) to the
 * console.  Called from iwm_nic_error() when the firmware advertised
 * a UMAC error table pointer.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table table;
	uint32_t base;

	base = sc->umac_error_event_table;

	/* The log lives in device SRAM; low addresses cannot be valid. */
	if (base < 0x800000) {
		device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
		    base);
		return;
	}

	/* iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    table.ilink1);
	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    table.ilink2);
	device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
	device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
	device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
	device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
	device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
	    table.frame_pointer);
	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
	    table.stack_pointer);
	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
	    table.nic_isr_pref);
}
5224
5225/*
5226 * Support for dumping the error log seemed like a good idea ...
5227 * but it's mostly hex junk and the only sensible thing is the
5228 * hw/ucode revision (which we know anyway).  Since it's here,
5229 * I'll just leave it in, just in case e.g. the Intel guys want to
5230 * help us decipher some "ADVANCED_SYSASSERT" later.
5231 */
5232static void
5233iwm_nic_error(struct iwm_softc *sc)
5234{
5235	struct iwm_error_event_table table;
5236	uint32_t base;
5237
5238	device_printf(sc->sc_dev, "dumping device error log\n");
5239	base = sc->error_event_table;
5240	if (base < 0x800000) {
5241		device_printf(sc->sc_dev,
5242		    "Invalid error log pointer 0x%08x\n", base);
5243		return;
5244	}
5245
5246	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5247		device_printf(sc->sc_dev, "reading errlog failed\n");
5248		return;
5249	}
5250
5251	if (!table.valid) {
5252		device_printf(sc->sc_dev, "errlog not found, skipping\n");
5253		return;
5254	}
5255
5256	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5257		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5258		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5259		    sc->sc_flags, table.valid);
5260	}
5261
5262	device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5263	    iwm_desc_lookup(table.error_id));
5264	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5265	    table.trm_hw_status0);
5266	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5267	    table.trm_hw_status1);
5268	device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5269	device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5270	device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5271	device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5272	device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5273	device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5274	device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5275	device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5276	device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5277	device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5278	device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5279	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5280	    table.fw_rev_type);
5281	device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5282	device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5283	device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5284	device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5285	device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5286	device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5287	device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5288	device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5289	device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5290	device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5291	device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5292	device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5293	device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5294	device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5295	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5296	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5297	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5298	device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5299	device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5300
5301	if (sc->umac_error_event_table)
5302		iwm_nic_umac_error(sc);
5303}
5304#endif
5305
/*
 * Process all firmware packets contained in a single RX buffer.  One
 * buffer may carry several responses/notifications back to back; we
 * walk them by byte offset.  When an MPDU is the last packet in the
 * buffer the mbuf itself is handed up the stack; otherwise a copy is
 * made (see the "stolen" handling below) so the ring keeps a valid
 * buffer.
 */
static void
iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_cmd_response *cresp;
	struct mbuf *m1;
	uint32_t offset = 0;
	uint32_t maxoff = IWM_RBUF_SIZE;
	uint32_t nextoff;
	boolean_t stolen = FALSE;

/* True while another packet header could still fit before maxoff. */
#define HAVEROOM(a)	\
    ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)

	while (HAVEROOM(offset)) {
		struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
		    offset);
		int qid, idx, code, len;

		qid = pkt->hdr.qid;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
		    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
			break;
		}

		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "rx packet qid=%d idx=%d type=%x\n",
		    qid & ~0x80, pkt->hdr.idx, code);

		len = iwm_rx_packet_len(pkt);
		len += sizeof(uint32_t); /* account for status word */
		nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);

		/* Wake up anyone sleeping on this notification. */
		iwm_notification_wait_notify(sc->sc_notif_wait, code, pkt);

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt);
			break;

		case IWM_REPLY_RX_MPDU_CMD: {
			/*
			 * If this is the last frame in the RX buffer, we
			 * can directly feed the mbuf to the sharks here.
			 */
			struct iwm_rx_packet *nextpkt = mtodoff(m,
			    struct iwm_rx_packet *, nextoff);
			if (!HAVEROOM(nextoff) ||
			    (nextpkt->hdr.code == 0 &&
			     (nextpkt->hdr.qid & ~0x80) == 0 &&
			     nextpkt->hdr.idx == 0) ||
			    (nextpkt->len_n_flags ==
			     htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
				if (iwm_mvm_rx_rx_mpdu(sc, m, offset, stolen)) {
					stolen = FALSE;
					/* Make sure we abort the loop */
					nextoff = maxoff;
				}
				break;
			}

			/*
			 * Use m_copym instead of m_split, because that
			 * makes it easier to keep a valid rx buffer in
			 * the ring, when iwm_mvm_rx_rx_mpdu() fails.
			 *
			 * We need to start m_copym() at offset 0, to get the
			 * M_PKTHDR flag preserved.
			 */
			m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
			if (m1) {
				if (iwm_mvm_rx_rx_mpdu(sc, m1, offset, stolen))
					stolen = TRUE;
				else
					m_freem(m1);
			}
			break;
		}

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION: {
			struct iwm_missed_beacons_notif *resp;
			int missed;

			/* XXX look at mac_id to determine interface ID */
			struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

			resp = (void *)pkt->data;
			missed = le32toh(resp->consec_missed_beacons);

			IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
			    "%s: MISSED_BEACON: mac_id=%d, "
			    "consec_since_last_rx=%d, consec=%d, num_expect=%d "
			    "num_rx=%d\n",
			    __func__,
			    le32toh(resp->mac_id),
			    le32toh(resp->consec_missed_beacons_since_last_rx),
			    le32toh(resp->consec_missed_beacons),
			    le32toh(resp->num_expected_beacons),
			    le32toh(resp->num_recvd_beacons));

			/* Be paranoid */
			if (vap == NULL)
				break;

			/* XXX no net80211 locking? */
			if (vap->iv_state == IEEE80211_S_RUN &&
			    (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
				if (missed > vap->iv_bmissthreshold) {
					/* XXX bad locking; turn into task */
					IWM_UNLOCK(sc);
					ieee80211_beacon_miss(ic);
					IWM_LOCK(sc);
				}
			}

			break;
		}

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		case IWM_MVM_ALIVE:
			break;

		case IWM_CALIB_RES_NOTIF_PHY_DB:
			break;

		case IWM_STATISTICS_NOTIFICATION:
			iwm_mvm_handle_rx_statistics(sc, pkt);
			break;

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* Stash the response for the synchronous waiter. */
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			notif = (void *)pkt->data;

			/* Record the country code the firmware selected. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			IWM_DPRINTF(sc, IWM_DEBUG_LAR,
			    "fw source %d sent CC '%s'\n",
			    notif->source_id, sc->sc_fw_mcc);
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
		case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
				 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
			struct iwm_dts_measurement_notif_v1 *notif;

			if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
				device_printf(sc->sc_dev,
				    "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
				break;
			}
			notif = (void *)pkt->data;
			IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
			    "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
			    notif->temp);
			break;
		}

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP,
				 IWM_FW_PAGING_BLOCK_CMD):
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			/* Generic command acks: copy header + status only. */
			cresp = (void *)pkt->data;
			if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case IWM_PHY_DB_CMD:
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			notif = (void *)pkt->data;
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			notif = (void *)pkt->data;
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
			    "UMAC scan complete, status=0x%x\n",
			    notif->status);
			if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
				sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
				ieee80211_runtask(ic, &sc->sc_es_task);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
			    "complete, status=0x%x, %d channels scanned\n",
			    notif->status, notif->scanned_channels);
			ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			resp = (void *)pkt->data;

			device_printf(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type),
			    resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			notif = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_INTR,
			    "TE notif status = 0x%x action = 0x%x\n",
			    notif->status, notif->action);
			break;
		}

		/*
		 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
		 * messages. Just ignore them for now.
		 */
		case IWM_DEBUG_LOG_MSG:
			break;

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			rsp = (void *)pkt->data;

			IWM_DPRINTF(sc, IWM_DEBUG_CMD,
			    "queue cfg token=0x%x sta_id=%d "
			    "tid=%d scd_queue=%d\n",
			    rsp->token, rsp->sta_id, rsp->tid,
			    rsp->scd_queue);
			break;
		}

		default:
			device_printf(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid & ~0x80, idx,
			    pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(qid & (1 << 7)))
			iwm_cmd_done(sc, pkt);

		offset = nextoff;
	}
	/*
	 * If an MPDU was handed up via a copy, the ring slot has been
	 * given a fresh buffer and this original mbuf is ours to free.
	 */
	if (stolen)
		m_freem(m);
#undef HAVEROOM
}
5641
5642/*
5643 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5644 * Basic structure from if_iwn
5645 */
5646static void
5647iwm_notif_intr(struct iwm_softc *sc)
5648{
5649	uint16_t hw;
5650
5651	bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5652	    BUS_DMASYNC_POSTREAD);
5653
5654	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5655
5656	/*
5657	 * Process responses
5658	 */
5659	while (sc->rxq.cur != hw) {
5660		struct iwm_rx_ring *ring = &sc->rxq;
5661		struct iwm_rx_data *data = &ring->data[ring->cur];
5662
5663		bus_dmamap_sync(ring->data_dmat, data->map,
5664		    BUS_DMASYNC_POSTREAD);
5665
5666		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5667		    "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5668		iwm_handle_rxb(sc, data->m);
5669
5670		ring->cur = (ring->cur + 1) % IWM_RX_RING_COUNT;
5671	}
5672
5673	/*
5674	 * Tell the firmware that it can reuse the ring entries that
5675	 * we have just processed.
5676	 * Seems like the hardware gets upset unless we align
5677	 * the write by 8??
5678	 */
5679	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
5680	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, rounddown2(hw, 8));
5681}
5682
/*
 * Main interrupt handler.  Reads pending causes either from the ICT
 * table (when enabled) or directly from the CSR registers, then
 * dispatches: firmware errors, hardware errors, firmware-load
 * completion, rfkill, and RX (with the periodic-interrupt dance
 * inherited from the Linux driver).
 */
static void
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	IWM_LOCK(sc);
	/* Mask further interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Safely ignore these bits for debug checks below */
	r1 &= ~(IWM_CSR_INT_BIT_ALIVE | IWM_CSR_INT_BIT_SCD);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
		int i;
		struct ieee80211com *ic = &sc->sc_ic;
		struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);

#ifdef IWM_DEBUG
		iwm_nic_error(sc);
#endif
		/* Dump driver status (TX and RX rings) while we're here. */
		device_printf(sc->sc_dev, "driver status:\n");
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			device_printf(sc->sc_dev,
			    "  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued);
		}
		device_printf(sc->sc_dev,
		    "  rx ring: cur=%d\n", sc->rxq.cur);
		device_printf(sc->sc_dev,
		    "  802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);

		/* Don't stop the device; just do a VAP restart */
		IWM_UNLOCK(sc);

		if (vap == NULL) {
			printf("%s: null vap\n", __func__);
			return;
		}

		device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
		    "restarting\n", __func__, vap->iv_state);

		ieee80211_restart_all(ic);
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		device_printf(sc->sc_dev, "hardware error, stopping device\n");
		iwm_stop(sc);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc)) {
			device_printf(sc->sc_dev,
			    "%s: rfkill switch, disabling interface\n",
			    __func__);
			iwm_stop(sc);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		IWM_DPRINTF(sc, IWM_DEBUG_INTR,
		    "%s: unhandled interrupts: %x\n", __func__, r1);
	rv = 1;

 out_ena:
	/* Re-enable the interrupt mask we cleared on entry. */
	iwm_restore_interrupts(sc);
 out:
	IWM_UNLOCK(sc);
	return;
}
5836
5837/*
5838 * Autoconf glue-sniffing
5839 */
#define	PCI_VENDOR_INTEL		0x8086
#define	PCI_PRODUCT_INTEL_WL_3160_1	0x08b3
#define	PCI_PRODUCT_INTEL_WL_3160_2	0x08b4
#define	PCI_PRODUCT_INTEL_WL_3165_1	0x3165
#define	PCI_PRODUCT_INTEL_WL_3165_2	0x3166
#define	PCI_PRODUCT_INTEL_WL_7260_1	0x08b1
#define	PCI_PRODUCT_INTEL_WL_7260_2	0x08b2
#define	PCI_PRODUCT_INTEL_WL_7265_1	0x095a
#define	PCI_PRODUCT_INTEL_WL_7265_2	0x095b
#define	PCI_PRODUCT_INTEL_WL_8260_1	0x24f3
#define	PCI_PRODUCT_INTEL_WL_8260_2	0x24f4

/*
 * PCI device ID -> per-chip configuration table, consulted by
 * iwm_probe() and iwm_dev_check().
 */
static const struct iwm_devices {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices[] = {
	{ PCI_PRODUCT_INTEL_WL_3160_1, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3160_2, &iwm3160_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_1, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_3165_2, &iwm3165_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7260_2, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_1, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_7265_2, &iwm7265_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_1, &iwm8260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8260_2, &iwm8260_cfg },
};
5867
5868static int
5869iwm_probe(device_t dev)
5870{
5871	int i;
5872
5873	for (i = 0; i < nitems(iwm_devices); i++) {
5874		if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5875		    pci_get_device(dev) == iwm_devices[i].device) {
5876			device_set_desc(dev, iwm_devices[i].cfg->name);
5877			return (BUS_PROBE_DEFAULT);
5878		}
5879	}
5880
5881	return (ENXIO);
5882}
5883
5884static int
5885iwm_dev_check(device_t dev)
5886{
5887	struct iwm_softc *sc;
5888	uint16_t devid;
5889	int i;
5890
5891	sc = device_get_softc(dev);
5892
5893	devid = pci_get_device(dev);
5894	for (i = 0; i < nitems(iwm_devices); i++) {
5895		if (iwm_devices[i].device == devid) {
5896			sc->cfg = iwm_devices[i].cfg;
5897			return (0);
5898		}
5899	}
5900	device_printf(dev, "unknown adapter type\n");
5901	return ENXIO;
5902}
5903
/* PCI registers */
/* Retry-timeout config register; cleared in iwm_pci_attach(). */
#define PCI_CFG_RETRY_TIMEOUT	0x041
5906
5907static int
5908iwm_pci_attach(device_t dev)
5909{
5910	struct iwm_softc *sc;
5911	int count, error, rid;
5912	uint16_t reg;
5913
5914	sc = device_get_softc(dev);
5915
5916	/* We disable the RETRY_TIMEOUT register (0x41) to keep
5917	 * PCI Tx retries from interfering with C3 CPU state */
5918	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5919
5920	/* Enable bus-mastering and hardware bug workaround. */
5921	pci_enable_busmaster(dev);
5922	reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5923	/* if !MSI */
5924	if (reg & PCIM_STATUS_INTxSTATE) {
5925		reg &= ~PCIM_STATUS_INTxSTATE;
5926	}
5927	pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5928
5929	rid = PCIR_BAR(0);
5930	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5931	    RF_ACTIVE);
5932	if (sc->sc_mem == NULL) {
5933		device_printf(sc->sc_dev, "can't map mem space\n");
5934		return (ENXIO);
5935	}
5936	sc->sc_st = rman_get_bustag(sc->sc_mem);
5937	sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5938
5939	/* Install interrupt handler. */
5940	count = 1;
5941	rid = 0;
5942	if (pci_alloc_msi(dev, &count) == 0)
5943		rid = 1;
5944	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5945	    (rid != 0 ? 0 : RF_SHAREABLE));
5946	if (sc->sc_irq == NULL) {
5947		device_printf(dev, "can't map interrupt\n");
5948			return (ENXIO);
5949	}
5950	error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5951	    NULL, iwm_intr, sc, &sc->sc_ih);
5952	if (sc->sc_ih == NULL) {
5953		device_printf(dev, "can't establish interrupt");
5954			return (ENXIO);
5955	}
5956	sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5957
5958	return (0);
5959}
5960
5961static void
5962iwm_pci_detach(device_t dev)
5963{
5964	struct iwm_softc *sc = device_get_softc(dev);
5965
5966	if (sc->sc_irq != NULL) {
5967		bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5968		bus_release_resource(dev, SYS_RES_IRQ,
5969		    rman_get_rid(sc->sc_irq), sc->sc_irq);
5970		pci_release_msi(dev);
5971        }
5972	if (sc->sc_mem != NULL)
5973		bus_release_resource(dev, SYS_RES_MEMORY,
5974		    rman_get_rid(sc->sc_mem), sc->sc_mem);
5975}
5976
5977
5978
/*
 * Autoconf attach entry point.
 *
 * Sets up locks, the transmit mbuf queue, callouts and tasks, attaches
 * the PCI resources, reads and normalizes the hardware revision, and
 * allocates every DMA area the device needs (firmware staging memory,
 * "keep warm" page, ICT table, TX scheduler, TX and RX rings).  The
 * remainder of attach — firmware load and net80211 attachment — is
 * deferred to iwm_preinit() via a config_intrhook.  On any failure all
 * partially-created state is unwound through iwm_detach_local() and
 * ENXIO is returned.
 */
static int
iwm_attach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	int error;
	int txq_i, i;

	sc->sc_dev = dev;
	/* Mark attached up-front so iwm_detach_local() will run on failure. */
	sc->sc_attached = 1;
	IWM_LOCK_INIT(sc);
	mbufq_init(&sc->sc_snd, ifqmaxlen);
	callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);

	sc->sc_notif_wait = iwm_notification_wait_init(sc);
	if (sc->sc_notif_wait == NULL) {
		device_printf(dev, "failed to init notification wait struct\n");
		goto fail;
	}

	/* Init phy db */
	sc->sc_phy_db = iwm_phy_db_init(sc);
	if (!sc->sc_phy_db) {
		device_printf(dev, "Cannot init phy_db\n");
		goto fail;
	}

	/* PCI attach */
	error = iwm_pci_attach(dev);
	if (error != 0)
		goto fail;

	/* No synchronous command response is being awaited yet. */
	sc->sc_wantresp = -1;

	/* Check device type */
	error = iwm_dev_check(dev);
	if (error != 0)
		goto fail;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
	 * changed, and now the revision step also includes bit 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
				(IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		goto fail;
	}

	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
		int ret;
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (!ret) {
			device_printf(sc->sc_dev,
			    "Failed to wake up the nic\n");
			goto fail;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			/* Extract the stepping field from the AUX MISC reg. */
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
						(IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			device_printf(sc->sc_dev, "Failed to lock the nic\n");
			goto fail;
		}
	}

	/* special-case 7265D, it has the same PCI IDs. */
	if (sc->cfg == &iwm7265_cfg &&
	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
		sc->cfg = &iwm7265d_cfg;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		device_printf(dev, "could not allocate memory for firmware\n");
		goto fail;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		device_printf(dev, "could not allocate keep warm page\n");
		goto fail;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		device_printf(dev, "could not allocate ICT table\n");
		goto fail;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		device_printf(dev, "could not allocate TX scheduler rings\n");
		goto fail;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			device_printf(dev,
			    "could not allocate TX ring %d\n",
			    txq_i);
			goto fail;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		device_printf(dev, "could not allocate RX ring\n");
		goto fail;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_STA |
	    IEEE80211_C_WPA |		/* WPA/RSN */
	    IEEE80211_C_WME |
	    IEEE80211_C_PMGT |
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE	/* short preamble supported */
//	    IEEE80211_C_BGSCAN		/* capable of bg scanning */
	    ;
	/* Initialize all PHY contexts as unreferenced and channel-less. */
	for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
		sc->sc_phyctxt[i].color = 0;
		sc->sc_phyctxt[i].ref = 0;
		sc->sc_phyctxt[i].channel = NULL;
	}

	/* Default noise floor */
	sc->sc_noise = -96;

	/* Max RSSI */
	sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;

	/* Run the remainder of attach from a config_intrhook (iwm_preinit). */
	sc->sc_preinit_hook.ich_func = iwm_preinit;
	sc->sc_preinit_hook.ich_arg = sc;
	if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		goto fail;
	}

#ifdef IWM_DEBUG
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);

	return 0;

	/* Free allocated memory if something failed during attachment. */
fail:
	iwm_detach_local(sc, 0);

	return ENXIO;
}
6176
6177static int
6178iwm_is_valid_ether_addr(uint8_t *addr)
6179{
6180	char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6181
6182	if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6183		return (FALSE);
6184
6185	return (TRUE);
6186}
6187
6188static int
6189iwm_wme_update(struct ieee80211com *ic)
6190{
6191#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
6192	struct iwm_softc *sc = ic->ic_softc;
6193	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6194	struct iwm_vap *ivp = IWM_VAP(vap);
6195	struct iwm_node *in;
6196	struct wmeParams tmp[WME_NUM_AC];
6197	int aci, error;
6198
6199	if (vap == NULL)
6200		return (0);
6201
6202	IEEE80211_LOCK(ic);
6203	for (aci = 0; aci < WME_NUM_AC; aci++)
6204		tmp[aci] = ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
6205	IEEE80211_UNLOCK(ic);
6206
6207	IWM_LOCK(sc);
6208	for (aci = 0; aci < WME_NUM_AC; aci++) {
6209		const struct wmeParams *ac = &tmp[aci];
6210		ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6211		ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6212		ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6213		ivp->queue_params[aci].edca_txop =
6214		    IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6215	}
6216	ivp->have_wme = TRUE;
6217	if (ivp->is_uploaded && vap->iv_bss != NULL) {
6218		in = IWM_NODE(vap->iv_bss);
6219		if (in->in_assoc) {
6220			if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
6221				device_printf(sc->sc_dev,
6222				    "%s: failed to update MAC\n", __func__);
6223			}
6224		}
6225	}
6226	IWM_UNLOCK(sc);
6227
6228	return (0);
6229#undef IWM_EXP2
6230}
6231
/*
 * Deferred attach, run once from the config_intrhook established in
 * iwm_attach().  Starts the hardware, runs the init firmware image (after
 * which NVM data such as the MAC address is available), stops the device
 * again, builds the channel map, and completes net80211 attachment
 * (method table, radiotap).  On failure the entire driver instance is
 * torn down via iwm_detach_local().
 */
static void
iwm_preinit(void *arg)
{
	struct iwm_softc *sc = arg;
	device_t dev = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	int error;

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "->%s\n", __func__);

	IWM_LOCK(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		device_printf(dev, "could not initialize hardware\n");
		IWM_UNLOCK(sc);
		goto fail;
	}

	/* Run the init ucode once, then power the device back down. */
	error = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (error) {
		IWM_UNLOCK(sc);
		goto fail;
	}
	device_printf(dev,
	    "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));

	/* not all hardware can do 5GHz band */
	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
		    sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
	IWM_UNLOCK(sc);

	iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
	    ic->ic_channels);

	/*
	 * At this point we've committed - if we fail to do setup,
	 * we now also have to tear down the net80211 state.
	 */
	ieee80211_ifattach(ic);
	ic->ic_vap_create = iwm_vap_create;
	ic->ic_vap_delete = iwm_vap_delete;
	ic->ic_raw_xmit = iwm_raw_xmit;
	ic->ic_node_alloc = iwm_node_alloc;
	ic->ic_scan_start = iwm_scan_start;
	ic->ic_scan_end = iwm_scan_end;
	ic->ic_update_mcast = iwm_update_mcast;
	ic->ic_getradiocaps = iwm_init_channel_map;
	ic->ic_set_channel = iwm_set_channel;
	ic->ic_scan_curchan = iwm_scan_curchan;
	ic->ic_scan_mindwell = iwm_scan_mindwell;
	ic->ic_wme.wme_update = iwm_wme_update;
	ic->ic_parent = iwm_parent;
	ic->ic_transmit = iwm_transmit;
	iwm_radiotap_attach(sc);
	if (bootverbose)
		ieee80211_announce(ic);

	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
	    "<-%s\n", __func__);
	/* The hook has served its purpose either way. */
	config_intrhook_disestablish(&sc->sc_preinit_hook);

	return;
fail:
	config_intrhook_disestablish(&sc->sc_preinit_hook);
	iwm_detach_local(sc, 0);
}
6302
6303/*
6304 * Attach the interface to 802.11 radiotap.
6305 */
6306static void
6307iwm_radiotap_attach(struct iwm_softc *sc)
6308{
6309        struct ieee80211com *ic = &sc->sc_ic;
6310
6311	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6312	    "->%s begin\n", __func__);
6313        ieee80211_radiotap_attach(ic,
6314            &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6315                IWM_TX_RADIOTAP_PRESENT,
6316            &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6317                IWM_RX_RADIOTAP_PRESENT);
6318	IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6319	    "->%s end\n", __func__);
6320}
6321
6322static struct ieee80211vap *
6323iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6324    enum ieee80211_opmode opmode, int flags,
6325    const uint8_t bssid[IEEE80211_ADDR_LEN],
6326    const uint8_t mac[IEEE80211_ADDR_LEN])
6327{
6328	struct iwm_vap *ivp;
6329	struct ieee80211vap *vap;
6330
6331	if (!TAILQ_EMPTY(&ic->ic_vaps))         /* only one at a time */
6332		return NULL;
6333	ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6334	vap = &ivp->iv_vap;
6335	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6336	vap->iv_bmissthreshold = 10;            /* override default */
6337	/* Override with driver methods. */
6338	ivp->iv_newstate = vap->iv_newstate;
6339	vap->iv_newstate = iwm_newstate;
6340
6341	ivp->id = IWM_DEFAULT_MACID;
6342	ivp->color = IWM_DEFAULT_COLOR;
6343
6344	ivp->have_wme = FALSE;
6345
6346	ieee80211_ratectl_init(vap);
6347	/* Complete setup. */
6348	ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
6349	    mac);
6350	ic->ic_opmode = opmode;
6351
6352	return vap;
6353}
6354
6355static void
6356iwm_vap_delete(struct ieee80211vap *vap)
6357{
6358	struct iwm_vap *ivp = IWM_VAP(vap);
6359
6360	ieee80211_ratectl_deinit(vap);
6361	ieee80211_vap_detach(vap);
6362	free(ivp, M_80211_VAP);
6363}
6364
6365static void
6366iwm_xmit_queue_drain(struct iwm_softc *sc)
6367{
6368	struct mbuf *m;
6369	struct ieee80211_node *ni;
6370
6371	while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6372		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6373		ieee80211_free_node(ni);
6374		m_freem(m);
6375	}
6376}
6377
6378static void
6379iwm_scan_start(struct ieee80211com *ic)
6380{
6381	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6382	struct iwm_softc *sc = ic->ic_softc;
6383	int error;
6384
6385	IWM_LOCK(sc);
6386	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6387		/* This should not be possible */
6388		device_printf(sc->sc_dev,
6389		    "%s: Previous scan not completed yet\n", __func__);
6390	}
6391	if (fw_has_capa(&sc->ucode_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6392		error = iwm_mvm_umac_scan(sc);
6393	else
6394		error = iwm_mvm_lmac_scan(sc);
6395	if (error != 0) {
6396		device_printf(sc->sc_dev, "could not initiate scan\n");
6397		IWM_UNLOCK(sc);
6398		ieee80211_cancel_scan(vap);
6399	} else {
6400		sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6401		iwm_led_blink_start(sc);
6402		IWM_UNLOCK(sc);
6403	}
6404}
6405
/*
 * net80211 ic_scan_end method: stop the scan LED blink (re-enabling the
 * steady LED if the vap is RUNning), tell the firmware to stop scanning,
 * and cancel any still-queued end-of-scan task.
 */
static void
iwm_scan_end(struct ieee80211com *ic)
{
	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
	struct iwm_softc *sc = ic->ic_softc;

	IWM_LOCK(sc);
	iwm_led_blink_stop(sc);
	if (vap->iv_state == IEEE80211_S_RUN)
		iwm_mvm_led_enable(sc);
	if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
		/*
		 * Removing IWM_FLAG_SCAN_RUNNING now, is fine because
		 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
		 * taskqueue.
		 */
		sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
		iwm_mvm_scan_stop_wait(sc);
	}
	IWM_UNLOCK(sc);

	/*
	 * Make sure we don't race, if sc_es_task is still enqueued here.
	 * This is to make sure that it won't call ieee80211_scan_done
	 * when we have already started the next scan.
	 */
	taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
}
6434
/* Intentional no-op ic_update_mcast method (assigned in iwm_preinit()). */
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
6439
/* Intentional no-op ic_set_channel method (assigned in iwm_preinit()). */
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
6444
/* Intentional no-op ic_scan_curchan method (assigned in iwm_preinit()). */
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
6449
/* Intentional no-op ic_scan_mindwell method (assigned in iwm_preinit()). */
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
}
6455
/*
 * (Re)initialization task: serialize on the IWM_FLAG_BUSY flag, stop the
 * device, and bring it back up if any interface is still marked running.
 * Called directly from iwm_resume(); other callers sleep on sc_flags
 * until the flag is cleared and woken via wakeup().
 */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;

	IWM_LOCK(sc);
	/* Wait for any concurrent init/stop cycle to finish. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;
	iwm_stop(sc);
	if (sc->sc_ic.ic_nrunning > 0)
		iwm_init(sc);
	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}
6472
6473static int
6474iwm_resume(device_t dev)
6475{
6476	struct iwm_softc *sc = device_get_softc(dev);
6477	int do_reinit = 0;
6478
6479	/*
6480	 * We disable the RETRY_TIMEOUT register (0x41) to keep
6481	 * PCI Tx retries from interfering with C3 CPU state.
6482	 */
6483	pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6484	iwm_init_task(device_get_softc(dev));
6485
6486	IWM_LOCK(sc);
6487	if (sc->sc_flags & IWM_FLAG_SCANNING) {
6488		sc->sc_flags &= ~IWM_FLAG_SCANNING;
6489		do_reinit = 1;
6490	}
6491	IWM_UNLOCK(sc);
6492
6493	if (do_reinit)
6494		ieee80211_resume_all(&sc->sc_ic);
6495
6496	return 0;
6497}
6498
6499static int
6500iwm_suspend(device_t dev)
6501{
6502	int do_stop = 0;
6503	struct iwm_softc *sc = device_get_softc(dev);
6504
6505	do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6506
6507	ieee80211_suspend_all(&sc->sc_ic);
6508
6509	if (do_stop) {
6510		IWM_LOCK(sc);
6511		iwm_stop(sc);
6512		sc->sc_flags |= IWM_FLAG_SCANNING;
6513		IWM_UNLOCK(sc);
6514	}
6515
6516	return (0);
6517}
6518
/*
 * Common teardown path, used by both device_detach and attach-failure
 * unwinding.  'do_net80211' is zero when net80211 attachment never
 * happened (failure before/inside iwm_preinit()), nonzero for a full
 * detach.  Idempotent: only the first call after attach does any work.
 */
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	device_t dev = sc->sc_dev;
	int i;

	/* Guard against double teardown (e.g. attach failure then detach). */
	if (!sc->sc_attached)
		return 0;
	sc->sc_attached = 0;

	if (do_net80211)
		ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);

	/* Stop timers and the hardware before freeing anything they use. */
	callout_drain(&sc->sc_led_blink_to);
	callout_drain(&sc->sc_watchdog_to);
	iwm_stop_device(sc);
	if (do_net80211) {
		IWM_LOCK(sc);
		iwm_xmit_queue_drain(sc);
		IWM_UNLOCK(sc);
		ieee80211_ifdetach(&sc->sc_ic);
	}

	iwm_phy_db_free(sc->sc_phy_db);
	sc->sc_phy_db = NULL;

	iwm_free_nvm_data(sc->nvm_data);

	/* Free descriptor rings */
	iwm_free_rx_ring(sc, &sc->rxq);
	for (i = 0; i < nitems(sc->txq); i++)
		iwm_free_tx_ring(sc, &sc->txq[i]);

	/* Free firmware */
	if (fw->fw_fp != NULL)
		iwm_fw_info_free(fw);

	/* Free scheduler */
	iwm_dma_contig_free(&sc->sched_dma);
	iwm_dma_contig_free(&sc->ict_dma);
	iwm_dma_contig_free(&sc->kw_dma);
	iwm_dma_contig_free(&sc->fw_dma);

	iwm_free_fw_paging(sc);

	/* Finished with the hardware - detach things */
	iwm_pci_detach(dev);

	if (sc->sc_notif_wait != NULL) {
		iwm_notification_wait_free(sc->sc_notif_wait);
		sc->sc_notif_wait = NULL;
	}

	IWM_LOCK_DESTROY(sc);

	return (0);
}
6577
/*
 * device_detach method: full teardown, including net80211 state.
 */
static int
iwm_detach(device_t dev)
{
	struct iwm_softc *sc = device_get_softc(dev);

	return (iwm_detach_local(sc, 1));
}
6585
6586static device_method_t iwm_pci_methods[] = {
6587        /* Device interface */
6588        DEVMETHOD(device_probe,         iwm_probe),
6589        DEVMETHOD(device_attach,        iwm_attach),
6590        DEVMETHOD(device_detach,        iwm_detach),
6591        DEVMETHOD(device_suspend,       iwm_suspend),
6592        DEVMETHOD(device_resume,        iwm_resume),
6593
6594        DEVMETHOD_END
6595};
6596
/* Driver declaration: name, method table, and softc size. */
static driver_t iwm_pci_driver = {
        "iwm",
        iwm_pci_methods,
        sizeof (struct iwm_softc)
};

static devclass_t iwm_devclass;

/* Register the driver on the PCI bus and declare module dependencies. */
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
6609