1/******************************************************************************
2
3  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5  802.11 status code portion of this file from ethereal-0.10.6:
6    Copyright 2000, Axis Communications AB
7    Ethereal - Network traffic analyzer
8    By Gerald Combs <gerald@ethereal.com>
9    Copyright 1998 Gerald Combs
10
11  This program is free software; you can redistribute it and/or modify it
12  under the terms of version 2 of the GNU General Public License as
13  published by the Free Software Foundation.
14
15  This program is distributed in the hope that it will be useful, but WITHOUT
16  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18  more details.
19
20  You should have received a copy of the GNU General Public License along with
21  this program; if not, write to the Free Software Foundation, Inc., 59
22  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23
24  The full GNU General Public License is included in this distribution in the
25  file called LICENSE.
26
27  Contact Information:
28  James P. Ketrenos <ipw2100-admin@linux.intel.com>
29  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include "ipw2200.h"
34#include <linux/version.h>
35
36
37#ifndef KBUILD_EXTMOD
38#define VK "k"
39#else
40#define VK
41#endif
42
43#ifdef CONFIG_IPW2200_DEBUG
44#define VD "d"
45#else
46#define VD
47#endif
48
49#ifdef CONFIG_IPW2200_MONITOR
50#define VM "m"
51#else
52#define VM
53#endif
54
55#ifdef CONFIG_IPW2200_PROMISCUOUS
56#define VP "p"
57#else
58#define VP
59#endif
60
61#ifdef CONFIG_IPW2200_RADIOTAP
62#define VR "r"
63#else
64#define VR
65#endif
66
67#ifdef CONFIG_IPW2200_QOS
68#define VQ "q"
69#else
70#define VQ
71#endif
72
73#define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ
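/*
 * Illustrative example: for an in-tree build (KBUILD_EXTMOD unset) with
 * CONFIG_IPW2200_DEBUG and CONFIG_IPW2200_QOS enabled, the suffixes above
 * concatenate so that IPW2200_VERSION expands to "1.2.0kdq".
 */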
74#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
75#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
76#define DRV_VERSION     IPW2200_VERSION
77
78#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80MODULE_DESCRIPTION(DRV_DESCRIPTION);
81MODULE_VERSION(DRV_VERSION);
82MODULE_AUTHOR(DRV_COPYRIGHT);
83MODULE_LICENSE("GPL");
84
85static int cmdlog = 0;
86static int debug = 0;
87static int channel = 0;
88static int mode = 0;
89
90static u32 ipw_debug_level;
91static int associate = 1;
92static int auto_create = 1;
93static int led = 0;
94static int disable = 0;
95static int bt_coexist = 0;
96static int hwcrypto = 0;
97static int roaming = 1;
98static const char ipw_modes[] = {
99	'a', 'b', 'g', '?'
100};
101static int antenna = CFG_SYS_ANTENNA_BOTH;
102
103#ifdef CONFIG_IPW2200_PROMISCUOUS
104static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
105#endif
106
107
108#ifdef CONFIG_IPW2200_QOS
109static int qos_enable = 0;
110static int qos_burst_enable = 0;
111static int qos_no_ack_mask = 0;
112static int burst_duration_CCK = 0;
113static int burst_duration_OFDM = 0;
114
115static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
116	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
117	 QOS_TX3_CW_MIN_OFDM},
118	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
119	 QOS_TX3_CW_MAX_OFDM},
120	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
121	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
122	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
123	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
124};
125
126static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
127	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128	 QOS_TX3_CW_MIN_CCK},
129	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130	 QOS_TX3_CW_MAX_CCK},
131	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
132	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
133	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
134	 QOS_TX3_TXOP_LIMIT_CCK}
135};
136
137static struct ieee80211_qos_parameters def_parameters_OFDM = {
138	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
139	 DEF_TX3_CW_MIN_OFDM},
140	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
141	 DEF_TX3_CW_MAX_OFDM},
142	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
143	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
144	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
145	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
146};
147
148static struct ieee80211_qos_parameters def_parameters_CCK = {
149	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150	 DEF_TX3_CW_MIN_CCK},
151	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152	 DEF_TX3_CW_MAX_CCK},
153	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
154	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
155	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
156	 DEF_TX3_TXOP_LIMIT_CCK}
157};
158
159static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
160
161static int from_priority_to_tx_queue[] = {
162	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
163	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
164};
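/*
 * The table above presumably maps the eight 802.11 user priorities (0-7)
 * onto the four hardware TX queues; note that priorities 0/3 share queue 1
 * and priorities 1/2 share queue 2.
 */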
165
166static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
167
168static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169				       *qos_param);
170static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171				     *qos_param);
172#endif				/* CONFIG_IPW2200_QOS */
173
174static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
175static void ipw_remove_current_network(struct ipw_priv *priv);
176static void ipw_rx(struct ipw_priv *priv);
177static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
178				struct clx2_tx_queue *txq, int qindex);
179static int ipw_queue_reset(struct ipw_priv *priv);
180
181static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
182			     int len, int sync);
183
184static void ipw_tx_queue_free(struct ipw_priv *);
185
186static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188static void ipw_rx_queue_replenish(void *);
189static int ipw_up(struct ipw_priv *);
190static void ipw_bg_up(struct work_struct *work);
191static void ipw_down(struct ipw_priv *);
192static void ipw_bg_down(struct work_struct *work);
193static int ipw_config(struct ipw_priv *);
194static int init_supported_rates(struct ipw_priv *priv,
195				struct ipw_supported_rates *prates);
196static void ipw_set_hwcrypto_keys(struct ipw_priv *);
197static void ipw_send_wep_keys(struct ipw_priv *, int);
198
199static int snprint_line(char *buf, size_t count,
200			const u8 * data, u32 len, u32 ofs)
201{
202	int out, i, j, l;
203	char c;
204
205	out = snprintf(buf, count, "%08X", ofs);
206
207	for (l = 0, i = 0; i < 2; i++) {
208		out += snprintf(buf + out, count - out, " ");
209		for (j = 0; j < 8 && l < len; j++, l++)
210			out += snprintf(buf + out, count - out, "%02X ",
211					data[(i * 8 + j)]);
212		for (; j < 8; j++)
213			out += snprintf(buf + out, count - out, "   ");
214	}
215
216	out += snprintf(buf + out, count - out, " ");
217	for (l = 0, i = 0; i < 2; i++) {
218		out += snprintf(buf + out, count - out, " ");
219		for (j = 0; j < 8 && l < len; j++, l++) {
220			c = data[(i * 8 + j)];
221			if (!isascii(c) || !isprint(c))
222				c = '.';
223
224			out += snprintf(buf + out, count - out, "%c", c);
225		}
226
227		for (; j < 8; j++)
228			out += snprintf(buf + out, count - out, " ");
229	}
230
231	return out;
232}
233
234static void printk_buf(int level, const u8 * data, u32 len)
235{
236	char line[81];
237	u32 ofs = 0;
238	if (!(ipw_debug_level & level))
239		return;
240
241	while (len) {
242		snprint_line(line, sizeof(line), &data[ofs],
243			     min(len, 16U), ofs);
244		printk(KERN_DEBUG "%s\n", line);
245		ofs += 16;
246		len -= min(len, 16U);
247	}
248}
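/*
 * Illustrative example of the dump format produced above, assuming a
 * 20 byte buffer containing 'A'..'T' at offset 0:
 *
 *   00000000 41 42 43 44 45 46 47 48  49 4A 4B 4C 4D 4E 4F 50   ABCDEFGH IJKLMNOP
 *   00000010 51 52 53 54                                         QRST
 *
 * i.e. an 8-digit offset, two groups of up to eight hex bytes, and the
 * printable-ASCII rendering of the same bytes (non-printable bytes shown
 * as '.').
 */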
249
250static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
251{
252	size_t out = size;
253	u32 ofs = 0;
254	int total = 0;
255
256	while (size && len) {
257		out = snprint_line(output, size, &data[ofs],
258				   min_t(size_t, len, 16U), ofs);
259
260		ofs += 16;
261		output += out;
262		size -= out;
263		len -= min_t(size_t, len, 16U);
264		total += out;
265	}
266	return total;
267}
268
269/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
270static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
271#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
272
273/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
274static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
275#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
276
277/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
278static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
279static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
280{
281	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
282		     __LINE__, (u32) (b), (u32) (c));
283	_ipw_write_reg8(a, b, c);
284}
285
286/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
287static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
288static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
289{
290	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
291		     __LINE__, (u32) (b), (u32) (c));
292	_ipw_write_reg16(a, b, c);
293}
294
295/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
297static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
298{
299	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
300		     __LINE__, (u32) (b), (u32) (c));
301	_ipw_write_reg32(a, b, c);
302}
303
304/* 8-bit direct write (low 4K) */
305#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
306
307/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
308#define ipw_write8(ipw, ofs, val) \
309 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
310 _ipw_write8(ipw, ofs, val)
311
312/* 16-bit direct write (low 4K) */
313#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
314
315/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316#define ipw_write16(ipw, ofs, val) \
317 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318 _ipw_write16(ipw, ofs, val)
319
320/* 32-bit direct write (low 4K) */
321#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
322
323/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324#define ipw_write32(ipw, ofs, val) \
325 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326 _ipw_write32(ipw, ofs, val)
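/*
 * Note: the ipw_write8/16/32 wrappers above expand to two statements (the
 * debug print followed by the actual write), so they are not safe as the
 * sole body of an un-braced if/else; callers need braces around them.
 */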
327
328/* 8-bit direct read (low 4K) */
329#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
330
331/* 8-bit direct read (low 4K), with debug wrapper */
332static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
333{
334	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335	return _ipw_read8(ipw, ofs);
336}
337
338/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
340
341/* 16-bit direct read (low 4K) */
342#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
343
344/* 16-bit direct read (low 4K), with debug wrapper */
345static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
346{
347	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348	return _ipw_read16(ipw, ofs);
349}
350
351/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
353
354/* 32-bit direct read (low 4K) */
355#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
356
357/* 32-bit direct read (low 4K), with debug wrapper */
358static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
359{
360	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361	return _ipw_read32(ipw, ofs);
362}
363
364/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
366
367/* multi-byte read (above 4K), with debug wrapper */
368static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369static inline void __ipw_read_indirect(const char *f, int l,
370				       struct ipw_priv *a, u32 b, u8 * c, int d)
371{
372	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
373		     d);
374	_ipw_read_indirect(a, b, c, d);
375}
376
377/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378#define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
379
/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
381static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382				int num);
383#define ipw_write_indirect(a, b, c, d) \
384	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385	_ipw_write_indirect(a, b, c, d)
386
387/* 32-bit indirect write (above 4K) */
388static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
389{
390	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
393}
394
395/* 8-bit indirect write (above 4K) */
396static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
397{
398	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
399	u32 dif_len = reg - aligned_addr;
400
401	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
404}
405
406/* 16-bit indirect write (above 4K) */
407static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
408{
409	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
410	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
411
412	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
415}
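/*
 * Worked example (illustrative): for reg = 0x00300103, aligned_addr is
 * 0x00300100 and _ipw_write_reg8() writes its byte through
 * IPW_INDIRECT_DATA + 3; the 16-bit variant masks off the low bit of the
 * offset, so the same reg would go through IPW_INDIRECT_DATA + 2.
 */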
416
417/* 8-bit indirect read (above 4K) */
418static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
419{
420	u32 word;
421	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424	return (word >> ((reg & 0x3) * 8)) & 0xff;
425}
426
427/* 32-bit indirect read (above 4K) */
428static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
429{
430	u32 value;
431
432	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
433
434	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437	return value;
438}
439
440/* General purpose, no alignment requirement, iterative (multi-byte) read, */
441/*    for area above 1st 4K of SRAM/reg space */
442static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443			       int num)
444{
445	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
446	u32 dif_len = addr - aligned_addr;
447	u32 i;
448
449	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
450
451	if (num <= 0) {
452		return;
453	}
454
455	/* Read the first dword (or portion) byte by byte */
456	if (unlikely(dif_len)) {
457		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458		/* Start reading at aligned_addr + dif_len */
459		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461		aligned_addr += 4;
462	}
463
464	/* Read all of the middle dwords as dwords, with auto-increment */
465	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
468
469	/* Read the last dword (or portion) byte by byte */
470	if (unlikely(num)) {
471		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472		for (i = 0; num > 0; i++, num--)
473			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
474	}
475}
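/*
 * Worked example (illustrative): a 6 byte read starting 2 bytes past a
 * dword boundary first latches the aligned address and reads the two
 * trailing bytes of that dword individually, then switches to the
 * auto-increment registers and reads the remaining 4 bytes as one dword.
 */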
476
477/* General purpose, no alignment requirement, iterative (multi-byte) write, */
478/*    for area above 1st 4K of SRAM/reg space */
479static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480				int num)
481{
482	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
483	u32 dif_len = addr - aligned_addr;
484	u32 i;
485
486	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
487
488	if (num <= 0) {
489		return;
490	}
491
492	/* Write the first dword (or portion) byte by byte */
493	if (unlikely(dif_len)) {
494		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495		/* Start writing at aligned_addr + dif_len */
496		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498		aligned_addr += 4;
499	}
500
501	/* Write all of the middle dwords as dwords, with auto-increment */
502	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
505
506	/* Write the last dword (or portion) byte by byte */
507	if (unlikely(num)) {
508		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509		for (i = 0; num > 0; i++, num--, buf++)
510			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
511	}
512}
513
514/* General purpose, no alignment requirement, iterative (multi-byte) write, */
515/*    for 1st 4K of SRAM/regs space */
516static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517			     int num)
518{
519	memcpy_toio((priv->hw_base + addr), buf, num);
520}
521
522/* Set bit(s) in low 4K of SRAM/regs */
523static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
524{
525	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
526}
527
528/* Clear bit(s) in low 4K of SRAM/regs */
529static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
530{
531	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
532}
533
534static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
535{
536	if (priv->status & STATUS_INT_ENABLED)
537		return;
538	priv->status |= STATUS_INT_ENABLED;
539	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
540}
541
542static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
543{
544	if (!(priv->status & STATUS_INT_ENABLED))
545		return;
546	priv->status &= ~STATUS_INT_ENABLED;
547	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
548}
549
550static inline void ipw_enable_interrupts(struct ipw_priv *priv)
551{
552	unsigned long flags;
553
554	spin_lock_irqsave(&priv->irq_lock, flags);
555	__ipw_enable_interrupts(priv);
556	spin_unlock_irqrestore(&priv->irq_lock, flags);
557}
558
559static inline void ipw_disable_interrupts(struct ipw_priv *priv)
560{
561	unsigned long flags;
562
563	spin_lock_irqsave(&priv->irq_lock, flags);
564	__ipw_disable_interrupts(priv);
565	spin_unlock_irqrestore(&priv->irq_lock, flags);
566}
567
568static char *ipw_error_desc(u32 val)
569{
570	switch (val) {
571	case IPW_FW_ERROR_OK:
572		return "ERROR_OK";
573	case IPW_FW_ERROR_FAIL:
574		return "ERROR_FAIL";
575	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576		return "MEMORY_UNDERFLOW";
577	case IPW_FW_ERROR_MEMORY_OVERFLOW:
578		return "MEMORY_OVERFLOW";
579	case IPW_FW_ERROR_BAD_PARAM:
580		return "BAD_PARAM";
581	case IPW_FW_ERROR_BAD_CHECKSUM:
582		return "BAD_CHECKSUM";
583	case IPW_FW_ERROR_NMI_INTERRUPT:
584		return "NMI_INTERRUPT";
585	case IPW_FW_ERROR_BAD_DATABASE:
586		return "BAD_DATABASE";
587	case IPW_FW_ERROR_ALLOC_FAIL:
588		return "ALLOC_FAIL";
589	case IPW_FW_ERROR_DMA_UNDERRUN:
590		return "DMA_UNDERRUN";
591	case IPW_FW_ERROR_DMA_STATUS:
592		return "DMA_STATUS";
593	case IPW_FW_ERROR_DINO_ERROR:
594		return "DINO_ERROR";
595	case IPW_FW_ERROR_EEPROM_ERROR:
596		return "EEPROM_ERROR";
597	case IPW_FW_ERROR_SYSASSERT:
598		return "SYSASSERT";
599	case IPW_FW_ERROR_FATAL_ERROR:
600		return "FATAL_ERROR";
601	default:
602		return "UNKNOWN_ERROR";
603	}
604}
605
606static void ipw_dump_error_log(struct ipw_priv *priv,
607			       struct ipw_fw_error *error)
608{
609	u32 i;
610
611	if (!error) {
612		IPW_ERROR("Error allocating and capturing error log.  "
613			  "Nothing to dump.\n");
614		return;
615	}
616
617	IPW_ERROR("Start IPW Error Log Dump:\n");
618	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619		  error->status, error->config);
620
621	for (i = 0; i < error->elem_len; i++)
622		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
623			  ipw_error_desc(error->elem[i].desc),
624			  error->elem[i].time,
625			  error->elem[i].blink1,
626			  error->elem[i].blink2,
627			  error->elem[i].link1,
628			  error->elem[i].link2, error->elem[i].data);
629	for (i = 0; i < error->log_len; i++)
630		IPW_ERROR("%i\t0x%08x\t%i\n",
631			  error->log[i].time,
632			  error->log[i].data, error->log[i].event);
633}
634
635static inline int ipw_is_init(struct ipw_priv *priv)
636{
637	return (priv->status & STATUS_INIT) ? 1 : 0;
638}
639
640static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
641{
642	u32 addr, field_info, field_len, field_count, total_len;
643
644	IPW_DEBUG_ORD("ordinal = %i\n", ord);
645
646	if (!priv || !val || !len) {
647		IPW_DEBUG_ORD("Invalid argument\n");
648		return -EINVAL;
649	}
650
651	/* verify device ordinal tables have been initialized */
652	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653		IPW_DEBUG_ORD("Access ordinals before initialization\n");
654		return -EINVAL;
655	}
656
657	switch (IPW_ORD_TABLE_ID_MASK & ord) {
658	case IPW_ORD_TABLE_0_MASK:
659		/*
660		 * TABLE 0: Direct access to a table of 32 bit values
661		 *
662		 * This is a very simple table with the data directly
663		 * read from the table
664		 */
665
666		/* remove the table id from the ordinal */
667		ord &= IPW_ORD_TABLE_VALUE_MASK;
668
669		/* boundary check */
670		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) longer than "
				      "max (%i)\n", ord, priv->table0_len);
673			return -EINVAL;
674		}
675
676		/* verify we have enough room to store the value */
677		if (*len < sizeof(u32)) {
678			IPW_DEBUG_ORD("ordinal buffer length too small, "
679				      "need %zd\n", sizeof(u32));
680			return -EINVAL;
681		}
682
683		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684			      ord, priv->table0_addr + (ord << 2));
685
686		*len = sizeof(u32);
687		ord <<= 2;
688		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689		break;
690
691	case IPW_ORD_TABLE_1_MASK:
692		/*
693		 * TABLE 1: Indirect access to a table of 32 bit values
694		 *
695		 * This is a fairly large table of u32 values each
696		 * representing starting addr for the data (which is
697		 * also a u32)
698		 */
699
700		/* remove the table id from the ordinal */
701		ord &= IPW_ORD_TABLE_VALUE_MASK;
702
703		/* boundary check */
704		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too large\n");
706			return -EINVAL;
707		}
708
709		/* verify we have enough room to store the value */
710		if (*len < sizeof(u32)) {
711			IPW_DEBUG_ORD("ordinal buffer length too small, "
712				      "need %zd\n", sizeof(u32));
713			return -EINVAL;
714		}
715
716		*((u32 *) val) =
717		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718		*len = sizeof(u32);
719		break;
720
721	case IPW_ORD_TABLE_2_MASK:
722		/*
723		 * TABLE 2: Indirect access to a table of variable sized values
724		 *
		 * This table consists of entries, each containing
		 *     - a dword containing the starting offset of the data
		 *     - a dword containing the length in the first 16 bits
		 *       and the count in the second 16 bits
729		 */
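		/*
		 * Illustrative example: a field_info value of 0x00040010 read
		 * below unpacks (on a little-endian host, which the u16 casts
		 * assume) into field_len = 0x0010 and field_count = 0x0004,
		 * giving a total_len of 64 bytes.
		 */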
730
731		/* remove the table id from the ordinal */
732		ord &= IPW_ORD_TABLE_VALUE_MASK;
733
734		/* boundary check */
735		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too large\n");
737			return -EINVAL;
738		}
739
740		/* get the address of statistic */
741		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
742
		/* get the second DW of statistics;
		 * two 16-bit words - first is length, second is count */
745		field_info =
746		    ipw_read_reg32(priv,
747				   priv->table2_addr + (ord << 3) +
748				   sizeof(u32));
749
750		/* get each entry length */
751		field_len = *((u16 *) & field_info);
752
753		/* get number of entries */
754		field_count = *(((u16 *) & field_info) + 1);
755
		/* abort if not enough memory */
757		total_len = field_len * field_count;
758		if (total_len > *len) {
759			*len = total_len;
760			return -EINVAL;
761		}
762
763		*len = total_len;
764		if (!total_len)
765			return 0;
766
767		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768			      "field_info = 0x%08x\n",
769			      addr, total_len, field_info);
770		ipw_read_indirect(priv, addr, val, total_len);
771		break;
772
773	default:
774		IPW_DEBUG_ORD("Invalid ordinal!\n");
775		return -EINVAL;
776
777	}
778
779	return 0;
780}
781
782static void ipw_init_ordinals(struct ipw_priv *priv)
783{
784	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785	priv->table0_len = ipw_read32(priv, priv->table0_addr);
786
787	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788		      priv->table0_addr, priv->table0_len);
789
790	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
792
793	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794		      priv->table1_addr, priv->table1_len);
795
796	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use lower 16 bits */
799
800	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801		      priv->table2_addr, priv->table2_len);
802
803}
804
805static u32 ipw_register_toggle(u32 reg)
806{
807	reg &= ~IPW_START_STANDBY;
808	if (reg & IPW_GATE_ODMA)
809		reg &= ~IPW_GATE_ODMA;
810	if (reg & IPW_GATE_IDMA)
811		reg &= ~IPW_GATE_IDMA;
812	if (reg & IPW_GATE_ADMA)
813		reg &= ~IPW_GATE_ADMA;
814	return reg;
815}
816
817/*
818 * LED behavior:
 * - On radio ON, turn on any LEDs that need to be on during startup
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
824 *
825 */
826#define LD_TIME_LINK_ON msecs_to_jiffies(300)
827#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828#define LD_TIME_ACT_ON msecs_to_jiffies(250)
829
830static void ipw_led_link_on(struct ipw_priv *priv)
831{
832	unsigned long flags;
833	u32 led;
834
835	/* If configured to not use LEDs, or nic_type is 1,
836	 * then we don't toggle a LINK led */
837	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838		return;
839
840	spin_lock_irqsave(&priv->lock, flags);
841
842	if (!(priv->status & STATUS_RF_KILL_MASK) &&
843	    !(priv->status & STATUS_LED_LINK_ON)) {
844		IPW_DEBUG_LED("Link LED On\n");
845		led = ipw_read_reg32(priv, IPW_EVENT_REG);
846		led |= priv->led_association_on;
847
848		led = ipw_register_toggle(led);
849
850		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851		ipw_write_reg32(priv, IPW_EVENT_REG, led);
852
853		priv->status |= STATUS_LED_LINK_ON;
854
855		/* If we aren't associated, schedule turning the LED off */
856		if (!(priv->status & STATUS_ASSOCIATED))
857			queue_delayed_work(priv->workqueue,
858					   &priv->led_link_off,
859					   LD_TIME_LINK_ON);
860	}
861
862	spin_unlock_irqrestore(&priv->lock, flags);
863}
864
865static void ipw_bg_led_link_on(struct work_struct *work)
866{
867	struct ipw_priv *priv =
868		container_of(work, struct ipw_priv, led_link_on.work);
869	mutex_lock(&priv->mutex);
870	ipw_led_link_on(priv);
871	mutex_unlock(&priv->mutex);
872}
873
874static void ipw_led_link_off(struct ipw_priv *priv)
875{
876	unsigned long flags;
877	u32 led;
878
879	/* If configured not to use LEDs, or nic type is 1,
	 * then we don't toggle the LINK led. */
881	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
882		return;
883
884	spin_lock_irqsave(&priv->lock, flags);
885
886	if (priv->status & STATUS_LED_LINK_ON) {
887		led = ipw_read_reg32(priv, IPW_EVENT_REG);
888		led &= priv->led_association_off;
889		led = ipw_register_toggle(led);
890
891		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892		ipw_write_reg32(priv, IPW_EVENT_REG, led);
893
894		IPW_DEBUG_LED("Link LED Off\n");
895
896		priv->status &= ~STATUS_LED_LINK_ON;
897
898		/* If we aren't associated and the radio is on, schedule
899		 * turning the LED on (blink while unassociated) */
900		if (!(priv->status & STATUS_RF_KILL_MASK) &&
901		    !(priv->status & STATUS_ASSOCIATED))
902			queue_delayed_work(priv->workqueue, &priv->led_link_on,
903					   LD_TIME_LINK_OFF);
904
905	}
906
907	spin_unlock_irqrestore(&priv->lock, flags);
908}
909
910static void ipw_bg_led_link_off(struct work_struct *work)
911{
912	struct ipw_priv *priv =
913		container_of(work, struct ipw_priv, led_link_off.work);
914	mutex_lock(&priv->mutex);
915	ipw_led_link_off(priv);
916	mutex_unlock(&priv->mutex);
917}
918
919static void __ipw_led_activity_on(struct ipw_priv *priv)
920{
921	u32 led;
922
923	if (priv->config & CFG_NO_LED)
924		return;
925
926	if (priv->status & STATUS_RF_KILL_MASK)
927		return;
928
929	if (!(priv->status & STATUS_LED_ACT_ON)) {
930		led = ipw_read_reg32(priv, IPW_EVENT_REG);
931		led |= priv->led_activity_on;
932
933		led = ipw_register_toggle(led);
934
935		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936		ipw_write_reg32(priv, IPW_EVENT_REG, led);
937
938		IPW_DEBUG_LED("Activity LED On\n");
939
940		priv->status |= STATUS_LED_ACT_ON;
941
942		cancel_delayed_work(&priv->led_act_off);
943		queue_delayed_work(priv->workqueue, &priv->led_act_off,
944				   LD_TIME_ACT_ON);
945	} else {
946		/* Reschedule LED off for full time period */
947		cancel_delayed_work(&priv->led_act_off);
948		queue_delayed_work(priv->workqueue, &priv->led_act_off,
949				   LD_TIME_ACT_ON);
950	}
951}
952
953
954static void ipw_led_activity_off(struct ipw_priv *priv)
955{
956	unsigned long flags;
957	u32 led;
958
959	if (priv->config & CFG_NO_LED)
960		return;
961
962	spin_lock_irqsave(&priv->lock, flags);
963
964	if (priv->status & STATUS_LED_ACT_ON) {
965		led = ipw_read_reg32(priv, IPW_EVENT_REG);
966		led &= priv->led_activity_off;
967
968		led = ipw_register_toggle(led);
969
970		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
971		ipw_write_reg32(priv, IPW_EVENT_REG, led);
972
973		IPW_DEBUG_LED("Activity LED Off\n");
974
975		priv->status &= ~STATUS_LED_ACT_ON;
976	}
977
978	spin_unlock_irqrestore(&priv->lock, flags);
979}
980
981static void ipw_bg_led_activity_off(struct work_struct *work)
982{
983	struct ipw_priv *priv =
984		container_of(work, struct ipw_priv, led_act_off.work);
985	mutex_lock(&priv->mutex);
986	ipw_led_activity_off(priv);
987	mutex_unlock(&priv->mutex);
988}
989
990static void ipw_led_band_on(struct ipw_priv *priv)
991{
992	unsigned long flags;
993	u32 led;
994
995	/* Only nic type 1 supports mode LEDs */
996	if (priv->config & CFG_NO_LED ||
997	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
998		return;
999
1000	spin_lock_irqsave(&priv->lock, flags);
1001
1002	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1003	if (priv->assoc_network->mode == IEEE_A) {
1004		led |= priv->led_ofdm_on;
1005		led &= priv->led_association_off;
1006		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1007	} else if (priv->assoc_network->mode == IEEE_G) {
1008		led |= priv->led_ofdm_on;
1009		led |= priv->led_association_on;
1010		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1011	} else {
1012		led &= priv->led_ofdm_off;
1013		led |= priv->led_association_on;
1014		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1015	}
1016
1017	led = ipw_register_toggle(led);
1018
1019	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1020	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1021
1022	spin_unlock_irqrestore(&priv->lock, flags);
1023}
1024
1025static void ipw_led_band_off(struct ipw_priv *priv)
1026{
1027	unsigned long flags;
1028	u32 led;
1029
1030	/* Only nic type 1 supports mode LEDs */
1031	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1032		return;
1033
1034	spin_lock_irqsave(&priv->lock, flags);
1035
1036	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1037	led &= priv->led_ofdm_off;
1038	led &= priv->led_association_off;
1039
1040	led = ipw_register_toggle(led);
1041
1042	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1043	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1044
1045	spin_unlock_irqrestore(&priv->lock, flags);
1046}
1047
1048static void ipw_led_radio_on(struct ipw_priv *priv)
1049{
1050	ipw_led_link_on(priv);
1051}
1052
1053static void ipw_led_radio_off(struct ipw_priv *priv)
1054{
1055	ipw_led_activity_off(priv);
1056	ipw_led_link_off(priv);
1057}
1058
1059static void ipw_led_link_up(struct ipw_priv *priv)
1060{
1061	/* Set the Link Led on for all nic types */
1062	ipw_led_link_on(priv);
1063}
1064
1065static void ipw_led_link_down(struct ipw_priv *priv)
1066{
1067	ipw_led_activity_off(priv);
1068	ipw_led_link_off(priv);
1069
1070	if (priv->status & STATUS_RF_KILL_MASK)
1071		ipw_led_radio_off(priv);
1072}
1073
1074static void ipw_led_init(struct ipw_priv *priv)
1075{
1076	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1077
1078	/* Set the default PINs for the link and activity leds */
1079	priv->led_activity_on = IPW_ACTIVITY_LED;
1080	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1081
1082	priv->led_association_on = IPW_ASSOCIATED_LED;
1083	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1084
1085	/* Set the default PINs for the OFDM leds */
1086	priv->led_ofdm_on = IPW_OFDM_LED;
1087	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1088
1089	switch (priv->nic_type) {
1090	case EEPROM_NIC_TYPE_1:
1091		/* In this NIC type, the LEDs are reversed.... */
1092		priv->led_activity_on = IPW_ASSOCIATED_LED;
1093		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1094		priv->led_association_on = IPW_ACTIVITY_LED;
1095		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1096
1097		if (!(priv->config & CFG_NO_LED))
1098			ipw_led_band_on(priv);
1099
1100		/* And we don't blink link LEDs for this nic, so
1101		 * just return here */
1102		return;
1103
1104	case EEPROM_NIC_TYPE_3:
1105	case EEPROM_NIC_TYPE_2:
1106	case EEPROM_NIC_TYPE_4:
1107	case EEPROM_NIC_TYPE_0:
1108		break;
1109
1110	default:
1111		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1112			       priv->nic_type);
1113		priv->nic_type = EEPROM_NIC_TYPE_0;
1114		break;
1115	}
1116
1117	if (!(priv->config & CFG_NO_LED)) {
1118		if (priv->status & STATUS_ASSOCIATED)
1119			ipw_led_link_on(priv);
1120		else
1121			ipw_led_link_off(priv);
1122	}
1123}
1124
1125static void ipw_led_shutdown(struct ipw_priv *priv)
1126{
1127	ipw_led_activity_off(priv);
1128	ipw_led_link_off(priv);
1129	ipw_led_band_off(priv);
1130	cancel_delayed_work(&priv->led_link_on);
1131	cancel_delayed_work(&priv->led_link_off);
1132	cancel_delayed_work(&priv->led_act_off);
1133}
1134
1135/*
1136 * The following adds a new attribute to the sysfs representation
1137 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
 * used for controlling the debug level.
1139 *
1140 * See the level definitions in ipw for details.
1141 */
1142static ssize_t show_debug_level(struct device_driver *d, char *buf)
1143{
1144	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1145}
1146
1147static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1148				 size_t count)
1149{
1150	char *p = (char *)buf;
1151	u32 val;
1152
1153	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1154		p++;
1155		if (p[0] == 'x' || p[0] == 'X')
1156			p++;
1157		val = simple_strtoul(p, &p, 16);
1158	} else
1159		val = simple_strtoul(p, &p, 10);
1160	if (p == buf)
1161		printk(KERN_INFO DRV_NAME
1162		       ": %s is not in hex or decimal form.\n", buf);
1163	else
1164		ipw_debug_level = val;
1165
1166	return strnlen(buf, count);
1167}
1168
1169static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1170		   show_debug_level, store_debug_level);
1171
1172static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1173{
1174	/* length = 1st dword in log */
1175	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1176}
1177
1178static void ipw_capture_event_log(struct ipw_priv *priv,
1179				  u32 log_len, struct ipw_event *log)
1180{
1181	u32 base;
1182
1183	if (log_len) {
1184		base = ipw_read32(priv, IPW_EVENT_LOG);
1185		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1186				  (u8 *) log, sizeof(*log) * log_len);
1187	}
1188}
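/*
 * The reads above assume an event log area laid out roughly as
 *
 *   base + 0x00: u32 log_len
 *   base + 0x04: u32 (skipped)
 *   base + 0x08: struct ipw_event log[log_len]
 *
 * which is why the capture starts at base + sizeof(base) + sizeof(u32).
 */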
1189
1190static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1191{
1192	struct ipw_fw_error *error;
1193	u32 log_len = ipw_get_event_log_len(priv);
1194	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1195	u32 elem_len = ipw_read_reg32(priv, base);
1196
1197	error = kmalloc(sizeof(*error) +
1198			sizeof(*error->elem) * elem_len +
1199			sizeof(*error->log) * log_len, GFP_ATOMIC);
1200	if (!error) {
1201		IPW_ERROR("Memory allocation for firmware error log "
1202			  "failed.\n");
1203		return NULL;
1204	}
1205	error->jiffies = jiffies;
1206	error->status = priv->status;
1207	error->config = priv->config;
1208	error->elem_len = elem_len;
1209	error->log_len = log_len;
1210	error->elem = (struct ipw_error_elem *)error->payload;
1211	error->log = (struct ipw_event *)(error->elem + elem_len);
1212
1213	ipw_capture_event_log(priv, log_len, error->log);
1214
1215	if (elem_len)
1216		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1217				  sizeof(*error->elem) * elem_len);
1218
1219	return error;
1220}
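/*
 * Sketch of the single allocation made above (sizes depend on the
 * firmware-reported elem_len and log_len):
 *
 *   [ struct ipw_fw_error | elem_len * ipw_error_elem | log_len * ipw_event ]
 *                           ^ error->elem               ^ error->log
 *
 * so a single kfree() of priv->error also releases the element and event
 * arrays.
 */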
1221
1222static ssize_t show_event_log(struct device *d,
1223			      struct device_attribute *attr, char *buf)
1224{
1225	struct ipw_priv *priv = dev_get_drvdata(d);
1226	u32 log_len = ipw_get_event_log_len(priv);
1227	struct ipw_event log[log_len];
1228	u32 len = 0, i;
1229
1230	ipw_capture_event_log(priv, log_len, log);
1231
1232	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1233	for (i = 0; i < log_len; i++)
1234		len += snprintf(buf + len, PAGE_SIZE - len,
1235				"\n%08X%08X%08X",
1236				log[i].time, log[i].event, log[i].data);
1237	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1238	return len;
1239}
1240
1241static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1242
1243static ssize_t show_error(struct device *d,
1244			  struct device_attribute *attr, char *buf)
1245{
1246	struct ipw_priv *priv = dev_get_drvdata(d);
1247	u32 len = 0, i;
1248	if (!priv->error)
1249		return 0;
1250	len += snprintf(buf + len, PAGE_SIZE - len,
1251			"%08lX%08X%08X%08X",
1252			priv->error->jiffies,
1253			priv->error->status,
1254			priv->error->config, priv->error->elem_len);
1255	for (i = 0; i < priv->error->elem_len; i++)
1256		len += snprintf(buf + len, PAGE_SIZE - len,
1257				"\n%08X%08X%08X%08X%08X%08X%08X",
1258				priv->error->elem[i].time,
1259				priv->error->elem[i].desc,
1260				priv->error->elem[i].blink1,
1261				priv->error->elem[i].blink2,
1262				priv->error->elem[i].link1,
1263				priv->error->elem[i].link2,
1264				priv->error->elem[i].data);
1265
1266	len += snprintf(buf + len, PAGE_SIZE - len,
1267			"\n%08X", priv->error->log_len);
1268	for (i = 0; i < priv->error->log_len; i++)
1269		len += snprintf(buf + len, PAGE_SIZE - len,
1270				"\n%08X%08X%08X",
1271				priv->error->log[i].time,
1272				priv->error->log[i].event,
1273				priv->error->log[i].data);
1274	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1275	return len;
1276}
1277
1278static ssize_t clear_error(struct device *d,
1279			   struct device_attribute *attr,
1280			   const char *buf, size_t count)
1281{
1282	struct ipw_priv *priv = dev_get_drvdata(d);
1283
1284	kfree(priv->error);
1285	priv->error = NULL;
1286	return count;
1287}
1288
1289static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1290
1291static ssize_t show_cmd_log(struct device *d,
1292			    struct device_attribute *attr, char *buf)
1293{
1294	struct ipw_priv *priv = dev_get_drvdata(d);
1295	u32 len = 0, i;
1296	if (!priv->cmdlog)
1297		return 0;
1298	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1299	     (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1300	     i = (i + 1) % priv->cmdlog_len) {
1301		len +=
1302		    snprintf(buf + len, PAGE_SIZE - len,
1303			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1304			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1305			     priv->cmdlog[i].cmd.len);
1306		len +=
1307		    snprintk_buf(buf + len, PAGE_SIZE - len,
1308				 (u8 *) priv->cmdlog[i].cmd.param,
1309				 priv->cmdlog[i].cmd.len);
1310		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1311	}
1312	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1313	return len;
1314}
1315
1316static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1317
1318#ifdef CONFIG_IPW2200_PROMISCUOUS
1319static void ipw_prom_free(struct ipw_priv *priv);
1320static int ipw_prom_alloc(struct ipw_priv *priv);
1321static ssize_t store_rtap_iface(struct device *d,
1322			 struct device_attribute *attr,
1323			 const char *buf, size_t count)
1324{
1325	struct ipw_priv *priv = dev_get_drvdata(d);
1326	int rc = 0;
1327
1328	if (count < 1)
1329		return -EINVAL;
1330
1331	switch (buf[0]) {
1332	case '0':
1333		if (!rtap_iface)
1334			return count;
1335
1336		if (netif_running(priv->prom_net_dev)) {
1337			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1338			return count;
1339		}
1340
1341		ipw_prom_free(priv);
1342		rtap_iface = 0;
1343		break;
1344
1345	case '1':
1346		if (rtap_iface)
1347			return count;
1348
1349		rc = ipw_prom_alloc(priv);
1350		if (!rc)
1351			rtap_iface = 1;
1352		break;
1353
1354	default:
1355		return -EINVAL;
1356	}
1357
1358	if (rc) {
1359		IPW_ERROR("Failed to register promiscuous network "
1360			  "device (error %d).\n", rc);
1361	}
1362
1363	return count;
1364}
1365
1366static ssize_t show_rtap_iface(struct device *d,
1367			struct device_attribute *attr,
1368			char *buf)
1369{
1370	struct ipw_priv *priv = dev_get_drvdata(d);
1371	if (rtap_iface)
1372		return sprintf(buf, "%s", priv->prom_net_dev->name);
1373	else {
1374		buf[0] = '-';
1375		buf[1] = '1';
1376		buf[2] = '\0';
1377		return 3;
1378	}
1379}
1380
1381static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1382		   store_rtap_iface);
1383
1384static ssize_t store_rtap_filter(struct device *d,
1385			 struct device_attribute *attr,
1386			 const char *buf, size_t count)
1387{
1388	struct ipw_priv *priv = dev_get_drvdata(d);
1389
1390	if (!priv->prom_priv) {
1391		IPW_ERROR("Attempting to set filter without "
1392			  "rtap_iface enabled.\n");
1393		return -EPERM;
1394	}
1395
1396	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1397
1398	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1399		       BIT_ARG16(priv->prom_priv->filter));
1400
1401	return count;
1402}
1403
1404static ssize_t show_rtap_filter(struct device *d,
1405			struct device_attribute *attr,
1406			char *buf)
1407{
1408	struct ipw_priv *priv = dev_get_drvdata(d);
1409	return sprintf(buf, "0x%04X",
1410		       priv->prom_priv ? priv->prom_priv->filter : 0);
1411}
1412
1413static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1414		   store_rtap_filter);
1415#endif
1416
1417static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1418			     char *buf)
1419{
1420	struct ipw_priv *priv = dev_get_drvdata(d);
1421	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1422}
1423
1424static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1425			      const char *buf, size_t count)
1426{
1427	struct ipw_priv *priv = dev_get_drvdata(d);
1428	struct net_device *dev = priv->net_dev;
1429	char buffer[] = "00000000";
1430	unsigned long len =
1431	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1432	unsigned long val;
1433	char *p = buffer;
1434
1435	IPW_DEBUG_INFO("enter\n");
1436
1437	strncpy(buffer, buf, len);
1438	buffer[len] = 0;
1439
1440	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1441		p++;
1442		if (p[0] == 'x' || p[0] == 'X')
1443			p++;
1444		val = simple_strtoul(p, &p, 16);
1445	} else
1446		val = simple_strtoul(p, &p, 10);
1447	if (p == buffer) {
1448		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1449	} else {
1450		priv->ieee->scan_age = val;
1451		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1452	}
1453
1454	IPW_DEBUG_INFO("exit\n");
1455	return len;
1456}
1457
1458static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1459
1460static ssize_t show_led(struct device *d, struct device_attribute *attr,
1461			char *buf)
1462{
1463	struct ipw_priv *priv = dev_get_drvdata(d);
1464	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1465}
1466
1467static ssize_t store_led(struct device *d, struct device_attribute *attr,
1468			 const char *buf, size_t count)
1469{
1470	struct ipw_priv *priv = dev_get_drvdata(d);
1471
1472	IPW_DEBUG_INFO("enter\n");
1473
1474	if (count == 0)
1475		return 0;
1476
1477	if (*buf == 0) {
1478		IPW_DEBUG_LED("Disabling LED control.\n");
1479		priv->config |= CFG_NO_LED;
1480		ipw_led_shutdown(priv);
1481	} else {
1482		IPW_DEBUG_LED("Enabling LED control.\n");
1483		priv->config &= ~CFG_NO_LED;
1484		ipw_led_init(priv);
1485	}
1486
1487	IPW_DEBUG_INFO("exit\n");
1488	return count;
1489}
1490
1491static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1492
1493static ssize_t show_status(struct device *d,
1494			   struct device_attribute *attr, char *buf)
1495{
1496	struct ipw_priv *p = d->driver_data;
1497	return sprintf(buf, "0x%08x\n", (int)p->status);
1498}
1499
1500static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1501
1502static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1503			char *buf)
1504{
1505	struct ipw_priv *p = d->driver_data;
1506	return sprintf(buf, "0x%08x\n", (int)p->config);
1507}
1508
1509static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1510
1511static ssize_t show_nic_type(struct device *d,
1512			     struct device_attribute *attr, char *buf)
1513{
1514	struct ipw_priv *priv = d->driver_data;
1515	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1516}
1517
1518static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1519
1520static ssize_t show_ucode_version(struct device *d,
1521				  struct device_attribute *attr, char *buf)
1522{
1523	u32 len = sizeof(u32), tmp = 0;
1524	struct ipw_priv *p = d->driver_data;
1525
1526	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1527		return 0;
1528
1529	return sprintf(buf, "0x%08x\n", tmp);
1530}
1531
1532static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1533
1534static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1535			char *buf)
1536{
1537	u32 len = sizeof(u32), tmp = 0;
1538	struct ipw_priv *p = d->driver_data;
1539
1540	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1541		return 0;
1542
1543	return sprintf(buf, "0x%08x\n", tmp);
1544}
1545
1546static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1547
1548/*
1549 * Add a device attribute to view/control the delay between eeprom
1550 * operations.
1551 */
1552static ssize_t show_eeprom_delay(struct device *d,
1553				 struct device_attribute *attr, char *buf)
1554{
1555	int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1556	return sprintf(buf, "%i\n", n);
1557}
1558static ssize_t store_eeprom_delay(struct device *d,
1559				  struct device_attribute *attr,
1560				  const char *buf, size_t count)
1561{
1562	struct ipw_priv *p = d->driver_data;
1563	sscanf(buf, "%i", &p->eeprom_delay);
1564	return strnlen(buf, count);
1565}
1566
1567static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1568		   show_eeprom_delay, store_eeprom_delay);
1569
1570static ssize_t show_command_event_reg(struct device *d,
1571				      struct device_attribute *attr, char *buf)
1572{
1573	u32 reg = 0;
1574	struct ipw_priv *p = d->driver_data;
1575
1576	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1577	return sprintf(buf, "0x%08x\n", reg);
1578}
1579static ssize_t store_command_event_reg(struct device *d,
1580				       struct device_attribute *attr,
1581				       const char *buf, size_t count)
1582{
1583	u32 reg;
1584	struct ipw_priv *p = d->driver_data;
1585
1586	sscanf(buf, "%x", &reg);
1587	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1588	return strnlen(buf, count);
1589}
1590
1591static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1592		   show_command_event_reg, store_command_event_reg);
1593
1594static ssize_t show_mem_gpio_reg(struct device *d,
1595				 struct device_attribute *attr, char *buf)
1596{
1597	u32 reg = 0;
1598	struct ipw_priv *p = d->driver_data;
1599
1600	reg = ipw_read_reg32(p, 0x301100);
1601	return sprintf(buf, "0x%08x\n", reg);
1602}
1603static ssize_t store_mem_gpio_reg(struct device *d,
1604				  struct device_attribute *attr,
1605				  const char *buf, size_t count)
1606{
1607	u32 reg;
1608	struct ipw_priv *p = d->driver_data;
1609
1610	sscanf(buf, "%x", &reg);
1611	ipw_write_reg32(p, 0x301100, reg);
1612	return strnlen(buf, count);
1613}
1614
1615static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1616		   show_mem_gpio_reg, store_mem_gpio_reg);
1617
1618static ssize_t show_indirect_dword(struct device *d,
1619				   struct device_attribute *attr, char *buf)
1620{
1621	u32 reg = 0;
1622	struct ipw_priv *priv = d->driver_data;
1623
1624	if (priv->status & STATUS_INDIRECT_DWORD)
1625		reg = ipw_read_reg32(priv, priv->indirect_dword);
1626	else
1627		reg = 0;
1628
1629	return sprintf(buf, "0x%08x\n", reg);
1630}
1631static ssize_t store_indirect_dword(struct device *d,
1632				    struct device_attribute *attr,
1633				    const char *buf, size_t count)
1634{
1635	struct ipw_priv *priv = d->driver_data;
1636
1637	sscanf(buf, "%x", &priv->indirect_dword);
1638	priv->status |= STATUS_INDIRECT_DWORD;
1639	return strnlen(buf, count);
1640}
1641
1642static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1643		   show_indirect_dword, store_indirect_dword);
1644
1645static ssize_t show_indirect_byte(struct device *d,
1646				  struct device_attribute *attr, char *buf)
1647{
1648	u8 reg = 0;
1649	struct ipw_priv *priv = d->driver_data;
1650
1651	if (priv->status & STATUS_INDIRECT_BYTE)
1652		reg = ipw_read_reg8(priv, priv->indirect_byte);
1653	else
1654		reg = 0;
1655
1656	return sprintf(buf, "0x%02x\n", reg);
1657}
1658static ssize_t store_indirect_byte(struct device *d,
1659				   struct device_attribute *attr,
1660				   const char *buf, size_t count)
1661{
1662	struct ipw_priv *priv = d->driver_data;
1663
1664	sscanf(buf, "%x", &priv->indirect_byte);
1665	priv->status |= STATUS_INDIRECT_BYTE;
1666	return strnlen(buf, count);
1667}
1668
1669static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1670		   show_indirect_byte, store_indirect_byte);
1671
1672static ssize_t show_direct_dword(struct device *d,
1673				 struct device_attribute *attr, char *buf)
1674{
1675	u32 reg = 0;
1676	struct ipw_priv *priv = d->driver_data;
1677
1678	if (priv->status & STATUS_DIRECT_DWORD)
1679		reg = ipw_read32(priv, priv->direct_dword);
1680	else
1681		reg = 0;
1682
1683	return sprintf(buf, "0x%08x\n", reg);
1684}
1685static ssize_t store_direct_dword(struct device *d,
1686				  struct device_attribute *attr,
1687				  const char *buf, size_t count)
1688{
1689	struct ipw_priv *priv = d->driver_data;
1690
1691	sscanf(buf, "%x", &priv->direct_dword);
1692	priv->status |= STATUS_DIRECT_DWORD;
1693	return strnlen(buf, count);
1694}
1695
1696static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1697		   show_direct_dword, store_direct_dword);
1698
1699static int rf_kill_active(struct ipw_priv *priv)
1700{
1701	if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1702		priv->status |= STATUS_RF_KILL_HW;
1703	else
1704		priv->status &= ~STATUS_RF_KILL_HW;
1705
1706	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1707}
1708
1709static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1710			    char *buf)
1711{
1712	/* 0 - RF kill not enabled
1713	   1 - SW based RF kill active (sysfs)
1714	   2 - HW based RF kill active
	   3 - Both HW and SW based RF kill active */
1716	struct ipw_priv *priv = d->driver_data;
1717	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1718	    (rf_kill_active(priv) ? 0x2 : 0x0);
1719	return sprintf(buf, "%i\n", val);
1720}
1721
1722static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1723{
1724	if ((disable_radio ? 1 : 0) ==
1725	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1726		return 0;
1727
1728	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1729			  disable_radio ? "OFF" : "ON");
1730
1731	if (disable_radio) {
1732		priv->status |= STATUS_RF_KILL_SW;
1733
1734		if (priv->workqueue)
1735			cancel_delayed_work(&priv->request_scan);
1736		queue_work(priv->workqueue, &priv->down);
1737	} else {
1738		priv->status &= ~STATUS_RF_KILL_SW;
1739		if (rf_kill_active(priv)) {
1740			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1741					  "disabled by HW switch\n");
1742			/* Make sure the RF_KILL check timer is running */
1743			cancel_delayed_work(&priv->rf_kill);
1744			queue_delayed_work(priv->workqueue, &priv->rf_kill,
1745					   2 * HZ);
1746		} else
1747			queue_work(priv->workqueue, &priv->up);
1748	}
1749
1750	return 1;
1751}
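/*
 * Note: ipw_radio_kill_sw() returns 0 when the requested state already
 * matches the current software rf-kill state (nothing queued) and 1 when it
 * actually scheduled the up/down work to change state.
 */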
1752
1753static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1754			     const char *buf, size_t count)
1755{
1756	struct ipw_priv *priv = d->driver_data;
1757
1758	ipw_radio_kill_sw(priv, buf[0] == '1');
1759
1760	return count;
1761}
1762
1763static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1764
1765static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1766			       char *buf)
1767{
1768	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1769	int pos = 0, len = 0;
1770	if (priv->config & CFG_SPEED_SCAN) {
1771		while (priv->speed_scan[pos] != 0)
1772			len += sprintf(&buf[len], "%d ",
1773				       priv->speed_scan[pos++]);
1774		return len + sprintf(&buf[len], "\n");
1775	}
1776
1777	return sprintf(buf, "0\n");
1778}
1779
1780static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1781				const char *buf, size_t count)
1782{
1783	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1784	int channel, pos = 0;
1785	const char *p = buf;
1786
1787	/* list of space separated channels to scan, optionally ending with 0 */
1788	while ((channel = simple_strtol(p, NULL, 0))) {
1789		if (pos == MAX_SPEED_SCAN - 1) {
1790			priv->speed_scan[pos] = 0;
1791			break;
1792		}
1793
1794		if (ieee80211_is_valid_channel(priv->ieee, channel))
1795			priv->speed_scan[pos++] = channel;
1796		else
1797			IPW_WARNING("Skipping invalid channel request: %d\n",
1798				    channel);
1799		p = strchr(p, ' ');
1800		if (!p)
1801			break;
1802		while (*p == ' ' || *p == '\t')
1803			p++;
1804	}
1805
1806	if (pos == 0)
1807		priv->config &= ~CFG_SPEED_SCAN;
1808	else {
1809		priv->speed_scan_pos = 0;
1810		priv->config |= CFG_SPEED_SCAN;
1811	}
1812
1813	return count;
1814}
1815
1816static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1817		   store_speed_scan);
1818
1819static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1820			      char *buf)
1821{
1822	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1823	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1824}
1825
1826static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1827			       const char *buf, size_t count)
1828{
1829	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1830	if (buf[0] == '1')
1831		priv->config |= CFG_NET_STATS;
1832	else
1833		priv->config &= ~CFG_NET_STATS;
1834
1835	return count;
1836}
1837
1838static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1839		   show_net_stats, store_net_stats);
1840
1841static ssize_t show_channels(struct device *d,
1842			     struct device_attribute *attr,
1843			     char *buf)
1844{
1845	struct ipw_priv *priv = dev_get_drvdata(d);
1846	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1847	int len = 0, i;
1848
1849	len = sprintf(&buf[len],
1850		      "Displaying %d channels in 2.4GHz band "
1851		      "(802.11bg):\n", geo->bg_channels);
1852
1853	for (i = 0; i < geo->bg_channels; i++) {
1854		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1855			       geo->bg[i].channel,
1856			       geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1857			       " (radar spectrum)" : "",
1858			       ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1859				(geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1860			       ? "" : ", IBSS",
1861			       geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1862			       "passive only" : "active/passive",
1863			       geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
1864			       "B" : "B/G");
1865	}
1866
1867	len += sprintf(&buf[len],
1868		       "Displaying %d channels in 5.2GHz band "
1869		       "(802.11a):\n", geo->a_channels);
1870	for (i = 0; i < geo->a_channels; i++) {
1871		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1872			       geo->a[i].channel,
1873			       geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1874			       " (radar spectrum)" : "",
1875			       ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1876				(geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1877			       ? "" : ", IBSS",
1878			       geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1879			       "passive only" : "active/passive");
1880	}
1881
1882	return len;
1883}
1884
1885static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1886
1887static void notify_wx_assoc_event(struct ipw_priv *priv)
1888{
1889	union iwreq_data wrqu;
1890	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1891	if (priv->status & STATUS_ASSOCIATED)
1892		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1893	else
1894		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1895	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1896}
1897
1898static void ipw_irq_tasklet(struct ipw_priv *priv)
1899{
1900	u32 inta, inta_mask, handled = 0;
1901	unsigned long flags;
1902	int rc = 0;
1903
1904	spin_lock_irqsave(&priv->irq_lock, flags);
1905
1906	inta = ipw_read32(priv, IPW_INTA_RW);
1907	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1908	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1909
1910	/* Add any cached INTA values that need to be handled */
1911	inta |= priv->isr_inta;
1912
1913	spin_unlock_irqrestore(&priv->irq_lock, flags);
1914
1915	spin_lock_irqsave(&priv->lock, flags);
1916
1917	/* handle all of the reasons the interrupt was raised */
1918	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1919		ipw_rx(priv);
1920		handled |= IPW_INTA_BIT_RX_TRANSFER;
1921	}
1922
1923	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1924		IPW_DEBUG_HC("Command completed.\n");
1925		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1926		priv->status &= ~STATUS_HCMD_ACTIVE;
1927		wake_up_interruptible(&priv->wait_command_queue);
1928		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1929	}
1930
1931	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1932		IPW_DEBUG_TX("TX_QUEUE_1\n");
1933		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1934		handled |= IPW_INTA_BIT_TX_QUEUE_1;
1935	}
1936
1937	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1938		IPW_DEBUG_TX("TX_QUEUE_2\n");
1939		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1940		handled |= IPW_INTA_BIT_TX_QUEUE_2;
1941	}
1942
1943	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1944		IPW_DEBUG_TX("TX_QUEUE_3\n");
1945		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1946		handled |= IPW_INTA_BIT_TX_QUEUE_3;
1947	}
1948
1949	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1950		IPW_DEBUG_TX("TX_QUEUE_4\n");
1951		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1952		handled |= IPW_INTA_BIT_TX_QUEUE_4;
1953	}
1954
1955	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1956		IPW_WARNING("STATUS_CHANGE\n");
1957		handled |= IPW_INTA_BIT_STATUS_CHANGE;
1958	}
1959
1960	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1961		IPW_WARNING("TX_PERIOD_EXPIRED\n");
1962		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1963	}
1964
1965	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1966		IPW_WARNING("HOST_CMD_DONE\n");
1967		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1968	}
1969
1970	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1971		IPW_WARNING("FW_INITIALIZATION_DONE\n");
1972		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1973	}
1974
1975	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1976		IPW_WARNING("PHY_OFF_DONE\n");
1977		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1978	}
1979
1980	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1981		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1982		priv->status |= STATUS_RF_KILL_HW;
1983		wake_up_interruptible(&priv->wait_command_queue);
1984		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1985		cancel_delayed_work(&priv->request_scan);
1986		schedule_work(&priv->link_down);
1987		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1988		handled |= IPW_INTA_BIT_RF_KILL_DONE;
1989	}
1990
1991	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1992		IPW_WARNING("Firmware error detected.  Restarting.\n");
1993		if (priv->error) {
1994			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1995			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1996				struct ipw_fw_error *error =
1997				    ipw_alloc_error_log(priv);
1998				ipw_dump_error_log(priv, error);
1999				kfree(error);
2000			}
2001		} else {
2002			priv->error = ipw_alloc_error_log(priv);
2003			if (priv->error)
2004				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2005			else
2006				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2007					     "log.\n");
2008			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2009				ipw_dump_error_log(priv, priv->error);
2010		}
2011
2012		if (priv->ieee->sec.encrypt) {
2013			priv->status &= ~STATUS_ASSOCIATED;
2014			notify_wx_assoc_event(priv);
2015		}
2016
2017		/* Keep the restart process from trying to send host
2018		 * commands by clearing the INIT status bit */
2019		priv->status &= ~STATUS_INIT;
2020
2021		/* Cancel currently queued command. */
2022		priv->status &= ~STATUS_HCMD_ACTIVE;
2023		wake_up_interruptible(&priv->wait_command_queue);
2024
2025		queue_work(priv->workqueue, &priv->adapter_restart);
2026		handled |= IPW_INTA_BIT_FATAL_ERROR;
2027	}
2028
2029	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2030		IPW_ERROR("Parity error\n");
2031		handled |= IPW_INTA_BIT_PARITY_ERROR;
2032	}
2033
2034	if (handled != inta) {
2035		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2036	}
2037
2038	spin_unlock_irqrestore(&priv->lock, flags);
2039
2040	/* enable all interrupts */
2041	ipw_enable_interrupts(priv);
2042}
2043
2044#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
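/*
 * For example, IPW_CMD(SSID) expands via token pasting and stringification
 * to "case IPW_CMD_SSID : return "SSID";", so the switch below maps each
 * host command code to a printable name.
 */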
2045static char *get_cmd_string(u8 cmd)
2046{
2047	switch (cmd) {
2048		IPW_CMD(HOST_COMPLETE);
2049		IPW_CMD(POWER_DOWN);
2050		IPW_CMD(SYSTEM_CONFIG);
2051		IPW_CMD(MULTICAST_ADDRESS);
2052		IPW_CMD(SSID);
2053		IPW_CMD(ADAPTER_ADDRESS);
2054		IPW_CMD(PORT_TYPE);
2055		IPW_CMD(RTS_THRESHOLD);
2056		IPW_CMD(FRAG_THRESHOLD);
2057		IPW_CMD(POWER_MODE);
2058		IPW_CMD(WEP_KEY);
2059		IPW_CMD(TGI_TX_KEY);
2060		IPW_CMD(SCAN_REQUEST);
2061		IPW_CMD(SCAN_REQUEST_EXT);
2062		IPW_CMD(ASSOCIATE);
2063		IPW_CMD(SUPPORTED_RATES);
2064		IPW_CMD(SCAN_ABORT);
2065		IPW_CMD(TX_FLUSH);
2066		IPW_CMD(QOS_PARAMETERS);
2067		IPW_CMD(DINO_CONFIG);
2068		IPW_CMD(RSN_CAPABILITIES);
2069		IPW_CMD(RX_KEY);
2070		IPW_CMD(CARD_DISABLE);
2071		IPW_CMD(SEED_NUMBER);
2072		IPW_CMD(TX_POWER);
2073		IPW_CMD(COUNTRY_INFO);
2074		IPW_CMD(AIRONET_INFO);
2075		IPW_CMD(AP_TX_POWER);
2076		IPW_CMD(CCKM_INFO);
2077		IPW_CMD(CCX_VER_INFO);
2078		IPW_CMD(SET_CALIBRATION);
2079		IPW_CMD(SENSITIVITY_CALIB);
2080		IPW_CMD(RETRY_LIMIT);
2081		IPW_CMD(IPW_PRE_POWER_DOWN);
2082		IPW_CMD(VAP_BEACON_TEMPLATE);
2083		IPW_CMD(VAP_DTIM_PERIOD);
2084		IPW_CMD(EXT_SUPPORTED_RATES);
2085		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2086		IPW_CMD(VAP_QUIET_INTERVALS);
2087		IPW_CMD(VAP_CHANNEL_SWITCH);
2088		IPW_CMD(VAP_MANDATORY_CHANNELS);
2089		IPW_CMD(VAP_CELL_PWR_LIMIT);
2090		IPW_CMD(VAP_CF_PARAM_SET);
2091		IPW_CMD(VAP_SET_BEACONING_STATE);
2092		IPW_CMD(MEASUREMENT);
2093		IPW_CMD(POWER_CAPABILITY);
2094		IPW_CMD(SUPPORTED_CHANNELS);
2095		IPW_CMD(TPC_REPORT);
2096		IPW_CMD(WME_INFO);
2097		IPW_CMD(PRODUCTION_COMMAND);
2098	default:
2099		return "UNKNOWN";
2100	}
2101}
2102
2103#define HOST_COMPLETE_TIMEOUT HZ
2104
2105static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2106{
2107	int rc = 0;
2108	unsigned long flags;
2109
2110	spin_lock_irqsave(&priv->lock, flags);
2111	if (priv->status & STATUS_HCMD_ACTIVE) {
2112		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2113			  get_cmd_string(cmd->cmd));
2114		spin_unlock_irqrestore(&priv->lock, flags);
2115		return -EAGAIN;
2116	}
2117
2118	priv->status |= STATUS_HCMD_ACTIVE;
2119
2120	if (priv->cmdlog) {
2121		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2122		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2123		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2124		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2125		       cmd->len);
2126		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2127	}
2128
2129	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2130		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2131		     priv->status);
2132
2133#ifndef DEBUG_CMD_WEP_KEY
2134	if (cmd->cmd == IPW_CMD_WEP_KEY)
2135		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2136	else
2137#endif
2138		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2139
2140	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2141	if (rc) {
2142		priv->status &= ~STATUS_HCMD_ACTIVE;
2143		IPW_ERROR("Failed to send %s: Reason %d\n",
2144			  get_cmd_string(cmd->cmd), rc);
2145		spin_unlock_irqrestore(&priv->lock, flags);
2146		goto exit;
2147	}
2148	spin_unlock_irqrestore(&priv->lock, flags);
2149
2150	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2151					      !(priv->
2152						status & STATUS_HCMD_ACTIVE),
2153					      HOST_COMPLETE_TIMEOUT);
2154	if (rc == 0) {
2155		spin_lock_irqsave(&priv->lock, flags);
2156		if (priv->status & STATUS_HCMD_ACTIVE) {
2157			IPW_ERROR("Failed to send %s: Command timed out.\n",
2158				  get_cmd_string(cmd->cmd));
2159			priv->status &= ~STATUS_HCMD_ACTIVE;
2160			spin_unlock_irqrestore(&priv->lock, flags);
2161			rc = -EIO;
2162			goto exit;
2163		}
2164		spin_unlock_irqrestore(&priv->lock, flags);
2165	} else
2166		rc = 0;
2167
2168	if (priv->status & STATUS_RF_KILL_HW) {
2169		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2170			  get_cmd_string(cmd->cmd));
2171		rc = -EIO;
2172		goto exit;
2173	}
2174
2175      exit:
2176	if (priv->cmdlog) {
2177		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2178		priv->cmdlog_pos %= priv->cmdlog_len;
2179	}
2180	return rc;
2181}
2182
2183static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2184{
2185	struct host_cmd cmd = {
2186		.cmd = command,
2187	};
2188
2189	return __ipw_send_cmd(priv, &cmd);
2190}
2191
2192static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2193			    void *data)
2194{
2195	struct host_cmd cmd = {
2196		.cmd = command,
2197		.len = len,
2198		.param = data,
2199	};
2200
2201	return __ipw_send_cmd(priv, &cmd);
2202}
2203
2204static int ipw_send_host_complete(struct ipw_priv *priv)
2205{
2206	if (!priv) {
2207		IPW_ERROR("Invalid args\n");
2208		return -1;
2209	}
2210
2211	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2212}
2213
2214static int ipw_send_system_config(struct ipw_priv *priv)
2215{
2216	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2217				sizeof(priv->sys_config),
2218				&priv->sys_config);
2219}
2220
2221static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2222{
2223	if (!priv || !ssid) {
2224		IPW_ERROR("Invalid args\n");
2225		return -1;
2226	}
2227
2228	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2229				ssid);
2230}
2231
2232static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2233{
2234	if (!priv || !mac) {
2235		IPW_ERROR("Invalid args\n");
2236		return -1;
2237	}
2238
2239	IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2240		       priv->net_dev->name, MAC_ARG(mac));
2241
2242	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2243}
2244
2245/*
2246 * NOTE: This must be executed from our workqueue as it results in udelay
2247 * being called, which may corrupt the keyboard if executed on the default
2248 * workqueue
2249 */
2250static void ipw_adapter_restart(void *adapter)
2251{
2252	struct ipw_priv *priv = adapter;
2253
2254	if (priv->status & STATUS_RF_KILL_MASK)
2255		return;
2256
2257	ipw_down(priv);
2258
2259	if (priv->assoc_network &&
2260	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2261		ipw_remove_current_network(priv);
2262
2263	if (ipw_up(priv)) {
2264		IPW_ERROR("Failed to up device\n");
2265		return;
2266	}
2267}
2268
2269static void ipw_bg_adapter_restart(struct work_struct *work)
2270{
2271	struct ipw_priv *priv =
2272		container_of(work, struct ipw_priv, adapter_restart);
2273	mutex_lock(&priv->mutex);
2274	ipw_adapter_restart(priv);
2275	mutex_unlock(&priv->mutex);
2276}
2277
2278#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2279
2280static void ipw_scan_check(void *data)
2281{
2282	struct ipw_priv *priv = data;
2283	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2284		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2285			       "adapter after (%dms).\n",
2286			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2287		queue_work(priv->workqueue, &priv->adapter_restart);
2288	}
2289}
2290
2291static void ipw_bg_scan_check(struct work_struct *work)
2292{
2293	struct ipw_priv *priv =
2294		container_of(work, struct ipw_priv, scan_check.work);
2295	mutex_lock(&priv->mutex);
2296	ipw_scan_check(priv);
2297	mutex_unlock(&priv->mutex);
2298}
2299
2300static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2301				     struct ipw_scan_request_ext *request)
2302{
2303	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2304				sizeof(*request), request);
2305}
2306
2307static int ipw_send_scan_abort(struct ipw_priv *priv)
2308{
2309	if (!priv) {
2310		IPW_ERROR("Invalid args\n");
2311		return -1;
2312	}
2313
2314	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2315}
2316
2317static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2318{
2319	struct ipw_sensitivity_calib calib = {
2320		.beacon_rssi_raw = cpu_to_le16(sens),
2321	};
2322
2323	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2324				&calib);
2325}
2326
2327static int ipw_send_associate(struct ipw_priv *priv,
2328			      struct ipw_associate *associate)
2329{
2330	struct ipw_associate tmp_associate;
2331
2332	if (!priv || !associate) {
2333		IPW_ERROR("Invalid args\n");
2334		return -1;
2335	}
2336
2337	memcpy(&tmp_associate, associate, sizeof(*associate));
2338	tmp_associate.policy_support =
2339	    cpu_to_le16(tmp_associate.policy_support);
2340	tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2341	tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2342	tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2343	tmp_associate.listen_interval =
2344	    cpu_to_le16(tmp_associate.listen_interval);
2345	tmp_associate.beacon_interval =
2346	    cpu_to_le16(tmp_associate.beacon_interval);
2347	tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2348
2349	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2350				&tmp_associate);
2351}
2352
2353static int ipw_send_supported_rates(struct ipw_priv *priv,
2354				    struct ipw_supported_rates *rates)
2355{
2356	if (!priv || !rates) {
2357		IPW_ERROR("Invalid args\n");
2358		return -1;
2359	}
2360
2361	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2362				rates);
2363}
2364
2365static int ipw_set_random_seed(struct ipw_priv *priv)
2366{
2367	u32 val;
2368
2369	if (!priv) {
2370		IPW_ERROR("Invalid args\n");
2371		return -1;
2372	}
2373
2374	get_random_bytes(&val, sizeof(val));
2375
2376	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2377}
2378
2379static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2380{
2381	if (!priv) {
2382		IPW_ERROR("Invalid args\n");
2383		return -1;
2384	}
2385
2386	phy_off = cpu_to_le32(phy_off);
2387	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2388				&phy_off);
2389}
2390
2391static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2392{
2393	if (!priv || !power) {
2394		IPW_ERROR("Invalid args\n");
2395		return -1;
2396	}
2397
2398	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2399}
2400
2401static int ipw_set_tx_power(struct ipw_priv *priv)
2402{
2403	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2404	struct ipw_tx_power tx_power;
2405	s8 max_power;
2406	int i;
2407
2408	memset(&tx_power, 0, sizeof(tx_power));
2409
2410	/* configure device for 'G' band */
2411	tx_power.ieee_mode = IPW_G_MODE;
2412	tx_power.num_channels = geo->bg_channels;
2413	for (i = 0; i < geo->bg_channels; i++) {
2414		max_power = geo->bg[i].max_power;
2415		tx_power.channels_tx_power[i].channel_number =
2416		    geo->bg[i].channel;
2417		tx_power.channels_tx_power[i].tx_power = max_power ?
2418		    min(max_power, priv->tx_power) : priv->tx_power;
2419	}
2420	if (ipw_send_tx_power(priv, &tx_power))
2421		return -EIO;
2422
2423	/* configure device to also handle 'B' band */
2424	tx_power.ieee_mode = IPW_B_MODE;
2425	if (ipw_send_tx_power(priv, &tx_power))
2426		return -EIO;
2427
2428	/* configure device to also handle 'A' band */
2429	if (priv->ieee->abg_true) {
2430		tx_power.ieee_mode = IPW_A_MODE;
2431		tx_power.num_channels = geo->a_channels;
2432		for (i = 0; i < tx_power.num_channels; i++) {
2433			max_power = geo->a[i].max_power;
2434			tx_power.channels_tx_power[i].channel_number =
2435			    geo->a[i].channel;
2436			tx_power.channels_tx_power[i].tx_power = max_power ?
2437			    min(max_power, priv->tx_power) : priv->tx_power;
2438		}
2439		if (ipw_send_tx_power(priv, &tx_power))
2440			return -EIO;
2441	}
2442	return 0;
2443}
2444
2445static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2446{
2447	struct ipw_rts_threshold rts_threshold = {
2448		.rts_threshold = cpu_to_le16(rts),
2449	};
2450
2451	if (!priv) {
2452		IPW_ERROR("Invalid args\n");
2453		return -1;
2454	}
2455
2456	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2457				sizeof(rts_threshold), &rts_threshold);
2458}
2459
2460static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2461{
2462	struct ipw_frag_threshold frag_threshold = {
2463		.frag_threshold = cpu_to_le16(frag),
2464	};
2465
2466	if (!priv) {
2467		IPW_ERROR("Invalid args\n");
2468		return -1;
2469	}
2470
2471	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2472				sizeof(frag_threshold), &frag_threshold);
2473}
2474
2475static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2476{
2477	u32 param;
2478
2479	if (!priv) {
2480		IPW_ERROR("Invalid args\n");
2481		return -1;
2482	}
2483
2484	/* If on battery, set to power index 3; if on AC, set to CAM;
2485	 * otherwise use the requested level directly */
2486	switch (mode) {
2487	case IPW_POWER_BATTERY:
2488		param = IPW_POWER_INDEX_3;
2489		break;
2490	case IPW_POWER_AC:
2491		param = IPW_POWER_MODE_CAM;
2492		break;
2493	default:
2494		param = mode;
2495		break;
2496	}
2497
2498	param = cpu_to_le32(param);
2499	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2500				&param);
2501}
2502
2503static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2504{
2505	struct ipw_retry_limit retry_limit = {
2506		.short_retry_limit = slimit,
2507		.long_retry_limit = llimit
2508	};
2509
2510	if (!priv) {
2511		IPW_ERROR("Invalid args\n");
2512		return -1;
2513	}
2514
2515	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2516				&retry_limit);
2517}
2518
2519/*
2520 * The IPW device contains a Microwire compatible EEPROM that stores
2521 * various data like the MAC address.  Usually the firmware has exclusive
2522 * access to the eeprom, but during device initialization (before the
2523 * device driver has sent the HostComplete command to the firmware) the
2524 * device driver has read access to the EEPROM by way of indirect addressing
2525 * through a couple of memory mapped registers.
2526 *
2527 * The following is a simplified implementation for pulling data out of
2528 * the eeprom, along with some helper functions to find information in
2529 * the per device private data's copy of the eeprom.
2530 *
2531 * NOTE: To better understand how these functions work (i.e. what is a chip
2532 *       select and why do we have to keep driving the eeprom clock?), read
2533 *       just about any data sheet for a Microwire compatible EEPROM.
2534 */
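
/*
 * Illustrative sketch only (hypothetical, not called by the driver): with
 * the helpers below, reading one 16-bit word straight off the EEPROM and
 * then pulling the MAC address out of the cached copy amounts to
 *
 *	u16 word = eeprom_read_u16(priv, 0);
 *	u8 addr[ETH_ALEN];
 *	eeprom_parse_mac(priv, addr);
 *
 * eeprom_read_u16() drives the chip select, clocks out the READ opcode and
 * 8-bit address one bit at a time, then shifts 16 data bits back in.
 * eeprom_parse_mac() assumes priv->eeprom has already been filled in by
 * ipw_eeprom_init_sram().
 */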
2535
2536/* write a 32 bit value into the indirect accessor register */
2537static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2538{
2539	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2540
2541	/* the eeprom requires some time to complete the operation */
2542	udelay(p->eeprom_delay);
2543
2544	return;
2545}
2546
2547/* perform a chip select operation */
2548static void eeprom_cs(struct ipw_priv *priv)
2549{
2550	eeprom_write_reg(priv, 0);
2551	eeprom_write_reg(priv, EEPROM_BIT_CS);
2552	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2553	eeprom_write_reg(priv, EEPROM_BIT_CS);
2554}
2555
2556/* release the chip select */
2557static void eeprom_disable_cs(struct ipw_priv *priv)
2558{
2559	eeprom_write_reg(priv, EEPROM_BIT_CS);
2560	eeprom_write_reg(priv, 0);
2561	eeprom_write_reg(priv, EEPROM_BIT_SK);
2562}
2563
2564/* push a single bit down to the eeprom */
2565static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2566{
2567	int d = (bit ? EEPROM_BIT_DI : 0);
2568	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2569	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2570}
2571
2572/* push an opcode followed by an address down to the eeprom */
2573static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2574{
2575	int i;
2576
2577	eeprom_cs(priv);
2578	eeprom_write_bit(priv, 1);
2579	eeprom_write_bit(priv, op & 2);
2580	eeprom_write_bit(priv, op & 1);
2581	for (i = 7; i >= 0; i--) {
2582		eeprom_write_bit(priv, addr & (1 << i));
2583	}
2584}
2585
2586/* pull 16 bits off the eeprom, one bit at a time */
2587static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2588{
2589	int i;
2590	u16 r = 0;
2591
2592	/* Send READ Opcode */
2593	eeprom_op(priv, EEPROM_CMD_READ, addr);
2594
2595	/* Send dummy bit */
2596	eeprom_write_reg(priv, EEPROM_BIT_CS);
2597
2598	/* Read the byte off the eeprom one bit at a time */
2599	for (i = 0; i < 16; i++) {
2600		u32 data = 0;
2601		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2602		eeprom_write_reg(priv, EEPROM_BIT_CS);
2603		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2604		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2605	}
2606
2607	/* Send another dummy bit */
2608	eeprom_write_reg(priv, 0);
2609	eeprom_disable_cs(priv);
2610
2611	return r;
2612}
2613
2614/* helper function for pulling the mac address out of the private */
2615/* data's copy of the eeprom data                                 */
2616static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2617{
2618	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2619}
2620
2621/*
2622 * Either the device driver (i.e. the host) or the firmware can
2623 * load eeprom data into the designated region in SRAM.  If neither
2624 * happens then the FW will shutdown with a fatal error.
2625 *
2626 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2627 * region of shared SRAM needs to be non-zero.
2628 */
2629static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2630{
2631	int i;
2632	u16 *eeprom = (u16 *) priv->eeprom;
2633
2634	IPW_DEBUG_TRACE(">>\n");
2635
2636	/* read entire contents of eeprom into private buffer */
2637	for (i = 0; i < 128; i++)
2638		eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2639
2640	/*
2641	   If the data looks correct, copy it into the device's SRAM.
2642	   Otherwise let the firmware know to perform the operation
2643	   on its own.
2644	 */
2645	if (priv->eeprom[EEPROM_VERSION] != 0) {
2646		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2647
2648		/* write the eeprom data to sram */
2649		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2650			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2651
2652		/* Do not load eeprom data on fatal error or suspend */
2653		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2654	} else {
2655		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2656
2657		/* Load eeprom data on fatal error or suspend */
2658		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2659	}
2660
2661	IPW_DEBUG_TRACE("<<\n");
2662}
2663
2664static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2665{
2666	count >>= 2;
2667	if (!count)
2668		return;
2669	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2670	while (count--)
2671		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2672}
2673
2674static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2675{
2676	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2677			CB_NUMBER_OF_ELEMENTS_SMALL *
2678			sizeof(struct command_block));
2679}
2680
2681static int ipw_fw_dma_enable(struct ipw_priv *priv)
2682{				/* start dma engine but no transfers yet */
2683
2684	IPW_DEBUG_FW(">> : \n");
2685
2686	/* Start the dma */
2687	ipw_fw_dma_reset_command_blocks(priv);
2688
2689	/* Write CB base address */
2690	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2691
2692	IPW_DEBUG_FW("<< : \n");
2693	return 0;
2694}
2695
2696static void ipw_fw_dma_abort(struct ipw_priv *priv)
2697{
2698	u32 control = 0;
2699
2700	IPW_DEBUG_FW(">> :\n");
2701
2702	/* set the Stop and Abort bit */
2703	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2704	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2705	priv->sram_desc.last_cb_index = 0;
2706
2707	IPW_DEBUG_FW("<< \n");
2708}
2709
2710static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2711					  struct command_block *cb)
2712{
2713	u32 address =
2714	    IPW_SHARED_SRAM_DMA_CONTROL +
2715	    (sizeof(struct command_block) * index);
2716	IPW_DEBUG_FW(">> :\n");
2717
2718	ipw_write_indirect(priv, address, (u8 *) cb,
2719			   (int)sizeof(struct command_block));
2720
2721	IPW_DEBUG_FW("<< :\n");
2722	return 0;
2723
2724}
2725
2726static int ipw_fw_dma_kick(struct ipw_priv *priv)
2727{
2728	u32 control = 0;
2729	u32 index = 0;
2730
2731	IPW_DEBUG_FW(">> :\n");
2732
2733	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2734		ipw_fw_dma_write_command_block(priv, index,
2735					       &priv->sram_desc.cb_list[index]);
2736
2737	/* Enable the DMA in the CSR register */
2738	ipw_clear_bit(priv, IPW_RESET_REG,
2739		      IPW_RESET_REG_MASTER_DISABLED |
2740		      IPW_RESET_REG_STOP_MASTER);
2741
2742	/* Set the Start bit. */
2743	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2744	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2745
2746	IPW_DEBUG_FW("<< :\n");
2747	return 0;
2748}
2749
2750static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2751{
2752	u32 address;
2753	u32 register_value = 0;
2754	u32 cb_fields_address = 0;
2755
2756	IPW_DEBUG_FW(">> :\n");
2757	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2758	IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2759
2760	/* Read the DMA Control register */
2761	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2762	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2763
2764	/* Print the CB values */
2765	cb_fields_address = address;
2766	register_value = ipw_read_reg32(priv, cb_fields_address);
2767	IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2768
2769	cb_fields_address += sizeof(u32);
2770	register_value = ipw_read_reg32(priv, cb_fields_address);
2771	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2772
2773	cb_fields_address += sizeof(u32);
2774	register_value = ipw_read_reg32(priv, cb_fields_address);
2775	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2776			  register_value);
2777
2778	cb_fields_address += sizeof(u32);
2779	register_value = ipw_read_reg32(priv, cb_fields_address);
2780	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2781
2782	IPW_DEBUG_FW("<< :\n");
2783}
2784
2785static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2786{
2787	u32 current_cb_address = 0;
2788	u32 current_cb_index = 0;
2789
2790	IPW_DEBUG_FW(">> :\n");
2791	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2792
2793	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2794	    sizeof(struct command_block);
2795
2796	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2797			  current_cb_index, current_cb_address);
2798
2799	IPW_DEBUG_FW("<< :\n");
2800	return current_cb_index;
2801
2802}
2803
2804static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2805					u32 src_address,
2806					u32 dest_address,
2807					u32 length,
2808					int interrupt_enabled, int is_last)
2809{
2810
2811	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2812	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2813	    CB_DEST_SIZE_LONG;
2814	struct command_block *cb;
2815	u32 last_cb_element = 0;
2816
2817	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2818			  src_address, dest_address, length);
2819
2820	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2821		return -1;
2822
2823	last_cb_element = priv->sram_desc.last_cb_index;
2824	cb = &priv->sram_desc.cb_list[last_cb_element];
2825	priv->sram_desc.last_cb_index++;
2826
2827	/* Calculate the new CB control word */
2828	if (interrupt_enabled)
2829		control |= CB_INT_ENABLED;
2830
2831	if (is_last)
2832		control |= CB_LAST_VALID;
2833
2834	control |= length;
2835
2836	/* Calculate the CB Element's checksum value */
2837	cb->status = control ^ src_address ^ dest_address;
2838
2839	/* Copy the Source and Destination addresses */
2840	cb->dest_addr = dest_address;
2841	cb->source_addr = src_address;
2842
2843	/* Copy the Control Word last */
2844	cb->control = control;
2845
2846	return 0;
2847}
2848
2849static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2850				 u32 src_phys, u32 dest_address, u32 length)
2851{
2852	u32 bytes_left = length;
2853	u32 src_offset = 0;
2854	u32 dest_offset = 0;
2855	int status = 0;
2856	IPW_DEBUG_FW(">> \n");
2857	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2858			  src_phys, dest_address, length);
2859	while (bytes_left > CB_MAX_LENGTH) {
2860		status = ipw_fw_dma_add_command_block(priv,
2861						      src_phys + src_offset,
2862						      dest_address +
2863						      dest_offset,
2864						      CB_MAX_LENGTH, 0, 0);
2865		if (status) {
2866			IPW_DEBUG_FW_INFO(": Failed\n");
2867			return -1;
2868		} else
2869			IPW_DEBUG_FW_INFO(": Added new cb\n");
2870
2871		src_offset += CB_MAX_LENGTH;
2872		dest_offset += CB_MAX_LENGTH;
2873		bytes_left -= CB_MAX_LENGTH;
2874	}
2875
2876	/* add the buffer tail */
2877	if (bytes_left > 0) {
2878		status =
2879		    ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2880						 dest_address + dest_offset,
2881						 bytes_left, 0, 0);
2882		if (status) {
2883			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2884			return -1;
2885		} else
2886			IPW_DEBUG_FW_INFO
2887			    (": Adding new cb - the buffer tail\n");
2888	}
2889
2890	IPW_DEBUG_FW("<< \n");
2891	return 0;
2892}
2893
2894static int ipw_fw_dma_wait(struct ipw_priv *priv)
2895{
2896	u32 current_index = 0, previous_index;
2897	u32 watchdog = 0;
2898
2899	IPW_DEBUG_FW(">> : \n");
2900
2901	current_index = ipw_fw_dma_command_block_index(priv);
2902	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2903			  (int)priv->sram_desc.last_cb_index);
2904
2905	while (current_index < priv->sram_desc.last_cb_index) {
2906		udelay(50);
2907		previous_index = current_index;
2908		current_index = ipw_fw_dma_command_block_index(priv);
2909
2910		if (previous_index < current_index) {
2911			watchdog = 0;
2912			continue;
2913		}
2914		if (++watchdog > 400) {
2915			IPW_DEBUG_FW_INFO("Timeout\n");
2916			ipw_fw_dma_dump_command_block(priv);
2917			ipw_fw_dma_abort(priv);
2918			return -1;
2919		}
2920	}
2921
2922	ipw_fw_dma_abort(priv);
2923
2924	/*Disable the DMA in the CSR register */
2925	ipw_set_bit(priv, IPW_RESET_REG,
2926		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2927
2928	IPW_DEBUG_FW("<< dmaWaitSync \n");
2929	return 0;
2930}
2931
2932static void ipw_remove_current_network(struct ipw_priv *priv)
2933{
2934	struct list_head *element, *safe;
2935	struct ieee80211_network *network = NULL;
2936	unsigned long flags;
2937
2938	spin_lock_irqsave(&priv->ieee->lock, flags);
2939	list_for_each_safe(element, safe, &priv->ieee->network_list) {
2940		network = list_entry(element, struct ieee80211_network, list);
2941		if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2942			list_del(element);
2943			list_add_tail(&network->list,
2944				      &priv->ieee->network_free_list);
2945		}
2946	}
2947	spin_unlock_irqrestore(&priv->ieee->lock, flags);
2948}
2949
2950/**
2951 * Check that card is still alive.
2952 * Reads the debug register from domain0.
2953 * If the card is present, a pre-defined value should
2954 * be found there.
2955 *
2956 * @param priv
2957 * @return 1 if card is present, 0 otherwise
2958 */
2959static inline int ipw_alive(struct ipw_priv *priv)
2960{
2961	return ipw_read32(priv, 0x90) == 0xd55555d5;
2962}
2963
2964/* timeout in msec, attempted in 10-msec quanta */
2965static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2966			       int timeout)
2967{
2968	int i = 0;
2969
2970	do {
2971		if ((ipw_read32(priv, addr) & mask) == mask)
2972			return i;
2973		mdelay(10);
2974		i += 10;
2975	} while (i < timeout);
2976
2977	return -ETIME;
2978}
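
/*
 * Usage sketch: ipw_poll_bit(priv, IPW_RESET_REG,
 * IPW_RESET_REG_MASTER_DISABLED, 100) returns the elapsed time in msec
 * (0, 10, 20, ...) as soon as the bit reads back set, or -ETIME if it is
 * still clear after 100ms -- see ipw_stop_master() below.
 */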
2979
2980/* These functions load the firmware and micro code for the operation of
2981 * the ipw hardware.  It assumes the buffer has all the bits for the
2982 * image and the caller is handling the memory allocation and clean up.
2983 */
2984
2985static int ipw_stop_master(struct ipw_priv *priv)
2986{
2987	int rc;
2988
2989	IPW_DEBUG_TRACE(">> \n");
2990	/* stop master. typical delay - 0 */
2991	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2992
2993	/* timeout is in msec, polled in 10-msec quanta */
2994	rc = ipw_poll_bit(priv, IPW_RESET_REG,
2995			  IPW_RESET_REG_MASTER_DISABLED, 100);
2996	if (rc < 0) {
2997		IPW_ERROR("wait for stop master failed after 100ms\n");
2998		return -1;
2999	}
3000
3001	IPW_DEBUG_INFO("stop master %dms\n", rc);
3002
3003	return rc;
3004}
3005
3006static void ipw_arc_release(struct ipw_priv *priv)
3007{
3008	IPW_DEBUG_TRACE(">> \n");
3009	mdelay(5);
3010
3011	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3012
3013	/* no one knows the exact timing, so add some delay for safety */
3014	mdelay(5);
3015}
3016
3017struct fw_chunk {
3018	u32 address;
3019	u32 length;
3020};
3021
3022static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3023{
3024	int rc = 0, i, addr;
3025	u8 cr = 0;
3026	u16 *image;
3027
3028	image = (u16 *) data;
3029
3030	IPW_DEBUG_TRACE(">> \n");
3031
3032	rc = ipw_stop_master(priv);
3033
3034	if (rc < 0)
3035		return rc;
3036
3037	for (addr = IPW_SHARED_LOWER_BOUND;
3038	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3039		ipw_write32(priv, addr, 0);
3040	}
3041
3042	/* no ucode (yet) */
3043	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3044	/* destroy DMA queues */
3045	/* reset sequence */
3046
3047	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3048	ipw_arc_release(priv);
3049	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3050	mdelay(1);
3051
3052	/* reset PHY */
3053	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3054	mdelay(1);
3055
3056	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3057	mdelay(1);
3058
3059	/* enable ucode store */
3060	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3061	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3062	mdelay(1);
3063
3064	/* write ucode */
3065	/**
3066	 * @bug
3067	 * Do NOT set indirect address register once and then
3068	 * store data to indirect data register in the loop.
3069	 * It seems very reasonable, but in this case DINO does not
3070	 * accept the ucode. It is essential to set the address each time.
3071	 */
3072	/* load new ipw uCode */
3073	for (i = 0; i < len / 2; i++)
3074		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3075				cpu_to_le16(image[i]));
3076
3077	/* enable DINO */
3078	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3079	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3080
3081	/* this is where the igx / win driver deviates from the VAP driver. */
3082
3083	/* wait for alive response */
3084	for (i = 0; i < 100; i++) {
3085		/* poll for incoming data */
3086		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3087		if (cr & DINO_RXFIFO_DATA)
3088			break;
3089		mdelay(1);
3090	}
3091
3092	if (cr & DINO_RXFIFO_DATA) {
3093		/* alive_command_response size is NOT a multiple of 4 */
3094		u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3095
3096		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3097			response_buffer[i] =
3098			    le32_to_cpu(ipw_read_reg32(priv,
3099						       IPW_BASEBAND_RX_FIFO_READ));
3100		memcpy(&priv->dino_alive, response_buffer,
3101		       sizeof(priv->dino_alive));
3102		if (priv->dino_alive.alive_command == 1
3103		    && priv->dino_alive.ucode_valid == 1) {
3104			rc = 0;
3105			IPW_DEBUG_INFO
3106			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3107			     "of %02d/%02d/%02d %02d:%02d\n",
3108			     priv->dino_alive.software_revision,
3109			     priv->dino_alive.software_revision,
3110			     priv->dino_alive.device_identifier,
3111			     priv->dino_alive.device_identifier,
3112			     priv->dino_alive.time_stamp[0],
3113			     priv->dino_alive.time_stamp[1],
3114			     priv->dino_alive.time_stamp[2],
3115			     priv->dino_alive.time_stamp[3],
3116			     priv->dino_alive.time_stamp[4]);
3117		} else {
3118			IPW_DEBUG_INFO("Microcode is not alive\n");
3119			rc = -EINVAL;
3120		}
3121	} else {
3122		IPW_DEBUG_INFO("No alive response from DINO\n");
3123		rc = -ETIME;
3124	}
3125
3126	/* disable DINO, otherwise for some reason
3127	   the firmware has problems getting the alive response. */
3128	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3129
3130	return rc;
3131}
3132
3133static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3134{
3135	int rc = -1;
3136	int offset = 0;
3137	struct fw_chunk *chunk;
3138	dma_addr_t shared_phys;
3139	u8 *shared_virt;
3140
3141	IPW_DEBUG_TRACE(">> : \n");
3142	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3143
3144	if (!shared_virt)
3145		return -ENOMEM;
3146
3147	memmove(shared_virt, data, len);
3148
3149	/* Start the Dma */
3150	rc = ipw_fw_dma_enable(priv);
3151
3152	if (priv->sram_desc.last_cb_index > 0) {
3153		/* the DMA is already active; this would be a bug. */
3154		BUG();
3155		goto out;
3156	}
3157
3158	do {
3159		chunk = (struct fw_chunk *)(data + offset);
3160		offset += sizeof(struct fw_chunk);
3161		/* build DMA packet and queue up for sending */
3162		/* dma to chunk->address, the chunk->length bytes from data +
3163		 * offset */
3164		/* Dma loading */
3165		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3166					   le32_to_cpu(chunk->address),
3167					   le32_to_cpu(chunk->length));
3168		if (rc) {
3169			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3170			goto out;
3171		}
3172
3173		offset += le32_to_cpu(chunk->length);
3174	} while (offset < len);
3175
3176	/* Run the DMA and wait for the answer */
3177	rc = ipw_fw_dma_kick(priv);
3178	if (rc) {
3179		IPW_ERROR("dmaKick Failed\n");
3180		goto out;
3181	}
3182
3183	rc = ipw_fw_dma_wait(priv);
3184	if (rc) {
3185		IPW_ERROR("dmaWaitSync Failed\n");
3186		goto out;
3187	}
3188      out:
3189	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3190	return rc;
3191}
3192
3193/* stop nic */
3194static int ipw_stop_nic(struct ipw_priv *priv)
3195{
3196	int rc = 0;
3197
3198	/* stop */
3199	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3200
3201	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3202			  IPW_RESET_REG_MASTER_DISABLED, 500);
3203	if (rc < 0) {
3204		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3205		return rc;
3206	}
3207
3208	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3209
3210	return rc;
3211}
3212
3213static void ipw_start_nic(struct ipw_priv *priv)
3214{
3215	IPW_DEBUG_TRACE(">>\n");
3216
3217	/* prvHwStartNic  release ARC */
3218	ipw_clear_bit(priv, IPW_RESET_REG,
3219		      IPW_RESET_REG_MASTER_DISABLED |
3220		      IPW_RESET_REG_STOP_MASTER |
3221		      CBD_RESET_REG_PRINCETON_RESET);
3222
3223	/* enable power management */
3224	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3225		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3226
3227	IPW_DEBUG_TRACE("<<\n");
3228}
3229
3230static int ipw_init_nic(struct ipw_priv *priv)
3231{
3232	int rc;
3233
3234	IPW_DEBUG_TRACE(">>\n");
3235	/* reset */
3236	/*prvHwInitNic */
3237	/* set "initialization complete" bit to move adapter to D0 state */
3238	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3239
3240	/* low-level PLL activation */
3241	ipw_write32(priv, IPW_READ_INT_REGISTER,
3242		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3243
3244	/* wait for clock stabilization */
3245	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3246			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3247	if (rc < 0)
3248		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3249
3250	/* assert SW reset */
3251	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3252
3253	udelay(10);
3254
3255	/* set "initialization complete" bit to move adapter to D0 state */
3256	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3257
3258	IPW_DEBUG_TRACE("<<\n");
3259	return 0;
3260}
3261
3262/* Call this function from process context; it will sleep in request_firmware.
3263 * Probe is an OK place to call this from.
3264 */
3265static int ipw_reset_nic(struct ipw_priv *priv)
3266{
3267	int rc = 0;
3268	unsigned long flags;
3269
3270	IPW_DEBUG_TRACE(">>\n");
3271
3272	rc = ipw_init_nic(priv);
3273
3274	spin_lock_irqsave(&priv->lock, flags);
3275	/* Clear the 'host command active' bit... */
3276	priv->status &= ~STATUS_HCMD_ACTIVE;
3277	wake_up_interruptible(&priv->wait_command_queue);
3278	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3279	wake_up_interruptible(&priv->wait_state);
3280	spin_unlock_irqrestore(&priv->lock, flags);
3281
3282	IPW_DEBUG_TRACE("<<\n");
3283	return rc;
3284}
3285
3286
3287struct ipw_fw {
3288	__le32 ver;
3289	__le32 boot_size;
3290	__le32 ucode_size;
3291	__le32 fw_size;
3292	u8 data[0];
3293};
3294
3295static int ipw_get_fw(struct ipw_priv *priv,
3296		      const struct firmware **raw, const char *name)
3297{
3298	struct ipw_fw *fw;
3299	int rc;
3300
3301	/* ask firmware_class module to get the boot firmware off disk */
3302	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3303	if (rc < 0) {
3304		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3305		return rc;
3306	}
3307
3308	if ((*raw)->size < sizeof(*fw)) {
3309		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3310		return -EINVAL;
3311	}
3312
3313	fw = (void *)(*raw)->data;
3314
3315	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3316	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3317		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3318			  name, (*raw)->size);
3319		return -EINVAL;
3320	}
3321
3322	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3323		       name,
3324		       le32_to_cpu(fw->ver) >> 16,
3325		       le32_to_cpu(fw->ver) & 0xff,
3326		       (*raw)->size - sizeof(*fw));
3327	return 0;
3328}
3329
3330#define IPW_RX_BUF_SIZE (3000)
3331
3332static void ipw_rx_queue_reset(struct ipw_priv *priv,
3333				      struct ipw_rx_queue *rxq)
3334{
3335	unsigned long flags;
3336	int i;
3337
3338	spin_lock_irqsave(&rxq->lock, flags);
3339
3340	INIT_LIST_HEAD(&rxq->rx_free);
3341	INIT_LIST_HEAD(&rxq->rx_used);
3342
3343	/* Fill the rx_used queue with _all_ of the Rx buffers */
3344	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3345		/* In the reset function, these buffers may have been allocated
3346		 * to an SKB, so we need to unmap and free potential storage */
3347		if (rxq->pool[i].skb != NULL) {
3348			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3349					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3350			dev_kfree_skb(rxq->pool[i].skb);
3351			rxq->pool[i].skb = NULL;
3352		}
3353		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3354	}
3355
3356	/* Set us so that we have processed and used all buffers, but have
3357	 * not restocked the Rx queue with fresh buffers */
3358	rxq->read = rxq->write = 0;
3359	rxq->processed = RX_QUEUE_SIZE - 1;
3360	rxq->free_count = 0;
3361	spin_unlock_irqrestore(&rxq->lock, flags);
3362}
3363
3364#ifdef CONFIG_PM
3365static int fw_loaded = 0;
3366static const struct firmware *raw = NULL;
3367
3368static void free_firmware(void)
3369{
3370	if (fw_loaded) {
3371		release_firmware(raw);
3372		raw = NULL;
3373		fw_loaded = 0;
3374	}
3375}
3376#else
3377#define free_firmware() do {} while (0)
3378#endif
3379
3380static int ipw_load(struct ipw_priv *priv)
3381{
3382#ifndef CONFIG_PM
3383	const struct firmware *raw = NULL;
3384#endif
3385	struct ipw_fw *fw;
3386	u8 *boot_img, *ucode_img, *fw_img;
3387	u8 *name = NULL;
3388	int rc = 0, retries = 3;
3389
3390	switch (priv->ieee->iw_mode) {
3391	case IW_MODE_ADHOC:
3392		name = "ipw2200-ibss.fw";
3393		break;
3394#ifdef CONFIG_IPW2200_MONITOR
3395	case IW_MODE_MONITOR:
3396		name = "ipw2200-sniffer.fw";
3397		break;
3398#endif
3399	case IW_MODE_INFRA:
3400		name = "ipw2200-bss.fw";
3401		break;
3402	}
3403
3404	if (!name) {
3405		rc = -EINVAL;
3406		goto error;
3407	}
3408
3409#ifdef CONFIG_PM
3410	if (!fw_loaded) {
3411#endif
3412		rc = ipw_get_fw(priv, &raw, name);
3413		if (rc < 0)
3414			goto error;
3415#ifdef CONFIG_PM
3416	}
3417#endif
3418
3419	fw = (void *)raw->data;
3420	boot_img = &fw->data[0];
3421	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3422	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3423			   le32_to_cpu(fw->ucode_size)];
3424
3425	if (rc < 0)
3426		goto error;
3427
3428	if (!priv->rxq)
3429		priv->rxq = ipw_rx_queue_alloc(priv);
3430	else
3431		ipw_rx_queue_reset(priv, priv->rxq);
3432	if (!priv->rxq) {
3433		IPW_ERROR("Unable to initialize Rx queue\n");
3434		goto error;
3435	}
3436
3437      retry:
3438	/* Ensure interrupts are disabled */
3439	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3440	priv->status &= ~STATUS_INT_ENABLED;
3441
3442	/* ack pending interrupts */
3443	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3444
3445	ipw_stop_nic(priv);
3446
3447	rc = ipw_reset_nic(priv);
3448	if (rc < 0) {
3449		IPW_ERROR("Unable to reset NIC\n");
3450		goto error;
3451	}
3452
3453	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3454			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3455
3456	/* DMA the initial boot firmware into the device */
3457	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3458	if (rc < 0) {
3459		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3460		goto error;
3461	}
3462
3463	/* kick start the device */
3464	ipw_start_nic(priv);
3465
3466	/* wait for the device to finish its initial startup sequence */
3467	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3468			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3469	if (rc < 0) {
3470		IPW_ERROR("device failed to boot initial fw image\n");
3471		goto error;
3472	}
3473	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3474
3475	/* ack fw init done interrupt */
3476	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3477
3478	/* DMA the ucode into the device */
3479	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3480	if (rc < 0) {
3481		IPW_ERROR("Unable to load ucode: %d\n", rc);
3482		goto error;
3483	}
3484
3485	/* stop nic */
3486	ipw_stop_nic(priv);
3487
3488	/* DMA bss firmware into the device */
3489	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3490	if (rc < 0) {
3491		IPW_ERROR("Unable to load firmware: %d\n", rc);
3492		goto error;
3493	}
3494#ifdef CONFIG_PM
3495	fw_loaded = 1;
3496#endif
3497
3498	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3499
3500	rc = ipw_queue_reset(priv);
3501	if (rc < 0) {
3502		IPW_ERROR("Unable to initialize queues\n");
3503		goto error;
3504	}
3505
3506	/* Ensure interrupts are disabled */
3507	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3508	/* ack pending interrupts */
3509	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3510
3511	/* kick start the device */
3512	ipw_start_nic(priv);
3513
3514	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3515		if (retries > 0) {
3516			IPW_WARNING("Parity error.  Retrying init.\n");
3517			retries--;
3518			goto retry;
3519		}
3520
3521		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3522		rc = -EIO;
3523		goto error;
3524	}
3525
3526	/* wait for the device */
3527	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3528			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3529	if (rc < 0) {
3530		IPW_ERROR("device failed to start within 500ms\n");
3531		goto error;
3532	}
3533	IPW_DEBUG_INFO("device response after %dms\n", rc);
3534
3535	/* ack fw init done interrupt */
3536	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3537
3538	/* read eeprom data and initialize the eeprom region of sram */
3539	priv->eeprom_delay = 1;
3540	ipw_eeprom_init_sram(priv);
3541
3542	/* enable interrupts */
3543	ipw_enable_interrupts(priv);
3544
3545	/* Ensure our queue has valid packets */
3546	ipw_rx_queue_replenish(priv);
3547
3548	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3549
3550	/* ack pending interrupts */
3551	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3552
3553#ifndef CONFIG_PM
3554	release_firmware(raw);
3555#endif
3556	return 0;
3557
3558      error:
3559	if (priv->rxq) {
3560		ipw_rx_queue_free(priv, priv->rxq);
3561		priv->rxq = NULL;
3562	}
3563	ipw_tx_queue_free(priv);
3564	if (raw)
3565		release_firmware(raw);
3566#ifdef CONFIG_PM
3567	fw_loaded = 0;
3568	raw = NULL;
3569#endif
3570
3571	return rc;
3572}
3573
3574/**
3575 * DMA services
3576 *
3577 * Theory of operation
3578 *
3579 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3580 * Two empty entries are always kept in the buffer to protect from overflow.
3581 *
3582 * For a Tx queue, there are low mark and high mark limits.  If, after
3583 * queuing a packet for Tx, the free space drops below the low mark, the Tx
3584 * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), the Tx
3585 * queue is resumed once the free space rises above the high mark.
3586 *
3587 * The IPW operates with six queues, one receive queue in the device's
3588 * sram, one transmit queue for sending commands to the device firmware,
3589 * and four transmit queues for data.
3590 *
3591 * The four transmit queues allow for performing quality of service (qos)
3592 * transmissions as per the 802.11 protocol.  Currently Linux does not
3593 * provide a mechanism to the user for utilizing prioritized queues, so
3594 * we only utilize the first data transmit queue (queue1).
3595 */
3596
3597/**
3598 * Driver allocates buffers of this size for Rx
3599 */
3600
3601static inline int ipw_queue_space(const struct clx2_queue *q)
3602{
3603	int s = q->last_used - q->first_empty;
3604	if (s <= 0)
3605		s += q->n_bd;
3606	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3607	if (s < 0)
3608		s = 0;
3609	return s;
3610}
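
/*
 * Worked example with hypothetical values: for n_bd = 64, first_empty = 10
 * and last_used = 4, s = 4 - 10 = -6, wrapping gives -6 + 64 = 58, and
 * subtracting the 2-entry reserve leaves 56 usable slots.
 */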
3611
3612static inline int ipw_queue_inc_wrap(int index, int n_bd)
3613{
3614	return (++index == n_bd) ? 0 : index;
3615}
3616
3617/**
3618 * Initialize common DMA queue structure
3619 *
3620 * @param q                queue to init
3621 * @param count            Number of BD's to allocate. Should be power of 2
3622 * @param read_register    Address for 'read' register
3623 *                         (not offset within BAR, full address)
3624 * @param write_register   Address for 'write' register
3625 *                         (not offset within BAR, full address)
3626 * @param base_register    Address for 'base' register
3627 *                         (not offset within BAR, full address)
3628 * @param size             Address for 'size' register
3629 *                         (not offset within BAR, full address)
3630 */
3631static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3632			   int count, u32 read, u32 write, u32 base, u32 size)
3633{
3634	q->n_bd = count;
3635
3636	q->low_mark = q->n_bd / 4;
3637	if (q->low_mark < 4)
3638		q->low_mark = 4;
3639
3640	q->high_mark = q->n_bd / 8;
3641	if (q->high_mark < 2)
3642		q->high_mark = 2;
3643
3644	q->first_empty = q->last_used = 0;
3645	q->reg_r = read;
3646	q->reg_w = write;
3647
3648	ipw_write32(priv, base, q->dma_addr);
3649	ipw_write32(priv, size, count);
3650	ipw_write32(priv, read, 0);
3651	ipw_write32(priv, write, 0);
3652
3653	_ipw_read32(priv, 0x90);
3654}
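
/*
 * For example (hypothetical count), a queue initialized with count = 64
 * gets low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8, and the 'base',
 * 'size', 'read' and 'write' registers are programmed with the queue's DMA
 * address, 64, 0 and 0 respectively.
 */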
3655
3656static int ipw_queue_tx_init(struct ipw_priv *priv,
3657			     struct clx2_tx_queue *q,
3658			     int count, u32 read, u32 write, u32 base, u32 size)
3659{
3660	struct pci_dev *dev = priv->pci_dev;
3661
3662	q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3663	if (!q->txb) {
3664		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3665		return -ENOMEM;
3666	}
3667
3668	q->bd =
3669	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3670	if (!q->bd) {
3671		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3672			  sizeof(q->bd[0]) * count);
3673		kfree(q->txb);
3674		q->txb = NULL;
3675		return -ENOMEM;
3676	}
3677
3678	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3679	return 0;
3680}
3681
3682/**
3683 * Free one TFD, the one at index [txq->q.last_used].
3684 * Do NOT advance any indexes
3685 *
3686 * @param dev
3687 * @param txq
3688 */
3689static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3690				  struct clx2_tx_queue *txq)
3691{
3692	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3693	struct pci_dev *dev = priv->pci_dev;
3694	int i;
3695
3696	/* classify bd */
3697	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3698		/* nothing to cleanup after for host commands */
3699		return;
3700
3701	/* sanity check */
3702	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3703		IPW_ERROR("Too many chunks: %i\n",
3704			  le32_to_cpu(bd->u.data.num_chunks));
3705		/** @todo issue fatal error, it is quite serious situation */
3706		return;
3707	}
3708
3709	/* unmap chunks if any */
3710	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3711		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3712				 le16_to_cpu(bd->u.data.chunk_len[i]),
3713				 PCI_DMA_TODEVICE);
3714		if (txq->txb[txq->q.last_used]) {
3715			ieee80211_txb_free(txq->txb[txq->q.last_used]);
3716			txq->txb[txq->q.last_used] = NULL;
3717		}
3718	}
3719}
3720
3721/**
3722 * Deallocate DMA queue.
3723 *
3724 * Empty queue by removing and destroying all BD's.
3725 * Free all buffers.
3726 *
3727 * @param dev
3728 * @param q
3729 */
3730static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3731{
3732	struct clx2_queue *q = &txq->q;
3733	struct pci_dev *dev = priv->pci_dev;
3734
3735	if (q->n_bd == 0)
3736		return;
3737
3738	/* first, empty all BD's */
3739	for (; q->first_empty != q->last_used;
3740	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3741		ipw_queue_tx_free_tfd(priv, txq);
3742	}
3743
3744	/* free buffers belonging to queue itself */
3745	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3746			    q->dma_addr);
3747	kfree(txq->txb);
3748
3749	/* 0 fill whole structure */
3750	memset(txq, 0, sizeof(*txq));
3751}
3752
3753/**
3754 * Destroy all DMA queues and structures
3755 *
3756 * @param priv
3757 */
3758static void ipw_tx_queue_free(struct ipw_priv *priv)
3759{
3760	/* Tx CMD queue */
3761	ipw_queue_tx_free(priv, &priv->txq_cmd);
3762
3763	/* Tx queues */
3764	ipw_queue_tx_free(priv, &priv->txq[0]);
3765	ipw_queue_tx_free(priv, &priv->txq[1]);
3766	ipw_queue_tx_free(priv, &priv->txq[2]);
3767	ipw_queue_tx_free(priv, &priv->txq[3]);
3768}
3769
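/* Build a random ad-hoc BSSID from our own MAC: keep the manufacturer OUI,
 * force a locally-administered unicast address and randomize the host bytes.
 * For illustration, a factory MAC beginning 00:0e:35 would yield a BSSID
 * beginning 02:0e:35 followed by three random bytes. */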
3770static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3771{
3772	/* First 3 bytes are manufacturer */
3773	bssid[0] = priv->mac_addr[0];
3774	bssid[1] = priv->mac_addr[1];
3775	bssid[2] = priv->mac_addr[2];
3776
3777	/* Last bytes are random */
3778	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3779
3780	bssid[0] &= 0xfe;	/* clear multicast bit */
3781	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3782}
3783
3784static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3785{
3786	struct ipw_station_entry entry;
3787	int i;
3788
3789	for (i = 0; i < priv->num_stations; i++) {
3790		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3791			/* Another node is active in network */
3792			priv->missed_adhoc_beacons = 0;
3793			if (!(priv->config & CFG_STATIC_CHANNEL))
3794				/* when other nodes drop out, we drop out */
3795				priv->config &= ~CFG_ADHOC_PERSIST;
3796
3797			return i;
3798		}
3799	}
3800
3801	if (i == MAX_STATIONS)
3802		return IPW_INVALID_STATION;
3803
3804	IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3805
3806	entry.reserved = 0;
3807	entry.support_mode = 0;
3808	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3809	memcpy(priv->stations[i], bssid, ETH_ALEN);
3810	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3811			 &entry, sizeof(entry));
3812	priv->num_stations++;
3813
3814	return i;
3815}
3816
3817static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3818{
3819	int i;
3820
3821	for (i = 0; i < priv->num_stations; i++)
3822		if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3823			return i;
3824
3825	return IPW_INVALID_STATION;
3826}
3827
3828static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3829{
3830	int err;
3831
3832	if (priv->status & STATUS_ASSOCIATING) {
3833		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3834		queue_work(priv->workqueue, &priv->disassociate);
3835		return;
3836	}
3837
3838	if (!(priv->status & STATUS_ASSOCIATED)) {
3839		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3840		return;
3841	}
3842
3843	IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3844			"on channel %d.\n",
3845			MAC_ARG(priv->assoc_request.bssid),
3846			priv->assoc_request.channel);
3847
3848	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3849	priv->status |= STATUS_DISASSOCIATING;
3850
3851	if (quiet)
3852		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3853	else
3854		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3855
3856	err = ipw_send_associate(priv, &priv->assoc_request);
3857	if (err) {
3858		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3859			     "failed.\n");
3860		return;
3861	}
3862
3863}
3864
3865static int ipw_disassociate(void *data)
3866{
3867	struct ipw_priv *priv = data;
3868	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3869		return 0;
3870	ipw_send_disassociate(data, 0);
3871	return 1;
3872}
3873
3874static void ipw_bg_disassociate(struct work_struct *work)
3875{
3876	struct ipw_priv *priv =
3877		container_of(work, struct ipw_priv, disassociate);
3878	mutex_lock(&priv->mutex);
3879	ipw_disassociate(priv);
3880	mutex_unlock(&priv->mutex);
3881}
3882
3883static void ipw_system_config(struct work_struct *work)
3884{
3885	struct ipw_priv *priv =
3886		container_of(work, struct ipw_priv, system_config);
3887
3888#ifdef CONFIG_IPW2200_PROMISCUOUS
3889	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3890		priv->sys_config.accept_all_data_frames = 1;
3891		priv->sys_config.accept_non_directed_frames = 1;
3892		priv->sys_config.accept_all_mgmt_bcpr = 1;
3893		priv->sys_config.accept_all_mgmt_frames = 1;
3894	}
3895#endif
3896
3897	ipw_send_system_config(priv);
3898}
3899
3900struct ipw_status_code {
3901	u16 status;
3902	const char *reason;
3903};
3904
3905static const struct ipw_status_code ipw_status_codes[] = {
3906	{0x00, "Successful"},
3907	{0x01, "Unspecified failure"},
3908	{0x0A, "Cannot support all requested capabilities in the "
3909	 "Capability information field"},
3910	{0x0B, "Reassociation denied due to inability to confirm that "
3911	 "association exists"},
3912	{0x0C, "Association denied due to reason outside the scope of this "
3913	 "standard"},
3914	{0x0D,
3915	 "Responding station does not support the specified authentication "
3916	 "algorithm"},
3917	{0x0E,
3918	 "Received an Authentication frame with authentication sequence "
3919	 "transaction sequence number out of expected sequence"},
3920	{0x0F, "Authentication rejected because of challenge failure"},
3921	{0x10, "Authentication rejected due to timeout waiting for next "
3922	 "frame in sequence"},
3923	{0x11, "Association denied because AP is unable to handle additional "
3924	 "associated stations"},
3925	{0x12,
3926	 "Association denied due to requesting station not supporting all "
3927	 "of the datarates in the BSSBasicServiceSet Parameter"},
3928	{0x13,
3929	 "Association denied due to requesting station not supporting "
3930	 "short preamble operation"},
3931	{0x14,
3932	 "Association denied due to requesting station not supporting "
3933	 "PBCC encoding"},
3934	{0x15,
3935	 "Association denied due to requesting station not supporting "
3936	 "channel agility"},
3937	{0x19,
3938	 "Association denied due to requesting station not supporting "
3939	 "short slot operation"},
3940	{0x1A,
3941	 "Association denied due to requesting station not supporting "
3942	 "DSSS-OFDM operation"},
3943	{0x28, "Invalid Information Element"},
3944	{0x29, "Group Cipher is not valid"},
3945	{0x2A, "Pairwise Cipher is not valid"},
3946	{0x2B, "AKMP is not valid"},
3947	{0x2C, "Unsupported RSN IE version"},
3948	{0x2D, "Invalid RSN IE Capabilities"},
3949	{0x2E, "Cipher suite is rejected per security policy"},
3950};
3951
3952static const char *ipw_get_status_code(u16 status)
3953{
3954	int i;
3955	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3956		if (ipw_status_codes[i].status == (status & 0xff))
3957			return ipw_status_codes[i].reason;
3958	return "Unknown status value.";
3959}
3960
3961static inline void average_init(struct average *avg)
3962{
3963	memset(avg, 0, sizeof(*avg));
3964}
3965
3966#define DEPTH_RSSI 8
3967#define DEPTH_NOISE 16
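/* Two smoothing schemes are used for link statistics: exponential_average()
 * below is a cheap IIR filter (each new sample is weighted 1/depth) used for
 * the exp_avg_rssi/exp_avg_noise figures, while average_add()/average_value()
 * keep a true sliding window of the last AVG_ENTRIES samples and back the
 * missed-beacon average. */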
3968static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3969{
3970	return ((depth - 1) * prev_avg + val) / depth;
3971}
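/* Worked example with depth 8 (DEPTH_RSSI): prev_avg = -60, val = -52 gives
 * ((8 - 1) * (-60) + (-52)) / 8 = -472 / 8 = -59, i.e. each sample moves the
 * average by roughly 1/8 of the difference between the sample and the old
 * average. */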
3972
3973static void average_add(struct average *avg, s16 val)
3974{
3975	avg->sum -= avg->entries[avg->pos];
3976	avg->sum += val;
3977	avg->entries[avg->pos++] = val;
3978	if (unlikely(avg->pos == AVG_ENTRIES)) {
3979		avg->init = 1;
3980		avg->pos = 0;
3981	}
3982}
3983
3984static s16 average_value(struct average *avg)
3985{
3986	if (unlikely(!avg->init)) {
3987		if (avg->pos)
3988			return avg->sum / avg->pos;
3989		return 0;
3990	}
3991
3992	return avg->sum / AVG_ENTRIES;
3993}
3994
3995static void ipw_reset_stats(struct ipw_priv *priv)
3996{
3997	u32 len = sizeof(u32);
3998
3999	priv->quality = 0;
4000
4001	average_init(&priv->average_missed_beacons);
4002	priv->exp_avg_rssi = -60;
4003	priv->exp_avg_noise = -85 + 0x100;
4004
4005	priv->last_rate = 0;
4006	priv->last_missed_beacons = 0;
4007	priv->last_rx_packets = 0;
4008	priv->last_tx_packets = 0;
4009	priv->last_tx_failures = 0;
4010
4011	/* Firmware managed, reset only when NIC is restarted, so we have to
4012	 * normalize on the current value */
4013	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4014			&priv->last_rx_err, &len);
4015	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4016			&priv->last_tx_failures, &len);
4017
4018	/* Driver managed, reset with each association */
4019	priv->missed_adhoc_beacons = 0;
4020	priv->missed_beacons = 0;
4021	priv->tx_packets = 0;
4022	priv->rx_packets = 0;
4023
4024}
4025
4026static u32 ipw_get_max_rate(struct ipw_priv *priv)
4027{
4028	u32 i = 0x80000000;
4029	u32 mask = priv->rates_mask;
4030	/* If currently associated in B mode, restrict the maximum
4031	 * rate match to B rates */
4032	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4033		mask &= IEEE80211_CCK_RATES_MASK;
4034
4035	/* TODO: Verify that the rate is supported by the current rates
4036	 * list. */
4037
4038	while (i && !(mask & i))
4039		i >>= 1;
4040	switch (i) {
4041	case IEEE80211_CCK_RATE_1MB_MASK:
4042		return 1000000;
4043	case IEEE80211_CCK_RATE_2MB_MASK:
4044		return 2000000;
4045	case IEEE80211_CCK_RATE_5MB_MASK:
4046		return 5500000;
4047	case IEEE80211_OFDM_RATE_6MB_MASK:
4048		return 6000000;
4049	case IEEE80211_OFDM_RATE_9MB_MASK:
4050		return 9000000;
4051	case IEEE80211_CCK_RATE_11MB_MASK:
4052		return 11000000;
4053	case IEEE80211_OFDM_RATE_12MB_MASK:
4054		return 12000000;
4055	case IEEE80211_OFDM_RATE_18MB_MASK:
4056		return 18000000;
4057	case IEEE80211_OFDM_RATE_24MB_MASK:
4058		return 24000000;
4059	case IEEE80211_OFDM_RATE_36MB_MASK:
4060		return 36000000;
4061	case IEEE80211_OFDM_RATE_48MB_MASK:
4062		return 48000000;
4063	case IEEE80211_OFDM_RATE_54MB_MASK:
4064		return 54000000;
4065	}
4066
4067	if (priv->ieee->mode == IEEE_B)
4068		return 11000000;
4069	else
4070		return 54000000;
4071}
4072
4073static u32 ipw_get_current_rate(struct ipw_priv *priv)
4074{
4075	u32 rate, len = sizeof(rate);
4076	int err;
4077
4078	if (!(priv->status & STATUS_ASSOCIATED))
4079		return 0;
4080
4081	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4082		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4083				      &len);
4084		if (err) {
4085			IPW_DEBUG_INFO("failed querying ordinals.\n");
4086			return 0;
4087		}
4088	} else
4089		return ipw_get_max_rate(priv);
4090
4091	switch (rate) {
4092	case IPW_TX_RATE_1MB:
4093		return 1000000;
4094	case IPW_TX_RATE_2MB:
4095		return 2000000;
4096	case IPW_TX_RATE_5MB:
4097		return 5500000;
4098	case IPW_TX_RATE_6MB:
4099		return 6000000;
4100	case IPW_TX_RATE_9MB:
4101		return 9000000;
4102	case IPW_TX_RATE_11MB:
4103		return 11000000;
4104	case IPW_TX_RATE_12MB:
4105		return 12000000;
4106	case IPW_TX_RATE_18MB:
4107		return 18000000;
4108	case IPW_TX_RATE_24MB:
4109		return 24000000;
4110	case IPW_TX_RATE_36MB:
4111		return 36000000;
4112	case IPW_TX_RATE_48MB:
4113		return 48000000;
4114	case IPW_TX_RATE_54MB:
4115		return 54000000;
4116	}
4117
4118	return 0;
4119}
4120
4121#define IPW_STATS_INTERVAL (2 * HZ)
4122static void ipw_gather_stats(struct ipw_priv *priv)
4123{
4124	u32 rx_err, rx_err_delta, rx_packets_delta;
4125	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4126	u32 missed_beacons_percent, missed_beacons_delta;
4127	u32 quality = 0;
4128	u32 len = sizeof(u32);
4129	s16 rssi;
4130	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4131	    rate_quality;
4132	u32 max_rate;
4133
4134	if (!(priv->status & STATUS_ASSOCIATED)) {
4135		priv->quality = 0;
4136		return;
4137	}
4138
4139	/* Update the statistics */
4140	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4141			&priv->missed_beacons, &len);
4142	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4143	priv->last_missed_beacons = priv->missed_beacons;
4144	if (priv->assoc_request.beacon_interval) {
4145		missed_beacons_percent = missed_beacons_delta *
4146		    (HZ * priv->assoc_request.beacon_interval) /
4147		    (IPW_STATS_INTERVAL * 10);
4148	} else {
4149		missed_beacons_percent = 0;
4150	}
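	/* Sanity check on the units (assuming beacon_interval is expressed in
	 * TU of roughly 1 ms): with a typical 100 TU interval the expression
	 * above reduces to delta * HZ * 100 / (2 * HZ * 10) = delta * 5, and
	 * since about 20 beacons fit into the 2 second stats window, each
	 * missed beacon costs ~5% -- i.e. this really is a percentage of the
	 * window. */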
4151	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4152
4153	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4154	rx_err_delta = rx_err - priv->last_rx_err;
4155	priv->last_rx_err = rx_err;
4156
4157	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4158	tx_failures_delta = tx_failures - priv->last_tx_failures;
4159	priv->last_tx_failures = tx_failures;
4160
4161	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4162	priv->last_rx_packets = priv->rx_packets;
4163
4164	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4165	priv->last_tx_packets = priv->tx_packets;
4166
4167	/* Calculate quality based on the following:
4168	 *
4169	 * Missed beacon: 100% = 0, 0% = 70% missed
4170	 * Rate: 60% = 1 Mbps, 100% = Max
4171	 * Rx and Tx errors represent a straight % of total Rx/Tx
4172	 * RSSI: 100% = > -50,  0% = < -80
4173	 * Rx errors: 100% = 0, 0% = 50% missed
4174	 *
4175	 * The lowest computed quality is used.
4176	 *
4177	 */
4178#define BEACON_THRESHOLD 5
4179	beacon_quality = 100 - missed_beacons_percent;
4180	if (beacon_quality < BEACON_THRESHOLD)
4181		beacon_quality = 0;
4182	else
4183		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4184		    (100 - BEACON_THRESHOLD);
4185	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4186			beacon_quality, missed_beacons_percent);
4187
4188	priv->last_rate = ipw_get_current_rate(priv);
4189	max_rate = ipw_get_max_rate(priv);
4190	rate_quality = priv->last_rate * 40 / max_rate + 60;
4191	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4192			rate_quality, priv->last_rate / 1000000);
4193
4194	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4195		rx_quality = 100 - (rx_err_delta * 100) /
4196		    (rx_packets_delta + rx_err_delta);
4197	else
4198		rx_quality = 100;
4199	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4200			rx_quality, rx_err_delta, rx_packets_delta);
4201
4202	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4203		tx_quality = 100 - (tx_failures_delta * 100) /
4204		    (tx_packets_delta + tx_failures_delta);
4205	else
4206		tx_quality = 100;
4207	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4208			tx_quality, tx_failures_delta, tx_packets_delta);
4209
4210	rssi = priv->exp_avg_rssi;
4211	signal_quality =
4212	    (100 *
4213	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4214	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4215	     (priv->ieee->perfect_rssi - rssi) *
4216	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4217	      62 * (priv->ieee->perfect_rssi - rssi))) /
4218	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4219	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4220	if (signal_quality > 100)
4221		signal_quality = 100;
4222	else if (signal_quality < 1)
4223		signal_quality = 0;
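	/* Worked example (perfect_rssi/worst_rssi come from the ieee80211
	 * layer; the numbers here are only illustrative): with
	 * perfect_rssi = -20 and worst_rssi = -85, an RSSI of -52 yields
	 * (100*65*65 - 32*(15*65 + 62*32)) / (65*65) = 77 in integer math,
	 * so reported quality stays fairly high until the signal approaches
	 * worst_rssi. */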
4224
4225	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4226			signal_quality, rssi);
4227
4228	quality = min(beacon_quality,
4229		      min(rate_quality,
4230			  min(tx_quality, min(rx_quality, signal_quality))));
4231	if (quality == beacon_quality)
4232		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4233				quality);
4234	if (quality == rate_quality)
4235		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4236				quality);
4237	if (quality == tx_quality)
4238		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4239				quality);
4240	if (quality == rx_quality)
4241		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4242				quality);
4243	if (quality == signal_quality)
4244		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4245				quality);
4246
4247	priv->quality = quality;
4248
4249	queue_delayed_work(priv->workqueue, &priv->gather_stats,
4250			   IPW_STATS_INTERVAL);
4251}
4252
4253static void ipw_bg_gather_stats(struct work_struct *work)
4254{
4255	struct ipw_priv *priv =
4256		container_of(work, struct ipw_priv, gather_stats.work);
4257	mutex_lock(&priv->mutex);
4258	ipw_gather_stats(priv);
4259	mutex_unlock(&priv->mutex);
4260}
4261
4262/* Missed beacon behavior:
4263 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4264 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4265 * Above disassociate threshold, give up and stop scanning.
4266 * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
4267static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4268					    int missed_count)
4269{
4270	priv->notif_missed_beacons = missed_count;
4271
4272	if (missed_count > priv->disassociate_threshold &&
4273	    priv->status & STATUS_ASSOCIATED) {
4274		/* If associated and we've hit the missed
4275		 * beacon threshold, disassociate, turn
4276		 * off roaming, and abort any active scans */
4277		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4278			  IPW_DL_STATE | IPW_DL_ASSOC,
4279			  "Missed beacon: %d - disassociate\n", missed_count);
4280		priv->status &= ~STATUS_ROAMING;
4281		if (priv->status & STATUS_SCANNING) {
4282			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4283				  IPW_DL_STATE,
4284				  "Aborting scan with missed beacon.\n");
4285			queue_work(priv->workqueue, &priv->abort_scan);
4286		}
4287
4288		queue_work(priv->workqueue, &priv->disassociate);
4289		return;
4290	}
4291
4292	if (priv->status & STATUS_ROAMING) {
4293		/* If we are currently roaming, then just
4294		 * print a debug statement... */
4295		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4296			  "Missed beacon: %d - roam in progress\n",
4297			  missed_count);
4298		return;
4299	}
4300
4301	if (roaming &&
4302	    (missed_count > priv->roaming_threshold &&
4303	     missed_count <= priv->disassociate_threshold)) {
4304		/* If we are not already roaming, set the ROAM
4305		 * bit in the status and kick off a scan.
4306		 * This can happen several times before we reach
4307		 * disassociate_threshold. */
4308		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4309			  "Missed beacon: %d - initiate "
4310			  "roaming\n", missed_count);
4311		if (!(priv->status & STATUS_ROAMING)) {
4312			priv->status |= STATUS_ROAMING;
4313			if (!(priv->status & STATUS_SCANNING))
4314				queue_delayed_work(priv->workqueue,
4315						   &priv->request_scan, 0);
4316		}
4317		return;
4318	}
4319
4320	if (priv->status & STATUS_SCANNING) {
4321		/* Stop scan to keep fw from getting
4322		 * stuck (only if we aren't roaming --
4323		 * otherwise we'll never scan more than 2 or 3
4324		 * channels..) */
4325		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4326			  "Aborting scan with missed beacon.\n");
4327		queue_work(priv->workqueue, &priv->abort_scan);
4328	}
4329
4330	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4331}
4332
4333/**
4334 * Handle host notification packet.
4335 * Called from interrupt routine
4336 */
4337static void ipw_rx_notification(struct ipw_priv *priv,
4338				       struct ipw_rx_notification *notif)
4339{
4340	notif->size = le16_to_cpu(notif->size);
4341
4342	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4343
4344	switch (notif->subtype) {
4345	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4346			struct notif_association *assoc = &notif->u.assoc;
4347
4348			switch (assoc->state) {
4349			case CMAS_ASSOCIATED:{
4350					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4351						  IPW_DL_ASSOC,
4352						  "associated: '%s' " MAC_FMT
4353						  " \n",
4354						  escape_essid(priv->essid,
4355							       priv->essid_len),
4356						  MAC_ARG(priv->bssid));
4357
4358					switch (priv->ieee->iw_mode) {
4359					case IW_MODE_INFRA:
4360						memcpy(priv->ieee->bssid,
4361						       priv->bssid, ETH_ALEN);
4362						break;
4363
4364					case IW_MODE_ADHOC:
4365						memcpy(priv->ieee->bssid,
4366						       priv->bssid, ETH_ALEN);
4367
4368						/* clear out the station table */
4369						priv->num_stations = 0;
4370
4371						IPW_DEBUG_ASSOC
4372						    ("queueing adhoc check\n");
4373						queue_delayed_work(priv->
4374								   workqueue,
4375								   &priv->
4376								   adhoc_check,
4377								   priv->
4378								   assoc_request.
4379								   beacon_interval);
4380						break;
4381					}
4382
4383					priv->status &= ~STATUS_ASSOCIATING;
4384					priv->status |= STATUS_ASSOCIATED;
4385					queue_work(priv->workqueue,
4386						   &priv->system_config);
4387
4388#ifdef CONFIG_IPW2200_QOS
4389#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4390			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4391					if ((priv->status & STATUS_AUTH) &&
4392					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4393					     == IEEE80211_STYPE_ASSOC_RESP)) {
4394						if ((sizeof
4395						     (struct
4396						      ieee80211_assoc_response)
4397						     <= notif->size)
4398						    && (notif->size <= 2314)) {
4399							struct
4400							ieee80211_rx_stats
4401							    stats = {
4402								.len =
4403								    notif->
4404								    size - 1,
4405							};
4406
4407							IPW_DEBUG_QOS
4408							    ("QoS Associate "
4409							     "size %d\n",
4410							     notif->size);
4411							ieee80211_rx_mgt(priv->
4412									 ieee,
4413									 (struct
4414									  ieee80211_hdr_4addr
4415									  *)
4416									 &notif->u.raw, &stats);
4417						}
4418					}
4419#endif
4420
4421					schedule_work(&priv->link_up);
4422
4423					break;
4424				}
4425
4426			case CMAS_AUTHENTICATED:{
4427					if (priv->
4428					    status & (STATUS_ASSOCIATED |
4429						      STATUS_AUTH)) {
4430						struct notif_authenticate *auth
4431						    = &notif->u.auth;
4432						IPW_DEBUG(IPW_DL_NOTIF |
4433							  IPW_DL_STATE |
4434							  IPW_DL_ASSOC,
4435							  "deauthenticated: '%s' "
4436							  MAC_FMT
4437							  ": (0x%04X) - %s \n",
4438							  escape_essid(priv->
4439								       essid,
4440								       priv->
4441								       essid_len),
4442							  MAC_ARG(priv->bssid),
4443							  ntohs(auth->status),
4444							  ipw_get_status_code
4445							  (ntohs
4446							   (auth->status)));
4447
4448						priv->status &=
4449						    ~(STATUS_ASSOCIATING |
4450						      STATUS_AUTH |
4451						      STATUS_ASSOCIATED);
4452
4453						schedule_work(&priv->link_down);
4454						break;
4455					}
4456
4457					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4458						  IPW_DL_ASSOC,
4459						  "authenticated: '%s' " MAC_FMT
4460						  "\n",
4461						  escape_essid(priv->essid,
4462							       priv->essid_len),
4463						  MAC_ARG(priv->bssid));
4464					break;
4465				}
4466
4467			case CMAS_INIT:{
4468					if (priv->status & STATUS_AUTH) {
4469						struct
4470						    ieee80211_assoc_response
4471						*resp;
4472						resp =
4473						    (struct
4474						     ieee80211_assoc_response
4475						     *)&notif->u.raw;
4476						IPW_DEBUG(IPW_DL_NOTIF |
4477							  IPW_DL_STATE |
4478							  IPW_DL_ASSOC,
4479							  "association failed (0x%04X): %s\n",
4480							  ntohs(resp->status),
4481							  ipw_get_status_code
4482							  (ntohs
4483							   (resp->status)));
4484					}
4485
4486					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4487						  IPW_DL_ASSOC,
4488						  "disassociated: '%s' " MAC_FMT
4489						  " \n",
4490						  escape_essid(priv->essid,
4491							       priv->essid_len),
4492						  MAC_ARG(priv->bssid));
4493
4494					priv->status &=
4495					    ~(STATUS_DISASSOCIATING |
4496					      STATUS_ASSOCIATING |
4497					      STATUS_ASSOCIATED | STATUS_AUTH);
4498					if (priv->assoc_network
4499					    && (priv->assoc_network->
4500						capability &
4501						WLAN_CAPABILITY_IBSS))
4502						ipw_remove_current_network
4503						    (priv);
4504
4505					schedule_work(&priv->link_down);
4506
4507					break;
4508				}
4509
4510			case CMAS_RX_ASSOC_RESP:
4511				break;
4512
4513			default:
4514				IPW_ERROR("assoc: unknown (%d)\n",
4515					  assoc->state);
4516				break;
4517			}
4518
4519			break;
4520		}
4521
4522	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4523			struct notif_authenticate *auth = &notif->u.auth;
4524			switch (auth->state) {
4525			case CMAS_AUTHENTICATED:
4526				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4527					  "authenticated: '%s' " MAC_FMT " \n",
4528					  escape_essid(priv->essid,
4529						       priv->essid_len),
4530					  MAC_ARG(priv->bssid));
4531				priv->status |= STATUS_AUTH;
4532				break;
4533
4534			case CMAS_INIT:
4535				if (priv->status & STATUS_AUTH) {
4536					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4537						  IPW_DL_ASSOC,
4538						  "authentication failed (0x%04X): %s\n",
4539						  ntohs(auth->status),
4540						  ipw_get_status_code(ntohs
4541								      (auth->
4542								       status)));
4543				}
4544				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4545					  IPW_DL_ASSOC,
4546					  "deauthenticated: '%s' " MAC_FMT "\n",
4547					  escape_essid(priv->essid,
4548						       priv->essid_len),
4549					  MAC_ARG(priv->bssid));
4550
4551				priv->status &= ~(STATUS_ASSOCIATING |
4552						  STATUS_AUTH |
4553						  STATUS_ASSOCIATED);
4554
4555				schedule_work(&priv->link_down);
4556				break;
4557
4558			case CMAS_TX_AUTH_SEQ_1:
4559				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4560					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4561				break;
4562			case CMAS_RX_AUTH_SEQ_2:
4563				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4564					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4565				break;
4566			case CMAS_AUTH_SEQ_1_PASS:
4567				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4568					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4569				break;
4570			case CMAS_AUTH_SEQ_1_FAIL:
4571				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4572					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4573				break;
4574			case CMAS_TX_AUTH_SEQ_3:
4575				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4576					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4577				break;
4578			case CMAS_RX_AUTH_SEQ_4:
4579				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4580					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4581				break;
4582			case CMAS_AUTH_SEQ_2_PASS:
4583				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4584					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4585				break;
4586			case CMAS_AUTH_SEQ_2_FAIL:
4587				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4588					  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4589				break;
4590			case CMAS_TX_ASSOC:
4591				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4592					  IPW_DL_ASSOC, "TX_ASSOC\n");
4593				break;
4594			case CMAS_RX_ASSOC_RESP:
4595				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4596					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4597
4598				break;
4599			case CMAS_ASSOCIATED:
4600				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4601					  IPW_DL_ASSOC, "ASSOCIATED\n");
4602				break;
4603			default:
4604				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4605						auth->state);
4606				break;
4607			}
4608			break;
4609		}
4610
4611	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4612			struct notif_channel_result *x =
4613			    &notif->u.channel_result;
4614
4615			if (notif->size == sizeof(*x)) {
4616				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4617					       x->channel_num);
4618			} else {
4619				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4620					       "(should be %zd)\n",
4621					       notif->size, sizeof(*x));
4622			}
4623			break;
4624		}
4625
4626	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4627			struct notif_scan_complete *x = &notif->u.scan_complete;
4628			if (notif->size == sizeof(*x)) {
4629				IPW_DEBUG_SCAN
4630				    ("Scan completed: type %d, %d channels, "
4631				     "%d status\n", x->scan_type,
4632				     x->num_channels, x->status);
4633			} else {
4634				IPW_ERROR("Scan completed of wrong size %d "
4635					  "(should be %zd)\n",
4636					  notif->size, sizeof(*x));
4637			}
4638
4639			priv->status &=
4640			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4641
4642			wake_up_interruptible(&priv->wait_state);
4643			cancel_delayed_work(&priv->scan_check);
4644
4645			if (priv->status & STATUS_EXIT_PENDING)
4646				break;
4647
4648			priv->ieee->scans++;
4649
4650#ifdef CONFIG_IPW2200_MONITOR
4651			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4652				priv->status |= STATUS_SCAN_FORCED;
4653				queue_delayed_work(priv->workqueue,
4654						   &priv->request_scan, 0);
4655				break;
4656			}
4657			priv->status &= ~STATUS_SCAN_FORCED;
4658#endif				/* CONFIG_IPW2200_MONITOR */
4659
4660			if (!(priv->status & (STATUS_ASSOCIATED |
4661					      STATUS_ASSOCIATING |
4662					      STATUS_ROAMING |
4663					      STATUS_DISASSOCIATING)))
4664				queue_work(priv->workqueue, &priv->associate);
4665			else if (priv->status & STATUS_ROAMING) {
4666				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4667					/* If a scan completed and we are in roam mode, then
4668					 * the scan that completed was the one requested as a
4669					 * result of entering roam... so, schedule the
4670					 * roam work */
4671					queue_work(priv->workqueue,
4672						   &priv->roam);
4673				else
4674					/* Don't schedule if we aborted the scan */
4675					priv->status &= ~STATUS_ROAMING;
4676			} else if (priv->status & STATUS_SCAN_PENDING)
4677				queue_delayed_work(priv->workqueue,
4678						   &priv->request_scan, 0);
4679			else if (priv->config & CFG_BACKGROUND_SCAN
4680				 && priv->status & STATUS_ASSOCIATED)
4681				queue_delayed_work(priv->workqueue,
4682						   &priv->request_scan, HZ);
4683
4684			/* Send an empty event to user space.
4685			 * We don't send the received data on the event because
4686			 * it would require us to do complex transcoding, and
4687			 * we want to minimise the work done in the irq handler
4688			 * Use a request to extract the data.
4689			 * Also, we generate this even for any scan, regardless
4690			 * on how the scan was initiated. User space can just
4691			 * sync on periodic scan to get fresh data...
4692			 * Jean II */
4693			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4694				union iwreq_data wrqu;
4695
4696				wrqu.data.length = 0;
4697				wrqu.data.flags = 0;
4698				wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4699						    &wrqu, NULL);
4700			}
4701			break;
4702		}
4703
4704	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4705			struct notif_frag_length *x = &notif->u.frag_len;
4706
4707			if (notif->size == sizeof(*x))
4708				IPW_ERROR("Frag length: %d\n",
4709					  le16_to_cpu(x->frag_length));
4710			else
4711				IPW_ERROR("Frag length of wrong size %d "
4712					  "(should be %zd)\n",
4713					  notif->size, sizeof(*x));
4714			break;
4715		}
4716
4717	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4718			struct notif_link_deterioration *x =
4719			    &notif->u.link_deterioration;
4720
4721			if (notif->size == sizeof(*x)) {
4722				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4723					"link deterioration: type %d, cnt %d\n",
4724					x->silence_notification_type,
4725					x->silence_count);
4726				memcpy(&priv->last_link_deterioration, x,
4727				       sizeof(*x));
4728			} else {
4729				IPW_ERROR("Link Deterioration of wrong size %d "
4730					  "(should be %zd)\n",
4731					  notif->size, sizeof(*x));
4732			}
4733			break;
4734		}
4735
4736	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4737			IPW_ERROR("Dino config\n");
4738			if (priv->hcmd
4739			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4740				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4741
4742			break;
4743		}
4744
4745	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4746			struct notif_beacon_state *x = &notif->u.beacon_state;
4747			if (notif->size != sizeof(*x)) {
4748				IPW_ERROR
4749				    ("Beacon state of wrong size %d (should "
4750				     "be %zd)\n", notif->size, sizeof(*x));
4751				break;
4752			}
4753
4754			if (le32_to_cpu(x->state) ==
4755			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4756				ipw_handle_missed_beacon(priv,
4757							 le32_to_cpu(x->
4758								     number));
4759
4760			break;
4761		}
4762
4763	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4764			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4765			if (notif->size == sizeof(*x)) {
4766				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4767					  "0x%02x station %d\n",
4768					  x->key_state, x->security_type,
4769					  x->station_index);
4770				break;
4771			}
4772
4773			IPW_ERROR
4774			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4775			     notif->size, sizeof(*x));
4776			break;
4777		}
4778
4779	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4780			struct notif_calibration *x = &notif->u.calibration;
4781
4782			if (notif->size == sizeof(*x)) {
4783				memcpy(&priv->calib, x, sizeof(*x));
4784				IPW_DEBUG_INFO("TODO: Calibration\n");
4785				break;
4786			}
4787
4788			IPW_ERROR
4789			    ("Calibration of wrong size %d (should be %zd)\n",
4790			     notif->size, sizeof(*x));
4791			break;
4792		}
4793
4794	case HOST_NOTIFICATION_NOISE_STATS:{
4795			if (notif->size == sizeof(u32)) {
4796				priv->exp_avg_noise =
4797				    exponential_average(priv->exp_avg_noise,
4798				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4799				    DEPTH_NOISE);
4800				break;
4801			}
4802
4803			IPW_ERROR
4804			    ("Noise stat is wrong size %d (should be %zd)\n",
4805			     notif->size, sizeof(u32));
4806			break;
4807		}
4808
4809	default:
4810		IPW_DEBUG_NOTIF("Unknown notification: "
4811				"subtype=%d,flags=0x%2x,size=%d\n",
4812				notif->subtype, notif->flags, notif->size);
4813	}
4814}
4815
4816/**
4817 * Destroys all DMA structures and initialises them again
4818 *
4819 * @param priv
4820 * @return error code
4821 */
4822static int ipw_queue_reset(struct ipw_priv *priv)
4823{
4824	int rc = 0;
4825	/** @todo customize queue sizes */
4826	int nTx = 64, nTxCmd = 8;
4827	ipw_tx_queue_free(priv);
4828	/* Tx CMD queue */
4829	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4830			       IPW_TX_CMD_QUEUE_READ_INDEX,
4831			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4832			       IPW_TX_CMD_QUEUE_BD_BASE,
4833			       IPW_TX_CMD_QUEUE_BD_SIZE);
4834	if (rc) {
4835		IPW_ERROR("Tx Cmd queue init failed\n");
4836		goto error;
4837	}
4838	/* Tx queue(s) */
4839	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4840			       IPW_TX_QUEUE_0_READ_INDEX,
4841			       IPW_TX_QUEUE_0_WRITE_INDEX,
4842			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4843	if (rc) {
4844		IPW_ERROR("Tx 0 queue init failed\n");
4845		goto error;
4846	}
4847	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4848			       IPW_TX_QUEUE_1_READ_INDEX,
4849			       IPW_TX_QUEUE_1_WRITE_INDEX,
4850			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4851	if (rc) {
4852		IPW_ERROR("Tx 1 queue init failed\n");
4853		goto error;
4854	}
4855	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4856			       IPW_TX_QUEUE_2_READ_INDEX,
4857			       IPW_TX_QUEUE_2_WRITE_INDEX,
4858			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4859	if (rc) {
4860		IPW_ERROR("Tx 2 queue init failed\n");
4861		goto error;
4862	}
4863	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4864			       IPW_TX_QUEUE_3_READ_INDEX,
4865			       IPW_TX_QUEUE_3_WRITE_INDEX,
4866			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4867	if (rc) {
4868		IPW_ERROR("Tx 3 queue init failed\n");
4869		goto error;
4870	}
4871	/* statistics */
4872	priv->rx_bufs_min = 0;
4873	priv->rx_pend_max = 0;
4874	return rc;
4875
4876      error:
4877	ipw_tx_queue_free(priv);
4878	return rc;
4879}
4880
4881/**
4882 * Reclaim Tx queue entries no more used by NIC.
4883 *
4884 * When the FW advances the 'R' index, all entries between the old and
4885 * new 'R' index need to be reclaimed. As a result, some free space
4886 * forms. If there is enough free space (> low mark), wake the Tx queue.
4887 *
4888 * @note Need to protect against garbage in 'R' index
4889 * @param priv
4890 * @param txq
4891 * @param qindex
4892 * @return Number of used entries remaining in the queue
4893 */
4894static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4895				struct clx2_tx_queue *txq, int qindex)
4896{
4897	u32 hw_tail;
4898	int used;
4899	struct clx2_queue *q = &txq->q;
4900
4901	hw_tail = ipw_read32(priv, q->reg_r);
4902	if (hw_tail >= q->n_bd) {
4903		IPW_ERROR
4904		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4905		     hw_tail, q->n_bd);
4906		goto done;
4907	}
4908	for (; q->last_used != hw_tail;
4909	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4910		ipw_queue_tx_free_tfd(priv, txq);
4911		priv->tx_packets++;
4912	}
4913      done:
4914	if ((ipw_queue_space(q) > q->low_mark) &&
4915	    (qindex >= 0) &&
4916	    (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4917		netif_wake_queue(priv->net_dev);
4918	used = q->first_empty - q->last_used;
4919	if (used < 0)
4920		used += q->n_bd;
4921
4922	return used;
4923}
4924
4925static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4926			     int len, int sync)
4927{
4928	struct clx2_tx_queue *txq = &priv->txq_cmd;
4929	struct clx2_queue *q = &txq->q;
4930	struct tfd_frame *tfd;
4931
4932	if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4933		IPW_ERROR("No space for Tx\n");
4934		return -EBUSY;
4935	}
4936
4937	tfd = &txq->bd[q->first_empty];
4938	txq->txb[q->first_empty] = NULL;
4939
4940	memset(tfd, 0, sizeof(*tfd));
4941	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4942	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4943	priv->hcmd_seq++;
4944	tfd->u.cmd.index = hcmd;
4945	tfd->u.cmd.length = len;
4946	memcpy(tfd->u.cmd.payload, buf, len);
4947	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4948	ipw_write32(priv, q->reg_w, q->first_empty);
4949	_ipw_read32(priv, 0x90);
4950
4951	return 0;
4952}
4953
4954/*
4955 * Rx theory of operation
4956 *
4957 * The host allocates 32 DMA target addresses and passes the host address
4958 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4959 * 0 to 31
4960 *
4961 * Rx Queue Indexes
4962 * The host/firmware share two index registers for managing the Rx buffers.
4963 *
4964 * The READ index maps to the first position that the firmware may be writing
4965 * to -- the driver can read up to (but not including) this position and get
4966 * good data.
4967 * The READ index is managed by the firmware once the card is enabled.
4968 *
4969 * The WRITE index maps to the last position the driver has read from -- the
4970 * position preceding WRITE is the last slot the firmware can place a packet.
4971 *
4972 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4973 * WRITE = READ.
4974 *
4975 * During initialization the host sets up the READ queue position to the first
4976 * INDEX position, and WRITE to the last (READ - 1 wrapped)
4977 *
4978 * When the firmware places a packet in a buffer it will advance the READ index
4979 * and fire the RX interrupt.  The driver can then query the READ index and
4980 * process as many packets as possible, moving the WRITE index forward as it
4981 * resets the Rx queue buffers with new memory.
4982 *
4983 * The management in the driver is as follows:
4984 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
4985 *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4986 *   to replenish the ipw->rxq->rx_free.
4987 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4988 *   ipw->rxq is replenished and the READ INDEX is updated (updating the
4989 *   'processed' and 'read' driver indexes as well)
4990 * + A received packet is processed and handed to the kernel network stack,
4991 *   detached from the ipw->rxq.  The driver 'processed' index is updated.
4992 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4993 *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4994 *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
4995 *   were enough free buffers and RX_STALLED is set it is cleared.
4996 *
4997 *
4998 * Driver sequence:
4999 *
5000 * ipw_rx_queue_alloc()       Allocates rx_free
5001 * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5002 *                            ipw_rx_queue_restock
5003 * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5004 *                            queue, updates firmware pointers, and updates
5005 *                            the WRITE index.  If insufficient rx_free buffers
5006 *                            are available, schedules ipw_rx_queue_replenish
5007 *
5008 * -- enable interrupts --
5009 * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5010 *                            READ INDEX, detaching the SKB from the pool.
5011 *                            Moves the packet buffer from queue to rx_used.
5012 *                            Calls ipw_rx_queue_restock to refill any empty
5013 *                            slots.
5014 * ...
5015 *
5016 */
5017
5018/*
5019 * If there are slots in the RX queue that  need to be restocked,
5020 * and we have free pre-allocated buffers, fill the ranks as much
5021 * as we can pulling from rx_free.
5022 *
5023 * This moves the 'write' index forward to catch up with 'processed', and
5024 * also updates the memory address in the firmware to reference the new
5025 * target buffer.
5026 */
5027static void ipw_rx_queue_restock(struct ipw_priv *priv)
5028{
5029	struct ipw_rx_queue *rxq = priv->rxq;
5030	struct list_head *element;
5031	struct ipw_rx_mem_buffer *rxb;
5032	unsigned long flags;
5033	int write;
5034
5035	spin_lock_irqsave(&rxq->lock, flags);
5036	write = rxq->write;
5037	while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5038		element = rxq->rx_free.next;
5039		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5040		list_del(element);
5041
5042		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5043			    rxb->dma_addr);
5044		rxq->queue[rxq->write] = rxb;
5045		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5046		rxq->free_count--;
5047	}
5048	spin_unlock_irqrestore(&rxq->lock, flags);
5049
5050	/* If the pre-allocated buffer pool is dropping low, schedule to
5051	 * refill it */
5052	if (rxq->free_count <= RX_LOW_WATERMARK)
5053		queue_work(priv->workqueue, &priv->rx_replenish);
5054
5055	/* If we've added more space for the firmware to place data, tell it */
5056	if (write != rxq->write)
5057		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5058}
5059
5060/*
5061 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5062 * Also restock the Rx queue via ipw_rx_queue_restock.
5063 *
5064 * This is called as a scheduled work item (except during initialization)
5065 */
5066static void ipw_rx_queue_replenish(void *data)
5067{
5068	struct ipw_priv *priv = data;
5069	struct ipw_rx_queue *rxq = priv->rxq;
5070	struct list_head *element;
5071	struct ipw_rx_mem_buffer *rxb;
5072	unsigned long flags;
5073
5074	spin_lock_irqsave(&rxq->lock, flags);
5075	while (!list_empty(&rxq->rx_used)) {
5076		element = rxq->rx_used.next;
5077		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5078		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5079		if (!rxb->skb) {
5080			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5081			       priv->net_dev->name);
5082			/* We don't reschedule replenish work here -- we will
5083			 * call the restock method and if it still needs
5084			 * more buffers it will schedule replenish */
5085			break;
5086		}
5087		list_del(element);
5088
5089		rxb->dma_addr =
5090		    pci_map_single(priv->pci_dev, rxb->skb->data,
5091				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5092
5093		list_add_tail(&rxb->list, &rxq->rx_free);
5094		rxq->free_count++;
5095	}
5096	spin_unlock_irqrestore(&rxq->lock, flags);
5097
5098	ipw_rx_queue_restock(priv);
5099}
5100
5101static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5102{
5103	struct ipw_priv *priv =
5104		container_of(work, struct ipw_priv, rx_replenish);
5105	mutex_lock(&priv->mutex);
5106	ipw_rx_queue_replenish(priv);
5107	mutex_unlock(&priv->mutex);
5108}
5109
5110/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5111 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5112 * This free routine walks the list of POOL entries and, if the SKB is
5113 * non-NULL, it is unmapped and freed.
5114 */
5115static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5116{
5117	int i;
5118
5119	if (!rxq)
5120		return;
5121
5122	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5123		if (rxq->pool[i].skb != NULL) {
5124			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5125					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5126			dev_kfree_skb(rxq->pool[i].skb);
5127		}
5128	}
5129
5130	kfree(rxq);
5131}
5132
5133static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5134{
5135	struct ipw_rx_queue *rxq;
5136	int i;
5137
5138	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5139	if (unlikely(!rxq)) {
5140		IPW_ERROR("memory allocation failed\n");
5141		return NULL;
5142	}
5143	spin_lock_init(&rxq->lock);
5144	INIT_LIST_HEAD(&rxq->rx_free);
5145	INIT_LIST_HEAD(&rxq->rx_used);
5146
5147	/* Fill the rx_used queue with _all_ of the Rx buffers */
5148	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5149		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5150
5151	/* Set us so that we have processed and used all buffers, but have
5152	 * not restocked the Rx queue with fresh buffers */
5153	rxq->read = rxq->write = 0;
5154	rxq->processed = RX_QUEUE_SIZE - 1;
5155	rxq->free_count = 0;
5156
5157	return rxq;
5158}
5159
5160static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5161{
5162	rate &= ~IEEE80211_BASIC_RATE_MASK;
5163	if (ieee_mode == IEEE_A) {
5164		switch (rate) {
5165		case IEEE80211_OFDM_RATE_6MB:
5166			return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5167			    1 : 0;
5168		case IEEE80211_OFDM_RATE_9MB:
5169			return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5170			    1 : 0;
5171		case IEEE80211_OFDM_RATE_12MB:
5172			return priv->
5173			    rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5174		case IEEE80211_OFDM_RATE_18MB:
5175			return priv->
5176			    rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5177		case IEEE80211_OFDM_RATE_24MB:
5178			return priv->
5179			    rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5180		case IEEE80211_OFDM_RATE_36MB:
5181			return priv->
5182			    rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5183		case IEEE80211_OFDM_RATE_48MB:
5184			return priv->
5185			    rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5186		case IEEE80211_OFDM_RATE_54MB:
5187			return priv->
5188			    rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5189		default:
5190			return 0;
5191		}
5192	}
5193
5194	/* B and G mixed */
5195	switch (rate) {
5196	case IEEE80211_CCK_RATE_1MB:
5197		return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5198	case IEEE80211_CCK_RATE_2MB:
5199		return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5200	case IEEE80211_CCK_RATE_5MB:
5201		return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5202	case IEEE80211_CCK_RATE_11MB:
5203		return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5204	}
5205
5206	/* If we are limited to B modulations, bail at this point */
5207	if (ieee_mode == IEEE_B)
5208		return 0;
5209
5210	/* G */
5211	switch (rate) {
5212	case IEEE80211_OFDM_RATE_6MB:
5213		return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5214	case IEEE80211_OFDM_RATE_9MB:
5215		return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5216	case IEEE80211_OFDM_RATE_12MB:
5217		return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5218	case IEEE80211_OFDM_RATE_18MB:
5219		return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5220	case IEEE80211_OFDM_RATE_24MB:
5221		return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5222	case IEEE80211_OFDM_RATE_36MB:
5223		return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5224	case IEEE80211_OFDM_RATE_48MB:
5225		return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5226	case IEEE80211_OFDM_RATE_54MB:
5227		return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5228	}
5229
5230	return 0;
5231}
5232
5233static int ipw_compatible_rates(struct ipw_priv *priv,
5234				const struct ieee80211_network *network,
5235				struct ipw_supported_rates *rates)
5236{
5237	int num_rates, i;
5238
5239	memset(rates, 0, sizeof(*rates));
5240	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5241	rates->num_rates = 0;
5242	for (i = 0; i < num_rates; i++) {
5243		if (!ipw_is_rate_in_mask(priv, network->mode,
5244					 network->rates[i])) {
5245
5246			if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5247				IPW_DEBUG_SCAN("Adding masked mandatory "
5248					       "rate %02X\n",
5249					       network->rates[i]);
5250				rates->supported_rates[rates->num_rates++] =
5251				    network->rates[i];
5252				continue;
5253			}
5254
5255			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5256				       network->rates[i], priv->rates_mask);
5257			continue;
5258		}
5259
5260		rates->supported_rates[rates->num_rates++] = network->rates[i];
5261	}
5262
5263	num_rates = min(network->rates_ex_len,
5264			(u8) (IPW_MAX_RATES - num_rates));
5265	for (i = 0; i < num_rates; i++) {
5266		if (!ipw_is_rate_in_mask(priv, network->mode,
5267					 network->rates_ex[i])) {
5268			if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5269				IPW_DEBUG_SCAN("Adding masked mandatory "
5270					       "rate %02X\n",
5271					       network->rates_ex[i]);
5272				rates->supported_rates[rates->num_rates++] =
5273				    network->rates_ex[i];
5274				continue;
5275			}
5276
5277			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5278				       network->rates_ex[i], priv->rates_mask);
5279			continue;
5280		}
5281
5282		rates->supported_rates[rates->num_rates++] =
5283		    network->rates_ex[i];
5284	}
5285
5286	return 1;
5287}
5288
5289static void ipw_copy_rates(struct ipw_supported_rates *dest,
5290				  const struct ipw_supported_rates *src)
5291{
5292	u8 i;
5293	for (i = 0; i < src->num_rates; i++)
5294		dest->supported_rates[i] = src->supported_rates[i];
5295	dest->num_rates = src->num_rates;
5296}
5297
5298/* TODO: Look at sniffed packets in the air to determine if the basic rate
5299 * mask should ever be used -- right now all callers to add the scan rates are
5300 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5301static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5302				   u8 modulation, u32 rate_mask)
5303{
5304	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5305	    IEEE80211_BASIC_RATE_MASK : 0;
5306
5307	if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5308		rates->supported_rates[rates->num_rates++] =
5309		    IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5310
5311	if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5312		rates->supported_rates[rates->num_rates++] =
5313		    IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5314
5315	if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5316		rates->supported_rates[rates->num_rates++] = basic_mask |
5317		    IEEE80211_CCK_RATE_5MB;
5318
5319	if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5320		rates->supported_rates[rates->num_rates++] = basic_mask |
5321		    IEEE80211_CCK_RATE_11MB;
5322}
5323
5324static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5325				    u8 modulation, u32 rate_mask)
5326{
5327	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5328	    IEEE80211_BASIC_RATE_MASK : 0;
5329
5330	if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5331		rates->supported_rates[rates->num_rates++] = basic_mask |
5332		    IEEE80211_OFDM_RATE_6MB;
5333
5334	if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5335		rates->supported_rates[rates->num_rates++] =
5336		    IEEE80211_OFDM_RATE_9MB;
5337
5338	if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5339		rates->supported_rates[rates->num_rates++] = basic_mask |
5340		    IEEE80211_OFDM_RATE_12MB;
5341
5342	if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5343		rates->supported_rates[rates->num_rates++] =
5344		    IEEE80211_OFDM_RATE_18MB;
5345
5346	if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5347		rates->supported_rates[rates->num_rates++] = basic_mask |
5348		    IEEE80211_OFDM_RATE_24MB;
5349
5350	if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5351		rates->supported_rates[rates->num_rates++] =
5352		    IEEE80211_OFDM_RATE_36MB;
5353
5354	if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5355		rates->supported_rates[rates->num_rates++] =
5356		    IEEE80211_OFDM_RATE_48MB;
5357
5358	if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5359		rates->supported_rates[rates->num_rates++] =
5360		    IEEE80211_OFDM_RATE_54MB;
5361}
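
/* 6, 12 and 24 Mb/s are the mandatory OFDM rates in 802.11a/g, which is
 * presumably why only those three are tagged with the basic-rate bit above
 * when OFDM modulation is in use. */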
5362
5363struct ipw_network_match {
5364	struct ieee80211_network *network;
5365	struct ipw_supported_rates rates;
5366};
5367
5368static int ipw_find_adhoc_network(struct ipw_priv *priv,
5369				  struct ipw_network_match *match,
5370				  struct ieee80211_network *network,
5371				  int roaming)
5372{
5373	struct ipw_supported_rates rates;
5374
5375	/* Verify that this network's capability is compatible with the
5376	 * current mode (AdHoc or Infrastructure) */
5377	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5378	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5379		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5380				"capability mismatch.\n",
5381				escape_essid(network->ssid, network->ssid_len),
5382				MAC_ARG(network->bssid));
5383		return 0;
5384	}
5385
5386	/* If we do not have an ESSID for this AP, we can not associate with
5387	 * it */
5388	if (network->flags & NETWORK_EMPTY_ESSID) {
5389		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5390				"because of hidden ESSID.\n",
5391				escape_essid(network->ssid, network->ssid_len),
5392				MAC_ARG(network->bssid));
5393		return 0;
5394	}
5395
5396	if (unlikely(roaming)) {
5397		/* If we are roaming, check whether this is a valid network
5398		 * to try to roam to */
5399		if ((network->ssid_len != match->network->ssid_len) ||
5400		    memcmp(network->ssid, match->network->ssid,
5401			   network->ssid_len)) {
5402			IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5403					"because of non-network ESSID.\n",
5404					escape_essid(network->ssid,
5405						     network->ssid_len),
5406					MAC_ARG(network->bssid));
5407			return 0;
5408		}
5409	} else {
5410		/* If an ESSID has been configured then compare the broadcast
5411		 * ESSID to ours */
5412		if ((priv->config & CFG_STATIC_ESSID) &&
5413		    ((network->ssid_len != priv->essid_len) ||
5414		     memcmp(network->ssid, priv->essid,
5415			    min(network->ssid_len, priv->essid_len)))) {
5416			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5417
5418			strncpy(escaped,
5419				escape_essid(network->ssid, network->ssid_len),
5420				sizeof(escaped));
5421			IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5422					"because of ESSID mismatch: '%s'.\n",
5423					escaped, MAC_ARG(network->bssid),
5424					escape_essid(priv->essid,
5425						     priv->essid_len));
5426			return 0;
5427		}
5428	}
5429
5430	/* Prefer the network with the newer TSF timestamp; if this candidate's
5431	 * TSF is behind the current match, don't bother testing anything else. */
5432
5433	if (network->time_stamp[0] < match->network->time_stamp[0]) {
5434		IPW_DEBUG_MERGE("Network '%s' excluded because its TSF is "
5435				"older than the current network.\n",
5436				escape_essid(match->network->ssid,
5437					     match->network->ssid_len));
5438		return 0;
5439	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5440		IPW_DEBUG_MERGE("Network '%s' excluded because its TSF is "
5441				"older than the current network.\n",
5442				escape_essid(match->network->ssid,
5443					     match->network->ssid_len));
5444		return 0;
5445	}
5446
5447	/* Now go through and see if the requested network is valid... */
5448	if (priv->ieee->scan_age != 0 &&
5449	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5450		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5451				"because of age: %ums.\n",
5452				escape_essid(network->ssid, network->ssid_len),
5453				MAC_ARG(network->bssid),
5454				jiffies_to_msecs(jiffies -
5455						 network->last_scanned));
5456		return 0;
5457	}
5458
5459	if ((priv->config & CFG_STATIC_CHANNEL) &&
5460	    (network->channel != priv->channel)) {
5461		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5462				"because of channel mismatch: %d != %d.\n",
5463				escape_essid(network->ssid, network->ssid_len),
5464				MAC_ARG(network->bssid),
5465				network->channel, priv->channel);
5466		return 0;
5467	}
5468
5469	/* Verify privacy compatibility */
5470	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5471	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5472		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5473				"because of privacy mismatch: %s != %s.\n",
5474				escape_essid(network->ssid, network->ssid_len),
5475				MAC_ARG(network->bssid),
5476				priv->
5477				capability & CAP_PRIVACY_ON ? "on" : "off",
5478				network->
5479				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5480				"off");
5481		return 0;
5482	}
5483
5484	if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5485		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5486				"because it has the same BSSID: " MAC_FMT
5487				".\n", escape_essid(network->ssid,
5488						    network->ssid_len),
5489				MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5490		return 0;
5491	}
5492
5493	/* Filter out any incompatible freq / mode combinations */
5494	if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5495		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5496				"because of invalid frequency/mode "
5497				"combination.\n",
5498				escape_essid(network->ssid, network->ssid_len),
5499				MAC_ARG(network->bssid));
5500		return 0;
5501	}
5502
5503	/* Ensure that the rates supported by the driver are compatible with
5504	 * this AP, including verification of basic rates (mandatory) */
5505	if (!ipw_compatible_rates(priv, network, &rates)) {
5506		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5507				"because configured rate mask excludes "
5508				"AP mandatory rate.\n",
5509				escape_essid(network->ssid, network->ssid_len),
5510				MAC_ARG(network->bssid));
5511		return 0;
5512	}
5513
5514	if (rates.num_rates == 0) {
5515		IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5516				"because of no compatible rates.\n",
5517				escape_essid(network->ssid, network->ssid_len),
5518				MAC_ARG(network->bssid));
5519		return 0;
5520	}
5521
5522	/* TODO: Perform any further minimal comparative tests.  We do not
5523	 * want to put too much policy logic here; intelligent scan selection
5524	 * should occur within a generic IEEE 802.11 user space tool.  */
5525
5526	/* Set up 'new' AP to this network */
5527	ipw_copy_rates(&match->rates, &rates);
5528	match->network = network;
5529	IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5530			escape_essid(network->ssid, network->ssid_len),
5531			MAC_ARG(network->bssid));
5532
5533	return 1;
5534}
5535
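/* Work handler for ad-hoc (IBSS) merging.  While associated in ad-hoc
 * mode, walk the scanned network list looking for a better IBSS to merge
 * to; if one is found, drop the current network, disassociate and record
 * the new target in assoc_network. */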
5536static void ipw_merge_adhoc_network(struct work_struct *work)
5537{
5538	struct ipw_priv *priv =
5539		container_of(work, struct ipw_priv, merge_networks);
5540	struct ieee80211_network *network = NULL;
5541	struct ipw_network_match match = {
5542		.network = priv->assoc_network
5543	};
5544
5545	if ((priv->status & STATUS_ASSOCIATED) &&
5546	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5547		/* First pass through ROAM process -- look for a better
5548		 * network */
5549		unsigned long flags;
5550
5551		spin_lock_irqsave(&priv->ieee->lock, flags);
5552		list_for_each_entry(network, &priv->ieee->network_list, list) {
5553			if (network != priv->assoc_network)
5554				ipw_find_adhoc_network(priv, &match, network,
5555						       1);
5556		}
5557		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5558
5559		if (match.network == priv->assoc_network) {
5560			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5561					"merge to.\n");
5562			return;
5563		}
5564
5565		mutex_lock(&priv->mutex);
5566		if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5567			IPW_DEBUG_MERGE("remove network %s\n",
5568					escape_essid(priv->essid,
5569						     priv->essid_len));
5570			ipw_remove_current_network(priv);
5571		}
5572
5573		ipw_disassociate(priv);
5574		priv->assoc_network = match.network;
5575		mutex_unlock(&priv->mutex);
5576		return;
5577	}
5578}
5579
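/* Evaluate 'network' as an association candidate against the current best
 * 'match'.  The candidate is rejected if it fails any of the capability,
 * ESSID, signal strength, association-storm, age, channel, privacy, BSSID,
 * band/mode or rate checks below; on success the match is updated and 1 is
 * returned. */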
5580static int ipw_best_network(struct ipw_priv *priv,
5581			    struct ipw_network_match *match,
5582			    struct ieee80211_network *network, int roaming)
5583{
5584	struct ipw_supported_rates rates;
5585
5586	/* Verify that this network's capability is compatible with the
5587	 * current mode (AdHoc or Infrastructure) */
5588	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5589	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5590	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5591	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5592		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5593				"capability mismatch.\n",
5594				escape_essid(network->ssid, network->ssid_len),
5595				MAC_ARG(network->bssid));
5596		return 0;
5597	}
5598
5599	/* If we do not have an ESSID for this AP, we can not associate with
5600	 * it */
5601	if (network->flags & NETWORK_EMPTY_ESSID) {
5602		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5603				"because of hidden ESSID.\n",
5604				escape_essid(network->ssid, network->ssid_len),
5605				MAC_ARG(network->bssid));
5606		return 0;
5607	}
5608
5609	if (unlikely(roaming)) {
5610		/* If we are roaming, then ensure this is a valid
5611		 * network to try to roam to */
5612		if ((network->ssid_len != match->network->ssid_len) ||
5613		    memcmp(network->ssid, match->network->ssid,
5614			   network->ssid_len)) {
5615			IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5616					"because of non-network ESSID.\n",
5617					escape_essid(network->ssid,
5618						     network->ssid_len),
5619					MAC_ARG(network->bssid));
5620			return 0;
5621		}
5622	} else {
5623		/* If an ESSID has been configured then compare the broadcast
5624		 * ESSID to ours */
5625		if ((priv->config & CFG_STATIC_ESSID) &&
5626		    ((network->ssid_len != priv->essid_len) ||
5627		     memcmp(network->ssid, priv->essid,
5628			    min(network->ssid_len, priv->essid_len)))) {
5629			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5630			strncpy(escaped,
5631				escape_essid(network->ssid, network->ssid_len),
5632				sizeof(escaped));
5633			IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5634					"because of ESSID mismatch: '%s'.\n",
5635					escaped, MAC_ARG(network->bssid),
5636					escape_essid(priv->essid,
5637						     priv->essid_len));
5638			return 0;
5639		}
5640	}
5641
5642	/* If the old network rate is better than this one, don't bother
5643	 * testing everything else. */
5644	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5645		char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5646		strncpy(escaped,
5647			escape_essid(network->ssid, network->ssid_len),
5648			sizeof(escaped));
5649		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5650				"'%s (" MAC_FMT ")' has a stronger signal.\n",
5651				escaped, MAC_ARG(network->bssid),
5652				escape_essid(match->network->ssid,
5653					     match->network->ssid_len),
5654				MAC_ARG(match->network->bssid));
5655		return 0;
5656	}
5657
5658	/* If this network has already had an association attempt within the
5659	 * last 3 seconds, do not try and associate again... */
5660	if (network->last_associate &&
5661	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5662		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5663				"because of storming (%ums since last "
5664				"assoc attempt).\n",
5665				escape_essid(network->ssid, network->ssid_len),
5666				MAC_ARG(network->bssid),
5667				jiffies_to_msecs(jiffies -
5668						 network->last_associate));
5669		return 0;
5670	}
5671
5672	/* Now go through and see if the requested network is valid... */
5673	if (priv->ieee->scan_age != 0 &&
5674	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5675		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5676				"because of age: %ums.\n",
5677				escape_essid(network->ssid, network->ssid_len),
5678				MAC_ARG(network->bssid),
5679				jiffies_to_msecs(jiffies -
5680						 network->last_scanned));
5681		return 0;
5682	}
5683
5684	if ((priv->config & CFG_STATIC_CHANNEL) &&
5685	    (network->channel != priv->channel)) {
5686		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5687				"because of channel mismatch: %d != %d.\n",
5688				escape_essid(network->ssid, network->ssid_len),
5689				MAC_ARG(network->bssid),
5690				network->channel, priv->channel);
5691		return 0;
5692	}
5693
5694	/* Verify privacy compatibility */
5695	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5696	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5697		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5698				"because of privacy mismatch: %s != %s.\n",
5699				escape_essid(network->ssid, network->ssid_len),
5700				MAC_ARG(network->bssid),
5701				priv->capability & CAP_PRIVACY_ON ? "on" :
5702				"off",
5703				network->capability &
5704				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5705		return 0;
5706	}
5707
5708	if ((priv->config & CFG_STATIC_BSSID) &&
5709	    memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5710		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5711				"because of BSSID mismatch: " MAC_FMT ".\n",
5712				escape_essid(network->ssid, network->ssid_len),
5713				MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5714		return 0;
5715	}
5716
5717	/* Filter out any incompatible freq / mode combinations */
5718	if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5719		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5720				"because of invalid frequency/mode "
5721				"combination.\n",
5722				escape_essid(network->ssid, network->ssid_len),
5723				MAC_ARG(network->bssid));
5724		return 0;
5725	}
5726
5727	/* Filter out invalid channel in current GEO */
5728	if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5729		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5730				"because of invalid channel in current GEO\n",
5731				escape_essid(network->ssid, network->ssid_len),
5732				MAC_ARG(network->bssid));
5733		return 0;
5734	}
5735
5736	/* Ensure that the rates supported by the driver are compatible with
5737	 * this AP, including verification of basic rates (mandatory) */
5738	if (!ipw_compatible_rates(priv, network, &rates)) {
5739		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5740				"because configured rate mask excludes "
5741				"AP mandatory rate.\n",
5742				escape_essid(network->ssid, network->ssid_len),
5743				MAC_ARG(network->bssid));
5744		return 0;
5745	}
5746
5747	if (rates.num_rates == 0) {
5748		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5749				"because of no compatible rates.\n",
5750				escape_essid(network->ssid, network->ssid_len),
5751				MAC_ARG(network->bssid));
5752		return 0;
5753	}
5754
5755	/* TODO: Perform any further minimal comparative tests.  We do not
5756	 * want to put too much policy logic here; intelligent scan selection
5757	 * should occur within a generic IEEE 802.11 user space tool.  */
5758
5759	/* Set up 'new' AP to this network */
5760	ipw_copy_rates(&match->rates, &rates);
5761	match->network = network;
5762
5763	IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5764			escape_essid(network->ssid, network->ssid_len),
5765			MAC_ARG(network->bssid));
5766
5767	return 1;
5768}
5769
5770static void ipw_adhoc_create(struct ipw_priv *priv,
5771			     struct ieee80211_network *network)
5772{
5773	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5774	int i;
5775
5776	/*
5777	 * For the purposes of scanning, we can set our wireless mode
5778	 * to trigger scans across combinations of bands, but when it
5779	 * comes to creating a new ad-hoc network, we have to tell the FW
5780	 * exactly which band to use.
5781	 *
5782	 * We also have the possibility of an invalid channel for the
5783	 * chosen band.  Attempting to create a new ad-hoc network
5784	 * with an invalid channel for the wireless mode will trigger a
5785	 * FW fatal error.
5786	 *
5787	 */
5788	switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5789	case IEEE80211_52GHZ_BAND:
5790		network->mode = IEEE_A;
5791		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5792		BUG_ON(i == -1);
5793		if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5794			IPW_WARNING("Overriding invalid channel\n");
5795			priv->channel = geo->a[0].channel;
5796		}
5797		break;
5798
5799	case IEEE80211_24GHZ_BAND:
5800		if (priv->ieee->mode & IEEE_G)
5801			network->mode = IEEE_G;
5802		else
5803			network->mode = IEEE_B;
5804		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5805		BUG_ON(i == -1);
5806		if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5807			IPW_WARNING("Overriding invalid channel\n");
5808			priv->channel = geo->bg[0].channel;
5809		}
5810		break;
5811
5812	default:
5813		IPW_WARNING("Overriding invalid channel\n");
5814		if (priv->ieee->mode & IEEE_A) {
5815			network->mode = IEEE_A;
5816			priv->channel = geo->a[0].channel;
5817		} else if (priv->ieee->mode & IEEE_G) {
5818			network->mode = IEEE_G;
5819			priv->channel = geo->bg[0].channel;
5820		} else {
5821			network->mode = IEEE_B;
5822			priv->channel = geo->bg[0].channel;
5823		}
5824		break;
5825	}
5826
5827	network->channel = priv->channel;
5828	priv->config |= CFG_ADHOC_PERSIST;
5829	ipw_create_bssid(priv, network->bssid);
5830	network->ssid_len = priv->essid_len;
5831	memcpy(network->ssid, priv->essid, priv->essid_len);
5832	memset(&network->stats, 0, sizeof(network->stats));
5833	network->capability = WLAN_CAPABILITY_IBSS;
5834	if (!(priv->config & CFG_PREAMBLE_LONG))
5835		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5836	if (priv->capability & CAP_PRIVACY_ON)
5837		network->capability |= WLAN_CAPABILITY_PRIVACY;
5838	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5839	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5840	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5841	memcpy(network->rates_ex,
5842	       &priv->rates.supported_rates[network->rates_len],
5843	       network->rates_ex_len);
5844	network->last_scanned = 0;
5845	network->flags = 0;
5846	network->last_associate = 0;
5847	network->time_stamp[0] = 0;
5848	network->time_stamp[1] = 0;
5849	network->beacon_interval = 100;	/* Default */
5850	network->listen_interval = 10;	/* Default */
5851	network->atim_window = 0;	/* Default */
5852	network->wpa_ie_len = 0;
5853	network->rsn_ie_len = 0;
5854}
5855
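/* Send an 802.11i (TGi) transmit key to the firmware for the given key
 * index, provided that key slot is actually configured.  'type' selects
 * the security type (CCM or TKIP) the key is used with. */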
5856static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5857{
5858	struct ipw_tgi_tx_key key;
5859
5860	if (!(priv->ieee->sec.flags & (1 << index)))
5861		return;
5862
5863	key.key_id = index;
5864	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5865	key.security_type = type;
5866	key.station_index = 0;	/* always 0 for BSS */
5867	key.flags = 0;
5868	/* 0 for new key; previous value of counter (after fatal error) */
5869	key.tx_counter[0] = cpu_to_le32(0);
5870	key.tx_counter[1] = cpu_to_le32(0);
5871
5872	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5873}
5874
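/* Push the configured WEP/group key slots (0-3) to the firmware.  Slots
 * without a configured key are skipped; 'type' is OR'd into the key index
 * to select the key's security type. */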
5875static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5876{
5877	struct ipw_wep_key key;
5878	int i;
5879
5880	key.cmd_id = DINO_CMD_WEP_KEY;
5881	key.seq_num = 0;
5882
5883	/* Note: AES keys cannot be set multiple times.
5884	 * Only set them the first time. */
5885	for (i = 0; i < 4; i++) {
5886		key.key_index = i | type;
5887		if (!(priv->ieee->sec.flags & (1 << i))) {
5888			key.key_size = 0;
5889			continue;
5890		}
5891
5892		key.key_size = priv->ieee->sec.key_sizes[i];
5893		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5894
5895		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5896	}
5897}
5898
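/* Select hardware vs. host decryption of unicast frames for the given
 * security level: CCMP (SEC_LEVEL_3) and WEP (SEC_LEVEL_1) are decrypted
 * in hardware, TKIP (SEC_LEVEL_2) on the host, and SEC_LEVEL_0 disables
 * hardware unicast decryption. */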
5899static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5900{
5901	if (priv->ieee->host_encrypt)
5902		return;
5903
5904	switch (level) {
5905	case SEC_LEVEL_3:
5906		priv->sys_config.disable_unicast_decryption = 0;
5907		priv->ieee->host_decrypt = 0;
5908		break;
5909	case SEC_LEVEL_2:
5910		priv->sys_config.disable_unicast_decryption = 1;
5911		priv->ieee->host_decrypt = 1;
5912		break;
5913	case SEC_LEVEL_1:
5914		priv->sys_config.disable_unicast_decryption = 0;
5915		priv->ieee->host_decrypt = 0;
5916		break;
5917	case SEC_LEVEL_0:
5918		priv->sys_config.disable_unicast_decryption = 1;
5919		break;
5920	default:
5921		break;
5922	}
5923}
5924
5925static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5926{
5927	if (priv->ieee->host_encrypt)
5928		return;
5929
5930	switch (level) {
5931	case SEC_LEVEL_3:
5932		priv->sys_config.disable_multicast_decryption = 0;
5933		break;
5934	case SEC_LEVEL_2:
5935		priv->sys_config.disable_multicast_decryption = 1;
5936		break;
5937	case SEC_LEVEL_1:
5938		priv->sys_config.disable_multicast_decryption = 0;
5939		break;
5940	case SEC_LEVEL_0:
5941		priv->sys_config.disable_multicast_decryption = 1;
5942		break;
5943	default:
5944		break;
5945	}
5946}
5947
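/* Program the firmware with the keys for the currently negotiated
 * security level: the active CCMP/TKIP transmit key and, where the
 * hardware handles it, the WEP/group keys. */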
5948static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
5949{
5950	switch (priv->ieee->sec.level) {
5951	case SEC_LEVEL_3:
5952		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5953			ipw_send_tgi_tx_key(priv,
5954					    DCT_FLAG_EXT_SECURITY_CCM,
5955					    priv->ieee->sec.active_key);
5956
5957		if (!priv->ieee->host_mc_decrypt)
5958			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
5959		break;
5960	case SEC_LEVEL_2:
5961		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
5962			ipw_send_tgi_tx_key(priv,
5963					    DCT_FLAG_EXT_SECURITY_TKIP,
5964					    priv->ieee->sec.active_key);
5965		break;
5966	case SEC_LEVEL_1:
5967		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
5968		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
5969		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
5970		break;
5971	case SEC_LEVEL_0:
5972	default:
5973		break;
5974	}
5975}
5976
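/* Periodic ad-hoc watchdog: count missed IBSS beacons and disassociate
 * once the threshold is exceeded, unless ad-hoc persistence is configured.
 * Otherwise re-arm one beacon interval later. */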
5977static void ipw_adhoc_check(void *data)
5978{
5979	struct ipw_priv *priv = data;
5980
5981	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
5982	    !(priv->config & CFG_ADHOC_PERSIST)) {
5983		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
5984			  IPW_DL_STATE | IPW_DL_ASSOC,
5985			  "Missed beacon: %d - disassociate\n",
5986			  priv->missed_adhoc_beacons);
5987		ipw_remove_current_network(priv);
5988		ipw_disassociate(priv);
5989		return;
5990	}
5991
5992	queue_delayed_work(priv->workqueue, &priv->adhoc_check,
5993			   priv->assoc_request.beacon_interval);
5994}
5995
5996static void ipw_bg_adhoc_check(struct work_struct *work)
5997{
5998	struct ipw_priv *priv =
5999		container_of(work, struct ipw_priv, adhoc_check.work);
6000	mutex_lock(&priv->mutex);
6001	ipw_adhoc_check(priv);
6002	mutex_unlock(&priv->mutex);
6003}
6004
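/* Dump the static configuration (channel, ESSID, BSSID, privacy and rate
 * mask) to the debug log after a scan that matched no valid APs. */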
6005static void ipw_debug_config(struct ipw_priv *priv)
6006{
6007	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6008		       "[CFG 0x%08X]\n", priv->config);
6009	if (priv->config & CFG_STATIC_CHANNEL)
6010		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6011	else
6012		IPW_DEBUG_INFO("Channel unlocked.\n");
6013	if (priv->config & CFG_STATIC_ESSID)
6014		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6015			       escape_essid(priv->essid, priv->essid_len));
6016	else
6017		IPW_DEBUG_INFO("ESSID unlocked.\n");
6018	if (priv->config & CFG_STATIC_BSSID)
6019		IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n",
6020			       MAC_ARG(priv->bssid));
6021	else
6022		IPW_DEBUG_INFO("BSSID unlocked.\n");
6023	if (priv->capability & CAP_PRIVACY_ON)
6024		IPW_DEBUG_INFO("PRIVACY on\n");
6025	else
6026		IPW_DEBUG_INFO("PRIVACY off\n");
6027	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6028}
6029
6030static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6031{
6032	/* TODO: Verify that this works... */
6033	struct ipw_fixed_rate fr = {
6034		.tx_rates = priv->rates_mask
6035	};
6036	u32 reg;
6037	u16 mask = 0;
6038
6039	/* Identify 'current FW band' and match it with the fixed
6040	 * Tx rates */
6041
6042	switch (priv->ieee->freq_band) {
6043	case IEEE80211_52GHZ_BAND:	/* A only */
6044		/* IEEE_A */
6045		if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6046			/* Invalid fixed rate mask */
6047			IPW_DEBUG_WX
6048			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6049			fr.tx_rates = 0;
6050			break;
6051		}
6052
6053		fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6054		break;
6055
6056	default:		/* 2.4Ghz or Mixed */
6057		/* IEEE_B */
6058		if (mode == IEEE_B) {
6059			if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6060				/* Invalid fixed rate mask */
6061				IPW_DEBUG_WX
6062				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6063				fr.tx_rates = 0;
6064			}
6065			break;
6066		}
6067
6068		/* IEEE_G */
6069		if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6070				    IEEE80211_OFDM_RATES_MASK)) {
6071			/* Invalid fixed rate mask */
6072			IPW_DEBUG_WX
6073			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6074			fr.tx_rates = 0;
6075			break;
6076		}
6077
6078		if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6079			mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6080			fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6081		}
6082
6083		if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6084			mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6085			fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6086		}
6087
6088		if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6089			mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6090			fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6091		}
6092
6093		fr.tx_rates |= mask;
6094		break;
6095	}
6096
6097	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6098	ipw_write_reg32(priv, reg, *(u32 *) &fr);
6099}
6100
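/* Ask the firmware to abort an in-progress scan, ignoring the request if
 * an abort is already pending. */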
6101static void ipw_abort_scan(struct ipw_priv *priv)
6102{
6103	int err;
6104
6105	if (priv->status & STATUS_SCAN_ABORTING) {
6106		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6107		return;
6108	}
6109	priv->status |= STATUS_SCAN_ABORTING;
6110
6111	err = ipw_send_scan_abort(priv);
6112	if (err)
6113		IPW_DEBUG_HC("Request to abort scan failed.\n");
6114}
6115
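/* Fill in the channel list of a scan request.  Each band occupies a
 * contiguous segment of channels_list[]: the segment's first byte encodes
 * the band (IPW_A_MODE/IPW_B_MODE in the upper bits) and the number of
 * channels that follow, and each channel gets an active or passive scan
 * type depending on its regulatory flags. */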
6116static void ipw_add_scan_channels(struct ipw_priv *priv,
6117				  struct ipw_scan_request_ext *scan,
6118				  int scan_type)
6119{
6120	int channel_index = 0;
6121	const struct ieee80211_geo *geo;
6122	int i;
6123
6124	geo = ieee80211_get_geo(priv->ieee);
6125
6126	if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6127		int start = channel_index;
6128		for (i = 0; i < geo->a_channels; i++) {
6129			if ((priv->status & STATUS_ASSOCIATED) &&
6130			    geo->a[i].channel == priv->channel)
6131				continue;
6132			channel_index++;
6133			scan->channels_list[channel_index] = geo->a[i].channel;
6134			ipw_set_scan_type(scan, channel_index,
6135					  geo->a[i].
6136					  flags & IEEE80211_CH_PASSIVE_ONLY ?
6137					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6138					  scan_type);
6139		}
6140
6141		if (start != channel_index) {
6142			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6143			    (channel_index - start);
6144			channel_index++;
6145		}
6146	}
6147
6148	if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6149		int start = channel_index;
6150		if (priv->config & CFG_SPEED_SCAN) {
6151			int index;
6152			u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6153				/* nop out the list */
6154				[0] = 0
6155			};
6156
6157			u8 channel;
6158			while (channel_index < IPW_SCAN_CHANNELS) {
6159				channel =
6160				    priv->speed_scan[priv->speed_scan_pos];
6161				if (channel == 0) {
6162					priv->speed_scan_pos = 0;
6163					channel = priv->speed_scan[0];
6164				}
6165				if ((priv->status & STATUS_ASSOCIATED) &&
6166				    channel == priv->channel) {
6167					priv->speed_scan_pos++;
6168					continue;
6169				}
6170
6171				/* If this channel has already been
6172				 * added in scan, break from loop
6173				 * and this will be the first channel
6174				 * in the next scan.
6175				 */
6176				if (channels[channel - 1] != 0)
6177					break;
6178
6179				channels[channel - 1] = 1;
6180				priv->speed_scan_pos++;
6181				channel_index++;
6182				scan->channels_list[channel_index] = channel;
6183				index =
6184				    ieee80211_channel_to_index(priv->ieee, channel);
6185				ipw_set_scan_type(scan, channel_index,
6186						  geo->bg[index].
6187						  flags &
6188						  IEEE80211_CH_PASSIVE_ONLY ?
6189						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6190						  : scan_type);
6191			}
6192		} else {
6193			for (i = 0; i < geo->bg_channels; i++) {
6194				if ((priv->status & STATUS_ASSOCIATED) &&
6195				    geo->bg[i].channel == priv->channel)
6196					continue;
6197				channel_index++;
6198				scan->channels_list[channel_index] =
6199				    geo->bg[i].channel;
6200				ipw_set_scan_type(scan, channel_index,
6201						  geo->bg[i].
6202						  flags &
6203						  IEEE80211_CH_PASSIVE_ONLY ?
6204						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6205						  : scan_type);
6206			}
6207		}
6208
6209		if (start != channel_index) {
6210			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6211			    (channel_index - start);
6212		}
6213	}
6214}
6215
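/* Build and send a scan request.  The request is deferred (and
 * STATUS_SCAN_PENDING set) if a scan is already running, an abort is
 * pending or RF-kill is active.  Passive scans use long dwell times;
 * monitor mode sits on a single channel; when roaming, or on every other
 * scan while unassociated with a static ESSID, the scan is directed at
 * the configured SSID. */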
6216static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
6217{
6218	struct ipw_scan_request_ext scan;
6219	int err = 0, scan_type;
6220
6221	if (!(priv->status & STATUS_INIT) ||
6222	    (priv->status & STATUS_EXIT_PENDING))
6223		return 0;
6224
6225	mutex_lock(&priv->mutex);
6226
6227	if (priv->status & STATUS_SCANNING) {
6228		IPW_DEBUG_HC("Concurrent scan requested.  Ignoring.\n");
6229		priv->status |= STATUS_SCAN_PENDING;
6230		goto done;
6231	}
6232
6233	if (!(priv->status & STATUS_SCAN_FORCED) &&
6234	    priv->status & STATUS_SCAN_ABORTING) {
6235		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6236		priv->status |= STATUS_SCAN_PENDING;
6237		goto done;
6238	}
6239
6240	if (priv->status & STATUS_RF_KILL_MASK) {
6241		IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6242		priv->status |= STATUS_SCAN_PENDING;
6243		goto done;
6244	}
6245
6246	memset(&scan, 0, sizeof(scan));
6247	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6248
6249	if (type == IW_SCAN_TYPE_PASSIVE) {
6250	  	IPW_DEBUG_WX("use passive scanning\n");
6251	  	scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6252		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6253			cpu_to_le16(120);
6254		ipw_add_scan_channels(priv, &scan, scan_type);
6255		goto send_request;
6256	}
6257
6258	/* Use active scan by default. */
6259  	if (priv->config & CFG_SPEED_SCAN)
6260		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6261			cpu_to_le16(30);
6262	else
6263		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6264			cpu_to_le16(20);
6265
6266	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6267		cpu_to_le16(20);
6268
6269  	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6270
6271#ifdef CONFIG_IPW2200_MONITOR
6272	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6273		u8 channel;
6274		u8 band = 0;
6275
6276		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6277		case IEEE80211_52GHZ_BAND:
6278			band = (u8) (IPW_A_MODE << 6) | 1;
6279			channel = priv->channel;
6280			break;
6281
6282		case IEEE80211_24GHZ_BAND:
6283			band = (u8) (IPW_B_MODE << 6) | 1;
6284			channel = priv->channel;
6285			break;
6286
6287		default:
6288			band = (u8) (IPW_B_MODE << 6) | 1;
6289			channel = 9;
6290			break;
6291		}
6292
6293		scan.channels_list[0] = band;
6294		scan.channels_list[1] = channel;
6295		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6296
6297		/* NOTE:  The card will sit on this channel for this time
6298		 * period.  Scan aborts are timing sensitive and frequently
6299		 * result in firmware restarts.  As such, it is best to
6300		 * set a small dwell_time here and just keep re-issuing
6301		 * scans.  Otherwise fast channel hopping will not actually
6302		 * hop channels.
6303		 *
6304		 * TODO: Move SPEED SCAN support to all modes and bands */
6305		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6306			cpu_to_le16(2000);
6307	} else {
6308#endif				/* CONFIG_IPW2200_MONITOR */
6309		/* If we are roaming, then make this a directed scan for the
6310		 * current network.  Otherwise, ensure that every other scan
6311		 * is a fast channel hop scan */
6312		if ((priv->status & STATUS_ROAMING)
6313		    || (!(priv->status & STATUS_ASSOCIATED)
6314			&& (priv->config & CFG_STATIC_ESSID)
6315			&& (le32_to_cpu(scan.full_scan_index) % 2))) {
6316			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6317			if (err) {
6318				IPW_DEBUG_HC("Attempt to send SSID command "
6319					     "failed.\n");
6320				goto done;
6321			}
6322
6323			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6324		} else
6325			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6326
6327		ipw_add_scan_channels(priv, &scan, scan_type);
6328#ifdef CONFIG_IPW2200_MONITOR
6329	}
6330#endif
6331
6332send_request:
6333	err = ipw_send_scan_request_ext(priv, &scan);
6334	if (err) {
6335		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6336		goto done;
6337	}
6338
6339	priv->status |= STATUS_SCANNING;
6340	priv->status &= ~STATUS_SCAN_PENDING;
6341	queue_delayed_work(priv->workqueue, &priv->scan_check,
6342			   IPW_SCAN_CHECK_WATCHDOG);
6343done:
6344	mutex_unlock(&priv->mutex);
6345	return err;
6346}
6347
6348static void ipw_request_passive_scan(struct work_struct *work)
6349{
6350	struct ipw_priv *priv =
6351		container_of(work, struct ipw_priv, request_passive_scan);
6352  	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6353}
6354
6355static void ipw_request_scan(struct work_struct *work)
6356{
6357	struct ipw_priv *priv =
6358		container_of(work, struct ipw_priv, request_scan.work);
6359	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6360}
6361
6362static void ipw_bg_abort_scan(struct work_struct *work)
6363{
6364	struct ipw_priv *priv =
6365		container_of(work, struct ipw_priv, abort_scan);
6366	mutex_lock(&priv->mutex);
6367	ipw_abort_scan(priv);
6368	mutex_unlock(&priv->mutex);
6369}
6370
6371static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6372{
6373	/* This is called when wpa_supplicant loads and closes the driver
6374	 * interface. */
6375	priv->ieee->wpa_enabled = value;
6376	return 0;
6377}
6378
6379static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6380{
6381	struct ieee80211_device *ieee = priv->ieee;
6382	struct ieee80211_security sec = {
6383		.flags = SEC_AUTH_MODE,
6384	};
6385	int ret = 0;
6386
6387	if (value & IW_AUTH_ALG_SHARED_KEY) {
6388		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6389		ieee->open_wep = 0;
6390	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6391		sec.auth_mode = WLAN_AUTH_OPEN;
6392		ieee->open_wep = 1;
6393	} else if (value & IW_AUTH_ALG_LEAP) {
6394		sec.auth_mode = WLAN_AUTH_LEAP;
6395		ieee->open_wep = 1;
6396	} else
6397		return -EINVAL;
6398
6399	if (ieee->set_security)
6400		ieee->set_security(ieee->dev, &sec);
6401	else
6402		ret = -EOPNOTSUPP;
6403
6404	return ret;
6405}
6406
6407static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6408				int wpa_ie_len)
6409{
6410	/* make sure WPA is enabled */
6411	ipw_wpa_enable(priv, 1);
6412}
6413
6414static int ipw_set_rsn_capa(struct ipw_priv *priv,
6415			    char *capabilities, int length)
6416{
6417	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6418
6419	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6420				capabilities);
6421}
6422
6423/*
6424 * WE-18 support
6425 */
6426
6427/* SIOCSIWGENIE */
6428static int ipw_wx_set_genie(struct net_device *dev,
6429			    struct iw_request_info *info,
6430			    union iwreq_data *wrqu, char *extra)
6431{
6432	struct ipw_priv *priv = ieee80211_priv(dev);
6433	struct ieee80211_device *ieee = priv->ieee;
6434	u8 *buf;
6435	int err = 0;
6436
6437	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6438	    (wrqu->data.length && extra == NULL))
6439		return -EINVAL;
6440
6441	if (wrqu->data.length) {
6442		buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6443		if (buf == NULL) {
6444			err = -ENOMEM;
6445			goto out;
6446		}
6447
6448		memcpy(buf, extra, wrqu->data.length);
6449		kfree(ieee->wpa_ie);
6450		ieee->wpa_ie = buf;
6451		ieee->wpa_ie_len = wrqu->data.length;
6452	} else {
6453		kfree(ieee->wpa_ie);
6454		ieee->wpa_ie = NULL;
6455		ieee->wpa_ie_len = 0;
6456	}
6457
6458	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6459      out:
6460	return err;
6461}
6462
6463/* SIOCGIWGENIE */
6464static int ipw_wx_get_genie(struct net_device *dev,
6465			    struct iw_request_info *info,
6466			    union iwreq_data *wrqu, char *extra)
6467{
6468	struct ipw_priv *priv = ieee80211_priv(dev);
6469	struct ieee80211_device *ieee = priv->ieee;
6470	int err = 0;
6471
6472	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6473		wrqu->data.length = 0;
6474		goto out;
6475	}
6476
6477	if (wrqu->data.length < ieee->wpa_ie_len) {
6478		err = -E2BIG;
6479		goto out;
6480	}
6481
6482	wrqu->data.length = ieee->wpa_ie_len;
6483	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6484
6485      out:
6486	return err;
6487}
6488
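/* Map a WEXT cipher selection to the driver's SEC_LEVEL_* value, or -1 if
 * the cipher is not recognized. */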
6489static int wext_cipher2level(int cipher)
6490{
6491	switch (cipher) {
6492	case IW_AUTH_CIPHER_NONE:
6493		return SEC_LEVEL_0;
6494	case IW_AUTH_CIPHER_WEP40:
6495	case IW_AUTH_CIPHER_WEP104:
6496		return SEC_LEVEL_1;
6497	case IW_AUTH_CIPHER_TKIP:
6498		return SEC_LEVEL_2;
6499	case IW_AUTH_CIPHER_CCMP:
6500		return SEC_LEVEL_3;
6501	default:
6502		return -1;
6503	}
6504}
6505
6506/* SIOCSIWAUTH */
6507static int ipw_wx_set_auth(struct net_device *dev,
6508			   struct iw_request_info *info,
6509			   union iwreq_data *wrqu, char *extra)
6510{
6511	struct ipw_priv *priv = ieee80211_priv(dev);
6512	struct ieee80211_device *ieee = priv->ieee;
6513	struct iw_param *param = &wrqu->param;
6514	struct ieee80211_crypt_data *crypt;
6515	unsigned long flags;
6516	int ret = 0;
6517
6518	switch (param->flags & IW_AUTH_INDEX) {
6519	case IW_AUTH_WPA_VERSION:
6520		break;
6521	case IW_AUTH_CIPHER_PAIRWISE:
6522		ipw_set_hw_decrypt_unicast(priv,
6523					   wext_cipher2level(param->value));
6524		break;
6525	case IW_AUTH_CIPHER_GROUP:
6526		ipw_set_hw_decrypt_multicast(priv,
6527					     wext_cipher2level(param->value));
6528		break;
6529	case IW_AUTH_KEY_MGMT:
6530		/*
6531		 * ipw2200 does not use these parameters
6532		 */
6533		break;
6534
6535	case IW_AUTH_TKIP_COUNTERMEASURES:
6536		crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6537		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6538			break;
6539
6540		flags = crypt->ops->get_flags(crypt->priv);
6541
6542		if (param->value)
6543			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6544		else
6545			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6546
6547		crypt->ops->set_flags(flags, crypt->priv);
6548
6549		break;
6550
6551	case IW_AUTH_DROP_UNENCRYPTED:{
6552			/* HACK:
6553			 *
6554			 * wpa_supplicant calls set_wpa_enabled when the driver
6555			 * is loaded and unloaded, regardless of whether WPA is
6556			 * being used.  No other calls are made which can be used
6557			 * to determine whether encryption will be used before
6558			 * association is expected.  If encryption is not being
6559			 * used, drop_unencrypted is set to false, else true -- we
6560			 * can use this to determine if the CAP_PRIVACY_ON bit should
6561			 * be set.
6562			 */
6563			struct ieee80211_security sec = {
6564				.flags = SEC_ENABLED,
6565				.enabled = param->value,
6566			};
6567			priv->ieee->drop_unencrypted = param->value;
6568			/* We only change SEC_LEVEL for open mode. Others
6569			 * are set by ipw_wpa_set_encryption.
6570			 */
6571			if (!param->value) {
6572				sec.flags |= SEC_LEVEL;
6573				sec.level = SEC_LEVEL_0;
6574			} else {
6575				sec.flags |= SEC_LEVEL;
6576				sec.level = SEC_LEVEL_1;
6577			}
6578			if (priv->ieee->set_security)
6579				priv->ieee->set_security(priv->ieee->dev, &sec);
6580			break;
6581		}
6582
6583	case IW_AUTH_80211_AUTH_ALG:
6584		ret = ipw_wpa_set_auth_algs(priv, param->value);
6585		break;
6586
6587	case IW_AUTH_WPA_ENABLED:
6588		ret = ipw_wpa_enable(priv, param->value);
6589		ipw_disassociate(priv);
6590		break;
6591
6592	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6593		ieee->ieee802_1x = param->value;
6594		break;
6595
6596	case IW_AUTH_PRIVACY_INVOKED:
6597		ieee->privacy_invoked = param->value;
6598		break;
6599
6600	default:
6601		return -EOPNOTSUPP;
6602	}
6603	return ret;
6604}
6605
6606/* SIOCGIWAUTH */
6607static int ipw_wx_get_auth(struct net_device *dev,
6608			   struct iw_request_info *info,
6609			   union iwreq_data *wrqu, char *extra)
6610{
6611	struct ipw_priv *priv = ieee80211_priv(dev);
6612	struct ieee80211_device *ieee = priv->ieee;
6613	struct ieee80211_crypt_data *crypt;
6614	struct iw_param *param = &wrqu->param;
6615	int ret = 0;
6616
6617	switch (param->flags & IW_AUTH_INDEX) {
6618	case IW_AUTH_WPA_VERSION:
6619	case IW_AUTH_CIPHER_PAIRWISE:
6620	case IW_AUTH_CIPHER_GROUP:
6621	case IW_AUTH_KEY_MGMT:
6622		/*
6623		 * wpa_supplicant will control these internally
6624		 */
6625		ret = -EOPNOTSUPP;
6626		break;
6627
6628	case IW_AUTH_TKIP_COUNTERMEASURES:
6629		crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
6630		if (!crypt || !crypt->ops->get_flags)
6631			break;
6632
6633		param->value = (crypt->ops->get_flags(crypt->priv) &
6634				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6635
6636		break;
6637
6638	case IW_AUTH_DROP_UNENCRYPTED:
6639		param->value = ieee->drop_unencrypted;
6640		break;
6641
6642	case IW_AUTH_80211_AUTH_ALG:
6643		param->value = ieee->sec.auth_mode;
6644		break;
6645
6646	case IW_AUTH_WPA_ENABLED:
6647		param->value = ieee->wpa_enabled;
6648		break;
6649
6650	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6651		param->value = ieee->ieee802_1x;
6652		break;
6653
6654	case IW_AUTH_ROAMING_CONTROL:
6655	case IW_AUTH_PRIVACY_INVOKED:
6656		param->value = ieee->privacy_invoked;
6657		break;
6658
6659	default:
6660		return -EOPNOTSUPP;
6661	}
6662	return 0;
6663}
6664
6665/* SIOCSIWENCODEEXT */
6666static int ipw_wx_set_encodeext(struct net_device *dev,
6667				struct iw_request_info *info,
6668				union iwreq_data *wrqu, char *extra)
6669{
6670	struct ipw_priv *priv = ieee80211_priv(dev);
6671	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6672
6673	if (hwcrypto) {
6674		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6675			/* The IPW HW cannot build the TKIP MIC, so host
6676			   handling is still needed */
6677			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6678				priv->ieee->host_mc_decrypt = 1;
6679			else {
6680				priv->ieee->host_encrypt = 0;
6681				priv->ieee->host_encrypt_msdu = 1;
6682				priv->ieee->host_decrypt = 1;
6683			}
6684		} else {
6685			priv->ieee->host_encrypt = 0;
6686			priv->ieee->host_encrypt_msdu = 0;
6687			priv->ieee->host_decrypt = 0;
6688			priv->ieee->host_mc_decrypt = 0;
6689		}
6690	}
6691
6692	return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6693}
6694
6695/* SIOCGIWENCODEEXT */
6696static int ipw_wx_get_encodeext(struct net_device *dev,
6697				struct iw_request_info *info,
6698				union iwreq_data *wrqu, char *extra)
6699{
6700	struct ipw_priv *priv = ieee80211_priv(dev);
6701	return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6702}
6703
6704/* SIOCSIWMLME */
6705static int ipw_wx_set_mlme(struct net_device *dev,
6706			   struct iw_request_info *info,
6707			   union iwreq_data *wrqu, char *extra)
6708{
6709	struct ipw_priv *priv = ieee80211_priv(dev);
6710	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6711	u16 reason;
6712
6713	reason = cpu_to_le16(mlme->reason_code);
6714
6715	switch (mlme->cmd) {
6716	case IW_MLME_DEAUTH:
6717		/* silently ignore */
6718		break;
6719
6720	case IW_MLME_DISASSOC:
6721		ipw_disassociate(priv);
6722		break;
6723
6724	default:
6725		return -EOPNOTSUPP;
6726	}
6727	return 0;
6728}
6729
6730#ifdef CONFIG_IPW2200_QOS
6731
6732/* QoS */
6733/*
6734* Get the modulation type of the current network or
6735* the card's current mode.
6736*/
6737static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6738{
6739	u8 mode = 0;
6740
6741	if (priv->status & STATUS_ASSOCIATED) {
6742		unsigned long flags;
6743
6744		spin_lock_irqsave(&priv->ieee->lock, flags);
6745		mode = priv->assoc_network->mode;
6746		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6747	} else {
6748		mode = priv->ieee->mode;
6749	}
6750	IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6751	return mode;
6752}
6753
6754/*
6755* Handle beacon and probe response management frames.
6756*/
6757static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6758					 int active_network,
6759					 struct ieee80211_network *network)
6760{
6761	u32 size = sizeof(struct ieee80211_qos_parameters);
6762
6763	if (network->capability & WLAN_CAPABILITY_IBSS)
6764		network->qos_data.active = network->qos_data.supported;
6765
6766	if (network->flags & NETWORK_HAS_QOS_MASK) {
6767		if (active_network &&
6768		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6769			network->qos_data.active = network->qos_data.supported;
6770
6771		if ((network->qos_data.active == 1) && (active_network == 1) &&
6772		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6773		    (network->qos_data.old_param_count !=
6774		     network->qos_data.param_count)) {
6775			network->qos_data.old_param_count =
6776			    network->qos_data.param_count;
6777			schedule_work(&priv->qos_activate);
6778			IPW_DEBUG_QOS("QoS parameters changed; calling "
6779				      "qos_activate\n");
6780		}
6781	} else {
6782		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6783			memcpy(&network->qos_data.parameters,
6784			       &def_parameters_CCK, size);
6785		else
6786			memcpy(&network->qos_data.parameters,
6787			       &def_parameters_OFDM, size);
6788
6789		if ((network->qos_data.active == 1) && (active_network == 1)) {
6790			IPW_DEBUG_QOS("QoS was disabled; calling qos_activate\n");
6791			schedule_work(&priv->qos_activate);
6792		}
6793
6794		network->qos_data.active = 0;
6795		network->qos_data.supported = 0;
6796	}
6797	if ((priv->status & STATUS_ASSOCIATED) &&
6798	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6799		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6800			if ((network->capability & WLAN_CAPABILITY_IBSS) &&
6801			    !(network->flags & NETWORK_EMPTY_ESSID))
6802				if ((network->ssid_len ==
6803				     priv->assoc_network->ssid_len) &&
6804				    !memcmp(network->ssid,
6805					    priv->assoc_network->ssid,
6806					    network->ssid_len)) {
6807					queue_work(priv->workqueue,
6808						   &priv->merge_networks);
6809				}
6810	}
6811
6812	return 0;
6813}
6814
6815/*
6816* This function sets up the firmware to support QoS by sending
6817* IPW_CMD_QOS_PARAMETERS.
6818*/
6819static int ipw_qos_activate(struct ipw_priv *priv,
6820			    struct ieee80211_qos_data *qos_network_data)
6821{
6822	int err;
6823	struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6824	struct ieee80211_qos_parameters *active_one = NULL;
6825	u32 size = sizeof(struct ieee80211_qos_parameters);
6826	u32 burst_duration;
6827	int i;
6828	u8 type;
6829
6830	type = ipw_qos_current_mode(priv);
6831
6832	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6833	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6834	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6835	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6836
6837	if (qos_network_data == NULL) {
6838		if (type == IEEE_B) {
6839			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6840			active_one = &def_parameters_CCK;
6841		} else
6842			active_one = &def_parameters_OFDM;
6843
6844		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6845		burst_duration = ipw_qos_get_burst_duration(priv);
6846		for (i = 0; i < QOS_QUEUE_NUM; i++)
6847			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6848			    (u16)burst_duration;
6849	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6850		if (type == IEEE_B) {
6851			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6852				      type);
6853			if (priv->qos_data.qos_enable == 0)
6854				active_one = &def_parameters_CCK;
6855			else
6856				active_one = priv->qos_data.def_qos_parm_CCK;
6857		} else {
6858			if (priv->qos_data.qos_enable == 0)
6859				active_one = &def_parameters_OFDM;
6860			else
6861				active_one = priv->qos_data.def_qos_parm_OFDM;
6862		}
6863		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6864	} else {
6865		unsigned long flags;
6866		int active;
6867
6868		spin_lock_irqsave(&priv->ieee->lock, flags);
6869		active_one = &(qos_network_data->parameters);
6870		qos_network_data->old_param_count =
6871		    qos_network_data->param_count;
6872		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6873		active = qos_network_data->supported;
6874		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6875
6876		if (active == 0) {
6877			burst_duration = ipw_qos_get_burst_duration(priv);
6878			for (i = 0; i < QOS_QUEUE_NUM; i++)
6879				qos_parameters[QOS_PARAM_SET_ACTIVE].
6880				    tx_op_limit[i] = (u16)burst_duration;
6881		}
6882	}
6883
6884	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6885	for (i = 0; i < 3; i++) {
6886		int j;
6887		for (j = 0; j < QOS_QUEUE_NUM; j++) {
6888			qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
6889			qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
6890			qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
6891		}
6892	}
6893
6894	err = ipw_send_qos_params_command(priv,
6895					  (struct ieee80211_qos_parameters *)
6896					  &(qos_parameters[0]));
6897	if (err)
6898		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6899
6900	return err;
6901}
6902
6903/*
6904* send IPW_CMD_WME_INFO to the firmware
6905*/
6906static int ipw_qos_set_info_element(struct ipw_priv *priv)
6907{
6908	int ret = 0;
6909	struct ieee80211_qos_information_element qos_info;
6910
6911	if (priv == NULL)
6912		return -1;
6913
6914	qos_info.elementID = QOS_ELEMENT_ID;
6915	qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6916
6917	qos_info.version = QOS_VERSION_1;
6918	qos_info.ac_info = 0;
6919
6920	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
6921	qos_info.qui_type = QOS_OUI_TYPE;
6922	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
6923
6924	ret = ipw_send_qos_info_command(priv, &qos_info);
6925	if (ret != 0) {
6926		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
6927	}
6928	return ret;
6929}
6930
6931/*
6932* Set the QoS parameters for the association request.
6933*/
6934static int ipw_qos_association(struct ipw_priv *priv,
6935			       struct ieee80211_network *network)
6936{
6937	int err = 0;
6938	struct ieee80211_qos_data *qos_data = NULL;
6939	struct ieee80211_qos_data ibss_data = {
6940		.supported = 1,
6941		.active = 1,
6942	};
6943
6944	switch (priv->ieee->iw_mode) {
6945	case IW_MODE_ADHOC:
6946		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
6947
6948		qos_data = &ibss_data;
6949		break;
6950
6951	case IW_MODE_INFRA:
6952		qos_data = &network->qos_data;
6953		break;
6954
6955	default:
6956		BUG();
6957		break;
6958	}
6959
6960	err = ipw_qos_activate(priv, qos_data);
6961	if (err) {
6962		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
6963		return err;
6964	}
6965
6966	if (priv->qos_data.qos_enable && qos_data->supported) {
6967		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
6968		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
6969		return ipw_qos_set_info_element(priv);
6970	}
6971
6972	return 0;
6973}
6974
6975/*
6976* Handle the association response.  If the network's QoS settings
6977* differ from the currently associated settings, adjust the QoS
6978* configuration.
6979*/
6980static int ipw_qos_association_resp(struct ipw_priv *priv,
6981				    struct ieee80211_network *network)
6982{
6983	int ret = 0;
6984	unsigned long flags;
6985	u32 size = sizeof(struct ieee80211_qos_parameters);
6986	int set_qos_param = 0;
6987
6988	if ((priv == NULL) || (network == NULL) ||
6989	    (priv->assoc_network == NULL))
6990		return ret;
6991
6992	if (!(priv->status & STATUS_ASSOCIATED))
6993		return ret;
6994
6995	if (priv->ieee->iw_mode != IW_MODE_INFRA)
6996		return ret;
6997
6998	spin_lock_irqsave(&priv->ieee->lock, flags);
6999	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7000		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7001		       sizeof(struct ieee80211_qos_data));
7002		priv->assoc_network->qos_data.active = 1;
7003		if ((network->qos_data.old_param_count !=
7004		     network->qos_data.param_count)) {
7005			set_qos_param = 1;
7006			network->qos_data.old_param_count =
7007			    network->qos_data.param_count;
7008		}
7009
7010	} else {
7011		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7012			memcpy(&priv->assoc_network->qos_data.parameters,
7013			       &def_parameters_CCK, size);
7014		else
7015			memcpy(&priv->assoc_network->qos_data.parameters,
7016			       &def_parameters_OFDM, size);
7017		priv->assoc_network->qos_data.active = 0;
7018		priv->assoc_network->qos_data.supported = 0;
7019		set_qos_param = 1;
7020	}
7021
7022	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7023
7024	if (set_qos_param == 1)
7025		schedule_work(&priv->qos_activate);
7026
7027	return ret;
7028}
7029
7030static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7031{
7032	u32 ret = 0;
7033
7034	if (priv == NULL)
7035		return 0;
7036
7037	if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7038		ret = priv->qos_data.burst_duration_CCK;
7039	else
7040		ret = priv->qos_data.burst_duration_OFDM;
7041
7042	return ret;
7043}
7044
7045/*
7046* Initialize the global QoS settings.
7047*/
7048static void ipw_qos_init(struct ipw_priv *priv, int enable,
7049			 int burst_enable, u32 burst_duration_CCK,
7050			 u32 burst_duration_OFDM)
7051{
7052	priv->qos_data.qos_enable = enable;
7053
7054	if (priv->qos_data.qos_enable) {
7055		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7056		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7057		IPW_DEBUG_QOS("QoS is enabled\n");
7058	} else {
7059		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7060		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7061		IPW_DEBUG_QOS("QoS is not enabled\n");
7062	}
7063
7064	priv->qos_data.burst_enable = burst_enable;
7065
7066	if (burst_enable) {
7067		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7068		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7069	} else {
7070		priv->qos_data.burst_duration_CCK = 0;
7071		priv->qos_data.burst_duration_OFDM = 0;
7072	}
7073}
7074
7075/*
7076* map the packet priority to the right TX Queue
7077*/
7078static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7079{
7080	if (priority > 7 || !priv->qos_data.qos_enable)
7081		priority = 0;
7082
7083	return from_priority_to_tx_queue[priority] - 1;
7084}
7085
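/* Decide whether QoS should be applied to this frame: requires an active
 * association, a network that advertises QoS (for ad-hoc, unicast frames
 * only) and QoS enabled in the driver configuration. */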
7086static int ipw_is_qos_active(struct net_device *dev,
7087			     struct sk_buff *skb)
7088{
7089	struct ipw_priv *priv = ieee80211_priv(dev);
7090	struct ieee80211_qos_data *qos_data = NULL;
7091	int active, supported;
7092	u8 *daddr = skb->data + ETH_ALEN;
7093	int unicast = !is_multicast_ether_addr(daddr);
7094
7095	if (!(priv->status & STATUS_ASSOCIATED))
7096		return 0;
7097
7098	qos_data = &priv->assoc_network->qos_data;
7099
7100	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7101		if (unicast == 0)
7102			qos_data->active = 0;
7103		else
7104			qos_data->active = qos_data->supported;
7105	}
7106	active = qos_data->active;
7107	supported = qos_data->supported;
7108	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7109		      "unicast %d\n",
7110		      priv->qos_data.qos_enable, active, supported, unicast);
7111	if (active && priv->qos_data.qos_enable)
7112		return 1;
7113
7114	return 0;
7115
7116}
7117/*
7118* add QoS parameter to the TX command
7119*/
7120static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7121					u16 priority,
7122					struct tfd_data *tfd)
7123{
7124	int tx_queue_id = 0;
7125
7126
7127	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7128	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7129
7130	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7131		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7132		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7133	}
7134	return 0;
7135}
7136
7137/*
7138* Background work handler to run the QoS activate functionality.
7139*/
7140static void ipw_bg_qos_activate(struct work_struct *work)
7141{
7142	struct ipw_priv *priv =
7143		container_of(work, struct ipw_priv, qos_activate);
7144
7145	if (priv == NULL)
7146		return;
7147
7148	mutex_lock(&priv->mutex);
7149
7150	if (priv->status & STATUS_ASSOCIATED)
7151		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7152
7153	mutex_unlock(&priv->mutex);
7154}
7155
7156static int ipw_handle_probe_response(struct net_device *dev,
7157				     struct ieee80211_probe_response *resp,
7158				     struct ieee80211_network *network)
7159{
7160	struct ipw_priv *priv = ieee80211_priv(dev);
7161	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7162			      (network == priv->assoc_network));
7163
7164	ipw_qos_handle_probe_response(priv, active_network, network);
7165
7166	return 0;
7167}
7168
7169static int ipw_handle_beacon(struct net_device *dev,
7170			     struct ieee80211_beacon *resp,
7171			     struct ieee80211_network *network)
7172{
7173	struct ipw_priv *priv = ieee80211_priv(dev);
7174	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7175			      (network == priv->assoc_network));
7176
7177	ipw_qos_handle_probe_response(priv, active_network, network);
7178
7179	return 0;
7180}
7181
7182static int ipw_handle_assoc_response(struct net_device *dev,
7183				     struct ieee80211_assoc_response *resp,
7184				     struct ieee80211_network *network)
7185{
7186	struct ipw_priv *priv = ieee80211_priv(dev);
7187	ipw_qos_association_resp(priv, network);
7188	return 0;
7189}
7190
7191static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7192				       *qos_param)
7193{
7194	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7195				sizeof(*qos_param) * 3, qos_param);
7196}
7197
7198static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7199				     *qos_param)
7200{
7201	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7202				qos_param);
7203}
7204
7205#endif				/* CONFIG_IPW2200_QOS */
7206
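/* Carry out an association (or reassociation) to 'network': build
 * priv->assoc_request from the network parameters, then send the SSID,
 * supported rates, system configuration and sensitivity commands before
 * issuing the associate command itself. */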
7207static int ipw_associate_network(struct ipw_priv *priv,
7208				 struct ieee80211_network *network,
7209				 struct ipw_supported_rates *rates, int roaming)
7210{
7211	int err;
7212
7213	if (priv->config & CFG_FIXED_RATE)
7214		ipw_set_fixed_rate(priv, network->mode);
7215
7216	if (!(priv->config & CFG_STATIC_ESSID)) {
7217		priv->essid_len = min(network->ssid_len,
7218				      (u8) IW_ESSID_MAX_SIZE);
7219		memcpy(priv->essid, network->ssid, priv->essid_len);
7220	}
7221
7222	network->last_associate = jiffies;
7223
7224	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7225	priv->assoc_request.channel = network->channel;
7226	priv->assoc_request.auth_key = 0;
7227
7228	if ((priv->capability & CAP_PRIVACY_ON) &&
7229	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7230		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7231		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7232
7233		if (priv->ieee->sec.level == SEC_LEVEL_1)
7234			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7235
7236	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7237		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7238		priv->assoc_request.auth_type = AUTH_LEAP;
7239	else
7240		priv->assoc_request.auth_type = AUTH_OPEN;
7241
7242	if (priv->ieee->wpa_ie_len) {
7243		priv->assoc_request.policy_support = 0x02;	/* RSN active */
7244		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7245				 priv->ieee->wpa_ie_len);
7246	}
7247
7248	/*
7249	 * It is valid for our ieee device to support multiple modes, but
7250	 * when it comes to associating to a given network we have to choose
7251	 * just one mode.
7252	 */
7253	if (network->mode & priv->ieee->mode & IEEE_A)
7254		priv->assoc_request.ieee_mode = IPW_A_MODE;
7255	else if (network->mode & priv->ieee->mode & IEEE_G)
7256		priv->assoc_request.ieee_mode = IPW_G_MODE;
7257	else if (network->mode & priv->ieee->mode & IEEE_B)
7258		priv->assoc_request.ieee_mode = IPW_B_MODE;
7259
7260	priv->assoc_request.capability = network->capability;
7261	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7262	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7263		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7264	} else {
7265		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7266
7267		/* Clear the short preamble if we won't be supporting it */
7268		priv->assoc_request.capability &=
7269		    ~WLAN_CAPABILITY_SHORT_PREAMBLE;
7270	}
7271
7272	/* Clear capability bits that aren't used in Ad Hoc */
7273	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7274		priv->assoc_request.capability &=
7275		    ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
7276
7277	IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7278			"802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7279			roaming ? "Rea" : "A",
7280			escape_essid(priv->essid, priv->essid_len),
7281			network->channel,
7282			ipw_modes[priv->assoc_request.ieee_mode],
7283			rates->num_rates,
7284			(priv->assoc_request.preamble_length ==
7285			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7286			network->capability &
7287			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7288			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7289			priv->capability & CAP_PRIVACY_ON ?
7290			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7291			 "(open)") : "",
7292			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7293			priv->capability & CAP_PRIVACY_ON ?
7294			'1' + priv->ieee->sec.active_key : '.',
7295			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7296
7297	priv->assoc_request.beacon_interval = network->beacon_interval;
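	/* If we are in Ad-Hoc mode and the network's TSF timestamp is zero,
	 * start a new IBSS; otherwise (re)associate and seed the firmware
	 * with the network's TSF. */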
7298	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7299	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7300		priv->assoc_request.assoc_type = HC_IBSS_START;
7301		priv->assoc_request.assoc_tsf_msw = 0;
7302		priv->assoc_request.assoc_tsf_lsw = 0;
7303	} else {
7304		if (unlikely(roaming))
7305			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7306		else
7307			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7308		priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
7309		priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
7310	}
7311
7312	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7313
7314	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7315		memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7316		priv->assoc_request.atim_window = network->atim_window;
7317	} else {
7318		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7319		priv->assoc_request.atim_window = 0;
7320	}
7321
7322	priv->assoc_request.listen_interval = network->listen_interval;
7323
7324	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7325	if (err) {
7326		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7327		return err;
7328	}
7329
7330	rates->ieee_mode = priv->assoc_request.ieee_mode;
7331	rates->purpose = IPW_RATE_CONNECT;
7332	ipw_send_supported_rates(priv, rates);
7333
7334	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7335		priv->sys_config.dot11g_auto_detection = 1;
7336	else
7337		priv->sys_config.dot11g_auto_detection = 0;
7338
7339	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7340		priv->sys_config.answer_broadcast_ssid_probe = 1;
7341	else
7342		priv->sys_config.answer_broadcast_ssid_probe = 0;
7343
7344	err = ipw_send_system_config(priv);
7345	if (err) {
7346		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7347		return err;
7348	}
7349
7350	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7351	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7352	if (err) {
7353		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7354		return err;
7355	}
7356
7357	/*
7358	 * If preemption is enabled, it is possible for the association
7359	 * to complete before we return from ipw_send_associate.  Therefore
	 * we have to be sure to update our private data first.
7361	 */
7362	priv->channel = network->channel;
7363	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7364	priv->status |= STATUS_ASSOCIATING;
7365	priv->status &= ~STATUS_SECURITY_UPDATED;
7366
7367	priv->assoc_network = network;
7368
7369#ifdef CONFIG_IPW2200_QOS
7370	ipw_qos_association(priv, network);
7371#endif
7372
7373	err = ipw_send_associate(priv, &priv->assoc_request);
7374	if (err) {
7375		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7376		return err;
7377	}
7378
7379	IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
7380		  escape_essid(priv->essid, priv->essid_len),
7381		  MAC_ARG(priv->bssid));
7382
7383	return 0;
7384}
7385
7386static void ipw_roam(void *data)
7387{
7388	struct ipw_priv *priv = data;
7389	struct ieee80211_network *network = NULL;
7390	struct ipw_network_match match = {
7391		.network = priv->assoc_network
7392	};
7393
7394	/* The roaming process is as follows:
7395	 *
7396	 * 1.  Missed beacon threshold triggers the roaming process by
7397	 *     setting the status ROAM bit and requesting a scan.
7398	 * 2.  When the scan completes, it schedules the ROAM work
7399	 * 3.  The ROAM work looks at all of the known networks for one that
7400	 *     is a better network than the currently associated.  If none
7401	 *     found, the ROAM process is over (ROAM bit cleared)
7402	 * 4.  If a better network is found, a disassociation request is
7403	 *     sent.
7404	 * 5.  When the disassociation completes, the roam work is again
7405	 *     scheduled.  The second time through, the driver is no longer
7406	 *     associated, and the newly selected network is sent an
7407	 *     association request.
	 * 6.  At this point, the roaming process is complete and the ROAM
7409	 *     status bit is cleared.
7410	 */
7411
7412	/* If we are no longer associated, and the roaming bit is no longer
7413	 * set, then we are not actively roaming, so just return */
7414	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7415		return;
7416
7417	if (priv->status & STATUS_ASSOCIATED) {
7418		/* First pass through ROAM process -- look for a better
7419		 * network */
7420		unsigned long flags;
7421		u8 rssi = priv->assoc_network->stats.rssi;
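		/* Temporarily force the current AP's RSSI to the minimum so
		 * it cannot win the comparison against the other known
		 * networks; the real value is restored below. */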
7422		priv->assoc_network->stats.rssi = -128;
7423		spin_lock_irqsave(&priv->ieee->lock, flags);
7424		list_for_each_entry(network, &priv->ieee->network_list, list) {
7425			if (network != priv->assoc_network)
7426				ipw_best_network(priv, &match, network, 1);
7427		}
7428		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7429		priv->assoc_network->stats.rssi = rssi;
7430
7431		if (match.network == priv->assoc_network) {
7432			IPW_DEBUG_ASSOC("No better APs in this network to "
7433					"roam to.\n");
7434			priv->status &= ~STATUS_ROAMING;
7435			ipw_debug_config(priv);
7436			return;
7437		}
7438
7439		ipw_send_disassociate(priv, 1);
7440		priv->assoc_network = match.network;
7441
7442		return;
7443	}
7444
7445	/* Second pass through ROAM process -- request association */
7446	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7447	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7448	priv->status &= ~STATUS_ROAMING;
7449}
7450
7451static void ipw_bg_roam(struct work_struct *work)
7452{
7453	struct ipw_priv *priv =
7454		container_of(work, struct ipw_priv, roam);
7455	mutex_lock(&priv->mutex);
7456	ipw_roam(priv);
7457	mutex_unlock(&priv->mutex);
7458}
7459
7460static int ipw_associate(void *data)
7461{
7462	struct ipw_priv *priv = data;
7463
7464	struct ieee80211_network *network = NULL;
7465	struct ipw_network_match match = {
7466		.network = NULL
7467	};
7468	struct ipw_supported_rates *rates;
7469	struct list_head *element;
7470	unsigned long flags;
7471
7472	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7473		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7474		return 0;
7475	}
7476
7477	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7478		IPW_DEBUG_ASSOC("Not attempting association (already in "
7479				"progress)\n");
7480		return 0;
7481	}
7482
7483	if (priv->status & STATUS_DISASSOCIATING) {
7484		IPW_DEBUG_ASSOC("Not attempting association (in "
7485				"disassociating)\n ");
7486		queue_work(priv->workqueue, &priv->associate);
7487		return 0;
7488	}
7489
7490	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7491		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7492				"initialized)\n");
7493		return 0;
7494	}
7495
7496	if (!(priv->config & CFG_ASSOCIATE) &&
7497	    !(priv->config & (CFG_STATIC_ESSID |
7498			      CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7499		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7500		return 0;
7501	}
7502
7503	/* Protect our use of the network_list */
7504	spin_lock_irqsave(&priv->ieee->lock, flags);
7505	list_for_each_entry(network, &priv->ieee->network_list, list)
7506	    ipw_best_network(priv, &match, network, 0);
7507
7508	network = match.network;
7509	rates = &match.rates;
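	/* If no matching network was found and we are in Ad-Hoc mode with a
	 * static ESSID and channel, we may create the IBSS ourselves below,
	 * using an entry from the free list. */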
7510
7511	if (network == NULL &&
7512	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7513	    priv->config & CFG_ADHOC_CREATE &&
7514	    priv->config & CFG_STATIC_ESSID &&
7515	    priv->config & CFG_STATIC_CHANNEL &&
7516	    !list_empty(&priv->ieee->network_free_list)) {
7517		element = priv->ieee->network_free_list.next;
7518		network = list_entry(element, struct ieee80211_network, list);
7519		ipw_adhoc_create(priv, network);
7520		rates = &priv->rates;
7521		list_del(element);
7522		list_add_tail(&network->list, &priv->ieee->network_list);
7523	}
7524	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7525
7526	/* If we reached the end of the list, then we don't have any valid
7527	 * matching APs */
7528	if (!network) {
7529		ipw_debug_config(priv);
7530
7531		if (!(priv->status & STATUS_SCANNING)) {
7532			if (!(priv->config & CFG_SPEED_SCAN))
7533				queue_delayed_work(priv->workqueue,
7534						   &priv->request_scan,
7535						   SCAN_INTERVAL);
7536			else
7537				queue_delayed_work(priv->workqueue,
7538						   &priv->request_scan, 0);
7539		}
7540
7541		return 0;
7542	}
7543
7544	ipw_associate_network(priv, network, rates, 0);
7545
7546	return 1;
7547}
7548
7549static void ipw_bg_associate(struct work_struct *work)
7550{
7551	struct ipw_priv *priv =
7552		container_of(work, struct ipw_priv, associate);
7553	mutex_lock(&priv->mutex);
7554	ipw_associate(priv);
7555	mutex_unlock(&priv->mutex);
7556}
7557
7558static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7559				      struct sk_buff *skb)
7560{
7561	struct ieee80211_hdr *hdr;
7562	u16 fc;
7563
7564	hdr = (struct ieee80211_hdr *)skb->data;
7565	fc = le16_to_cpu(hdr->frame_ctl);
7566	if (!(fc & IEEE80211_FCTL_PROTECTED))
7567		return;
7568
7569	fc &= ~IEEE80211_FCTL_PROTECTED;
7570	hdr->frame_ctl = cpu_to_le16(fc);
7571	switch (priv->ieee->sec.level) {
7572	case SEC_LEVEL_3:
7573		/* Remove CCMP HDR */
7574		memmove(skb->data + IEEE80211_3ADDR_LEN,
7575			skb->data + IEEE80211_3ADDR_LEN + 8,
7576			skb->len - IEEE80211_3ADDR_LEN - 8);
7577		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7578		break;
7579	case SEC_LEVEL_2:
7580		break;
7581	case SEC_LEVEL_1:
7582		/* Remove IV */
7583		memmove(skb->data + IEEE80211_3ADDR_LEN,
7584			skb->data + IEEE80211_3ADDR_LEN + 4,
7585			skb->len - IEEE80211_3ADDR_LEN - 4);
7586		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7587		break;
7588	case SEC_LEVEL_0:
7589		break;
7590	default:
7591		printk(KERN_ERR "Unknow security level %d\n",
7592		       priv->ieee->sec.level);
7593		break;
7594	}
7595}
7596
7597static void ipw_handle_data_packet(struct ipw_priv *priv,
7598				   struct ipw_rx_mem_buffer *rxb,
7599				   struct ieee80211_rx_stats *stats)
7600{
7601	struct ieee80211_hdr_4addr *hdr;
7602	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7603
7604	/* We received data from the HW, so stop the watchdog */
7605	priv->net_dev->trans_start = jiffies;
7606
7607	/* We only process data packets if the
7608	 * interface is open */
7609	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7610		     skb_tailroom(rxb->skb))) {
7611		priv->ieee->stats.rx_errors++;
7612		priv->wstats.discard.misc++;
7613		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7614		return;
7615	} else if (unlikely(!netif_running(priv->net_dev))) {
7616		priv->ieee->stats.rx_dropped++;
7617		priv->wstats.discard.misc++;
7618		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7619		return;
7620	}
7621
7622	/* Advance skb->data to the start of the actual payload */
7623	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7624
7625	/* Set the size of the skb to the size of the frame */
7626	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7627
7628	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7629
7630	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7631	hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7632	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7633	    (is_multicast_ether_addr(hdr->addr1) ?
7634	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7635		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7636
7637	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7638		priv->ieee->stats.rx_errors++;
7639	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
7640		rxb->skb = NULL;
7641		__ipw_led_activity_on(priv);
7642	}
7643}
7644
7645#ifdef CONFIG_IPW2200_RADIOTAP
7646static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7647					   struct ipw_rx_mem_buffer *rxb,
7648					   struct ieee80211_rx_stats *stats)
7649{
7650	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7651	struct ipw_rx_frame *frame = &pkt->u.frame;
7652
7653	/* initial pull of some data */
7654	u16 received_channel = frame->received_channel;
7655	u8 antennaAndPhy = frame->antennaAndPhy;
7656	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7657	u16 pktrate = frame->rate;
7658
7659	/* Magic struct that slots into the radiotap header -- no reason
7660	 * to build this manually element by element, we can write it much
7661	 * more efficiently than we can parse it. ORDER MATTERS HERE */
7662	struct ipw_rt_hdr *ipw_rt;
7663
7664	short len = le16_to_cpu(pkt->u.frame.length);
7665
7666	/* We received data from the HW, so stop the watchdog */
7667	priv->net_dev->trans_start = jiffies;
7668
7669	/* We only process data packets if the
7670	 * interface is open */
7671	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7672		     skb_tailroom(rxb->skb))) {
7673		priv->ieee->stats.rx_errors++;
7674		priv->wstats.discard.misc++;
7675		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7676		return;
7677	} else if (unlikely(!netif_running(priv->net_dev))) {
7678		priv->ieee->stats.rx_dropped++;
7679		priv->wstats.discard.misc++;
7680		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7681		return;
7682	}
7683
7684	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7685	 * that now */
7686	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7687		priv->ieee->stats.rx_dropped++;
7688		priv->wstats.discard.misc++;
7689		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7690		return;
7691	}
7692
7693	/* copy the frame itself */
7694	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7695		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7696
7697	/* Zero the radiotap static buffer  ...  We only need to zero the bytes NOT
7698	 * part of our real header, saves a little time.
7699	 *
7700	 * No longer necessary since we fill in all our data.  Purge before merging
7701	 * patch officially.
7702	 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7703	 *        IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7704	 */
7705
7706	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7707
7708	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7709	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7710	ipw_rt->rt_hdr.it_len = sizeof(struct ipw_rt_hdr);	/* total header+data */
7711
7712	/* Big bitfield of all the fields we provide in radiotap */
7713	ipw_rt->rt_hdr.it_present =
7714	    ((1 << IEEE80211_RADIOTAP_TSFT) |
7715	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7716	     (1 << IEEE80211_RADIOTAP_RATE) |
7717	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7718	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7719	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7720	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7721
7722	/* Zero the flags, we'll add to them as we go */
7723	ipw_rt->rt_flags = 0;
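	/* Assemble the 32-bit parent TSF from the little-endian byte array
	 * reported by the firmware */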
7724	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7725			       frame->parent_tsf[2] << 16 |
7726			       frame->parent_tsf[1] << 8  |
7727			       frame->parent_tsf[0]);
7728
7729	/* Convert signal to DBM */
7730	ipw_rt->rt_dbmsignal = antsignal;
7731	ipw_rt->rt_dbmnoise = frame->noise;
7732
7733	/* Convert the channel data and set the flags */
7734	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7735	if (received_channel > 14) {	/* 802.11a */
7736		ipw_rt->rt_chbitmask =
7737		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7738	} else if (antennaAndPhy & 32) {	/* 802.11b */
7739		ipw_rt->rt_chbitmask =
7740		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7741	} else {		/* 802.11g */
7742		ipw_rt->rt_chbitmask =
7743		    (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7744	}
7745
7746	/* set the rate in multiples of 500k/s */
7747	switch (pktrate) {
7748	case IPW_TX_RATE_1MB:
7749		ipw_rt->rt_rate = 2;
7750		break;
7751	case IPW_TX_RATE_2MB:
7752		ipw_rt->rt_rate = 4;
7753		break;
7754	case IPW_TX_RATE_5MB:
7755		ipw_rt->rt_rate = 10;
7756		break;
7757	case IPW_TX_RATE_6MB:
7758		ipw_rt->rt_rate = 12;
7759		break;
7760	case IPW_TX_RATE_9MB:
7761		ipw_rt->rt_rate = 18;
7762		break;
7763	case IPW_TX_RATE_11MB:
7764		ipw_rt->rt_rate = 22;
7765		break;
7766	case IPW_TX_RATE_12MB:
7767		ipw_rt->rt_rate = 24;
7768		break;
7769	case IPW_TX_RATE_18MB:
7770		ipw_rt->rt_rate = 36;
7771		break;
7772	case IPW_TX_RATE_24MB:
7773		ipw_rt->rt_rate = 48;
7774		break;
7775	case IPW_TX_RATE_36MB:
7776		ipw_rt->rt_rate = 72;
7777		break;
7778	case IPW_TX_RATE_48MB:
7779		ipw_rt->rt_rate = 96;
7780		break;
7781	case IPW_TX_RATE_54MB:
7782		ipw_rt->rt_rate = 108;
7783		break;
7784	default:
7785		ipw_rt->rt_rate = 0;
7786		break;
7787	}
7788
7789	/* antenna number */
7790	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7791
7792	/* set the preamble flag if we have it */
7793	if ((antennaAndPhy & 64))
7794		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7795
7796	/* Set the size of the skb to the size of the frame */
7797	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7798
7799	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7800
7801	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7802		priv->ieee->stats.rx_errors++;
7803	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
7804		rxb->skb = NULL;
7805		/* no LED during capture */
7806	}
7807}
7808#endif
7809
7810#ifdef CONFIG_IPW2200_PROMISCUOUS
7811#define ieee80211_is_probe_response(fc) \
7812   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7813    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7814
7815#define ieee80211_is_management(fc) \
7816   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7817
7818#define ieee80211_is_control(fc) \
7819   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7820
7821#define ieee80211_is_data(fc) \
7822   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7823
7824#define ieee80211_is_assoc_request(fc) \
7825   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7826
7827#define ieee80211_is_reassoc_request(fc) \
7828   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7829
7830static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7831				      struct ipw_rx_mem_buffer *rxb,
7832				      struct ieee80211_rx_stats *stats)
7833{
7834	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7835	struct ipw_rx_frame *frame = &pkt->u.frame;
7836	struct ipw_rt_hdr *ipw_rt;
7837
7838	/* First cache any information we need before we overwrite
7839	 * the information provided in the skb from the hardware */
7840	struct ieee80211_hdr *hdr;
7841	u16 channel = frame->received_channel;
7842	u8 phy_flags = frame->antennaAndPhy;
7843	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7844	s8 noise = frame->noise;
7845	u8 rate = frame->rate;
7846	short len = le16_to_cpu(pkt->u.frame.length);
7847	struct sk_buff *skb;
7848	int hdr_only = 0;
7849	u16 filter = priv->prom_priv->filter;
7850
7851	/* If the filter is set to not include Rx frames then return */
7852	if (filter & IPW_PROM_NO_RX)
7853		return;
7854
7855	/* We received data from the HW, so stop the watchdog */
7856	priv->prom_net_dev->trans_start = jiffies;
7857
7858	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7859		priv->prom_priv->ieee->stats.rx_errors++;
7860		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7861		return;
7862	}
7863
7864	/* We only process data packets if the interface is open */
7865	if (unlikely(!netif_running(priv->prom_net_dev))) {
7866		priv->prom_priv->ieee->stats.rx_dropped++;
7867		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7868		return;
7869	}
7870
7871	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7872	 * that now */
7873	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7874		priv->prom_priv->ieee->stats.rx_dropped++;
7875		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7876		return;
7877	}
7878
7879	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
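	/* Classify the frame by its 802.11 frame-control type so the
	 * per-type filter bits (including the header-only options) can be
	 * applied below */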
7880	if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7881		if (filter & IPW_PROM_NO_MGMT)
7882			return;
7883		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7884			hdr_only = 1;
7885	} else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7886		if (filter & IPW_PROM_NO_CTL)
7887			return;
7888		if (filter & IPW_PROM_CTL_HEADER_ONLY)
7889			hdr_only = 1;
7890	} else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
7891		if (filter & IPW_PROM_NO_DATA)
7892			return;
7893		if (filter & IPW_PROM_DATA_HEADER_ONLY)
7894			hdr_only = 1;
7895	}
7896
7897	/* Copy the SKB since this is for the promiscuous side */
7898	skb = skb_copy(rxb->skb, GFP_ATOMIC);
7899	if (skb == NULL) {
7900		IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7901		return;
7902	}
7903
7904	/* copy the frame data to write after where the radiotap header goes */
7905	ipw_rt = (void *)skb->data;
7906
7907	if (hdr_only)
7908		len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
7909
7910	memcpy(ipw_rt->payload, hdr, len);
7911
7912	/* Zero the radiotap static buffer  ...  We only need to zero the bytes
7913	 * NOT part of our real header, saves a little time.
7914	 *
7915	 * No longer necessary since we fill in all our data.  Purge before
7916	 * merging patch officially.
7917	 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7918	 *        IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7919	 */
7920
7921	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7922	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7923	ipw_rt->rt_hdr.it_len = sizeof(*ipw_rt);	/* total header+data */
7924
7925	/* Set the size of the skb to the size of the frame */
7926	skb_put(skb, ipw_rt->rt_hdr.it_len + len);
7927
7928	/* Big bitfield of all the fields we provide in radiotap */
7929	ipw_rt->rt_hdr.it_present =
7930	    ((1 << IEEE80211_RADIOTAP_TSFT) |
7931	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7932	     (1 << IEEE80211_RADIOTAP_RATE) |
7933	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7934	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7935	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7936	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7937
7938	/* Zero the flags, we'll add to them as we go */
7939	ipw_rt->rt_flags = 0;
7940	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7941			       frame->parent_tsf[2] << 16 |
7942			       frame->parent_tsf[1] << 8  |
7943			       frame->parent_tsf[0]);
7944
7945	/* Convert to DBM */
7946	ipw_rt->rt_dbmsignal = signal;
7947	ipw_rt->rt_dbmnoise = noise;
7948
7949	/* Convert the channel data and set the flags */
7950	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
7951	if (channel > 14) {	/* 802.11a */
7952		ipw_rt->rt_chbitmask =
7953		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7954	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
7955		ipw_rt->rt_chbitmask =
7956		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7957	} else {		/* 802.11g */
7958		ipw_rt->rt_chbitmask =
7959		    (IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7960	}
7961
7962	/* set the rate in multiples of 500k/s */
7963	switch (rate) {
7964	case IPW_TX_RATE_1MB:
7965		ipw_rt->rt_rate = 2;
7966		break;
7967	case IPW_TX_RATE_2MB:
7968		ipw_rt->rt_rate = 4;
7969		break;
7970	case IPW_TX_RATE_5MB:
7971		ipw_rt->rt_rate = 10;
7972		break;
7973	case IPW_TX_RATE_6MB:
7974		ipw_rt->rt_rate = 12;
7975		break;
7976	case IPW_TX_RATE_9MB:
7977		ipw_rt->rt_rate = 18;
7978		break;
7979	case IPW_TX_RATE_11MB:
7980		ipw_rt->rt_rate = 22;
7981		break;
7982	case IPW_TX_RATE_12MB:
7983		ipw_rt->rt_rate = 24;
7984		break;
7985	case IPW_TX_RATE_18MB:
7986		ipw_rt->rt_rate = 36;
7987		break;
7988	case IPW_TX_RATE_24MB:
7989		ipw_rt->rt_rate = 48;
7990		break;
7991	case IPW_TX_RATE_36MB:
7992		ipw_rt->rt_rate = 72;
7993		break;
7994	case IPW_TX_RATE_48MB:
7995		ipw_rt->rt_rate = 96;
7996		break;
7997	case IPW_TX_RATE_54MB:
7998		ipw_rt->rt_rate = 108;
7999		break;
8000	default:
8001		ipw_rt->rt_rate = 0;
8002		break;
8003	}
8004
8005	/* antenna number */
8006	ipw_rt->rt_antenna = (phy_flags & 3);
8007
8008	/* set the preamble flag if we have it */
8009	if (phy_flags & (1 << 6))
8010		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8011
8012	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8013
8014	if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8015		priv->prom_priv->ieee->stats.rx_errors++;
8016		dev_kfree_skb_any(skb);
8017	}
8018}
8019#endif
8020
8021static int is_network_packet(struct ipw_priv *priv,
8022				    struct ieee80211_hdr_4addr *header)
8023{
	/* Filter incoming packets to determine if they are targeted toward
8025	 * this network, discarding packets coming from ourselves */
8026	switch (priv->ieee->iw_mode) {
8027	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8028		/* packets from our adapter are dropped (echo) */
8029		if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8030			return 0;
8031
8032		/* {broad,multi}cast packets to our BSSID go through */
8033		if (is_multicast_ether_addr(header->addr1))
8034			return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8035
8036		/* packets to our adapter go through */
8037		return !memcmp(header->addr1, priv->net_dev->dev_addr,
8038			       ETH_ALEN);
8039
8040	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8041		/* packets from our adapter are dropped (echo) */
8042		if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8043			return 0;
8044
8045		/* {broad,multi}cast packets to our BSS go through */
8046		if (is_multicast_ether_addr(header->addr1))
8047			return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8048
8049		/* packets to our adapter go through */
8050		return !memcmp(header->addr1, priv->net_dev->dev_addr,
8051			       ETH_ALEN);
8052	}
8053
8054	return 1;
8055}
8056
8057#define IPW_PACKET_RETRY_TIME HZ
8058
8059static  int is_duplicate_packet(struct ipw_priv *priv,
8060				      struct ieee80211_hdr_4addr *header)
8061{
8062	u16 sc = le16_to_cpu(header->seq_ctl);
8063	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8064	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8065	u16 *last_seq, *last_frag;
8066	unsigned long *last_time;
8067
8068	switch (priv->ieee->iw_mode) {
8069	case IW_MODE_ADHOC:
8070		{
8071			struct list_head *p;
8072			struct ipw_ibss_seq *entry = NULL;
8073			u8 *mac = header->addr2;
8074			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8075
8076			__list_for_each(p, &priv->ibss_mac_hash[index]) {
8077				entry =
8078				    list_entry(p, struct ipw_ibss_seq, list);
8079				if (!memcmp(entry->mac, mac, ETH_ALEN))
8080					break;
8081			}
8082			if (p == &priv->ibss_mac_hash[index]) {
8083				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8084				if (!entry) {
8085					IPW_ERROR
8086					    ("Cannot malloc new mac entry\n");
8087					return 0;
8088				}
8089				memcpy(entry->mac, mac, ETH_ALEN);
8090				entry->seq_num = seq;
8091				entry->frag_num = frag;
8092				entry->packet_time = jiffies;
8093				list_add(&entry->list,
8094					 &priv->ibss_mac_hash[index]);
8095				return 0;
8096			}
8097			last_seq = &entry->seq_num;
8098			last_frag = &entry->frag_num;
8099			last_time = &entry->packet_time;
8100			break;
8101		}
8102	case IW_MODE_INFRA:
8103		last_seq = &priv->last_seq_num;
8104		last_frag = &priv->last_frag_num;
8105		last_time = &priv->last_packet_time;
8106		break;
8107	default:
8108		return 0;
8109	}
8110	if ((*last_seq == seq) &&
8111	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8112		if (*last_frag == frag)
8113			goto drop;
8114		if (*last_frag + 1 != frag)
8115			/* out-of-order fragment */
8116			goto drop;
8117	} else
8118		*last_seq = seq;
8119
8120	*last_frag = frag;
8121	*last_time = jiffies;
8122	return 0;
8123
8124      drop:
	/* The BUG_ON below is commented out because we have observed the
	 * card receiving duplicate packets without the FCTL_RETRY bit set
	 * in IBSS mode with fragmentation enabled.
	 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */
8129	return 1;
8130}
8131
8132static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8133				   struct ipw_rx_mem_buffer *rxb,
8134				   struct ieee80211_rx_stats *stats)
8135{
8136	struct sk_buff *skb = rxb->skb;
8137	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8138	struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8139	    (skb->data + IPW_RX_FRAME_SIZE);
8140
8141	ieee80211_rx_mgt(priv->ieee, header, stats);
8142
8143	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8144	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8145	      IEEE80211_STYPE_PROBE_RESP) ||
8146	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8147	      IEEE80211_STYPE_BEACON))) {
8148		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8149			ipw_add_station(priv, header->addr2);
8150	}
8151
8152	if (priv->config & CFG_NET_STATS) {
8153		IPW_DEBUG_HC("sending stat packet\n");
8154
8155		/* Set the size of the skb to the size of the full
8156		 * ipw header and 802.11 frame */
8157		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8158			IPW_RX_FRAME_SIZE);
8159
8160		/* Advance past the ipw packet header to the 802.11 frame */
8161		skb_pull(skb, IPW_RX_FRAME_SIZE);
8162
8163		/* Push the ieee80211_rx_stats before the 802.11 frame */
8164		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8165
8166		skb->dev = priv->ieee->dev;
8167
8168		/* Point raw at the ieee80211_stats */
8169		skb_reset_mac_header(skb);
8170
8171		skb->pkt_type = PACKET_OTHERHOST;
8172		skb->protocol = __constant_htons(ETH_P_80211_STATS);
8173		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8174		netif_rx(skb);
8175		rxb->skb = NULL;
8176	}
8177}
8178
8179/*
 * Main entry function for receiving a packet with 802.11 headers.  This
 * should be called whenever the FW has notified us that there is a new
 * skb in the receive queue.
8183 */
8184static void ipw_rx(struct ipw_priv *priv)
8185{
8186	struct ipw_rx_mem_buffer *rxb;
8187	struct ipw_rx_packet *pkt;
8188	struct ieee80211_hdr_4addr *header;
8189	u32 r, w, i;
8190	u8 network_packet;
8191
8192	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8193	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8194	i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
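	/* Walk the circular RX queue from the slot after the last one we
	 * processed up to the firmware's current read index */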
8195
8196	while (i != r) {
8197		rxb = priv->rxq->queue[i];
8198		if (unlikely(rxb == NULL)) {
8199			printk(KERN_CRIT "Queue not allocated!\n");
8200			break;
8201		}
8202		priv->rxq->queue[i] = NULL;
8203
8204		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8205					    IPW_RX_BUF_SIZE,
8206					    PCI_DMA_FROMDEVICE);
8207
8208		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8209		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8210			     pkt->header.message_type,
8211			     pkt->header.rx_seq_num, pkt->header.control_bits);
8212
8213		switch (pkt->header.message_type) {
8214		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8215				struct ieee80211_rx_stats stats = {
8216					.rssi = pkt->u.frame.rssi_dbm -
8217					    IPW_RSSI_TO_DBM,
8218					.signal =
8219					    le16_to_cpu(pkt->u.frame.rssi_dbm) -
8220					    IPW_RSSI_TO_DBM + 0x100,
8221					.noise =
8222					    le16_to_cpu(pkt->u.frame.noise),
8223					.rate = pkt->u.frame.rate,
8224					.mac_time = jiffies,
8225					.received_channel =
8226					    pkt->u.frame.received_channel,
8227					.freq =
8228					    (pkt->u.frame.
8229					     control & (1 << 0)) ?
8230					    IEEE80211_24GHZ_BAND :
8231					    IEEE80211_52GHZ_BAND,
8232					.len = le16_to_cpu(pkt->u.frame.length),
8233				};
8234
8235				if (stats.rssi != 0)
8236					stats.mask |= IEEE80211_STATMASK_RSSI;
8237				if (stats.signal != 0)
8238					stats.mask |= IEEE80211_STATMASK_SIGNAL;
8239				if (stats.noise != 0)
8240					stats.mask |= IEEE80211_STATMASK_NOISE;
8241				if (stats.rate != 0)
8242					stats.mask |= IEEE80211_STATMASK_RATE;
8243
8244				priv->rx_packets++;
8245
8246#ifdef CONFIG_IPW2200_PROMISCUOUS
8247	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8248		ipw_handle_promiscuous_rx(priv, rxb, &stats);
8249#endif
8250
8251#ifdef CONFIG_IPW2200_MONITOR
8252				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8253#ifdef CONFIG_IPW2200_RADIOTAP
8254
8255                ipw_handle_data_packet_monitor(priv,
8256					       rxb,
8257					       &stats);
8258#else
8259		ipw_handle_data_packet(priv, rxb,
8260				       &stats);
8261#endif
8262					break;
8263				}
8264#endif
8265
8266				header =
8267				    (struct ieee80211_hdr_4addr *)(rxb->skb->
8268								   data +
8269								   IPW_RX_FRAME_SIZE);
8270				/* TODO: Check Ad-Hoc dest/source and make sure
8271				 * that we are actually parsing these packets
8272				 * correctly -- we should probably use the
8273				 * frame control of the packet and disregard
8274				 * the current iw_mode */
8275
8276				network_packet =
8277				    is_network_packet(priv, header);
8278				if (network_packet && priv->assoc_network) {
8279					priv->assoc_network->stats.rssi =
8280					    stats.rssi;
8281					priv->exp_avg_rssi =
8282					    exponential_average(priv->exp_avg_rssi,
8283					    stats.rssi, DEPTH_RSSI);
8284				}
8285
8286				IPW_DEBUG_RX("Frame: len=%u\n",
8287					     le16_to_cpu(pkt->u.frame.length));
8288
8289				if (le16_to_cpu(pkt->u.frame.length) <
8290				    ieee80211_get_hdrlen(le16_to_cpu(
8291						    header->frame_ctl))) {
8292					IPW_DEBUG_DROP
8293					    ("Received packet is too small. "
8294					     "Dropping.\n");
8295					priv->ieee->stats.rx_errors++;
8296					priv->wstats.discard.misc++;
8297					break;
8298				}
8299
8300				switch (WLAN_FC_GET_TYPE
8301					(le16_to_cpu(header->frame_ctl))) {
8302
8303				case IEEE80211_FTYPE_MGMT:
8304					ipw_handle_mgmt_packet(priv, rxb,
8305							       &stats);
8306					break;
8307
8308				case IEEE80211_FTYPE_CTL:
8309					break;
8310
8311				case IEEE80211_FTYPE_DATA:
8312					if (unlikely(!network_packet ||
8313						     is_duplicate_packet(priv,
8314									 header)))
8315					{
8316						IPW_DEBUG_DROP("Dropping: "
8317							       MAC_FMT ", "
8318							       MAC_FMT ", "
8319							       MAC_FMT "\n",
8320							       MAC_ARG(header->
8321								       addr1),
8322							       MAC_ARG(header->
8323								       addr2),
8324							       MAC_ARG(header->
8325								       addr3));
8326						break;
8327					}
8328
8329					ipw_handle_data_packet(priv, rxb,
8330							       &stats);
8331
8332					break;
8333				}
8334				break;
8335			}
8336
8337		case RX_HOST_NOTIFICATION_TYPE:{
8338				IPW_DEBUG_RX
8339				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8340				     pkt->u.notification.subtype,
8341				     pkt->u.notification.flags,
8342				     le16_to_cpu(pkt->u.notification.size));
8343				ipw_rx_notification(priv, &pkt->u.notification);
8344				break;
8345			}
8346
8347		default:
8348			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8349				     pkt->header.message_type);
8350			break;
8351		}
8352
8353		/* For now we just don't re-use anything.  We can tweak this
8354		 * later to try and re-use notification packets and SKBs that
8355		 * fail to Rx correctly */
8356		if (rxb->skb != NULL) {
8357			dev_kfree_skb_any(rxb->skb);
8358			rxb->skb = NULL;
8359		}
8360
8361		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8362				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8363		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8364
8365		i = (i + 1) % RX_QUEUE_SIZE;
8366	}
8367
8368	/* Backtrack one entry */
8369	priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
8370
8371	ipw_rx_queue_restock(priv);
8372}
8373
8374#define DEFAULT_RTS_THRESHOLD     2304U
8375#define MIN_RTS_THRESHOLD         1U
8376#define MAX_RTS_THRESHOLD         2304U
8377#define DEFAULT_BEACON_INTERVAL   100U
8378#define	DEFAULT_SHORT_RETRY_LIMIT 7U
8379#define	DEFAULT_LONG_RETRY_LIMIT  4U
8380
8381/**
8382 * ipw_sw_reset
8383 * @option: options to control different reset behaviour
8384 * 	    0 = reset everything except the 'disable' module_param
8385 * 	    1 = reset everything and print out driver info (for probe only)
8386 * 	    2 = reset everything
8387 */
8388static int ipw_sw_reset(struct ipw_priv *priv, int option)
8389{
8390	int band, modulation;
8391	int old_mode = priv->ieee->iw_mode;
8392
8393	/* Initialize module parameter values here */
8394	priv->config = 0;
8395
8396	/* We default to disabling the LED code as right now it causes
8397	 * too many systems to lock up... */
8398	if (!led)
8399		priv->config |= CFG_NO_LED;
8400
8401	if (associate)
8402		priv->config |= CFG_ASSOCIATE;
8403	else
8404		IPW_DEBUG_INFO("Auto associate disabled.\n");
8405
8406	if (auto_create)
8407		priv->config |= CFG_ADHOC_CREATE;
8408	else
8409		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8410
8411	priv->config &= ~CFG_STATIC_ESSID;
8412	priv->essid_len = 0;
8413	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8414
8415	if (disable && option) {
8416		priv->status |= STATUS_RF_KILL_SW;
8417		IPW_DEBUG_INFO("Radio disabled.\n");
8418	}
8419
8420	if (channel != 0) {
8421		priv->config |= CFG_STATIC_CHANNEL;
8422		priv->channel = channel;
8423		IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8424		/* TODO: Validate that provided channel is in range */
8425	}
8426#ifdef CONFIG_IPW2200_QOS
8427	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8428		     burst_duration_CCK, burst_duration_OFDM);
8429#endif				/* CONFIG_IPW2200_QOS */
8430
8431	switch (mode) {
8432	case 1:
8433		priv->ieee->iw_mode = IW_MODE_ADHOC;
8434		priv->net_dev->type = ARPHRD_ETHER;
8435
8436		break;
8437#ifdef CONFIG_IPW2200_MONITOR
8438	case 2:
8439		priv->ieee->iw_mode = IW_MODE_MONITOR;
8440#ifdef CONFIG_IPW2200_RADIOTAP
8441		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8442#else
8443		priv->net_dev->type = ARPHRD_IEEE80211;
8444#endif
8445		break;
8446#endif
8447	default:
8448	case 0:
8449		priv->net_dev->type = ARPHRD_ETHER;
8450		priv->ieee->iw_mode = IW_MODE_INFRA;
8451		break;
8452	}
8453
8454	if (hwcrypto) {
8455		priv->ieee->host_encrypt = 0;
8456		priv->ieee->host_encrypt_msdu = 0;
8457		priv->ieee->host_decrypt = 0;
8458		priv->ieee->host_mc_decrypt = 0;
8459	}
8460	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8461
	/* The IPW2200/2915 is able to do hardware fragmentation. */
8463	priv->ieee->host_open_frag = 0;
8464
8465	if ((priv->pci_dev->device == 0x4223) ||
8466	    (priv->pci_dev->device == 0x4224)) {
8467		if (option == 1)
8468			printk(KERN_INFO DRV_NAME
8469			       ": Detected Intel PRO/Wireless 2915ABG Network "
8470			       "Connection\n");
8471		priv->ieee->abg_true = 1;
8472		band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8473		modulation = IEEE80211_OFDM_MODULATION |
8474		    IEEE80211_CCK_MODULATION;
8475		priv->adapter = IPW_2915ABG;
8476		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8477	} else {
8478		if (option == 1)
8479			printk(KERN_INFO DRV_NAME
8480			       ": Detected Intel PRO/Wireless 2200BG Network "
8481			       "Connection\n");
8482
8483		priv->ieee->abg_true = 0;
8484		band = IEEE80211_24GHZ_BAND;
8485		modulation = IEEE80211_OFDM_MODULATION |
8486		    IEEE80211_CCK_MODULATION;
8487		priv->adapter = IPW_2200BG;
8488		priv->ieee->mode = IEEE_G | IEEE_B;
8489	}
8490
8491	priv->ieee->freq_band = band;
8492	priv->ieee->modulation = modulation;
8493
8494	priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8495
8496	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8497	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8498
8499	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8500	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8501	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8502
8503	/* If power management is turned on, default to AC mode */
8504	priv->power_mode = IPW_POWER_AC;
8505	priv->tx_power = IPW_TX_POWER_DEFAULT;
8506
8507	return old_mode == priv->ieee->iw_mode;
8508}
8509
8510/*
8511 * This file defines the Wireless Extension handlers.  It does not
8512 * define any methods of hardware manipulation and relies on the
8513 * functions defined in ipw_main to provide the HW interaction.
8514 *
8515 * The exception to this is the use of the ipw_get_ordinal()
 * function used to poll the hardware vs. making unnecessary calls.
8517 *
8518 */
8519
8520static int ipw_wx_get_name(struct net_device *dev,
8521			   struct iw_request_info *info,
8522			   union iwreq_data *wrqu, char *extra)
8523{
8524	struct ipw_priv *priv = ieee80211_priv(dev);
8525	mutex_lock(&priv->mutex);
8526	if (priv->status & STATUS_RF_KILL_MASK)
8527		strcpy(wrqu->name, "radio off");
8528	else if (!(priv->status & STATUS_ASSOCIATED))
8529		strcpy(wrqu->name, "unassociated");
8530	else
8531		snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8532			 ipw_modes[priv->assoc_request.ieee_mode]);
8533	IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8534	mutex_unlock(&priv->mutex);
8535	return 0;
8536}
8537
8538static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8539{
8540	if (channel == 0) {
8541		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8542		priv->config &= ~CFG_STATIC_CHANNEL;
8543		IPW_DEBUG_ASSOC("Attempting to associate with new "
8544				"parameters.\n");
8545		ipw_associate(priv);
8546		return 0;
8547	}
8548
8549	priv->config |= CFG_STATIC_CHANNEL;
8550
8551	if (priv->channel == channel) {
8552		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8553			       channel);
8554		return 0;
8555	}
8556
8557	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8558	priv->channel = channel;
8559
8560#ifdef CONFIG_IPW2200_MONITOR
8561	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8562		int i;
8563		if (priv->status & STATUS_SCANNING) {
8564			IPW_DEBUG_SCAN("Scan abort triggered due to "
8565				       "channel change.\n");
8566			ipw_abort_scan(priv);
8567		}
8568
8569		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8570			udelay(10);
8571
8572		if (priv->status & STATUS_SCANNING)
8573			IPW_DEBUG_SCAN("Still scanning...\n");
8574		else
8575			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8576				       1000 - i);
8577
8578		return 0;
8579	}
8580#endif				/* CONFIG_IPW2200_MONITOR */
8581
8582	/* Network configuration changed -- force [re]association */
8583	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8584	if (!ipw_disassociate(priv))
8585		ipw_associate(priv);
8586
8587	return 0;
8588}
8589
8590static int ipw_wx_set_freq(struct net_device *dev,
8591			   struct iw_request_info *info,
8592			   union iwreq_data *wrqu, char *extra)
8593{
8594	struct ipw_priv *priv = ieee80211_priv(dev);
8595	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8596	struct iw_freq *fwrq = &wrqu->freq;
8597	int ret = 0, i;
8598	u8 channel, flags;
8599	int band;
8600
8601	if (fwrq->m == 0) {
8602		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8603		mutex_lock(&priv->mutex);
8604		ret = ipw_set_channel(priv, 0);
8605		mutex_unlock(&priv->mutex);
8606		return ret;
8607	}
8608	/* if setting by freq convert to channel */
8609	if (fwrq->e == 1) {
8610		channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8611		if (channel == 0)
8612			return -EINVAL;
8613	} else
8614		channel = fwrq->m;
8615
8616	if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8617		return -EINVAL;
8618
8619	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8620		i = ieee80211_channel_to_index(priv->ieee, channel);
8621		if (i == -1)
8622			return -EINVAL;
8623
8624		flags = (band == IEEE80211_24GHZ_BAND) ?
8625		    geo->bg[i].flags : geo->a[i].flags;
8626		if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8627			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8628			return -EINVAL;
8629		}
8630	}
8631
8632	IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8633	mutex_lock(&priv->mutex);
8634	ret = ipw_set_channel(priv, channel);
8635	mutex_unlock(&priv->mutex);
8636	return ret;
8637}
8638
8639static int ipw_wx_get_freq(struct net_device *dev,
8640			   struct iw_request_info *info,
8641			   union iwreq_data *wrqu, char *extra)
8642{
8643	struct ipw_priv *priv = ieee80211_priv(dev);
8644
8645	wrqu->freq.e = 0;
8646
8647	/* If we are associated, trying to associate, or have a statically
8648	 * configured CHANNEL then return that; otherwise return ANY */
8649	mutex_lock(&priv->mutex);
8650	if (priv->config & CFG_STATIC_CHANNEL ||
8651	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8652		int i;
8653
8654		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8655		BUG_ON(i == -1);
8656		wrqu->freq.e = 1;
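		/* Geo frequencies are stored in MHz; with exponent e = 1 the
		 * reported value m is in units of 10 Hz, hence the factor of
		 * 100000 below */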
8657
8658		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8659		case IEEE80211_52GHZ_BAND:
8660			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8661			break;
8662
8663		case IEEE80211_24GHZ_BAND:
8664			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8665			break;
8666
8667		default:
8668			BUG();
8669		}
8670	} else
8671		wrqu->freq.m = 0;
8672
8673	mutex_unlock(&priv->mutex);
8674	IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8675	return 0;
8676}
8677
8678static int ipw_wx_set_mode(struct net_device *dev,
8679			   struct iw_request_info *info,
8680			   union iwreq_data *wrqu, char *extra)
8681{
8682	struct ipw_priv *priv = ieee80211_priv(dev);
8683	int err = 0;
8684
8685	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8686
8687	switch (wrqu->mode) {
8688#ifdef CONFIG_IPW2200_MONITOR
8689	case IW_MODE_MONITOR:
8690#endif
8691	case IW_MODE_ADHOC:
8692	case IW_MODE_INFRA:
8693		break;
8694	case IW_MODE_AUTO:
8695		wrqu->mode = IW_MODE_INFRA;
8696		break;
8697	default:
8698		return -EINVAL;
8699	}
8700	if (wrqu->mode == priv->ieee->iw_mode)
8701		return 0;
8702
8703	mutex_lock(&priv->mutex);
8704
8705	ipw_sw_reset(priv, 0);
8706
8707#ifdef CONFIG_IPW2200_MONITOR
8708	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8709		priv->net_dev->type = ARPHRD_ETHER;
8710
8711	if (wrqu->mode == IW_MODE_MONITOR)
8712#ifdef CONFIG_IPW2200_RADIOTAP
8713		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8714#else
8715		priv->net_dev->type = ARPHRD_IEEE80211;
8716#endif
8717#endif				/* CONFIG_IPW2200_MONITOR */
8718
8719	/* Free the existing firmware and reset the fw_loaded
	 * flag so ipw_load() will bring in the new firmware */
8721	free_firmware();
8722
8723	priv->ieee->iw_mode = wrqu->mode;
8724
8725	queue_work(priv->workqueue, &priv->adapter_restart);
8726	mutex_unlock(&priv->mutex);
8727	return err;
8728}
8729
8730static int ipw_wx_get_mode(struct net_device *dev,
8731			   struct iw_request_info *info,
8732			   union iwreq_data *wrqu, char *extra)
8733{
8734	struct ipw_priv *priv = ieee80211_priv(dev);
8735	mutex_lock(&priv->mutex);
8736	wrqu->mode = priv->ieee->iw_mode;
8737	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8738	mutex_unlock(&priv->mutex);
8739	return 0;
8740}
8741
/* Values are in microseconds */
8743static const s32 timeout_duration[] = {
8744	350000,
8745	250000,
8746	75000,
8747	37000,
8748	25000,
8749};
8750
8751static const s32 period_duration[] = {
8752	400000,
8753	700000,
8754	1000000,
8755	1000000,
8756	1000000
8757};
8758
8759static int ipw_wx_get_range(struct net_device *dev,
8760			    struct iw_request_info *info,
8761			    union iwreq_data *wrqu, char *extra)
8762{
8763	struct ipw_priv *priv = ieee80211_priv(dev);
8764	struct iw_range *range = (struct iw_range *)extra;
8765	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8766	int i = 0, j;
8767
8768	wrqu->data.length = sizeof(*range);
8769	memset(range, 0, sizeof(*range));
8770
8771	/* 54Mbs == ~27 Mb/s real (802.11g) */
8772	range->throughput = 27 * 1000 * 1000;
8773
8774	range->max_qual.qual = 100;
8775	/* TODO: Find real max RSSI and stick here */
8776	range->max_qual.level = 0;
8777	range->max_qual.noise = 0;
8778	range->max_qual.updated = 7;	/* Updated all three */
8779
8780	range->avg_qual.qual = 70;
	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8782	range->avg_qual.level = 0;
8783	range->avg_qual.noise = 0;
8784	range->avg_qual.updated = 7;	/* Updated all three */
8785	mutex_lock(&priv->mutex);
8786	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8787
8788	for (i = 0; i < range->num_bitrates; i++)
8789		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8790		    500000;
8791
8792	range->max_rts = DEFAULT_RTS_THRESHOLD;
8793	range->min_frag = MIN_FRAG_THRESHOLD;
8794	range->max_frag = MAX_FRAG_THRESHOLD;
8795
8796	range->encoding_size[0] = 5;
8797	range->encoding_size[1] = 13;
8798	range->num_encoding_sizes = 2;
8799	range->max_encoding_tokens = WEP_KEYS;
8800
8801	/* Set the Wireless Extension versions */
8802	range->we_version_compiled = WIRELESS_EXT;
8803	range->we_version_source = 18;
8804
8805	i = 0;
8806	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8807		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8808			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8809			    (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8810				continue;
8811
8812			range->freq[i].i = geo->bg[j].channel;
8813			range->freq[i].m = geo->bg[j].freq * 100000;
8814			range->freq[i].e = 1;
8815			i++;
8816		}
8817	}
8818
8819	if (priv->ieee->mode & IEEE_A) {
8820		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8821			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8822			    (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8823				continue;
8824
8825			range->freq[i].i = geo->a[j].channel;
8826			range->freq[i].m = geo->a[j].freq * 100000;
8827			range->freq[i].e = 1;
8828			i++;
8829		}
8830	}
8831
8832	range->num_channels = i;
8833	range->num_frequency = i;
8834
8835	mutex_unlock(&priv->mutex);
8836
8837	/* Event capability (kernel + driver) */
8838	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8839				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8840				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8841				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8842	range->event_capa[1] = IW_EVENT_CAPA_K_1;
8843
8844	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8845		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8846
8847	IPW_DEBUG_WX("GET Range\n");
8848	return 0;
8849}
8850
8851static int ipw_wx_set_wap(struct net_device *dev,
8852			  struct iw_request_info *info,
8853			  union iwreq_data *wrqu, char *extra)
8854{
8855	struct ipw_priv *priv = ieee80211_priv(dev);
8856
8857	static const unsigned char any[] = {
8858		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8859	};
8860	static const unsigned char off[] = {
8861		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8862	};
8863
8864	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8865		return -EINVAL;
8866	mutex_lock(&priv->mutex);
8867	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8868	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8869		/* we disable mandatory BSSID association */
8870		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8871		priv->config &= ~CFG_STATIC_BSSID;
8872		IPW_DEBUG_ASSOC("Attempting to associate with new "
8873				"parameters.\n");
8874		ipw_associate(priv);
8875		mutex_unlock(&priv->mutex);
8876		return 0;
8877	}
8878
8879	priv->config |= CFG_STATIC_BSSID;
8880	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8881		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8882		mutex_unlock(&priv->mutex);
8883		return 0;
8884	}
8885
8886	IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
8887		     MAC_ARG(wrqu->ap_addr.sa_data));
8888
8889	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8890
8891	/* Network configuration changed -- force [re]association */
8892	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8893	if (!ipw_disassociate(priv))
8894		ipw_associate(priv);
8895
8896	mutex_unlock(&priv->mutex);
8897	return 0;
8898}
8899
8900static int ipw_wx_get_wap(struct net_device *dev,
8901			  struct iw_request_info *info,
8902			  union iwreq_data *wrqu, char *extra)
8903{
8904	struct ipw_priv *priv = ieee80211_priv(dev);
8905	/* If we are associated, trying to associate, or have a statically
8906	 * configured BSSID then return that; otherwise return ANY */
8907	mutex_lock(&priv->mutex);
8908	if (priv->config & CFG_STATIC_BSSID ||
8909	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8910		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8911		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8912	} else
8913		memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
8914
8915	IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
8916		     MAC_ARG(wrqu->ap_addr.sa_data));
8917	mutex_unlock(&priv->mutex);
8918	return 0;
8919}
8920
8921static int ipw_wx_set_essid(struct net_device *dev,
8922			    struct iw_request_info *info,
8923			    union iwreq_data *wrqu, char *extra)
8924{
8925	struct ipw_priv *priv = ieee80211_priv(dev);
8926        int length;
8927
8928        mutex_lock(&priv->mutex);
8929
8930        if (!wrqu->essid.flags)
8931        {
8932                IPW_DEBUG_WX("Setting ESSID to ANY\n");
8933                ipw_disassociate(priv);
8934                priv->config &= ~CFG_STATIC_ESSID;
8935                ipw_associate(priv);
8936                mutex_unlock(&priv->mutex);
8937                return 0;
8938        }
8939
8940	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8941
8942	priv->config |= CFG_STATIC_ESSID;
8943
8944	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8945	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
8946		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8947		mutex_unlock(&priv->mutex);
8948		return 0;
8949	}
8950
8951	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
8952		     length);
8953
8954	priv->essid_len = length;
8955	memcpy(priv->essid, extra, priv->essid_len);
8956
8957	/* Network configuration changed -- force [re]association */
8958	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
8959	if (!ipw_disassociate(priv))
8960		ipw_associate(priv);
8961
8962	mutex_unlock(&priv->mutex);
8963	return 0;
8964}
8965
8966static int ipw_wx_get_essid(struct net_device *dev,
8967			    struct iw_request_info *info,
8968			    union iwreq_data *wrqu, char *extra)
8969{
8970	struct ipw_priv *priv = ieee80211_priv(dev);
8971
8972	/* If we are associated, trying to associate, or have a statically
8973	 * configured ESSID then return that; otherwise return ANY */
8974	mutex_lock(&priv->mutex);
8975	if (priv->config & CFG_STATIC_ESSID ||
8976	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8977		IPW_DEBUG_WX("Getting essid: '%s'\n",
8978			     escape_essid(priv->essid, priv->essid_len));
8979		memcpy(extra, priv->essid, priv->essid_len);
8980		wrqu->essid.length = priv->essid_len;
8981		wrqu->essid.flags = 1;	/* active */
8982	} else {
8983		IPW_DEBUG_WX("Getting essid: ANY\n");
8984		wrqu->essid.length = 0;
8985		wrqu->essid.flags = 0;	/* active */
8986	}
8987	mutex_unlock(&priv->mutex);
8988	return 0;
8989}
8990
8991static int ipw_wx_set_nick(struct net_device *dev,
8992			   struct iw_request_info *info,
8993			   union iwreq_data *wrqu, char *extra)
8994{
8995	struct ipw_priv *priv = ieee80211_priv(dev);
8996
8997	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
8998	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
8999		return -E2BIG;
9000	mutex_lock(&priv->mutex);
9001	wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9002	memset(priv->nick, 0, sizeof(priv->nick));
9003	memcpy(priv->nick, extra, wrqu->data.length);
9004	IPW_DEBUG_TRACE("<<\n");
9005	mutex_unlock(&priv->mutex);
9006	return 0;
9007
9008}
9009
9010static int ipw_wx_get_nick(struct net_device *dev,
9011			   struct iw_request_info *info,
9012			   union iwreq_data *wrqu, char *extra)
9013{
9014	struct ipw_priv *priv = ieee80211_priv(dev);
9015	IPW_DEBUG_WX("Getting nick\n");
9016	mutex_lock(&priv->mutex);
9017	wrqu->data.length = strlen(priv->nick);
9018	memcpy(extra, priv->nick, wrqu->data.length);
9019	wrqu->data.flags = 1;	/* active */
9020	mutex_unlock(&priv->mutex);
9021	return 0;
9022}
9023
9024static int ipw_wx_set_sens(struct net_device *dev,
9025			    struct iw_request_info *info,
9026			    union iwreq_data *wrqu, char *extra)
9027{
9028	struct ipw_priv *priv = ieee80211_priv(dev);
9029	int err = 0;
9030
9031	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9032	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9033	mutex_lock(&priv->mutex);
9034
9035	if (wrqu->sens.fixed == 0) {
9037		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9038		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9039		goto out;
9040	}
9041	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9042	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9043		err = -EINVAL;
9044		goto out;
9045	}
9046
9047	priv->roaming_threshold = wrqu->sens.value;
9048	priv->disassociate_threshold = 3*wrqu->sens.value;
9049      out:
9050	mutex_unlock(&priv->mutex);
9051	return err;
9052}
9053
9054static int ipw_wx_get_sens(struct net_device *dev,
9055			    struct iw_request_info *info,
9056			    union iwreq_data *wrqu, char *extra)
9057{
9058	struct ipw_priv *priv = ieee80211_priv(dev);
9059	mutex_lock(&priv->mutex);
9060	wrqu->sens.fixed = 1;
9061	wrqu->sens.value = priv->roaming_threshold;
9062	mutex_unlock(&priv->mutex);
9063
9064	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9065		     wrqu->sens.fixed ? "fixed" : "auto", wrqu->sens.value);
9066
9067	return 0;
9068}
9069
9070static int ipw_wx_set_rate(struct net_device *dev,
9071			   struct iw_request_info *info,
9072			   union iwreq_data *wrqu, char *extra)
9073{
9074	/* TODO: We should use semaphores or locks for access to priv */
9075	struct ipw_priv *priv = ieee80211_priv(dev);
9076	u32 target_rate = wrqu->bitrate.value;
9077	u32 fixed, mask;
9078
9079	/* value = -1, fixed = 0 means auto only, so we should use all rates offered by the AP;
9080	 * value =  X, fixed = 1 means only rate X;
9081	 * value =  X, fixed = 0 means all rates lower than or equal to X */
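	/* Illustrative examples (not exhaustive): a request such as
	 * "iwconfig <iface> rate 5.5M" arrives here as value = 5500000,
	 * fixed = 1 and selects only the 5.5 Mbps CCK rate, while appending
	 * "auto" clears 'fixed' so every rate up to and including 5.5 Mbps
	 * stays enabled. */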
9082
9083	if (target_rate == -1) {
9084		fixed = 0;
9085		mask = IEEE80211_DEFAULT_RATES_MASK;
9086		/* Now we should reassociate */
9087		goto apply;
9088	}
9089
9090	mask = 0;
9091	fixed = wrqu->bitrate.fixed;
9092
9093	if (target_rate == 1000000 || !fixed)
9094		mask |= IEEE80211_CCK_RATE_1MB_MASK;
9095	if (target_rate == 1000000)
9096		goto apply;
9097
9098	if (target_rate == 2000000 || !fixed)
9099		mask |= IEEE80211_CCK_RATE_2MB_MASK;
9100	if (target_rate == 2000000)
9101		goto apply;
9102
9103	if (target_rate == 5500000 || !fixed)
9104		mask |= IEEE80211_CCK_RATE_5MB_MASK;
9105	if (target_rate == 5500000)
9106		goto apply;
9107
9108	if (target_rate == 6000000 || !fixed)
9109		mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9110	if (target_rate == 6000000)
9111		goto apply;
9112
9113	if (target_rate == 9000000 || !fixed)
9114		mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9115	if (target_rate == 9000000)
9116		goto apply;
9117
9118	if (target_rate == 11000000 || !fixed)
9119		mask |= IEEE80211_CCK_RATE_11MB_MASK;
9120	if (target_rate == 11000000)
9121		goto apply;
9122
9123	if (target_rate == 12000000 || !fixed)
9124		mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9125	if (target_rate == 12000000)
9126		goto apply;
9127
9128	if (target_rate == 18000000 || !fixed)
9129		mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9130	if (target_rate == 18000000)
9131		goto apply;
9132
9133	if (target_rate == 24000000 || !fixed)
9134		mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9135	if (target_rate == 24000000)
9136		goto apply;
9137
9138	if (target_rate == 36000000 || !fixed)
9139		mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9140	if (target_rate == 36000000)
9141		goto apply;
9142
9143	if (target_rate == 48000000 || !fixed)
9144		mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9145	if (target_rate == 48000000)
9146		goto apply;
9147
9148	if (target_rate == 54000000 || !fixed)
9149		mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9150	if (target_rate == 54000000)
9151		goto apply;
9152
9153	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9154	return -EINVAL;
9155
9156      apply:
9157	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9158		     mask, fixed ? "fixed" : "sub-rates");
9159	mutex_lock(&priv->mutex);
9160	if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9161		priv->config &= ~CFG_FIXED_RATE;
9162		ipw_set_fixed_rate(priv, priv->ieee->mode);
9163	} else
9164		priv->config |= CFG_FIXED_RATE;
9165
9166	if (priv->rates_mask == mask) {
9167		IPW_DEBUG_WX("Mask set to current mask.\n");
9168		mutex_unlock(&priv->mutex);
9169		return 0;
9170	}
9171
9172	priv->rates_mask = mask;
9173
9174	/* Network configuration changed -- force [re]association */
9175	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9176	if (!ipw_disassociate(priv))
9177		ipw_associate(priv);
9178
9179	mutex_unlock(&priv->mutex);
9180	return 0;
9181}
9182
9183static int ipw_wx_get_rate(struct net_device *dev,
9184			   struct iw_request_info *info,
9185			   union iwreq_data *wrqu, char *extra)
9186{
9187	struct ipw_priv *priv = ieee80211_priv(dev);
9188	mutex_lock(&priv->mutex);
9189	wrqu->bitrate.value = priv->last_rate;
9190	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9191	mutex_unlock(&priv->mutex);
9192	IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9193	return 0;
9194}
9195
9196static int ipw_wx_set_rts(struct net_device *dev,
9197			  struct iw_request_info *info,
9198			  union iwreq_data *wrqu, char *extra)
9199{
9200	struct ipw_priv *priv = ieee80211_priv(dev);
9201	mutex_lock(&priv->mutex);
9202	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9203		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9204	else {
9205		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9206		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9207			mutex_unlock(&priv->mutex);
9208			return -EINVAL;
9209		}
9210		priv->rts_threshold = wrqu->rts.value;
9211	}
9212
9213	ipw_send_rts_threshold(priv, priv->rts_threshold);
9214	mutex_unlock(&priv->mutex);
9215	IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9216	return 0;
9217}
9218
9219static int ipw_wx_get_rts(struct net_device *dev,
9220			  struct iw_request_info *info,
9221			  union iwreq_data *wrqu, char *extra)
9222{
9223	struct ipw_priv *priv = ieee80211_priv(dev);
9224	mutex_lock(&priv->mutex);
9225	wrqu->rts.value = priv->rts_threshold;
9226	wrqu->rts.fixed = 0;	/* no auto select */
9227	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9228	mutex_unlock(&priv->mutex);
9229	IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9230	return 0;
9231}
9232
9233static int ipw_wx_set_txpow(struct net_device *dev,
9234			    struct iw_request_info *info,
9235			    union iwreq_data *wrqu, char *extra)
9236{
9237	struct ipw_priv *priv = ieee80211_priv(dev);
9238	int err = 0;
9239
9240	mutex_lock(&priv->mutex);
9241	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9242		err = -EINPROGRESS;
9243		goto out;
9244	}
9245
9246	if (!wrqu->power.fixed)
9247		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9248
9249	if (wrqu->power.flags != IW_TXPOW_DBM) {
9250		err = -EINVAL;
9251		goto out;
9252	}
9253
9254	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9255	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9256		err = -EINVAL;
9257		goto out;
9258	}
9259
9260	priv->tx_power = wrqu->power.value;
9261	err = ipw_set_tx_power(priv);
9262      out:
9263	mutex_unlock(&priv->mutex);
9264	return err;
9265}
9266
9267static int ipw_wx_get_txpow(struct net_device *dev,
9268			    struct iw_request_info *info,
9269			    union iwreq_data *wrqu, char *extra)
9270{
9271	struct ipw_priv *priv = ieee80211_priv(dev);
9272	mutex_lock(&priv->mutex);
9273	wrqu->power.value = priv->tx_power;
9274	wrqu->power.fixed = 1;
9275	wrqu->power.flags = IW_TXPOW_DBM;
9276	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9277	mutex_unlock(&priv->mutex);
9278
9279	IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9280		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9281
9282	return 0;
9283}
9284
9285static int ipw_wx_set_frag(struct net_device *dev,
9286			   struct iw_request_info *info,
9287			   union iwreq_data *wrqu, char *extra)
9288{
9289	struct ipw_priv *priv = ieee80211_priv(dev);
9290	mutex_lock(&priv->mutex);
9291	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9292		priv->ieee->fts = DEFAULT_FTS;
9293	else {
9294		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9295		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9296			mutex_unlock(&priv->mutex);
9297			return -EINVAL;
9298		}
9299
9300		priv->ieee->fts = wrqu->frag.value & ~0x1;
9301	}
9302
9303	ipw_send_frag_threshold(priv, wrqu->frag.value);
9304	mutex_unlock(&priv->mutex);
9305	IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9306	return 0;
9307}
9308
9309static int ipw_wx_get_frag(struct net_device *dev,
9310			   struct iw_request_info *info,
9311			   union iwreq_data *wrqu, char *extra)
9312{
9313	struct ipw_priv *priv = ieee80211_priv(dev);
9314	mutex_lock(&priv->mutex);
9315	wrqu->frag.value = priv->ieee->fts;
9316	wrqu->frag.fixed = 0;	/* no auto select */
9317	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9318	mutex_unlock(&priv->mutex);
9319	IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9320
9321	return 0;
9322}
9323
9324static int ipw_wx_set_retry(struct net_device *dev,
9325			    struct iw_request_info *info,
9326			    union iwreq_data *wrqu, char *extra)
9327{
9328	struct ipw_priv *priv = ieee80211_priv(dev);
9329
9330	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9331		return -EINVAL;
9332
9333	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9334		return 0;
9335
9336	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9337		return -EINVAL;
9338
9339	mutex_lock(&priv->mutex);
9340	if (wrqu->retry.flags & IW_RETRY_SHORT)
9341		priv->short_retry_limit = (u8) wrqu->retry.value;
9342	else if (wrqu->retry.flags & IW_RETRY_LONG)
9343		priv->long_retry_limit = (u8) wrqu->retry.value;
9344	else {
9345		priv->short_retry_limit = (u8) wrqu->retry.value;
9346		priv->long_retry_limit = (u8) wrqu->retry.value;
9347	}
9348
9349	ipw_send_retry_limit(priv, priv->short_retry_limit,
9350			     priv->long_retry_limit);
9351	mutex_unlock(&priv->mutex);
9352	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9353		     priv->short_retry_limit, priv->long_retry_limit);
9354	return 0;
9355}
9356
9357static int ipw_wx_get_retry(struct net_device *dev,
9358			    struct iw_request_info *info,
9359			    union iwreq_data *wrqu, char *extra)
9360{
9361	struct ipw_priv *priv = ieee80211_priv(dev);
9362
9363	mutex_lock(&priv->mutex);
9364	wrqu->retry.disabled = 0;
9365
9366	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9367		mutex_unlock(&priv->mutex);
9368		return -EINVAL;
9369	}
9370
9371	if (wrqu->retry.flags & IW_RETRY_LONG) {
9372		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9373		wrqu->retry.value = priv->long_retry_limit;
9374	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9375		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9376		wrqu->retry.value = priv->short_retry_limit;
9377	} else {
9378		wrqu->retry.flags = IW_RETRY_LIMIT;
9379		wrqu->retry.value = priv->short_retry_limit;
9380	}
9381	mutex_unlock(&priv->mutex);
9382
9383	IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9384
9385	return 0;
9386}
9387
9388static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
9389				   int essid_len)
9390{
9391	struct ipw_scan_request_ext scan;
9392	int err = 0, scan_type;
9393
9394	if (!(priv->status & STATUS_INIT) ||
9395	    (priv->status & STATUS_EXIT_PENDING))
9396		return 0;
9397
9398	mutex_lock(&priv->mutex);
9399
9400	if (priv->status & STATUS_RF_KILL_MASK) {
9401		IPW_DEBUG_HC("Aborting scan due to RF kill activation\n");
9402		priv->status |= STATUS_SCAN_PENDING;
9403		goto done;
9404	}
9405
9406	IPW_DEBUG_HC("starting request direct scan!\n");
9407
9408	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
9409		/* We should not sleep here; otherwise we will block most
9410		 * of the system (for instance, we hold rtnl_lock when we
9411		 * get here).
9412		 */
9413		err = -EAGAIN;
9414		goto done;
9415	}
9416	memset(&scan, 0, sizeof(scan));
9417
9418	if (priv->config & CFG_SPEED_SCAN)
9419		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9420		    cpu_to_le16(30);
9421	else
9422		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
9423		    cpu_to_le16(20);
9424
9425	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
9426	    cpu_to_le16(20);
9427	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
9428	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
9429
9430	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
9431
9432	err = ipw_send_ssid(priv, essid, essid_len);
9433	if (err) {
9434		IPW_DEBUG_HC("Attempt to send SSID command failed\n");
9435		goto done;
9436	}
9437	scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
9438
9439	ipw_add_scan_channels(priv, &scan, scan_type);
9440
9441	err = ipw_send_scan_request_ext(priv, &scan);
9442	if (err) {
9443		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
9444		goto done;
9445	}
9446
9447	priv->status |= STATUS_SCANNING;
9448
9449      done:
9450	mutex_unlock(&priv->mutex);
9451	return err;
9452}
9453
9454static int ipw_wx_set_scan(struct net_device *dev,
9455			   struct iw_request_info *info,
9456			   union iwreq_data *wrqu, char *extra)
9457{
9458	struct ipw_priv *priv = ieee80211_priv(dev);
9459	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9460
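	/* Wireless extensions may pass a struct iw_scan_req in 'extra' (for
	 * example a directed scan such as "iwlist <iface> scan essid <ssid>"
	 * from user space -- illustrative usage, not mandated by this driver);
	 * only in that case are the ESSID and scan type fields below valid. */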
9461	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9462		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9463			ipw_request_direct_scan(priv, req->essid,
9464						req->essid_len);
9465			return 0;
9466		}
9467		if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9468			queue_work(priv->workqueue,
9469				   &priv->request_passive_scan);
9470			return 0;
9471		}
9472	}
9473
9474	IPW_DEBUG_WX("Start scan\n");
9475
9476	queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
9477
9478	return 0;
9479}
9480
9481static int ipw_wx_get_scan(struct net_device *dev,
9482			   struct iw_request_info *info,
9483			   union iwreq_data *wrqu, char *extra)
9484{
9485	struct ipw_priv *priv = ieee80211_priv(dev);
9486	return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9487}
9488
9489static int ipw_wx_set_encode(struct net_device *dev,
9490			     struct iw_request_info *info,
9491			     union iwreq_data *wrqu, char *key)
9492{
9493	struct ipw_priv *priv = ieee80211_priv(dev);
9494	int ret;
9495	u32 cap = priv->capability;
9496
9497	mutex_lock(&priv->mutex);
9498	ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9499
9500	/* In IBSS mode, we need to notify the firmware to update
9501	 * the beacon info after we changed the capability. */
9502	if (cap != priv->capability &&
9503	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9504	    priv->status & STATUS_ASSOCIATED)
9505		ipw_disassociate(priv);
9506
9507	mutex_unlock(&priv->mutex);
9508	return ret;
9509}
9510
9511static int ipw_wx_get_encode(struct net_device *dev,
9512			     struct iw_request_info *info,
9513			     union iwreq_data *wrqu, char *key)
9514{
9515	struct ipw_priv *priv = ieee80211_priv(dev);
9516	return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9517}
9518
9519static int ipw_wx_set_power(struct net_device *dev,
9520			    struct iw_request_info *info,
9521			    union iwreq_data *wrqu, char *extra)
9522{
9523	struct ipw_priv *priv = ieee80211_priv(dev);
9524	int err;
9525	mutex_lock(&priv->mutex);
9526	if (wrqu->power.disabled) {
9527		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9528		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9529		if (err) {
9530			IPW_DEBUG_WX("failed setting power mode.\n");
9531			mutex_unlock(&priv->mutex);
9532			return err;
9533		}
9534		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9535		mutex_unlock(&priv->mutex);
9536		return 0;
9537	}
9538
9539	switch (wrqu->power.flags & IW_POWER_MODE) {
9540	case IW_POWER_ON:	/* If not specified */
9541	case IW_POWER_MODE:	/* If set all mask */
9542	case IW_POWER_ALL_R:	/* If explicitly stated all */
9543		break;
9544	default:		/* Otherwise we don't support it */
9545		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9546			     wrqu->power.flags);
9547		mutex_unlock(&priv->mutex);
9548		return -EOPNOTSUPP;
9549	}
9550
9551	/* If the user hasn't specified a power management mode yet, default
9552	 * to BATTERY */
9553	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9554		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9555	else
9556		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9557	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9558	if (err) {
9559		IPW_DEBUG_WX("failed setting power mode.\n");
9560		mutex_unlock(&priv->mutex);
9561		return err;
9562	}
9563
9564	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9565	mutex_unlock(&priv->mutex);
9566	return 0;
9567}
9568
9569static int ipw_wx_get_power(struct net_device *dev,
9570			    struct iw_request_info *info,
9571			    union iwreq_data *wrqu, char *extra)
9572{
9573	struct ipw_priv *priv = ieee80211_priv(dev);
9574	mutex_lock(&priv->mutex);
9575	if (!(priv->power_mode & IPW_POWER_ENABLED))
9576		wrqu->power.disabled = 1;
9577	else
9578		wrqu->power.disabled = 0;
9579
9580	mutex_unlock(&priv->mutex);
9581	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9582
9583	return 0;
9584}
9585
9586static int ipw_wx_set_powermode(struct net_device *dev,
9587				struct iw_request_info *info,
9588				union iwreq_data *wrqu, char *extra)
9589{
9590	struct ipw_priv *priv = ieee80211_priv(dev);
9591	int mode = *(int *)extra;
9592	int err;
9593	mutex_lock(&priv->mutex);
9594	if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
9595		mode = IPW_POWER_AC;
9596		priv->power_mode = mode;
9597	} else {
9598		priv->power_mode = IPW_POWER_ENABLED | mode;
9599	}
9600
9601	if (priv->power_mode != mode) {
9602		err = ipw_send_power_mode(priv, mode);
9603
9604		if (err) {
9605			IPW_DEBUG_WX("failed setting power mode.\n");
9606			mutex_unlock(&priv->mutex);
9607			return err;
9608		}
9609	}
9610	mutex_unlock(&priv->mutex);
9611	return 0;
9612}
9613
9614#define MAX_WX_STRING 80
9615static int ipw_wx_get_powermode(struct net_device *dev,
9616				struct iw_request_info *info,
9617				union iwreq_data *wrqu, char *extra)
9618{
9619	struct ipw_priv *priv = ieee80211_priv(dev);
9620	int level = IPW_POWER_LEVEL(priv->power_mode);
9621	char *p = extra;
9622
9623	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9624
9625	switch (level) {
9626	case IPW_POWER_AC:
9627		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9628		break;
9629	case IPW_POWER_BATTERY:
9630		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9631		break;
9632	default:
9633		p += snprintf(p, MAX_WX_STRING - (p - extra),
9634			      "(Timeout %dms, Period %dms)",
9635			      timeout_duration[level - 1] / 1000,
9636			      period_duration[level - 1] / 1000);
9637	}
9638
9639	if (!(priv->power_mode & IPW_POWER_ENABLED))
9640		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9641
9642	wrqu->data.length = p - extra + 1;
9643
9644	return 0;
9645}
9646
9647static int ipw_wx_set_wireless_mode(struct net_device *dev,
9648				    struct iw_request_info *info,
9649				    union iwreq_data *wrqu, char *extra)
9650{
9651	struct ipw_priv *priv = ieee80211_priv(dev);
9652	int mode = *(int *)extra;
9653	u8 band = 0, modulation = 0;
9654
9655	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9656		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9657		return -EINVAL;
9658	}
9659	mutex_lock(&priv->mutex);
9660	if (priv->adapter == IPW_2915ABG) {
9661		priv->ieee->abg_true = 1;
9662		if (mode & IEEE_A) {
9663			band |= IEEE80211_52GHZ_BAND;
9664			modulation |= IEEE80211_OFDM_MODULATION;
9665		} else
9666			priv->ieee->abg_true = 0;
9667	} else {
9668		if (mode & IEEE_A) {
9669			IPW_WARNING("Attempt to set 2200BG into "
9670				    "802.11a mode\n");
9671			mutex_unlock(&priv->mutex);
9672			return -EINVAL;
9673		}
9674
9675		priv->ieee->abg_true = 0;
9676	}
9677
9678	if (mode & IEEE_B) {
9679		band |= IEEE80211_24GHZ_BAND;
9680		modulation |= IEEE80211_CCK_MODULATION;
9681	} else
9682		priv->ieee->abg_true = 0;
9683
9684	if (mode & IEEE_G) {
9685		band |= IEEE80211_24GHZ_BAND;
9686		modulation |= IEEE80211_OFDM_MODULATION;
9687	} else
9688		priv->ieee->abg_true = 0;
9689
9690	priv->ieee->mode = mode;
9691	priv->ieee->freq_band = band;
9692	priv->ieee->modulation = modulation;
9693	init_supported_rates(priv, &priv->rates);
9694
9695	/* Network configuration changed -- force [re]association */
9696	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9697	if (!ipw_disassociate(priv)) {
9698		ipw_send_supported_rates(priv, &priv->rates);
9699		ipw_associate(priv);
9700	}
9701
9702	/* Update the band LEDs */
9703	ipw_led_band_on(priv);
9704
9705	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9706		     mode & IEEE_A ? 'a' : '.',
9707		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9708	mutex_unlock(&priv->mutex);
9709	return 0;
9710}
9711
9712static int ipw_wx_get_wireless_mode(struct net_device *dev,
9713				    struct iw_request_info *info,
9714				    union iwreq_data *wrqu, char *extra)
9715{
9716	struct ipw_priv *priv = ieee80211_priv(dev);
9717	mutex_lock(&priv->mutex);
9718	switch (priv->ieee->mode) {
9719	case IEEE_A:
9720		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9721		break;
9722	case IEEE_B:
9723		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9724		break;
9725	case IEEE_A | IEEE_B:
9726		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9727		break;
9728	case IEEE_G:
9729		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9730		break;
9731	case IEEE_A | IEEE_G:
9732		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9733		break;
9734	case IEEE_B | IEEE_G:
9735		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9736		break;
9737	case IEEE_A | IEEE_B | IEEE_G:
9738		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9739		break;
9740	default:
9741		strncpy(extra, "unknown", MAX_WX_STRING);
9742		break;
9743	}
9744
9745	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9746
9747	wrqu->data.length = strlen(extra) + 1;
9748	mutex_unlock(&priv->mutex);
9749
9750	return 0;
9751}
9752
9753static int ipw_wx_set_preamble(struct net_device *dev,
9754			       struct iw_request_info *info,
9755			       union iwreq_data *wrqu, char *extra)
9756{
9757	struct ipw_priv *priv = ieee80211_priv(dev);
9758	int mode = *(int *)extra;
9759	mutex_lock(&priv->mutex);
9760	/* Switching from SHORT -> LONG requires a disassociation */
9761	if (mode == 1) {
9762		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9763			priv->config |= CFG_PREAMBLE_LONG;
9764
9765			/* Network configuration changed -- force [re]association */
9766			IPW_DEBUG_ASSOC
9767			    ("[re]association triggered due to preamble change.\n");
9768			if (!ipw_disassociate(priv))
9769				ipw_associate(priv);
9770		}
9771		goto done;
9772	}
9773
9774	if (mode == 0) {
9775		priv->config &= ~CFG_PREAMBLE_LONG;
9776		goto done;
9777	}
9778	mutex_unlock(&priv->mutex);
9779	return -EINVAL;
9780
9781      done:
9782	mutex_unlock(&priv->mutex);
9783	return 0;
9784}
9785
9786static int ipw_wx_get_preamble(struct net_device *dev,
9787			       struct iw_request_info *info,
9788			       union iwreq_data *wrqu, char *extra)
9789{
9790	struct ipw_priv *priv = ieee80211_priv(dev);
9791	mutex_lock(&priv->mutex);
9792	if (priv->config & CFG_PREAMBLE_LONG)
9793		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9794	else
9795		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9796	mutex_unlock(&priv->mutex);
9797	return 0;
9798}
9799
9800#ifdef CONFIG_IPW2200_MONITOR
9801static int ipw_wx_set_monitor(struct net_device *dev,
9802			      struct iw_request_info *info,
9803			      union iwreq_data *wrqu, char *extra)
9804{
9805	struct ipw_priv *priv = ieee80211_priv(dev);
9806	int *parms = (int *)extra;
9807	int enable = (parms[0] > 0);
9808	mutex_lock(&priv->mutex);
9809	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9810	if (enable) {
9811		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9812#ifdef CONFIG_IPW2200_RADIOTAP
9813			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9814#else
9815			priv->net_dev->type = ARPHRD_IEEE80211;
9816#endif
9817			queue_work(priv->workqueue, &priv->adapter_restart);
9818		}
9819
9820		ipw_set_channel(priv, parms[1]);
9821	} else {
9822		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9823			mutex_unlock(&priv->mutex);
9824			return 0;
9825		}
9826		priv->net_dev->type = ARPHRD_ETHER;
9827		queue_work(priv->workqueue, &priv->adapter_restart);
9828	}
9829	mutex_unlock(&priv->mutex);
9830	return 0;
9831}
9832
9833#endif				/* CONFIG_IPW2200_MONITOR */
9834
9835static int ipw_wx_reset(struct net_device *dev,
9836			struct iw_request_info *info,
9837			union iwreq_data *wrqu, char *extra)
9838{
9839	struct ipw_priv *priv = ieee80211_priv(dev);
9840	IPW_DEBUG_WX("RESET\n");
9841	queue_work(priv->workqueue, &priv->adapter_restart);
9842	return 0;
9843}
9844
9845static int ipw_wx_sw_reset(struct net_device *dev,
9846			   struct iw_request_info *info,
9847			   union iwreq_data *wrqu, char *extra)
9848{
9849	struct ipw_priv *priv = ieee80211_priv(dev);
9850	union iwreq_data wrqu_sec = {
9851		.encoding = {
9852			     .flags = IW_ENCODE_DISABLED,
9853			     },
9854	};
9855	int ret;
9856
9857	IPW_DEBUG_WX("SW_RESET\n");
9858
9859	mutex_lock(&priv->mutex);
9860
9861	ret = ipw_sw_reset(priv, 2);
9862	if (!ret) {
9863		free_firmware();
9864		ipw_adapter_restart(priv);
9865	}
9866
9867	/* The SW reset bit might have been toggled on by the 'disable'
9868	 * module parameter, so take appropriate action */
9869	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9870
9871	mutex_unlock(&priv->mutex);
9872	ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9873	mutex_lock(&priv->mutex);
9874
9875	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9876		/* Configuration likely changed -- force [re]association */
9877		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9878				"reset.\n");
9879		if (!ipw_disassociate(priv))
9880			ipw_associate(priv);
9881	}
9882
9883	mutex_unlock(&priv->mutex);
9884
9885	return 0;
9886}
9887
9888/* Rebase the WE IOCTLs to zero for the handler array */
9889#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
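/* For example, SIOCGIWNAME (0x8B01) lands at index 0x8B01 - SIOCSIWCOMMIT
 * (0x8B00) = 1 in the array below, per the Wireless Extensions ioctl
 * numbering in include/linux/wireless.h. */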
9890static iw_handler ipw_wx_handlers[] = {
9891	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9892	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9893	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9894	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9895	IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9896	IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9897	IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9898	IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9899	IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9900	IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9901	IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9902	IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9903	IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9904	IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9905	IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9906	IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9907	IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9908	IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9909	IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9910	IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9911	IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9912	IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9913	IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9914	IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9915	IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9916	IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9917	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9918	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9919	IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9920	IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9921	IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9922	IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9923	IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9924	IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9925	IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9926	IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9927	IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9928	IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9929	IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9930	IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9931	IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9932};
9933
9934enum {
9935	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9936	IPW_PRIV_GET_POWER,
9937	IPW_PRIV_SET_MODE,
9938	IPW_PRIV_GET_MODE,
9939	IPW_PRIV_SET_PREAMBLE,
9940	IPW_PRIV_GET_PREAMBLE,
9941	IPW_PRIV_RESET,
9942	IPW_PRIV_SW_RESET,
9943#ifdef CONFIG_IPW2200_MONITOR
9944	IPW_PRIV_SET_MONITOR,
9945#endif
9946};
9947
9948static struct iw_priv_args ipw_priv_args[] = {
9949	{
9950	 .cmd = IPW_PRIV_SET_POWER,
9951	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9952	 .name = "set_power"},
9953	{
9954	 .cmd = IPW_PRIV_GET_POWER,
9955	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9956	 .name = "get_power"},
9957	{
9958	 .cmd = IPW_PRIV_SET_MODE,
9959	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9960	 .name = "set_mode"},
9961	{
9962	 .cmd = IPW_PRIV_GET_MODE,
9963	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9964	 .name = "get_mode"},
9965	{
9966	 .cmd = IPW_PRIV_SET_PREAMBLE,
9967	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9968	 .name = "set_preamble"},
9969	{
9970	 .cmd = IPW_PRIV_GET_PREAMBLE,
9971	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9972	 .name = "get_preamble"},
9973	{
9974	 IPW_PRIV_RESET,
9975	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9976	{
9977	 IPW_PRIV_SW_RESET,
9978	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9979#ifdef CONFIG_IPW2200_MONITOR
9980	{
9981	 IPW_PRIV_SET_MONITOR,
9982	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9983#endif				/* CONFIG_IPW2200_MONITOR */
9984};
9985		wrqu->essid.flags = 0;	/* any */
9986static iw_handler ipw_priv_handler[] = {
9987	ipw_wx_set_powermode,
9988	ipw_wx_get_powermode,
9989	ipw_wx_set_wireless_mode,
9990	ipw_wx_get_wireless_mode,
9991	ipw_wx_set_preamble,
9992	ipw_wx_get_preamble,
9993	ipw_wx_reset,
9994	ipw_wx_sw_reset,
9995#ifdef CONFIG_IPW2200_MONITOR
9996	ipw_wx_set_monitor,
9997#endif
9998};
9999
10000static struct iw_handler_def ipw_wx_handler_def = {
10001	.standard = ipw_wx_handlers,
10002	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
10003	.num_private = ARRAY_SIZE(ipw_priv_handler),
10004	.num_private_args = ARRAY_SIZE(ipw_priv_args),
10005	.private = ipw_priv_handler,
10006	.private_args = ipw_priv_args,
10007	.get_wireless_stats = ipw_get_wireless_stats,
10008};
10009
10010/*
10011 * Get wireless statistics.
10012 * Called by /proc/net/wireless
10013 * Also called by SIOCGIWSTATS
10014 */
10015static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10016{
10017	struct ipw_priv *priv = ieee80211_priv(dev);
10018	struct iw_statistics *wstats;
10019
10020	wstats = &priv->wstats;
10021
10022	/* if hw is disabled, then ipw_get_ordinal() can't be called.
10023	 * netdev->get_wireless_stats seems to be called before fw is
10024	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
10025	 * and associated; if not associated, the values are all meaningless
10026	 * anyway, so zero them all and mark them INVALID */
10027	if (!(priv->status & STATUS_ASSOCIATED)) {
10028		wstats->miss.beacon = 0;
10029		wstats->discard.retries = 0;
10030		wstats->qual.qual = 0;
10031		wstats->qual.level = 0;
10032		wstats->qual.noise = 0;
10033		wstats->qual.updated = 7;	/* qual, level and noise marked updated */
10034		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10035		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10036		return wstats;
10037	}
10038
10039	wstats->qual.qual = priv->quality;
10040	wstats->qual.level = priv->exp_avg_rssi;
10041	wstats->qual.noise = priv->exp_avg_noise;
10042	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10043	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10044
10045	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10046	wstats->discard.retries = priv->last_tx_failures;
10047	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10048
10049/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10050	goto fail_get_ordinal;
10051	wstats->discard.retries += tx_retry; */
10052
10053	return wstats;
10054}
10055
10056/* net device stuff */
10057
10058static void init_sys_config(struct ipw_sys_config *sys_config)
10059{
10060	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10061	sys_config->bt_coexistence = 0;
10062	sys_config->answer_broadcast_ssid_probe = 0;
10063	sys_config->accept_all_data_frames = 0;
10064	sys_config->accept_non_directed_frames = 1;
10065	sys_config->exclude_unicast_unencrypted = 0;
10066	sys_config->disable_unicast_decryption = 1;
10067	sys_config->exclude_multicast_unencrypted = 0;
10068	sys_config->disable_multicast_decryption = 1;
10069	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10070		antenna = CFG_SYS_ANTENNA_BOTH;
10071	sys_config->antenna_diversity = antenna;
10072	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10073	sys_config->dot11g_auto_detection = 0;
10074	sys_config->enable_cts_to_self = 0;
10075	sys_config->bt_coexist_collision_thr = 0;
10076	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10077	sys_config->silence_threshold = 0x1e;
10078}
10079
10080static int ipw_net_open(struct net_device *dev)
10081{
10082	struct ipw_priv *priv = ieee80211_priv(dev);
10083	IPW_DEBUG_INFO("dev->open\n");
10084	/* we should be verifying the device is ready to be opened */
10085	mutex_lock(&priv->mutex);
10086	if (!(priv->status & STATUS_RF_KILL_MASK) &&
10087	    (priv->status & STATUS_ASSOCIATED))
10088		netif_start_queue(dev);
10089	mutex_unlock(&priv->mutex);
10090	return 0;
10091}
10092
10093static int ipw_net_stop(struct net_device *dev)
10094{
10095	IPW_DEBUG_INFO("dev->close\n");
10096	netif_stop_queue(dev);
10097	return 0;
10098}
10099
10100/*
10101 * TODO:
10102 *
10103 * Modify to send one TFD per fragment instead of using chunking; otherwise
10104 * we need to heavily modify ieee80211_skb_to_txb().
10105 */
10106
10107static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10108			     int pri)
10109{
10110	struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10111	    txb->fragments[0]->data;
10112	int i = 0;
10113	struct tfd_frame *tfd;
10114#ifdef CONFIG_IPW2200_QOS
10115	int tx_id = ipw_get_tx_queue_number(priv, pri);
10116	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10117#else
10118	struct clx2_tx_queue *txq = &priv->txq[0];
10119#endif
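	/* With CONFIG_IPW2200_QOS the TX queue is picked from the 802.11
	 * priority of this frame; otherwise everything goes through queue 0. */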
10120	struct clx2_queue *q = &txq->q;
10121	u8 id, hdr_len, unicast;
10122	u16 remaining_bytes;
10123	int fc;
10124
10125	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10126	switch (priv->ieee->iw_mode) {
10127	case IW_MODE_ADHOC:
10128		unicast = !is_multicast_ether_addr(hdr->addr1);
10129		id = ipw_find_station(priv, hdr->addr1);
10130		if (id == IPW_INVALID_STATION) {
10131			id = ipw_add_station(priv, hdr->addr1);
10132			if (id == IPW_INVALID_STATION) {
10133				IPW_WARNING("Attempt to send data to "
10134					    "invalid cell: " MAC_FMT "\n",
10135					    MAC_ARG(hdr->addr1));
10136				goto drop;
10137			}
10138		}
10139		break;
10140
10141	case IW_MODE_INFRA:
10142	default:
10143		unicast = !is_multicast_ether_addr(hdr->addr3);
10144		id = 0;
10145		break;
10146	}
10147
10148	tfd = &txq->bd[q->first_empty];
10149	txq->txb[q->first_empty] = txb;
10150	memset(tfd, 0, sizeof(*tfd));
10151	tfd->u.data.station_number = id;
10152
10153	tfd->control_flags.message_type = TX_FRAME_TYPE;
10154	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10155
10156	tfd->u.data.cmd_id = DINO_CMD_TX;
10157	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10158	remaining_bytes = txb->payload_size;
10159
10160	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10161		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10162	else
10163		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10164
10165	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10166		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10167
10168	fc = le16_to_cpu(hdr->frame_ctl);
10169	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10170
10171	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10172
10173	if (likely(unicast))
10174		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10175
10176	if (txb->encrypted && !priv->ieee->host_encrypt) {
10177		switch (priv->ieee->sec.level) {
10178		case SEC_LEVEL_3:
10179			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10180			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10181			if (!unicast)
10182				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10183
10184			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10185			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10186			tfd->u.data.key_index = 0;
10187			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10188			break;
10189		case SEC_LEVEL_2:
10190			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10191			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10192			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10193			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10194			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10195			break;
10196		case SEC_LEVEL_1:
10197			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10198			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10199			tfd->u.data.key_index = priv->ieee->tx_keyidx;
10200			if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10201			    40)
10202				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10203			else
10204				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10205			break;
10206		case SEC_LEVEL_0:
10207			break;
10208		default:
10209			printk(KERN_ERR "Unknown security level %d\n",
10210			       priv->ieee->sec.level);
10211			break;
10212		}
10213	} else
10214		/* No hardware encryption */
10215		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10216
10217#ifdef CONFIG_IPW2200_QOS
10218	if (fc & IEEE80211_STYPE_QOS_DATA)
10219		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10220#endif				/* CONFIG_IPW2200_QOS */
10221
10222	/* payload */
10223	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10224						 txb->nr_frags));
10225	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10226		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10227	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10228		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10229			       i, le32_to_cpu(tfd->u.data.num_chunks),
10230			       txb->fragments[i]->len - hdr_len);
10231		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10232			     i, tfd->u.data.num_chunks,
10233			     txb->fragments[i]->len - hdr_len);
10234		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10235			   txb->fragments[i]->len - hdr_len);
10236
10237		tfd->u.data.chunk_ptr[i] =
10238		    cpu_to_le32(pci_map_single
10239				(priv->pci_dev,
10240				 txb->fragments[i]->data + hdr_len,
10241				 txb->fragments[i]->len - hdr_len,
10242				 PCI_DMA_TODEVICE));
10243		tfd->u.data.chunk_len[i] =
10244		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10245	}
10246
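	/* If there were more fragments than chunk slots (NUM_TFD_CHUNKS - 2),
	 * coalesce the leftover fragments into a single freshly allocated skb
	 * so the whole frame still fits in this one TFD. */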
10247	if (i != txb->nr_frags) {
10248		struct sk_buff *skb;
10249		u16 remaining_bytes = 0;
10250		int j;
10251
10252		for (j = i; j < txb->nr_frags; j++)
10253			remaining_bytes += txb->fragments[j]->len - hdr_len;
10254
10255		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10256		       remaining_bytes);
10257		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10258		if (skb != NULL) {
10259			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10260			for (j = i; j < txb->nr_frags; j++) {
10261				int size = txb->fragments[j]->len - hdr_len;
10262
10263				printk(KERN_INFO "Adding frag %d %d...\n",
10264				       j, size);
10265				memcpy(skb_put(skb, size),
10266				       txb->fragments[j]->data + hdr_len, size);
10267			}
10268			dev_kfree_skb_any(txb->fragments[i]);
10269			txb->fragments[i] = skb;
10270			tfd->u.data.chunk_ptr[i] =
10271			    cpu_to_le32(pci_map_single
10272					(priv->pci_dev, skb->data,
10273					 tfd->u.data.chunk_len[i],
10274					 PCI_DMA_TODEVICE));
10275
10276			tfd->u.data.num_chunks =
10277			    cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
10278					1);
10279		}
10280	}
10281
10282	/* kick DMA */
10283	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10284	ipw_write32(priv, q->reg_w, q->first_empty);
10285
10286	if (ipw_queue_space(q) < q->high_mark)
10287		netif_stop_queue(priv->net_dev);
10288
10289	return NETDEV_TX_OK;
10290
10291      drop:
10292	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10293	ieee80211_txb_free(txb);
10294	return NETDEV_TX_OK;
10295}
10296
10297static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10298{
10299	struct ipw_priv *priv = ieee80211_priv(dev);
10300#ifdef CONFIG_IPW2200_QOS
10301	int tx_id = ipw_get_tx_queue_number(priv, pri);
10302	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10303#else
10304	struct clx2_tx_queue *txq = &priv->txq[0];
10305#endif				/* CONFIG_IPW2200_QOS */
10306
10307	if (ipw_queue_space(&txq->q) < txq->q.high_mark)
10308		return 1;
10309
10310	return 0;
10311}
10312
10313#ifdef CONFIG_IPW2200_PROMISCUOUS
10314static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10315				      struct ieee80211_txb *txb)
10316{
10317	struct ieee80211_rx_stats dummystats;
10318	struct ieee80211_hdr *hdr;
10319	u8 n;
10320	u16 filter = priv->prom_priv->filter;
10321	int hdr_only = 0;
10322
10323	if (filter & IPW_PROM_NO_TX)
10324		return;
10325
10326	memset(&dummystats, 0, sizeof(dummystats));
10327
10328	/* Filtering of fragment chains is done against the first fragment */
10329	hdr = (void *)txb->fragments[0]->data;
10330	if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
10331		if (filter & IPW_PROM_NO_MGMT)
10332			return;
10333		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10334			hdr_only = 1;
10335	} else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
10336		if (filter & IPW_PROM_NO_CTL)
10337			return;
10338		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10339			hdr_only = 1;
10340	} else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
10341		if (filter & IPW_PROM_NO_DATA)
10342			return;
10343		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10344			hdr_only = 1;
10345	}
10346
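	/* Prepend a minimal radiotap header (version/pad/len/present bitmap
	 * plus a channel field: frequency and band/modulation flags) to a copy
	 * of each fragment and hand it to the monitor interface's rx path. */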
10347	for (n = 0; n < txb->nr_frags; ++n) {
10348		struct sk_buff *src = txb->fragments[n];
10349		struct sk_buff *dst;
10350		struct ieee80211_radiotap_header *rt_hdr;
10351		int len;
10352
10353		if (hdr_only) {
10354			hdr = (void *)src->data;
10355			len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10356		} else
10357			len = src->len;
10358
10359		dst = alloc_skb(
10360			len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10361		if (!dst) continue;
10362
10363		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10364
10365		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10366		rt_hdr->it_pad = 0;
10367		rt_hdr->it_present = 0; /* after all, it's just an idea */
10368		rt_hdr->it_present |=  (1 << IEEE80211_RADIOTAP_CHANNEL);
10369
10370		*(u16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10371			ieee80211chan2mhz(priv->channel));
10372		if (priv->channel > 14) 	/* 802.11a */
10373			*(u16*)skb_put(dst, sizeof(u16)) =
10374				cpu_to_le16(IEEE80211_CHAN_OFDM |
10375					     IEEE80211_CHAN_5GHZ);
10376		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10377			*(u16*)skb_put(dst, sizeof(u16)) =
10378				cpu_to_le16(IEEE80211_CHAN_CCK |
10379					     IEEE80211_CHAN_2GHZ);
10380		else 		/* 802.11g */
10381			*(u16*)skb_put(dst, sizeof(u16)) =
10382				cpu_to_le16(IEEE80211_CHAN_OFDM |
10383				 IEEE80211_CHAN_2GHZ);
10384
10385		rt_hdr->it_len = dst->len;
10386
10387		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10388
10389		if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10390			dev_kfree_skb_any(dst);
10391	}
10392}
10393#endif
10394
10395static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10396				   struct net_device *dev, int pri)
10397{
10398	struct ipw_priv *priv = ieee80211_priv(dev);
10399	unsigned long flags;
10400	int ret;
10401
10402	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10403	spin_lock_irqsave(&priv->lock, flags);
10404
10405	if (!(priv->status & STATUS_ASSOCIATED)) {
10406		IPW_DEBUG_INFO("Tx attempt while not associated.\n");
10407		priv->ieee->stats.tx_carrier_errors++;
10408		netif_stop_queue(dev);
10409		goto fail_unlock;
10410	}
10411
10412#ifdef CONFIG_IPW2200_PROMISCUOUS
10413	if (rtap_iface && netif_running(priv->prom_net_dev))
10414		ipw_handle_promiscuous_tx(priv, txb);
10415#endif
10416
10417	ret = ipw_tx_skb(priv, txb, pri);
10418	if (ret == NETDEV_TX_OK)
10419		__ipw_led_activity_on(priv);
10420	spin_unlock_irqrestore(&priv->lock, flags);
10421
10422	return ret;
10423
10424      fail_unlock:
10425	spin_unlock_irqrestore(&priv->lock, flags);
10426	return 1;
10427}
10428
10429static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10430{
10431	struct ipw_priv *priv = ieee80211_priv(dev);
10432
10433	priv->ieee->stats.tx_packets = priv->tx_packets;
10434	priv->ieee->stats.rx_packets = priv->rx_packets;
10435	return &priv->ieee->stats;
10436}
10437
10438static void ipw_net_set_multicast_list(struct net_device *dev)
10439{
10440
10441}
10442
10443static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10444{
10445	struct ipw_priv *priv = ieee80211_priv(dev);
10446	struct sockaddr *addr = p;
10447	if (!is_valid_ether_addr(addr->sa_data))
10448		return -EADDRNOTAVAIL;
10449	mutex_lock(&priv->mutex);
10450	priv->config |= CFG_CUSTOM_MAC;
10451	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10452	printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
10453	       priv->net_dev->name, MAC_ARG(priv->mac_addr));
10454	queue_work(priv->workqueue, &priv->adapter_restart);
10455	mutex_unlock(&priv->mutex);
10456	return 0;
10457}
10458
10459static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10460				    struct ethtool_drvinfo *info)
10461{
10462	struct ipw_priv *p = ieee80211_priv(dev);
10463	char vers[64];
10464	char date[32];
10465	u32 len;
10466
10467	strcpy(info->driver, DRV_NAME);
10468	strcpy(info->version, DRV_VERSION);
10469
10470	len = sizeof(vers);
10471	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10472	len = sizeof(date);
10473	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10474
10475	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10476		 vers, date);
10477	strcpy(info->bus_info, pci_name(p->pci_dev));
10478	info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10479}
10480
10481static u32 ipw_ethtool_get_link(struct net_device *dev)
10482{
10483	struct ipw_priv *priv = ieee80211_priv(dev);
10484	return (priv->status & STATUS_ASSOCIATED) != 0;
10485}
10486
10487static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10488{
10489	return IPW_EEPROM_IMAGE_SIZE;
10490}
10491
10492static int ipw_ethtool_get_eeprom(struct net_device *dev,
10493				  struct ethtool_eeprom *eeprom, u8 * bytes)
10494{
10495	struct ipw_priv *p = ieee80211_priv(dev);
10496
10497	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10498		return -EINVAL;
10499	mutex_lock(&p->mutex);
10500	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10501	mutex_unlock(&p->mutex);
10502	return 0;
10503}
10504
10505static int ipw_ethtool_set_eeprom(struct net_device *dev,
10506				  struct ethtool_eeprom *eeprom, u8 * bytes)
10507{
10508	struct ipw_priv *p = ieee80211_priv(dev);
10509	int i;
10510
10511	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10512		return -EINVAL;
10513	mutex_lock(&p->mutex);
10514	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10515	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10516		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10517	mutex_unlock(&p->mutex);
10518	return 0;
10519}
10520
10521static const struct ethtool_ops ipw_ethtool_ops = {
10522	.get_link = ipw_ethtool_get_link,
10523	.get_drvinfo = ipw_ethtool_get_drvinfo,
10524	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10525	.get_eeprom = ipw_ethtool_get_eeprom,
10526	.set_eeprom = ipw_ethtool_set_eeprom,
10527};
10528
10529static irqreturn_t ipw_isr(int irq, void *data)
10530{
10531	struct ipw_priv *priv = data;
10532	u32 inta, inta_mask;
10533
10534	if (!priv)
10535		return IRQ_NONE;
10536
10537	spin_lock(&priv->irq_lock);
10538
10539	if (!(priv->status & STATUS_INT_ENABLED)) {
10540		/* Shared IRQ */
10541		goto none;
10542	}
10543
10544	inta = ipw_read32(priv, IPW_INTA_RW);
10545	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10546
10547	if (inta == 0xFFFFFFFF) {
10548		/* Hardware disappeared */
10549		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10550		goto none;
10551	}
10552
10553	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10554		/* Shared interrupt */
10555		goto none;
10556	}
10557
10558	/* tell the device to stop sending interrupts */
10559	__ipw_disable_interrupts(priv);
10560
10561	/* ack current interrupts */
10562	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10563	ipw_write32(priv, IPW_INTA_RW, inta);
10564
10565	/* Cache INTA value for our tasklet */
10566	priv->isr_inta = inta;
10567
10568	tasklet_schedule(&priv->irq_tasklet);
10569
10570	spin_unlock(&priv->irq_lock);
10571
10572	return IRQ_HANDLED;
10573      none:
10574	spin_unlock(&priv->irq_lock);
10575	return IRQ_NONE;
10576}
10577
10578static void ipw_rf_kill(void *adapter)
10579{
10580	struct ipw_priv *priv = adapter;
10581	unsigned long flags;
10582
10583	spin_lock_irqsave(&priv->lock, flags);
10584
10585	if (rf_kill_active(priv)) {
10586		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10587		if (priv->workqueue)
10588			queue_delayed_work(priv->workqueue,
10589					   &priv->rf_kill, 2 * HZ);
10590		goto exit_unlock;
10591	}
10592
10593	/* RF Kill is now disabled, so bring the device back up */
10594
10595	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10596		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10597				  "device\n");
10598
10599		/* we can not do an adapter restart while inside an irq lock */
10600		queue_work(priv->workqueue, &priv->adapter_restart);
10601	} else
10602		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10603				  "enabled\n");
10604
10605      exit_unlock:
10606	spin_unlock_irqrestore(&priv->lock, flags);
10607}
10608
10609static void ipw_bg_rf_kill(struct work_struct *work)
10610{
10611	struct ipw_priv *priv =
10612		container_of(work, struct ipw_priv, rf_kill.work);
10613	mutex_lock(&priv->mutex);
10614	ipw_rf_kill(priv);
10615	mutex_unlock(&priv->mutex);
10616}
10617
10618static void ipw_link_up(struct ipw_priv *priv)
10619{
10620	priv->last_seq_num = -1;
10621	priv->last_frag_num = -1;
10622	priv->last_packet_time = 0;
10623
10624	netif_carrier_on(priv->net_dev);
10625	if (netif_queue_stopped(priv->net_dev)) {
10626		IPW_DEBUG_NOTIF("waking queue\n");
10627		netif_wake_queue(priv->net_dev);
10628	} else {
10629		IPW_DEBUG_NOTIF("starting queue\n");
10630		netif_start_queue(priv->net_dev);
10631	}
10632
10633	cancel_delayed_work(&priv->request_scan);
10634	ipw_reset_stats(priv);
10635	/* Ensure the rate is updated immediately */
10636	priv->last_rate = ipw_get_current_rate(priv);
10637	ipw_gather_stats(priv);
10638	ipw_led_link_up(priv);
10639	notify_wx_assoc_event(priv);
10640
10641	if (priv->config & CFG_BACKGROUND_SCAN)
10642		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10643}
10644
10645static void ipw_bg_link_up(struct work_struct *work)
10646{
10647	struct ipw_priv *priv =
10648		container_of(work, struct ipw_priv, link_up);
10649	mutex_lock(&priv->mutex);
10650	ipw_link_up(priv);
10651	mutex_unlock(&priv->mutex);
10652}
10653
10654static void ipw_link_down(struct ipw_priv *priv)
10655{
10656	ipw_led_link_down(priv);
10657	netif_carrier_off(priv->net_dev);
10658	netif_stop_queue(priv->net_dev);
10659	notify_wx_assoc_event(priv);
10660
10661	/* Cancel any queued work ... */
10662	cancel_delayed_work(&priv->request_scan);
10663	cancel_delayed_work(&priv->adhoc_check);
10664	cancel_delayed_work(&priv->gather_stats);
10665
10666	ipw_reset_stats(priv);
10667
10668	if (!(priv->status & STATUS_EXIT_PENDING)) {
10669		/* Queue up another scan... */
10670		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10671	}
10672}
10673
10674static void ipw_bg_link_down(struct work_struct *work)
10675{
10676	struct ipw_priv *priv =
10677		container_of(work, struct ipw_priv, link_down);
10678	mutex_lock(&priv->mutex);
10679	ipw_link_down(priv);
10680	mutex_unlock(&priv->mutex);
10681}
10682
10683static int ipw_setup_deferred_work(struct ipw_priv *priv)
10684{
10685	int ret = 0;
10686
10687	priv->workqueue = create_workqueue(DRV_NAME);
10688	init_waitqueue_head(&priv->wait_command_queue);
10689	init_waitqueue_head(&priv->wait_state);
10690
10691	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10692	INIT_WORK(&priv->associate, ipw_bg_associate);
10693	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10694	INIT_WORK(&priv->system_config, ipw_system_config);
10695	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10696	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10697	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10698	INIT_WORK(&priv->up, ipw_bg_up);
10699	INIT_WORK(&priv->down, ipw_bg_down);
10700	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10701	INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10702	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10703	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10704	INIT_WORK(&priv->roam, ipw_bg_roam);
10705	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10706	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10707	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10708	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10709	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10710	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10711	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10712
10713#ifdef CONFIG_IPW2200_QOS
10714	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10715#endif				/* CONFIG_IPW2200_QOS */
10716
10717	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10718		     ipw_irq_tasklet, (unsigned long)priv);
10719
10720	return ret;
10721}
10722
10723static void shim__set_security(struct net_device *dev,
10724			       struct ieee80211_security *sec)
10725{
10726	struct ipw_priv *priv = ieee80211_priv(dev);
10727	int i;
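	/* sec->flags bits 0..3 correspond to the four WEP key slots: copy any
	 * key the caller marked valid; slots not marked are dropped unless the
	 * security level is plain WEP (SEC_LEVEL_1). */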
10728	for (i = 0; i < 4; i++) {
10729		if (sec->flags & (1 << i)) {
10730			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10731			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10732			if (sec->key_sizes[i] == 0)
10733				priv->ieee->sec.flags &= ~(1 << i);
10734			else {
10735				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10736				       sec->key_sizes[i]);
10737				priv->ieee->sec.flags |= (1 << i);
10738			}
10739			priv->status |= STATUS_SECURITY_UPDATED;
10740		} else if (sec->level != SEC_LEVEL_1)
10741			priv->ieee->sec.flags &= ~(1 << i);
10742	}
10743
10744	if (sec->flags & SEC_ACTIVE_KEY) {
10745		if (sec->active_key <= 3) {
10746			priv->ieee->sec.active_key = sec->active_key;
10747			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10748		} else
10749			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10750		priv->status |= STATUS_SECURITY_UPDATED;
10751	} else
10752		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10753
10754	if ((sec->flags & SEC_AUTH_MODE) &&
10755	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10756		priv->ieee->sec.auth_mode = sec->auth_mode;
10757		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10758		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10759			priv->capability |= CAP_SHARED_KEY;
10760		else
10761			priv->capability &= ~CAP_SHARED_KEY;
10762		priv->status |= STATUS_SECURITY_UPDATED;
10763	}
10764
10765	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10766		priv->ieee->sec.flags |= SEC_ENABLED;
10767		priv->ieee->sec.enabled = sec->enabled;
10768		priv->status |= STATUS_SECURITY_UPDATED;
10769		if (sec->enabled)
10770			priv->capability |= CAP_PRIVACY_ON;
10771		else
10772			priv->capability &= ~CAP_PRIVACY_ON;
10773	}
10774
10775	if (sec->flags & SEC_ENCRYPT)
10776		priv->ieee->sec.encrypt = sec->encrypt;
10777
10778	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10779		priv->ieee->sec.level = sec->level;
10780		priv->ieee->sec.flags |= SEC_LEVEL;
10781		priv->status |= STATUS_SECURITY_UPDATED;
10782	}
10783
10784	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10785		ipw_set_hwcrypto_keys(priv);
10786
10787	/* To match the current functionality of ipw2100 (which works well
10788	 * with various supplicants), we don't force a disassociate if the
10789	 * privacy capability changes ... */
10790}
10791
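/*
 * Build the rate set handed to the firmware: OFDM rates only for the
 * 5.2GHz (802.11a) band, CCK plus optional OFDM rates for 2.4GHz or
 * mixed-mode operation.
 */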
10792static int init_supported_rates(struct ipw_priv *priv,
10793				struct ipw_supported_rates *rates)
10794{
10795	/* TODO: Mask out rates based on priv->rates_mask */
10796
10797	memset(rates, 0, sizeof(*rates));
10798	/* configure supported rates */
10799	switch (priv->ieee->freq_band) {
10800	case IEEE80211_52GHZ_BAND:
10801		rates->ieee_mode = IPW_A_MODE;
10802		rates->purpose = IPW_RATE_CAPABILITIES;
10803		ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10804					IEEE80211_OFDM_DEFAULT_RATES_MASK);
10805		break;
10806
10807	default:		/* Mixed or 2.4Ghz */
10808		rates->ieee_mode = IPW_G_MODE;
10809		rates->purpose = IPW_RATE_CAPABILITIES;
10810		ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10811				       IEEE80211_CCK_DEFAULT_RATES_MASK);
10812		if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10813			ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10814						IEEE80211_OFDM_DEFAULT_RATES_MASK);
10815		}
10816		break;
10817	}
10818
10819	return 0;
10820}
10821
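/*
 * Post-firmware-load adapter configuration: set Tx power and the adapter
 * MAC address, build and send the system config (BT coexistence,
 * promiscuous filters, broadcast-SSID probe handling), send the supported
 * rates and RTS threshold, set the firmware random seed, and finally send
 * the host-complete command to move the firmware into the RUN state.
 */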
10822static int ipw_config(struct ipw_priv *priv)
10823{
10824	/* This is only called from ipw_up, which resets/reloads the firmware,
10825	   so we don't need to first disable the card before we configure
10826	   it */
10827	if (ipw_set_tx_power(priv))
10828		goto error;
10829
10830	/* initialize adapter address */
10831	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10832		goto error;
10833
10834	/* set basic system config settings */
10835	init_sys_config(&priv->sys_config);
10836
10837	/* Support Bluetooth if we have BT h/w on board and the user wants to.
10838	 * Does not support BT priority yet (don't abort or defer our Tx) */
10839	if (bt_coexist) {
10840		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10841
10842		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10843			priv->sys_config.bt_coexistence
10844			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10845		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10846			priv->sys_config.bt_coexistence
10847			    |= CFG_BT_COEXISTENCE_OOB;
10848	}
10849
10850#ifdef CONFIG_IPW2200_PROMISCUOUS
10851	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10852		priv->sys_config.accept_all_data_frames = 1;
10853		priv->sys_config.accept_non_directed_frames = 1;
10854		priv->sys_config.accept_all_mgmt_bcpr = 1;
10855		priv->sys_config.accept_all_mgmt_frames = 1;
10856	}
10857#endif
10858
10859	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10860		priv->sys_config.answer_broadcast_ssid_probe = 1;
10861	else
10862		priv->sys_config.answer_broadcast_ssid_probe = 0;
10863
10864	if (ipw_send_system_config(priv))
10865		goto error;
10866
10867	init_supported_rates(priv, &priv->rates);
10868	if (ipw_send_supported_rates(priv, &priv->rates))
10869		goto error;
10870
10871	/* Set request-to-send threshold */
10872	if (priv->rts_threshold) {
10873		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10874			goto error;
10875	}
10876#ifdef CONFIG_IPW2200_QOS
10877	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10878	ipw_qos_activate(priv, NULL);
10879#endif				/* CONFIG_IPW2200_QOS */
10880
10881	if (ipw_set_random_seed(priv))
10882		goto error;
10883
10884	/* final state transition to the RUN state */
10885	if (ipw_send_host_complete(priv))
10886		goto error;
10887
10888	priv->status |= STATUS_INIT;
10889
10890	ipw_led_init(priv);
10891	ipw_led_radio_on(priv);
10892	priv->notif_missed_beacons = 0;
10893
10894	/* Set hardware WEP key if it is configured. */
10895	if ((priv->capability & CAP_PRIVACY_ON) &&
10896	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
10897	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10898		ipw_set_hwcrypto_keys(priv);
10899
10900	return 0;
10901
10902      error:
10903	return -EIO;
10904}
10905
10906/*
10907 * NOTE:
10908 *
10909 * These tables have been tested in conjunction with the
10910 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10911 *
10912 * Altering these values, using them on other hardware, or using them in
10913 * geographies not intended for resale of the above mentioned Intel
10914 * adapters has not been tested.
10915 *
10916 * Remember to update the table in README.ipw2200 when changing this
10917 * table.
10918 *
10919 */
10920static const struct ieee80211_geo ipw_geos[] = {
10921	{			/* Restricted */
10922	 "---",
10923	 .bg_channels = 11,
10924	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10925		{2427, 4}, {2432, 5}, {2437, 6},
10926		{2442, 7}, {2447, 8}, {2452, 9},
10927		{2457, 10}, {2462, 11}},
10928	 },
10929
10930	{			/* Custom US/Canada */
10931	 "ZZF",
10932	 .bg_channels = 11,
10933	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10934		{2427, 4}, {2432, 5}, {2437, 6},
10935		{2442, 7}, {2447, 8}, {2452, 9},
10936		{2457, 10}, {2462, 11}},
10937	 .a_channels = 8,
10938	 .a = {{5180, 36},
10939	       {5200, 40},
10940	       {5220, 44},
10941	       {5240, 48},
10942	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10943	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10944	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10945	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10946	 },
10947
10948	{			/* Rest of World */
10949	 "ZZD",
10950	 .bg_channels = 13,
10951	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10952		{2427, 4}, {2432, 5}, {2437, 6},
10953		{2442, 7}, {2447, 8}, {2452, 9},
10954		{2457, 10}, {2462, 11}, {2467, 12},
10955		{2472, 13}},
10956	 },
10957
10958	{			/* Custom USA & Europe & High */
10959	 "ZZA",
10960	 .bg_channels = 11,
10961	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10962		{2427, 4}, {2432, 5}, {2437, 6},
10963		{2442, 7}, {2447, 8}, {2452, 9},
10964		{2457, 10}, {2462, 11}},
10965	 .a_channels = 13,
10966	 .a = {{5180, 36},
10967	       {5200, 40},
10968	       {5220, 44},
10969	       {5240, 48},
10970	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10971	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10972	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10973	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10974	       {5745, 149},
10975	       {5765, 153},
10976	       {5785, 157},
10977	       {5805, 161},
10978	       {5825, 165}},
10979	 },
10980
10981	{			/* Custom NA & Europe */
10982	 "ZZB",
10983	 .bg_channels = 11,
10984	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10985		{2427, 4}, {2432, 5}, {2437, 6},
10986		{2442, 7}, {2447, 8}, {2452, 9},
10987		{2457, 10}, {2462, 11}},
10988	 .a_channels = 13,
10989	 .a = {{5180, 36},
10990	       {5200, 40},
10991	       {5220, 44},
10992	       {5240, 48},
10993	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10994	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10995	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10996	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
10997	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
10998	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
10999	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11000	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11001	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11002	 },
11003
11004	{			/* Custom Japan */
11005	 "ZZC",
11006	 .bg_channels = 11,
11007	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11008		{2427, 4}, {2432, 5}, {2437, 6},
11009		{2442, 7}, {2447, 8}, {2452, 9},
11010		{2457, 10}, {2462, 11}},
11011	 .a_channels = 4,
11012	 .a = {{5170, 34}, {5190, 38},
11013	       {5210, 42}, {5230, 46}},
11014	 },
11015
11016	{			/* Custom */
11017	 "ZZM",
11018	 .bg_channels = 11,
11019	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11020		{2427, 4}, {2432, 5}, {2437, 6},
11021		{2442, 7}, {2447, 8}, {2452, 9},
11022		{2457, 10}, {2462, 11}},
11023	 },
11024
11025	{			/* Europe */
11026	 "ZZE",
11027	 .bg_channels = 13,
11028	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11029		{2427, 4}, {2432, 5}, {2437, 6},
11030		{2442, 7}, {2447, 8}, {2452, 9},
11031		{2457, 10}, {2462, 11}, {2467, 12},
11032		{2472, 13}},
11033	 .a_channels = 19,
11034	 .a = {{5180, 36},
11035	       {5200, 40},
11036	       {5220, 44},
11037	       {5240, 48},
11038	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11039	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11040	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11041	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11042	       {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11043	       {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11044	       {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11045	       {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11046	       {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11047	       {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11048	       {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11049	       {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11050	       {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11051	       {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11052	       {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11053	 },
11054
11055	{			/* Custom Japan */
11056	 "ZZJ",
11057	 .bg_channels = 14,
11058	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11059		{2427, 4}, {2432, 5}, {2437, 6},
11060		{2442, 7}, {2447, 8}, {2452, 9},
11061		{2457, 10}, {2462, 11}, {2467, 12},
11062		{2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11063	 .a_channels = 4,
11064	 .a = {{5170, 34}, {5190, 38},
11065	       {5210, 42}, {5230, 46}},
11066	 },
11067
11068	{			/* Rest of World */
11069	 "ZZR",
11070	 .bg_channels = 14,
11071	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11072		{2427, 4}, {2432, 5}, {2437, 6},
11073		{2442, 7}, {2447, 8}, {2452, 9},
11074		{2457, 10}, {2462, 11}, {2467, 12},
11075		{2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11076			     IEEE80211_CH_PASSIVE_ONLY}},
11077	 },
11078
11079	{			/* High Band */
11080	 "ZZH",
11081	 .bg_channels = 13,
11082	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11083		{2427, 4}, {2432, 5}, {2437, 6},
11084		{2442, 7}, {2447, 8}, {2452, 9},
11085		{2457, 10}, {2462, 11},
11086		{2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11087		{2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11088	 .a_channels = 4,
11089	 .a = {{5745, 149}, {5765, 153},
11090	       {5785, 157}, {5805, 161}},
11091	 },
11092
11093	{			/* Custom Europe */
11094	 "ZZG",
11095	 .bg_channels = 13,
11096	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11097		{2427, 4}, {2432, 5}, {2437, 6},
11098		{2442, 7}, {2447, 8}, {2452, 9},
11099		{2457, 10}, {2462, 11},
11100		{2467, 12}, {2472, 13}},
11101	 .a_channels = 4,
11102	 .a = {{5180, 36}, {5200, 40},
11103	       {5220, 44}, {5240, 48}},
11104	 },
11105
11106	{			/* Europe */
11107	 "ZZK",
11108	 .bg_channels = 13,
11109	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11110		{2427, 4}, {2432, 5}, {2437, 6},
11111		{2442, 7}, {2447, 8}, {2452, 9},
11112		{2457, 10}, {2462, 11},
11113		{2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11114		{2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11115	 .a_channels = 24,
11116	 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11117	       {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11118	       {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11119	       {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11120	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11121	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11122	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11123	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11124	       {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11125	       {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11126	       {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11127	       {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11128	       {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11129	       {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11130	       {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11131	       {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11132	       {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11133	       {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11134	       {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11135	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11136	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11137	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11138	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11139	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11140	 },
11141
11142	{			/* Europe */
11143	 "ZZL",
11144	 .bg_channels = 11,
11145	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11146		{2427, 4}, {2432, 5}, {2437, 6},
11147		{2442, 7}, {2447, 8}, {2452, 9},
11148		{2457, 10}, {2462, 11}},
11149	 .a_channels = 13,
11150	 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11151	       {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11152	       {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11153	       {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11154	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11155	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11156	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11157	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11158	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11159	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11160	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11161	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11162	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11163	 }
11164};
11165
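/*
 * Bring the adapter up: load firmware, read the MAC address and country
 * code (SKU) from the EEPROM, select the matching geography table, honour
 * any RF-kill condition, then configure the device via ipw_config().  On a
 * configuration failure the device is taken back down and the sequence is
 * retried, up to MAX_HW_RESTARTS times.
 */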
11166#define MAX_HW_RESTARTS 5
11167static int ipw_up(struct ipw_priv *priv)
11168{
11169	int rc, i, j;
11170
11171	if (priv->status & STATUS_EXIT_PENDING)
11172		return -EIO;
11173
11174	if (cmdlog && !priv->cmdlog) {
11175		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11176				       GFP_KERNEL);
11177		if (priv->cmdlog == NULL) {
11178			IPW_ERROR("Error allocating %d command log entries.\n",
11179				  cmdlog);
11180			return -ENOMEM;
11181		} else {
11182			priv->cmdlog_len = cmdlog;
11183		}
11184	}
11185
11186	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11187		/* Load the microcode, firmware, and eeprom.
11188		 * Also start the clocks. */
11189		rc = ipw_load(priv);
11190		if (rc) {
11191			IPW_ERROR("Unable to load firmware: %d\n", rc);
11192			return rc;
11193		}
11194
11195		ipw_init_ordinals(priv);
11196		if (!(priv->config & CFG_CUSTOM_MAC))
11197			eeprom_parse_mac(priv, priv->mac_addr);
11198		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11199
11200		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11201			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11202				    ipw_geos[j].name, 3))
11203				break;
11204		}
11205		if (j == ARRAY_SIZE(ipw_geos)) {
11206			IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11207				    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11208				    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11209				    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11210			j = 0;
11211		}
11212		if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11213			IPW_WARNING("Could not set geography.\n");
11214			return 0;
11215		}
11216
11217		if (priv->status & STATUS_RF_KILL_SW) {
11218			IPW_WARNING("Radio disabled by module parameter.\n");
11219			return 0;
11220		} else if (rf_kill_active(priv)) {
11221			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11222				    "Kill switch must be turned off for "
11223				    "wireless networking to work.\n");
11224			queue_delayed_work(priv->workqueue, &priv->rf_kill,
11225					   2 * HZ);
11226			return 0;
11227		}
11228
11229		rc = ipw_config(priv);
11230		if (!rc) {
11231			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11232
11233			/* If configured to try and auto-associate, kick
11234			 * off a scan. */
11235			queue_delayed_work(priv->workqueue,
11236					   &priv->request_scan, 0);
11237
11238			return 0;
11239		}
11240
11241		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11242		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11243			       i, MAX_HW_RESTARTS);
11244
11245		/* We had an error bringing up the hardware, so take it
11246		 * all the way back down so we can try again */
11247		ipw_down(priv);
11248	}
11249
11250	/* We tried to restart and configure the device for as long as
11251	 * our patience could withstand */
11252	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11253
11254	return -EIO;
11255}
11256
11257static void ipw_bg_up(struct work_struct *work)
11258{
11259	struct ipw_priv *priv =
11260		container_of(work, struct ipw_priv, up);
11261	mutex_lock(&priv->mutex);
11262	ipw_up(priv);
11263	mutex_unlock(&priv->mutex);
11264}
11265
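/*
 * Orderly shutdown of an initialized adapter: abort any scan, disassociate,
 * shut the LEDs down, briefly wait for the scanning/associated status bits
 * to clear, then send the card-disable command and drop STATUS_INIT.
 */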
11266static void ipw_deinit(struct ipw_priv *priv)
11267{
11268	int i;
11269
11270	if (priv->status & STATUS_SCANNING) {
11271		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11272		ipw_abort_scan(priv);
11273	}
11274
11275	if (priv->status & STATUS_ASSOCIATED) {
11276		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11277		ipw_disassociate(priv);
11278	}
11279
11280	ipw_led_shutdown(priv);
11281
11282	/* Wait up to 1s for status to change to not scanning and not
11283	 * associated (disassociation can take a while for a full 802.11
11284	 * exchange) */
11285	for (i = 1000; i && (priv->status &
11286			     (STATUS_DISASSOCIATING |
11287			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11288		udelay(10);
11289
11290	if (priv->status & (STATUS_DISASSOCIATING |
11291			    STATUS_ASSOCIATED | STATUS_SCANNING))
11292		IPW_DEBUG_INFO("Still associated or scanning...\n");
11293	else
11294		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11295
11296	/* Attempt to disable the card */
11297	ipw_send_card_disable(priv, 0);
11298
11299	priv->status &= ~STATUS_INIT;
11300}
11301
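/*
 * Take the interface fully down: de-init the firmware if it was up, mask
 * interrupts, clear all status bits except RF-kill (and EXIT_PENDING when
 * unloading), stop the net queue and the NIC, and turn the radio LED off.
 */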
11302static void ipw_down(struct ipw_priv *priv)
11303{
11304	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11305
11306	priv->status |= STATUS_EXIT_PENDING;
11307
11308	if (ipw_is_init(priv))
11309		ipw_deinit(priv);
11310
11311	/* Wipe out the EXIT_PENDING status bit if we are not actually
11312	 * exiting the module */
11313	if (!exit_pending)
11314		priv->status &= ~STATUS_EXIT_PENDING;
11315
11316	/* tell the device to stop sending interrupts */
11317	ipw_disable_interrupts(priv);
11318
11319	/* Clear all bits but the RF Kill */
11320	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11321	netif_carrier_off(priv->net_dev);
11322	netif_stop_queue(priv->net_dev);
11323
11324	ipw_stop_nic(priv);
11325
11326	ipw_led_radio_off(priv);
11327}
11328
11329static void ipw_bg_down(struct work_struct *work)
11330{
11331	struct ipw_priv *priv =
11332		container_of(work, struct ipw_priv, down);
11333	mutex_lock(&priv->mutex);
11334	ipw_down(priv);
11335	mutex_unlock(&priv->mutex);
11336}
11337
11338/* Called by register_netdev() */
11339static int ipw_net_init(struct net_device *dev)
11340{
11341	struct ipw_priv *priv = ieee80211_priv(dev);
11342	mutex_lock(&priv->mutex);
11343
11344	if (ipw_up(priv)) {
11345		mutex_unlock(&priv->mutex);
11346		return -EIO;
11347	}
11348
11349	mutex_unlock(&priv->mutex);
11350	return 0;
11351}
11352
11353/* PCI driver stuff */
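/* Entries are {vendor, device, subvendor, subdevice, class, class_mask,
 * driver_data}, following the struct pci_device_id field order. */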
11354static struct pci_device_id card_ids[] = {
11355	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11356	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11357	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11358	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11359	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11360	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11361	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11362	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11363	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11364	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11365	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11366	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11367	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11368	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11369	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11370	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11371	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11372	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11373	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
11374	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
11375	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
11376	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
11377
11378	/* required last entry */
11379	{0,}
11380};
11381
11382MODULE_DEVICE_TABLE(pci, card_ids);
11383
11384static struct attribute *ipw_sysfs_entries[] = {
11385	&dev_attr_rf_kill.attr,
11386	&dev_attr_direct_dword.attr,
11387	&dev_attr_indirect_byte.attr,
11388	&dev_attr_indirect_dword.attr,
11389	&dev_attr_mem_gpio_reg.attr,
11390	&dev_attr_command_event_reg.attr,
11391	&dev_attr_nic_type.attr,
11392	&dev_attr_status.attr,
11393	&dev_attr_cfg.attr,
11394	&dev_attr_error.attr,
11395	&dev_attr_event_log.attr,
11396	&dev_attr_cmd_log.attr,
11397	&dev_attr_eeprom_delay.attr,
11398	&dev_attr_ucode_version.attr,
11399	&dev_attr_rtc.attr,
11400	&dev_attr_scan_age.attr,
11401	&dev_attr_led.attr,
11402	&dev_attr_speed_scan.attr,
11403	&dev_attr_net_stats.attr,
11404	&dev_attr_channels.attr,
11405#ifdef CONFIG_IPW2200_PROMISCUOUS
11406	&dev_attr_rtap_iface.attr,
11407	&dev_attr_rtap_filter.attr,
11408#endif
11409	NULL
11410};
11411
11412static struct attribute_group ipw_attribute_group = {
11413	.name = NULL,		/* put in device directory */
11414	.attrs = ipw_sysfs_entries,
11415};
11416
11417#ifdef CONFIG_IPW2200_PROMISCUOUS
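/*
 * Promiscuous ("rtap") companion interface.  Opening the rtap%d device
 * while the main interface is not in monitor mode relaxes the firmware
 * filters so all data and management frames are delivered; closing it
 * restores the defaults.  The device is radiotap-only and cannot transmit.
 */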
11418static int ipw_prom_open(struct net_device *dev)
11419{
11420	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11421	struct ipw_priv *priv = prom_priv->priv;
11422
11423	IPW_DEBUG_INFO("prom dev->open\n");
11424	netif_carrier_off(dev);
11425	netif_stop_queue(dev);
11426
11427	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11428		priv->sys_config.accept_all_data_frames = 1;
11429		priv->sys_config.accept_non_directed_frames = 1;
11430		priv->sys_config.accept_all_mgmt_bcpr = 1;
11431		priv->sys_config.accept_all_mgmt_frames = 1;
11432
11433		ipw_send_system_config(priv);
11434	}
11435
11436	return 0;
11437}
11438
11439static int ipw_prom_stop(struct net_device *dev)
11440{
11441	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11442	struct ipw_priv *priv = prom_priv->priv;
11443
11444	IPW_DEBUG_INFO("prom dev->stop\n");
11445
11446	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11447		priv->sys_config.accept_all_data_frames = 0;
11448		priv->sys_config.accept_non_directed_frames = 0;
11449		priv->sys_config.accept_all_mgmt_bcpr = 0;
11450		priv->sys_config.accept_all_mgmt_frames = 0;
11451
11452		ipw_send_system_config(priv);
11453	}
11454
11455	return 0;
11456}
11457
11458static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11459{
11460	IPW_DEBUG_INFO("prom dev->xmit\n");
11461	netif_stop_queue(dev);
11462	return -EOPNOTSUPP;
11463}
11464
11465static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11466{
11467	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11468	return &prom_priv->ieee->stats;
11469}
11470
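/*
 * Allocate and register the rtap%d net_device used for the promiscuous
 * interface above; returns -EPERM if it already exists.
 */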
11471static int ipw_prom_alloc(struct ipw_priv *priv)
11472{
11473	int rc = 0;
11474
11475	if (priv->prom_net_dev)
11476		return -EPERM;
11477
11478	priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11479	if (priv->prom_net_dev == NULL)
11480		return -ENOMEM;
11481
11482	priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11483	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11484	priv->prom_priv->priv = priv;
11485
11486	strcpy(priv->prom_net_dev->name, "rtap%d");
11487
11488	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11489	priv->prom_net_dev->open = ipw_prom_open;
11490	priv->prom_net_dev->stop = ipw_prom_stop;
11491	priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11492	priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11493
11494	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11495
11496	rc = register_netdev(priv->prom_net_dev);
11497	if (rc) {
11498		free_ieee80211(priv->prom_net_dev);
11499		priv->prom_net_dev = NULL;
11500		return rc;
11501	}
11502
11503	return 0;
11504}
11505
11506static void ipw_prom_free(struct ipw_priv *priv)
11507{
11508	if (!priv->prom_net_dev)
11509		return;
11510
11511	unregister_netdev(priv->prom_net_dev);
11512	free_ieee80211(priv->prom_net_dev);
11513
11514	priv->prom_net_dev = NULL;
11515}
11516
11517#endif
11518
11519
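/*
 * PCI probe: allocate the ieee80211/net_device pair, map BAR 0, set up
 * deferred work and the interrupt handler, hook up the net_device and
 * wireless extension callbacks, create the sysfs attribute group, and
 * register the network device (plus the optional rtap interface).
 */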
11520static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11521{
11522	int err = 0;
11523	struct net_device *net_dev;
11524	void __iomem *base;
11525	u32 length, val;
11526	struct ipw_priv *priv;
11527	int i;
11528
11529	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11530	if (net_dev == NULL) {
11531		err = -ENOMEM;
11532		goto out;
11533	}
11534
11535	priv = ieee80211_priv(net_dev);
11536	priv->ieee = netdev_priv(net_dev);
11537
11538	priv->net_dev = net_dev;
11539	priv->pci_dev = pdev;
11540	ipw_debug_level = debug;
11541	spin_lock_init(&priv->irq_lock);
11542	spin_lock_init(&priv->lock);
11543	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11544		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11545
11546	mutex_init(&priv->mutex);
11547	if (pci_enable_device(pdev)) {
11548		err = -ENODEV;
11549		goto out_free_ieee80211;
11550	}
11551
11552	pci_set_master(pdev);
11553
11554	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11555	if (!err)
11556		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11557	if (err) {
11558		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11559		goto out_pci_disable_device;
11560	}
11561
11562	pci_set_drvdata(pdev, priv);
11563
11564	err = pci_request_regions(pdev, DRV_NAME);
11565	if (err)
11566		goto out_pci_disable_device;
11567
11568	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11569	 * PCI Tx retries from interfering with C3 CPU state */
11570	pci_read_config_dword(pdev, 0x40, &val);
11571	if ((val & 0x0000ff00) != 0)
11572		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11573
11574	length = pci_resource_len(pdev, 0);
11575	priv->hw_len = length;
11576
11577	base = ioremap_nocache(pci_resource_start(pdev, 0), length);
11578	if (!base) {
11579		err = -ENODEV;
11580		goto out_pci_release_regions;
11581	}
11582
11583	priv->hw_base = base;
11584	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11585	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11586
11587	err = ipw_setup_deferred_work(priv);
11588	if (err) {
11589		IPW_ERROR("Unable to setup deferred work\n");
11590		goto out_iounmap;
11591	}
11592
11593	ipw_sw_reset(priv, 1);
11594
11595	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11596	if (err) {
11597		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11598		goto out_destroy_workqueue;
11599	}
11600
11601	SET_MODULE_OWNER(net_dev);
11602	SET_NETDEV_DEV(net_dev, &pdev->dev);
11603
11604	mutex_lock(&priv->mutex);
11605
11606	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11607	priv->ieee->set_security = shim__set_security;
11608	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11609
11610#ifdef CONFIG_IPW2200_QOS
11611	priv->ieee->is_qos_active = ipw_is_qos_active;
11612	priv->ieee->handle_probe_response = ipw_handle_probe_response;
11613	priv->ieee->handle_beacon = ipw_handle_beacon;
11614	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11615#endif				/* CONFIG_IPW2200_QOS */
11616
11617	priv->ieee->perfect_rssi = -20;
11618	priv->ieee->worst_rssi = -85;
11619
11620	net_dev->open = ipw_net_open;
11621	net_dev->stop = ipw_net_stop;
11622	net_dev->init = ipw_net_init;
11623	net_dev->get_stats = ipw_net_get_stats;
11624	net_dev->set_multicast_list = ipw_net_set_multicast_list;
11625	net_dev->set_mac_address = ipw_net_set_mac_address;
11626	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11627	net_dev->wireless_data = &priv->wireless_data;
11628	net_dev->wireless_handlers = &ipw_wx_handler_def;
11629	net_dev->ethtool_ops = &ipw_ethtool_ops;
11630	net_dev->irq = pdev->irq;
11631	net_dev->base_addr = (unsigned long)priv->hw_base;
11632	net_dev->mem_start = pci_resource_start(pdev, 0);
11633	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11634
11635	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11636	if (err) {
11637		IPW_ERROR("failed to create sysfs device attributes\n");
11638		mutex_unlock(&priv->mutex);
11639		goto out_release_irq;
11640	}
11641
11642	mutex_unlock(&priv->mutex);
11643	err = register_netdev(net_dev);
11644	if (err) {
11645		IPW_ERROR("failed to register network device\n");
11646		goto out_remove_sysfs;
11647	}
11648
11649#ifdef CONFIG_IPW2200_PROMISCUOUS
11650	if (rtap_iface) {
11651	        err = ipw_prom_alloc(priv);
11652		if (err) {
11653			IPW_ERROR("Failed to register promiscuous network "
11654				  "device (error %d).\n", err);
11655			unregister_netdev(priv->net_dev);
11656			goto out_remove_sysfs;
11657		}
11658	}
11659#endif
11660
11661	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11662	       "channels, %d 802.11a channels)\n",
11663	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11664	       priv->ieee->geo.a_channels);
11665
11666	return 0;
11667
11668      out_remove_sysfs:
11669	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11670      out_release_irq:
11671	free_irq(pdev->irq, priv);
11672      out_destroy_workqueue:
11673	destroy_workqueue(priv->workqueue);
11674	priv->workqueue = NULL;
11675      out_iounmap:
11676	iounmap(priv->hw_base);
11677      out_pci_release_regions:
11678	pci_release_regions(pdev);
11679      out_pci_disable_device:
11680	pci_disable_device(pdev);
11681	pci_set_drvdata(pdev, NULL);
11682      out_free_ieee80211:
11683	free_ieee80211(priv->net_dev);
11684      out:
11685	return err;
11686}
11687
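/*
 * PCI remove: mirror of ipw_pci_probe.  Takes the device down, then frees
 * the Rx/Tx queues, command log, pending work, IBSS MAC hash, IRQ and
 * mappings, and finally releases the cached firmware image.
 */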
11688static void ipw_pci_remove(struct pci_dev *pdev)
11689{
11690	struct ipw_priv *priv = pci_get_drvdata(pdev);
11691	struct list_head *p, *q;
11692	int i;
11693
11694	if (!priv)
11695		return;
11696
11697	mutex_lock(&priv->mutex);
11698
11699	priv->status |= STATUS_EXIT_PENDING;
11700	ipw_down(priv);
11701	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11702
11703	mutex_unlock(&priv->mutex);
11704
11705	unregister_netdev(priv->net_dev);
11706
11707	if (priv->rxq) {
11708		ipw_rx_queue_free(priv, priv->rxq);
11709		priv->rxq = NULL;
11710	}
11711	ipw_tx_queue_free(priv);
11712
11713	if (priv->cmdlog) {
11714		kfree(priv->cmdlog);
11715		priv->cmdlog = NULL;
11716	}
11717	/* ipw_down will ensure that there is no more pending work in the
11718	 * workqueue, so we can safely cancel the delayed work and destroy it now. */
11719	cancel_delayed_work(&priv->adhoc_check);
11720	cancel_delayed_work(&priv->gather_stats);
11721	cancel_delayed_work(&priv->request_scan);
11722	cancel_delayed_work(&priv->rf_kill);
11723	cancel_delayed_work(&priv->scan_check);
11724	destroy_workqueue(priv->workqueue);
11725	priv->workqueue = NULL;
11726
11727	/* Free MAC hash list for ADHOC */
11728	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11729		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11730			list_del(p);
11731			kfree(list_entry(p, struct ipw_ibss_seq, list));
11732		}
11733	}
11734
11735	kfree(priv->error);
11736	priv->error = NULL;
11737
11738#ifdef CONFIG_IPW2200_PROMISCUOUS
11739	ipw_prom_free(priv);
11740#endif
11741
11742	free_irq(pdev->irq, priv);
11743	iounmap(priv->hw_base);
11744	pci_release_regions(pdev);
11745	pci_disable_device(pdev);
11746	pci_set_drvdata(pdev, NULL);
11747	free_ieee80211(priv->net_dev);
11748	free_firmware();
11749}
11750
11751#ifdef CONFIG_PM
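/*
 * Power management: suspend takes the device all the way down and saves
 * PCI state; resume restores PCI state, re-applies the RETRY_TIMEOUT
 * quirk (see ipw_pci_probe) and brings the adapter back up via the 'up'
 * work item.
 */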
11752static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11753{
11754	struct ipw_priv *priv = pci_get_drvdata(pdev);
11755	struct net_device *dev = priv->net_dev;
11756
11757	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11758
11759	/* Take down the device; powers it off, etc. */
11760	ipw_down(priv);
11761
11762	/* Remove the PRESENT state of the device */
11763	netif_device_detach(dev);
11764
11765	pci_save_state(pdev);
11766	pci_disable_device(pdev);
11767	pci_set_power_state(pdev, pci_choose_state(pdev, state));
11768
11769	return 0;
11770}
11771
11772static int ipw_pci_resume(struct pci_dev *pdev)
11773{
11774	struct ipw_priv *priv = pci_get_drvdata(pdev);
11775	struct net_device *dev = priv->net_dev;
11776	int err;
11777	u32 val;
11778
11779	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11780
11781	pci_set_power_state(pdev, PCI_D0);
11782	err = pci_enable_device(pdev);
11783	if (err) {
11784		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11785		       dev->name);
11786		return err;
11787	}
11788	pci_restore_state(pdev);
11789
11790	/*
11791	 * Suspend/Resume resets the PCI configuration space, so we have to
11792	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11793	 * from interfering with C3 CPU state. pci_restore_state won't help
11794	 * here since it only restores the first 64 bytes of the PCI config header.
11795	 */
11796	pci_read_config_dword(pdev, 0x40, &val);
11797	if ((val & 0x0000ff00) != 0)
11798		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11799
11800	/* Set the device back into the PRESENT state; this will also wake
11801	 * the queue if needed */
11802	netif_device_attach(dev);
11803
11804	/* Bring the device back up */
11805	queue_work(priv->workqueue, &priv->up);
11806
11807	return 0;
11808}
11809#endif
11810
11811static void ipw_pci_shutdown(struct pci_dev *pdev)
11812{
11813	struct ipw_priv *priv = pci_get_drvdata(pdev);
11814
11815	/* Take down the device; powers it off, etc. */
11816	ipw_down(priv);
11817
11818	pci_disable_device(pdev);
11819}
11820
11821/* driver initialization stuff */
11822static struct pci_driver ipw_driver = {
11823	.name = DRV_NAME,
11824	.id_table = card_ids,
11825	.probe = ipw_pci_probe,
11826	.remove = __devexit_p(ipw_pci_remove),
11827#ifdef CONFIG_PM
11828	.suspend = ipw_pci_suspend,
11829	.resume = ipw_pci_resume,
11830#endif
11831	.shutdown = ipw_pci_shutdown,
11832};
11833
11834static int __init ipw_init(void)
11835{
11836	int ret;
11837
11838	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11839	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11840
11841	ret = pci_register_driver(&ipw_driver);
11842	if (ret) {
11843		IPW_ERROR("Unable to initialize PCI module\n");
11844		return ret;
11845	}
11846
11847	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11848	if (ret) {
11849		IPW_ERROR("Unable to create driver sysfs file\n");
11850		pci_unregister_driver(&ipw_driver);
11851		return ret;
11852	}
11853
11854	return ret;
11855}
11856
11857static void __exit ipw_exit(void)
11858{
11859	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11860	pci_unregister_driver(&ipw_driver);
11861}
11862
11863module_param(disable, int, 0444);
11864MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11865
11866module_param(associate, int, 0444);
11867MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
11868
11869module_param(auto_create, int, 0444);
11870MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11871
11872module_param(led, int, 0444);
11873MODULE_PARM_DESC(led, "enable LED control on some systems (default 0 off)");
11874
11875module_param(debug, int, 0444);
11876MODULE_PARM_DESC(debug, "debug output mask");
11877
11878module_param(channel, int, 0444);
11879MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11880
11881#ifdef CONFIG_IPW2200_PROMISCUOUS
11882module_param(rtap_iface, int, 0444);
11883MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11884#endif
11885
11886#ifdef CONFIG_IPW2200_QOS
11887module_param(qos_enable, int, 0444);
11888MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
11889
11890module_param(qos_burst_enable, int, 0444);
11891MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11892
11893module_param(qos_no_ack_mask, int, 0444);
11894MODULE_PARM_DESC(qos_no_ack_mask, "bitmask of Tx queues to set to no-ack");
11895
11896module_param(burst_duration_CCK, int, 0444);
11897MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11898
11899module_param(burst_duration_OFDM, int, 0444);
11900MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11901#endif				/* CONFIG_IPW2200_QOS */
11902
11903#ifdef CONFIG_IPW2200_MONITOR
11904module_param(mode, int, 0444);
11905MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11906#else
11907module_param(mode, int, 0444);
11908MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11909#endif
11910
11911module_param(bt_coexist, int, 0444);
11912MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11913
11914module_param(hwcrypto, int, 0444);
11915MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11916
11917module_param(cmdlog, int, 0444);
11918MODULE_PARM_DESC(cmdlog,
11919		 "number of entries in a ring buffer for logging firmware commands");
11920
11921module_param(roaming, int, 0444);
11922MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11923
11924module_param(antenna, int, 0444);
11925MODULE_PARM_DESC(antenna, "select antenna: 0=both [default], 1=Main, 2=slow_diversity (choose the antenna with lower background noise), 3=Aux");
11926
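/*
 * Module parameters are passed on the modprobe/insmod command line, e.g.
 * (parameter values below are only illustrative):
 *
 *	modprobe ipw2200 mode=2 rtap_iface=1 debug=0x43fff
 *
 * mode=2 requires CONFIG_IPW2200_MONITOR and rtap_iface=1 requires
 * CONFIG_IPW2200_PROMISCUOUS.
 */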
11927module_exit(ipw_exit);
11928module_init(ipw_init);
11929