• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /******************************************************************************
2 
3   Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4 
5   802.11 status code portion of this file from ethereal-0.10.6:
6     Copyright 2000, Axis Communications AB
7     Ethereal - Network traffic analyzer
8     By Gerald Combs <gerald@ethereal.com>
9     Copyright 1998 Gerald Combs
10 
11   This program is free software; you can redistribute it and/or modify it
12   under the terms of version 2 of the GNU General Public License as
13   published by the Free Software Foundation.
14 
15   This program is distributed in the hope that it will be useful, but WITHOUT
16   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18   more details.
19 
20   You should have received a copy of the GNU General Public License along with
21   this program; if not, write to the Free Software Foundation, Inc., 59
22   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23 
24   The full GNU General Public License is included in this distribution in the
25   file called LICENSE.
26 
27   Contact Information:
28   James P. Ketrenos <ipw2100-admin@linux.intel.com>
29   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 
31 ******************************************************************************/
32 
33 #include "ipw2200.h"
34 
35 
36 #ifndef KBUILD_EXTMOD
37 #define VK "k"
38 #else
39 #define VK
40 #endif
41 
42 #ifdef CONFIG_IPW2200_DEBUG
43 #define VD "d"
44 #else
45 #define VD
46 #endif
47 
48 #ifdef CONFIG_IPW2200_MONITOR
49 #define VM "m"
50 #else
51 #define VM
52 #endif
53 
54 #ifdef CONFIG_IPW2200_PROMISCUOUS
55 #define VP "p"
56 #else
57 #define VP
58 #endif
59 
60 #ifdef CONFIG_IPW2200_RADIOTAP
61 #define VR "r"
62 #else
63 #define VR
64 #endif
65 
66 #ifdef CONFIG_IPW2200_QOS
67 #define VQ "q"
68 #else
69 #define VQ
70 #endif
71 
72 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
73 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
74 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
75 #define DRV_VERSION     IPW2200_VERSION
76 
77 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
78 
79 MODULE_DESCRIPTION(DRV_DESCRIPTION);
80 MODULE_VERSION(DRV_VERSION);
81 MODULE_AUTHOR(DRV_COPYRIGHT);
82 MODULE_LICENSE("GPL");
83 
84 static int cmdlog = 0;
85 static int debug = 0;
86 static int channel = 0;
87 static int mode = 0;
88 
89 static u32 ipw_debug_level;
90 static int associate;
91 static int auto_create = 1;
92 static int led = 0;
93 static int disable = 0;
94 static int bt_coexist = 0;
95 static int hwcrypto = 0;
96 static int roaming = 1;
97 static const char ipw_modes[] = {
98 	'a', 'b', 'g', '?'
99 };
100 static int antenna = CFG_SYS_ANTENNA_BOTH;
101 
102 #ifdef CONFIG_IPW2200_PROMISCUOUS
103 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
104 #endif
105 
106 
107 #ifdef CONFIG_IPW2200_QOS
108 static int qos_enable = 0;
109 static int qos_burst_enable = 0;
110 static int qos_no_ack_mask = 0;
111 static int burst_duration_CCK = 0;
112 static int burst_duration_OFDM = 0;
113 
114 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
115 	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
116 	 QOS_TX3_CW_MIN_OFDM},
117 	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
118 	 QOS_TX3_CW_MAX_OFDM},
119 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
120 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
121 	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
122 	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
123 };
124 
125 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
126 	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
127 	 QOS_TX3_CW_MIN_CCK},
128 	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
129 	 QOS_TX3_CW_MAX_CCK},
130 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
131 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
132 	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
133 	 QOS_TX3_TXOP_LIMIT_CCK}
134 };
135 
136 static struct ieee80211_qos_parameters def_parameters_OFDM = {
137 	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
138 	 DEF_TX3_CW_MIN_OFDM},
139 	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
140 	 DEF_TX3_CW_MAX_OFDM},
141 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
142 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
143 	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
144 	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
145 };
146 
147 static struct ieee80211_qos_parameters def_parameters_CCK = {
148 	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
149 	 DEF_TX3_CW_MIN_CCK},
150 	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
151 	 DEF_TX3_CW_MAX_CCK},
152 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
153 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
154 	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
155 	 DEF_TX3_TXOP_LIMIT_CCK}
156 };
157 
158 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
159 
160 static int from_priority_to_tx_queue[] = {
161 	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
162 	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
163 };
164 
165 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
166 
167 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
168 				       *qos_param);
169 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
170 				     *qos_param);
171 #endif				/* CONFIG_IPW2200_QOS */
172 
173 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
174 static void ipw_remove_current_network(struct ipw_priv *priv);
175 static void ipw_rx(struct ipw_priv *priv);
176 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
177 				struct clx2_tx_queue *txq, int qindex);
178 static int ipw_queue_reset(struct ipw_priv *priv);
179 
180 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
181 			     int len, int sync);
182 
183 static void ipw_tx_queue_free(struct ipw_priv *);
184 
185 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
186 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
187 static void ipw_rx_queue_replenish(void *);
188 static int ipw_up(struct ipw_priv *);
189 static void ipw_bg_up(struct work_struct *work);
190 static void ipw_down(struct ipw_priv *);
191 static void ipw_bg_down(struct work_struct *work);
192 static int ipw_config(struct ipw_priv *);
193 static int init_supported_rates(struct ipw_priv *priv,
194 				struct ipw_supported_rates *prates);
195 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
196 static void ipw_send_wep_keys(struct ipw_priv *, int);
197 
snprint_line(char * buf,size_t count,const u8 * data,u32 len,u32 ofs)198 static int snprint_line(char *buf, size_t count,
199 			const u8 * data, u32 len, u32 ofs)
200 {
201 	int out, i, j, l;
202 	char c;
203 
204 	out = snprintf(buf, count, "%08X", ofs);
205 
206 	for (l = 0, i = 0; i < 2; i++) {
207 		out += snprintf(buf + out, count - out, " ");
208 		for (j = 0; j < 8 && l < len; j++, l++)
209 			out += snprintf(buf + out, count - out, "%02X ",
210 					data[(i * 8 + j)]);
211 		for (; j < 8; j++)
212 			out += snprintf(buf + out, count - out, "   ");
213 	}
214 
215 	out += snprintf(buf + out, count - out, " ");
216 	for (l = 0, i = 0; i < 2; i++) {
217 		out += snprintf(buf + out, count - out, " ");
218 		for (j = 0; j < 8 && l < len; j++, l++) {
219 			c = data[(i * 8 + j)];
220 			if (!isascii(c) || !isprint(c))
221 				c = '.';
222 
223 			out += snprintf(buf + out, count - out, "%c", c);
224 		}
225 
226 		for (; j < 8; j++)
227 			out += snprintf(buf + out, count - out, " ");
228 	}
229 
230 	return out;
231 }
232 
printk_buf(int level,const u8 * data,u32 len)233 static void printk_buf(int level, const u8 * data, u32 len)
234 {
235 	char line[81];
236 	u32 ofs = 0;
237 	if (!(ipw_debug_level & level))
238 		return;
239 
240 	while (len) {
241 		snprint_line(line, sizeof(line), &data[ofs],
242 			     min(len, 16U), ofs);
243 		printk(KERN_DEBUG "%s\n", line);
244 		ofs += 16;
245 		len -= min(len, 16U);
246 	}
247 }
248 
snprintk_buf(u8 * output,size_t size,const u8 * data,size_t len)249 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
250 {
251 	size_t out = size;
252 	u32 ofs = 0;
253 	int total = 0;
254 
255 	while (size && len) {
256 		out = snprint_line(output, size, &data[ofs],
257 				   min_t(size_t, len, 16U), ofs);
258 
259 		ofs += 16;
260 		output += out;
261 		size -= out;
262 		len -= min_t(size_t, len, 16U);
263 		total += out;
264 	}
265 	return total;
266 }
267 
268 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
269 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
270 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
271 
272 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
273 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
274 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
275 
276 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
277 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
ipw_write_reg8(struct ipw_priv * a,u32 b,u8 c)278 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
279 {
280 	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
281 		     __LINE__, (u32) (b), (u32) (c));
282 	_ipw_write_reg8(a, b, c);
283 }
284 
285 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
286 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
ipw_write_reg16(struct ipw_priv * a,u32 b,u16 c)287 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
288 {
289 	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
290 		     __LINE__, (u32) (b), (u32) (c));
291 	_ipw_write_reg16(a, b, c);
292 }
293 
294 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
295 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
ipw_write_reg32(struct ipw_priv * a,u32 b,u32 c)296 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
297 {
298 	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
299 		     __LINE__, (u32) (b), (u32) (c));
300 	_ipw_write_reg32(a, b, c);
301 }
302 
303 /* 8-bit direct write (low 4K) */
304 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
305 
306 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
307 #define ipw_write8(ipw, ofs, val) do { \
308  IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
309  _ipw_write8(ipw, ofs, val); \
310  } while (0)
311 
312 /* 16-bit direct write (low 4K) */
313 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
314 
315 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316 #define ipw_write16(ipw, ofs, val) \
317  IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318  _ipw_write16(ipw, ofs, val)
319 
320 /* 32-bit direct write (low 4K) */
321 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
322 
323 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324 #define ipw_write32(ipw, ofs, val) \
325  IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326  _ipw_write32(ipw, ofs, val)
327 
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
330 
331 /* 8-bit direct read (low 4K), with debug wrapper */
__ipw_read8(char * f,u32 l,struct ipw_priv * ipw,u32 ofs)332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
333 {
334 	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 	return _ipw_read8(ipw, ofs);
336 }
337 
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
340 
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
343 
344 /* 16-bit direct read (low 4K), with debug wrapper */
__ipw_read16(char * f,u32 l,struct ipw_priv * ipw,u32 ofs)345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
346 {
347 	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 	return _ipw_read16(ipw, ofs);
349 }
350 
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
353 
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
356 
357 /* 32-bit direct read (low 4K), with debug wrapper */
__ipw_read32(char * f,u32 l,struct ipw_priv * ipw,u32 ofs)358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
359 {
360 	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 	return _ipw_read32(ipw, ofs);
362 }
363 
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
366 
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
__ipw_read_indirect(const char * f,int l,struct ipw_priv * a,u32 b,u8 * c,int d)369 static inline void __ipw_read_indirect(const char *f, int l,
370 				       struct ipw_priv *a, u32 b, u8 * c, int d)
371 {
372 	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
373 		     d);
374 	_ipw_read_indirect(a, b, c, d);
375 }
376 
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
379 
380 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 				int num);
383 #define ipw_write_indirect(a, b, c, d) \
384 	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 	_ipw_write_indirect(a, b, c, d)
386 
387 /* 32-bit indirect write (above 4K) */
_ipw_write_reg32(struct ipw_priv * priv,u32 reg,u32 value)388 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
389 {
390 	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
393 }
394 
395 /* 8-bit indirect write (above 4K) */
_ipw_write_reg8(struct ipw_priv * priv,u32 reg,u8 value)396 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
397 {
398 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
399 	u32 dif_len = reg - aligned_addr;
400 
401 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403 	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
404 }
405 
406 /* 16-bit indirect write (above 4K) */
_ipw_write_reg16(struct ipw_priv * priv,u32 reg,u16 value)407 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
408 {
409 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
410 	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
411 
412 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414 	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
415 }
416 
417 /* 8-bit indirect read (above 4K) */
_ipw_read_reg8(struct ipw_priv * priv,u32 reg)418 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
419 {
420 	u32 word;
421 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422 	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423 	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424 	return (word >> ((reg & 0x3) * 8)) & 0xff;
425 }
426 
427 /* 32-bit indirect read (above 4K) */
_ipw_read_reg32(struct ipw_priv * priv,u32 reg)428 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
429 {
430 	u32 value;
431 
432 	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
433 
434 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435 	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436 	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437 	return value;
438 }
439 
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /*    for area above 1st 4K of SRAM/reg space */
_ipw_read_indirect(struct ipw_priv * priv,u32 addr,u8 * buf,int num)442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 			       int num)
444 {
445 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
446 	u32 dif_len = addr - aligned_addr;
447 	u32 i;
448 
449 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
450 
451 	if (num <= 0) {
452 		return;
453 	}
454 
455 	/* Read the first dword (or portion) byte by byte */
456 	if (unlikely(dif_len)) {
457 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 		/* Start reading at aligned_addr + dif_len */
459 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 		aligned_addr += 4;
462 	}
463 
464 	/* Read all of the middle dwords as dwords, with auto-increment */
465 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
468 
469 	/* Read the last dword (or portion) byte by byte */
470 	if (unlikely(num)) {
471 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 		for (i = 0; num > 0; i++, num--)
473 			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
474 	}
475 }
476 
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /*    for area above 1st 4K of SRAM/reg space */
_ipw_write_indirect(struct ipw_priv * priv,u32 addr,u8 * buf,int num)479 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480 				int num)
481 {
482 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
483 	u32 dif_len = addr - aligned_addr;
484 	u32 i;
485 
486 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
487 
488 	if (num <= 0) {
489 		return;
490 	}
491 
492 	/* Write the first dword (or portion) byte by byte */
493 	if (unlikely(dif_len)) {
494 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495 		/* Start writing at aligned_addr + dif_len */
496 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498 		aligned_addr += 4;
499 	}
500 
501 	/* Write all of the middle dwords as dwords, with auto-increment */
502 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504 		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
505 
506 	/* Write the last dword (or portion) byte by byte */
507 	if (unlikely(num)) {
508 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 		for (i = 0; num > 0; i++, num--, buf++)
510 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
511 	}
512 }
513 
514 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
515 /*    for 1st 4K of SRAM/regs space */
ipw_write_direct(struct ipw_priv * priv,u32 addr,void * buf,int num)516 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517 			     int num)
518 {
519 	memcpy_toio((priv->hw_base + addr), buf, num);
520 }
521 
522 /* Set bit(s) in low 4K of SRAM/regs */
ipw_set_bit(struct ipw_priv * priv,u32 reg,u32 mask)523 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
524 {
525 	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
526 }
527 
528 /* Clear bit(s) in low 4K of SRAM/regs */
ipw_clear_bit(struct ipw_priv * priv,u32 reg,u32 mask)529 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
530 {
531 	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
532 }
533 
__ipw_enable_interrupts(struct ipw_priv * priv)534 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
535 {
536 	if (priv->status & STATUS_INT_ENABLED)
537 		return;
538 	priv->status |= STATUS_INT_ENABLED;
539 	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
540 }
541 
__ipw_disable_interrupts(struct ipw_priv * priv)542 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
543 {
544 	if (!(priv->status & STATUS_INT_ENABLED))
545 		return;
546 	priv->status &= ~STATUS_INT_ENABLED;
547 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
548 }
549 
ipw_enable_interrupts(struct ipw_priv * priv)550 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
551 {
552 	unsigned long flags;
553 
554 	spin_lock_irqsave(&priv->irq_lock, flags);
555 	__ipw_enable_interrupts(priv);
556 	spin_unlock_irqrestore(&priv->irq_lock, flags);
557 }
558 
ipw_disable_interrupts(struct ipw_priv * priv)559 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
560 {
561 	unsigned long flags;
562 
563 	spin_lock_irqsave(&priv->irq_lock, flags);
564 	__ipw_disable_interrupts(priv);
565 	spin_unlock_irqrestore(&priv->irq_lock, flags);
566 }
567 
ipw_error_desc(u32 val)568 static char *ipw_error_desc(u32 val)
569 {
570 	switch (val) {
571 	case IPW_FW_ERROR_OK:
572 		return "ERROR_OK";
573 	case IPW_FW_ERROR_FAIL:
574 		return "ERROR_FAIL";
575 	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 		return "MEMORY_UNDERFLOW";
577 	case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 		return "MEMORY_OVERFLOW";
579 	case IPW_FW_ERROR_BAD_PARAM:
580 		return "BAD_PARAM";
581 	case IPW_FW_ERROR_BAD_CHECKSUM:
582 		return "BAD_CHECKSUM";
583 	case IPW_FW_ERROR_NMI_INTERRUPT:
584 		return "NMI_INTERRUPT";
585 	case IPW_FW_ERROR_BAD_DATABASE:
586 		return "BAD_DATABASE";
587 	case IPW_FW_ERROR_ALLOC_FAIL:
588 		return "ALLOC_FAIL";
589 	case IPW_FW_ERROR_DMA_UNDERRUN:
590 		return "DMA_UNDERRUN";
591 	case IPW_FW_ERROR_DMA_STATUS:
592 		return "DMA_STATUS";
593 	case IPW_FW_ERROR_DINO_ERROR:
594 		return "DINO_ERROR";
595 	case IPW_FW_ERROR_EEPROM_ERROR:
596 		return "EEPROM_ERROR";
597 	case IPW_FW_ERROR_SYSASSERT:
598 		return "SYSASSERT";
599 	case IPW_FW_ERROR_FATAL_ERROR:
600 		return "FATAL_ERROR";
601 	default:
602 		return "UNKNOWN_ERROR";
603 	}
604 }
605 
ipw_dump_error_log(struct ipw_priv * priv,struct ipw_fw_error * error)606 static void ipw_dump_error_log(struct ipw_priv *priv,
607 			       struct ipw_fw_error *error)
608 {
609 	u32 i;
610 
611 	if (!error) {
612 		IPW_ERROR("Error allocating and capturing error log.  "
613 			  "Nothing to dump.\n");
614 		return;
615 	}
616 
617 	IPW_ERROR("Start IPW Error Log Dump:\n");
618 	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619 		  error->status, error->config);
620 
621 	for (i = 0; i < error->elem_len; i++)
622 		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
623 			  ipw_error_desc(error->elem[i].desc),
624 			  error->elem[i].time,
625 			  error->elem[i].blink1,
626 			  error->elem[i].blink2,
627 			  error->elem[i].link1,
628 			  error->elem[i].link2, error->elem[i].data);
629 	for (i = 0; i < error->log_len; i++)
630 		IPW_ERROR("%i\t0x%08x\t%i\n",
631 			  error->log[i].time,
632 			  error->log[i].data, error->log[i].event);
633 }
634 
ipw_is_init(struct ipw_priv * priv)635 static inline int ipw_is_init(struct ipw_priv *priv)
636 {
637 	return (priv->status & STATUS_INIT) ? 1 : 0;
638 }
639 
ipw_get_ordinal(struct ipw_priv * priv,u32 ord,void * val,u32 * len)640 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
641 {
642 	u32 addr, field_info, field_len, field_count, total_len;
643 
644 	IPW_DEBUG_ORD("ordinal = %i\n", ord);
645 
646 	if (!priv || !val || !len) {
647 		IPW_DEBUG_ORD("Invalid argument\n");
648 		return -EINVAL;
649 	}
650 
651 	/* verify device ordinal tables have been initialized */
652 	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653 		IPW_DEBUG_ORD("Access ordinals before initialization\n");
654 		return -EINVAL;
655 	}
656 
657 	switch (IPW_ORD_TABLE_ID_MASK & ord) {
658 	case IPW_ORD_TABLE_0_MASK:
659 		/*
660 		 * TABLE 0: Direct access to a table of 32 bit values
661 		 *
662 		 * This is a very simple table with the data directly
663 		 * read from the table
664 		 */
665 
666 		/* remove the table id from the ordinal */
667 		ord &= IPW_ORD_TABLE_VALUE_MASK;
668 
669 		/* boundary check */
670 		if (ord > priv->table0_len) {
671 			IPW_DEBUG_ORD("ordinal value (%i) longer then "
672 				      "max (%i)\n", ord, priv->table0_len);
673 			return -EINVAL;
674 		}
675 
676 		/* verify we have enough room to store the value */
677 		if (*len < sizeof(u32)) {
678 			IPW_DEBUG_ORD("ordinal buffer length too small, "
679 				      "need %zd\n", sizeof(u32));
680 			return -EINVAL;
681 		}
682 
683 		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684 			      ord, priv->table0_addr + (ord << 2));
685 
686 		*len = sizeof(u32);
687 		ord <<= 2;
688 		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689 		break;
690 
691 	case IPW_ORD_TABLE_1_MASK:
692 		/*
693 		 * TABLE 1: Indirect access to a table of 32 bit values
694 		 *
695 		 * This is a fairly large table of u32 values each
696 		 * representing starting addr for the data (which is
697 		 * also a u32)
698 		 */
699 
700 		/* remove the table id from the ordinal */
701 		ord &= IPW_ORD_TABLE_VALUE_MASK;
702 
703 		/* boundary check */
704 		if (ord > priv->table1_len) {
705 			IPW_DEBUG_ORD("ordinal value too long\n");
706 			return -EINVAL;
707 		}
708 
709 		/* verify we have enough room to store the value */
710 		if (*len < sizeof(u32)) {
711 			IPW_DEBUG_ORD("ordinal buffer length too small, "
712 				      "need %zd\n", sizeof(u32));
713 			return -EINVAL;
714 		}
715 
716 		*((u32 *) val) =
717 		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718 		*len = sizeof(u32);
719 		break;
720 
721 	case IPW_ORD_TABLE_2_MASK:
722 		/*
723 		 * TABLE 2: Indirect access to a table of variable sized values
724 		 *
725 		 * This table consist of six values, each containing
726 		 *     - dword containing the starting offset of the data
727 		 *     - dword containing the lengh in the first 16bits
728 		 *       and the count in the second 16bits
729 		 */
730 
731 		/* remove the table id from the ordinal */
732 		ord &= IPW_ORD_TABLE_VALUE_MASK;
733 
734 		/* boundary check */
735 		if (ord > priv->table2_len) {
736 			IPW_DEBUG_ORD("ordinal value too long\n");
737 			return -EINVAL;
738 		}
739 
740 		/* get the address of statistic */
741 		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
742 
743 		/* get the second DW of statistics ;
744 		 * two 16-bit words - first is length, second is count */
745 		field_info =
746 		    ipw_read_reg32(priv,
747 				   priv->table2_addr + (ord << 3) +
748 				   sizeof(u32));
749 
750 		/* get each entry length */
751 		field_len = *((u16 *) & field_info);
752 
753 		/* get number of entries */
754 		field_count = *(((u16 *) & field_info) + 1);
755 
756 		/* abort if not enought memory */
757 		total_len = field_len * field_count;
758 		if (total_len > *len) {
759 			*len = total_len;
760 			return -EINVAL;
761 		}
762 
763 		*len = total_len;
764 		if (!total_len)
765 			return 0;
766 
767 		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768 			      "field_info = 0x%08x\n",
769 			      addr, total_len, field_info);
770 		ipw_read_indirect(priv, addr, val, total_len);
771 		break;
772 
773 	default:
774 		IPW_DEBUG_ORD("Invalid ordinal!\n");
775 		return -EINVAL;
776 
777 	}
778 
779 	return 0;
780 }
781 
ipw_init_ordinals(struct ipw_priv * priv)782 static void ipw_init_ordinals(struct ipw_priv *priv)
783 {
784 	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785 	priv->table0_len = ipw_read32(priv, priv->table0_addr);
786 
787 	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788 		      priv->table0_addr, priv->table0_len);
789 
790 	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791 	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
792 
793 	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794 		      priv->table1_addr, priv->table1_len);
795 
796 	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797 	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798 	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
799 
800 	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801 		      priv->table2_addr, priv->table2_len);
802 
803 }
804 
ipw_register_toggle(u32 reg)805 static u32 ipw_register_toggle(u32 reg)
806 {
807 	reg &= ~IPW_START_STANDBY;
808 	if (reg & IPW_GATE_ODMA)
809 		reg &= ~IPW_GATE_ODMA;
810 	if (reg & IPW_GATE_IDMA)
811 		reg &= ~IPW_GATE_IDMA;
812 	if (reg & IPW_GATE_ADMA)
813 		reg &= ~IPW_GATE_ADMA;
814 	return reg;
815 }
816 
817 /*
818  * LED behavior:
819  * - On radio ON, turn on any LEDs that require to be on during start
820  * - On initialization, start unassociated blink
821  * - On association, disable unassociated blink
822  * - On disassociation, start unassociated blink
823  * - On radio OFF, turn off any LEDs started during radio on
824  *
825  */
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
829 
ipw_led_link_on(struct ipw_priv * priv)830 static void ipw_led_link_on(struct ipw_priv *priv)
831 {
832 	unsigned long flags;
833 	u32 led;
834 
835 	/* If configured to not use LEDs, or nic_type is 1,
836 	 * then we don't toggle a LINK led */
837 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838 		return;
839 
840 	spin_lock_irqsave(&priv->lock, flags);
841 
842 	if (!(priv->status & STATUS_RF_KILL_MASK) &&
843 	    !(priv->status & STATUS_LED_LINK_ON)) {
844 		IPW_DEBUG_LED("Link LED On\n");
845 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
846 		led |= priv->led_association_on;
847 
848 		led = ipw_register_toggle(led);
849 
850 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
852 
853 		priv->status |= STATUS_LED_LINK_ON;
854 
855 		/* If we aren't associated, schedule turning the LED off */
856 		if (!(priv->status & STATUS_ASSOCIATED))
857 			queue_delayed_work(priv->workqueue,
858 					   &priv->led_link_off,
859 					   LD_TIME_LINK_ON);
860 	}
861 
862 	spin_unlock_irqrestore(&priv->lock, flags);
863 }
864 
ipw_bg_led_link_on(struct work_struct * work)865 static void ipw_bg_led_link_on(struct work_struct *work)
866 {
867 	struct ipw_priv *priv =
868 		container_of(work, struct ipw_priv, led_link_on.work);
869 	mutex_lock(&priv->mutex);
870 	ipw_led_link_on(priv);
871 	mutex_unlock(&priv->mutex);
872 }
873 
ipw_led_link_off(struct ipw_priv * priv)874 static void ipw_led_link_off(struct ipw_priv *priv)
875 {
876 	unsigned long flags;
877 	u32 led;
878 
879 	/* If configured not to use LEDs, or nic type is 1,
880 	 * then we don't goggle the LINK led. */
881 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
882 		return;
883 
884 	spin_lock_irqsave(&priv->lock, flags);
885 
886 	if (priv->status & STATUS_LED_LINK_ON) {
887 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
888 		led &= priv->led_association_off;
889 		led = ipw_register_toggle(led);
890 
891 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
893 
894 		IPW_DEBUG_LED("Link LED Off\n");
895 
896 		priv->status &= ~STATUS_LED_LINK_ON;
897 
898 		/* If we aren't associated and the radio is on, schedule
899 		 * turning the LED on (blink while unassociated) */
900 		if (!(priv->status & STATUS_RF_KILL_MASK) &&
901 		    !(priv->status & STATUS_ASSOCIATED))
902 			queue_delayed_work(priv->workqueue, &priv->led_link_on,
903 					   LD_TIME_LINK_OFF);
904 
905 	}
906 
907 	spin_unlock_irqrestore(&priv->lock, flags);
908 }
909 
ipw_bg_led_link_off(struct work_struct * work)910 static void ipw_bg_led_link_off(struct work_struct *work)
911 {
912 	struct ipw_priv *priv =
913 		container_of(work, struct ipw_priv, led_link_off.work);
914 	mutex_lock(&priv->mutex);
915 	ipw_led_link_off(priv);
916 	mutex_unlock(&priv->mutex);
917 }
918 
__ipw_led_activity_on(struct ipw_priv * priv)919 static void __ipw_led_activity_on(struct ipw_priv *priv)
920 {
921 	u32 led;
922 
923 	if (priv->config & CFG_NO_LED)
924 		return;
925 
926 	if (priv->status & STATUS_RF_KILL_MASK)
927 		return;
928 
929 	if (!(priv->status & STATUS_LED_ACT_ON)) {
930 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
931 		led |= priv->led_activity_on;
932 
933 		led = ipw_register_toggle(led);
934 
935 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
937 
938 		IPW_DEBUG_LED("Activity LED On\n");
939 
940 		priv->status |= STATUS_LED_ACT_ON;
941 
942 		cancel_delayed_work(&priv->led_act_off);
943 		queue_delayed_work(priv->workqueue, &priv->led_act_off,
944 				   LD_TIME_ACT_ON);
945 	} else {
946 		/* Reschedule LED off for full time period */
947 		cancel_delayed_work(&priv->led_act_off);
948 		queue_delayed_work(priv->workqueue, &priv->led_act_off,
949 				   LD_TIME_ACT_ON);
950 	}
951 }
952 
953 #if 0
954 void ipw_led_activity_on(struct ipw_priv *priv)
955 {
956 	unsigned long flags;
957 	spin_lock_irqsave(&priv->lock, flags);
958 	__ipw_led_activity_on(priv);
959 	spin_unlock_irqrestore(&priv->lock, flags);
960 }
961 #endif  /*  0  */
962 
ipw_led_activity_off(struct ipw_priv * priv)963 static void ipw_led_activity_off(struct ipw_priv *priv)
964 {
965 	unsigned long flags;
966 	u32 led;
967 
968 	if (priv->config & CFG_NO_LED)
969 		return;
970 
971 	spin_lock_irqsave(&priv->lock, flags);
972 
973 	if (priv->status & STATUS_LED_ACT_ON) {
974 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
975 		led &= priv->led_activity_off;
976 
977 		led = ipw_register_toggle(led);
978 
979 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
980 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
981 
982 		IPW_DEBUG_LED("Activity LED Off\n");
983 
984 		priv->status &= ~STATUS_LED_ACT_ON;
985 	}
986 
987 	spin_unlock_irqrestore(&priv->lock, flags);
988 }
989 
ipw_bg_led_activity_off(struct work_struct * work)990 static void ipw_bg_led_activity_off(struct work_struct *work)
991 {
992 	struct ipw_priv *priv =
993 		container_of(work, struct ipw_priv, led_act_off.work);
994 	mutex_lock(&priv->mutex);
995 	ipw_led_activity_off(priv);
996 	mutex_unlock(&priv->mutex);
997 }
998 
ipw_led_band_on(struct ipw_priv * priv)999 static void ipw_led_band_on(struct ipw_priv *priv)
1000 {
1001 	unsigned long flags;
1002 	u32 led;
1003 
1004 	/* Only nic type 1 supports mode LEDs */
1005 	if (priv->config & CFG_NO_LED ||
1006 	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1007 		return;
1008 
1009 	spin_lock_irqsave(&priv->lock, flags);
1010 
1011 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012 	if (priv->assoc_network->mode == IEEE_A) {
1013 		led |= priv->led_ofdm_on;
1014 		led &= priv->led_association_off;
1015 		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1016 	} else if (priv->assoc_network->mode == IEEE_G) {
1017 		led |= priv->led_ofdm_on;
1018 		led |= priv->led_association_on;
1019 		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1020 	} else {
1021 		led &= priv->led_ofdm_off;
1022 		led |= priv->led_association_on;
1023 		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1024 	}
1025 
1026 	led = ipw_register_toggle(led);
1027 
1028 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1029 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1030 
1031 	spin_unlock_irqrestore(&priv->lock, flags);
1032 }
1033 
ipw_led_band_off(struct ipw_priv * priv)1034 static void ipw_led_band_off(struct ipw_priv *priv)
1035 {
1036 	unsigned long flags;
1037 	u32 led;
1038 
1039 	/* Only nic type 1 supports mode LEDs */
1040 	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1041 		return;
1042 
1043 	spin_lock_irqsave(&priv->lock, flags);
1044 
1045 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046 	led &= priv->led_ofdm_off;
1047 	led &= priv->led_association_off;
1048 
1049 	led = ipw_register_toggle(led);
1050 
1051 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1053 
1054 	spin_unlock_irqrestore(&priv->lock, flags);
1055 }
1056 
ipw_led_radio_on(struct ipw_priv * priv)1057 static void ipw_led_radio_on(struct ipw_priv *priv)
1058 {
1059 	ipw_led_link_on(priv);
1060 }
1061 
ipw_led_radio_off(struct ipw_priv * priv)1062 static void ipw_led_radio_off(struct ipw_priv *priv)
1063 {
1064 	ipw_led_activity_off(priv);
1065 	ipw_led_link_off(priv);
1066 }
1067 
ipw_led_link_up(struct ipw_priv * priv)1068 static void ipw_led_link_up(struct ipw_priv *priv)
1069 {
1070 	/* Set the Link Led on for all nic types */
1071 	ipw_led_link_on(priv);
1072 }
1073 
ipw_led_link_down(struct ipw_priv * priv)1074 static void ipw_led_link_down(struct ipw_priv *priv)
1075 {
1076 	ipw_led_activity_off(priv);
1077 	ipw_led_link_off(priv);
1078 
1079 	if (priv->status & STATUS_RF_KILL_MASK)
1080 		ipw_led_radio_off(priv);
1081 }
1082 
ipw_led_init(struct ipw_priv * priv)1083 static void ipw_led_init(struct ipw_priv *priv)
1084 {
1085 	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1086 
1087 	/* Set the default PINs for the link and activity leds */
1088 	priv->led_activity_on = IPW_ACTIVITY_LED;
1089 	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1090 
1091 	priv->led_association_on = IPW_ASSOCIATED_LED;
1092 	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1093 
1094 	/* Set the default PINs for the OFDM leds */
1095 	priv->led_ofdm_on = IPW_OFDM_LED;
1096 	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1097 
1098 	switch (priv->nic_type) {
1099 	case EEPROM_NIC_TYPE_1:
1100 		/* In this NIC type, the LEDs are reversed.... */
1101 		priv->led_activity_on = IPW_ASSOCIATED_LED;
1102 		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103 		priv->led_association_on = IPW_ACTIVITY_LED;
1104 		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1105 
1106 		if (!(priv->config & CFG_NO_LED))
1107 			ipw_led_band_on(priv);
1108 
1109 		/* And we don't blink link LEDs for this nic, so
1110 		 * just return here */
1111 		return;
1112 
1113 	case EEPROM_NIC_TYPE_3:
1114 	case EEPROM_NIC_TYPE_2:
1115 	case EEPROM_NIC_TYPE_4:
1116 	case EEPROM_NIC_TYPE_0:
1117 		break;
1118 
1119 	default:
1120 		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1121 			       priv->nic_type);
1122 		priv->nic_type = EEPROM_NIC_TYPE_0;
1123 		break;
1124 	}
1125 
1126 	if (!(priv->config & CFG_NO_LED)) {
1127 		if (priv->status & STATUS_ASSOCIATED)
1128 			ipw_led_link_on(priv);
1129 		else
1130 			ipw_led_link_off(priv);
1131 	}
1132 }
1133 
ipw_led_shutdown(struct ipw_priv * priv)1134 static void ipw_led_shutdown(struct ipw_priv *priv)
1135 {
1136 	ipw_led_activity_off(priv);
1137 	ipw_led_link_off(priv);
1138 	ipw_led_band_off(priv);
1139 	cancel_delayed_work(&priv->led_link_on);
1140 	cancel_delayed_work(&priv->led_link_off);
1141 	cancel_delayed_work(&priv->led_act_off);
1142 }
1143 
1144 /*
1145  * The following adds a new attribute to the sysfs representation
1146  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1147  * used for controling the debug level.
1148  *
1149  * See the level definitions in ipw for details.
1150  */
show_debug_level(struct device_driver * d,char * buf)1151 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1152 {
1153 	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1154 }
1155 
store_debug_level(struct device_driver * d,const char * buf,size_t count)1156 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1157 				 size_t count)
1158 {
1159 	char *p = (char *)buf;
1160 	u32 val;
1161 
1162 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1163 		p++;
1164 		if (p[0] == 'x' || p[0] == 'X')
1165 			p++;
1166 		val = simple_strtoul(p, &p, 16);
1167 	} else
1168 		val = simple_strtoul(p, &p, 10);
1169 	if (p == buf)
1170 		printk(KERN_INFO DRV_NAME
1171 		       ": %s is not in hex or decimal form.\n", buf);
1172 	else
1173 		ipw_debug_level = val;
1174 
1175 	return strnlen(buf, count);
1176 }
1177 
1178 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179 		   show_debug_level, store_debug_level);
1180 
ipw_get_event_log_len(struct ipw_priv * priv)1181 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1182 {
1183 	/* length = 1st dword in log */
1184 	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1185 }
1186 
ipw_capture_event_log(struct ipw_priv * priv,u32 log_len,struct ipw_event * log)1187 static void ipw_capture_event_log(struct ipw_priv *priv,
1188 				  u32 log_len, struct ipw_event *log)
1189 {
1190 	u32 base;
1191 
1192 	if (log_len) {
1193 		base = ipw_read32(priv, IPW_EVENT_LOG);
1194 		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1195 				  (u8 *) log, sizeof(*log) * log_len);
1196 	}
1197 }
1198 
ipw_alloc_error_log(struct ipw_priv * priv)1199 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1200 {
1201 	struct ipw_fw_error *error;
1202 	u32 log_len = ipw_get_event_log_len(priv);
1203 	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1204 	u32 elem_len = ipw_read_reg32(priv, base);
1205 
1206 	error = kmalloc(sizeof(*error) +
1207 			sizeof(*error->elem) * elem_len +
1208 			sizeof(*error->log) * log_len, GFP_ATOMIC);
1209 	if (!error) {
1210 		IPW_ERROR("Memory allocation for firmware error log "
1211 			  "failed.\n");
1212 		return NULL;
1213 	}
1214 	error->jiffies = jiffies;
1215 	error->status = priv->status;
1216 	error->config = priv->config;
1217 	error->elem_len = elem_len;
1218 	error->log_len = log_len;
1219 	error->elem = (struct ipw_error_elem *)error->payload;
1220 	error->log = (struct ipw_event *)(error->elem + elem_len);
1221 
1222 	ipw_capture_event_log(priv, log_len, error->log);
1223 
1224 	if (elem_len)
1225 		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1226 				  sizeof(*error->elem) * elem_len);
1227 
1228 	return error;
1229 }
1230 
show_event_log(struct device * d,struct device_attribute * attr,char * buf)1231 static ssize_t show_event_log(struct device *d,
1232 			      struct device_attribute *attr, char *buf)
1233 {
1234 	struct ipw_priv *priv = dev_get_drvdata(d);
1235 	u32 log_len = ipw_get_event_log_len(priv);
1236 	u32 log_size;
1237 	struct ipw_event *log;
1238 	u32 len = 0, i;
1239 
1240 	/* not using min() because of its strict type checking */
1241 	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1242 			sizeof(*log) * log_len : PAGE_SIZE;
1243 	log = kzalloc(log_size, GFP_KERNEL);
1244 	if (!log) {
1245 		IPW_ERROR("Unable to allocate memory for log\n");
1246 		return 0;
1247 	}
1248 	log_len = log_size / sizeof(*log);
1249 	ipw_capture_event_log(priv, log_len, log);
1250 
1251 	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1252 	for (i = 0; i < log_len; i++)
1253 		len += snprintf(buf + len, PAGE_SIZE - len,
1254 				"\n%08X%08X%08X",
1255 				log[i].time, log[i].event, log[i].data);
1256 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1257 	kfree(log);
1258 	return len;
1259 }
1260 
1261 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1262 
show_error(struct device * d,struct device_attribute * attr,char * buf)1263 static ssize_t show_error(struct device *d,
1264 			  struct device_attribute *attr, char *buf)
1265 {
1266 	struct ipw_priv *priv = dev_get_drvdata(d);
1267 	u32 len = 0, i;
1268 	if (!priv->error)
1269 		return 0;
1270 	len += snprintf(buf + len, PAGE_SIZE - len,
1271 			"%08lX%08X%08X%08X",
1272 			priv->error->jiffies,
1273 			priv->error->status,
1274 			priv->error->config, priv->error->elem_len);
1275 	for (i = 0; i < priv->error->elem_len; i++)
1276 		len += snprintf(buf + len, PAGE_SIZE - len,
1277 				"\n%08X%08X%08X%08X%08X%08X%08X",
1278 				priv->error->elem[i].time,
1279 				priv->error->elem[i].desc,
1280 				priv->error->elem[i].blink1,
1281 				priv->error->elem[i].blink2,
1282 				priv->error->elem[i].link1,
1283 				priv->error->elem[i].link2,
1284 				priv->error->elem[i].data);
1285 
1286 	len += snprintf(buf + len, PAGE_SIZE - len,
1287 			"\n%08X", priv->error->log_len);
1288 	for (i = 0; i < priv->error->log_len; i++)
1289 		len += snprintf(buf + len, PAGE_SIZE - len,
1290 				"\n%08X%08X%08X",
1291 				priv->error->log[i].time,
1292 				priv->error->log[i].event,
1293 				priv->error->log[i].data);
1294 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1295 	return len;
1296 }
1297 
clear_error(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1298 static ssize_t clear_error(struct device *d,
1299 			   struct device_attribute *attr,
1300 			   const char *buf, size_t count)
1301 {
1302 	struct ipw_priv *priv = dev_get_drvdata(d);
1303 
1304 	kfree(priv->error);
1305 	priv->error = NULL;
1306 	return count;
1307 }
1308 
1309 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1310 
show_cmd_log(struct device * d,struct device_attribute * attr,char * buf)1311 static ssize_t show_cmd_log(struct device *d,
1312 			    struct device_attribute *attr, char *buf)
1313 {
1314 	struct ipw_priv *priv = dev_get_drvdata(d);
1315 	u32 len = 0, i;
1316 	if (!priv->cmdlog)
1317 		return 0;
1318 	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1319 	     (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1320 	     i = (i + 1) % priv->cmdlog_len) {
1321 		len +=
1322 		    snprintf(buf + len, PAGE_SIZE - len,
1323 			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1324 			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1325 			     priv->cmdlog[i].cmd.len);
1326 		len +=
1327 		    snprintk_buf(buf + len, PAGE_SIZE - len,
1328 				 (u8 *) priv->cmdlog[i].cmd.param,
1329 				 priv->cmdlog[i].cmd.len);
1330 		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1331 	}
1332 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1333 	return len;
1334 }
1335 
1336 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1337 
1338 #ifdef CONFIG_IPW2200_PROMISCUOUS
1339 static void ipw_prom_free(struct ipw_priv *priv);
1340 static int ipw_prom_alloc(struct ipw_priv *priv);
store_rtap_iface(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1341 static ssize_t store_rtap_iface(struct device *d,
1342 			 struct device_attribute *attr,
1343 			 const char *buf, size_t count)
1344 {
1345 	struct ipw_priv *priv = dev_get_drvdata(d);
1346 	int rc = 0;
1347 
1348 	if (count < 1)
1349 		return -EINVAL;
1350 
1351 	switch (buf[0]) {
1352 	case '0':
1353 		if (!rtap_iface)
1354 			return count;
1355 
1356 		if (netif_running(priv->prom_net_dev)) {
1357 			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1358 			return count;
1359 		}
1360 
1361 		ipw_prom_free(priv);
1362 		rtap_iface = 0;
1363 		break;
1364 
1365 	case '1':
1366 		if (rtap_iface)
1367 			return count;
1368 
1369 		rc = ipw_prom_alloc(priv);
1370 		if (!rc)
1371 			rtap_iface = 1;
1372 		break;
1373 
1374 	default:
1375 		return -EINVAL;
1376 	}
1377 
1378 	if (rc) {
1379 		IPW_ERROR("Failed to register promiscuous network "
1380 			  "device (error %d).\n", rc);
1381 	}
1382 
1383 	return count;
1384 }
1385 
show_rtap_iface(struct device * d,struct device_attribute * attr,char * buf)1386 static ssize_t show_rtap_iface(struct device *d,
1387 			struct device_attribute *attr,
1388 			char *buf)
1389 {
1390 	struct ipw_priv *priv = dev_get_drvdata(d);
1391 	if (rtap_iface)
1392 		return sprintf(buf, "%s", priv->prom_net_dev->name);
1393 	else {
1394 		buf[0] = '-';
1395 		buf[1] = '1';
1396 		buf[2] = '\0';
1397 		return 3;
1398 	}
1399 }
1400 
1401 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1402 		   store_rtap_iface);
1403 
store_rtap_filter(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1404 static ssize_t store_rtap_filter(struct device *d,
1405 			 struct device_attribute *attr,
1406 			 const char *buf, size_t count)
1407 {
1408 	struct ipw_priv *priv = dev_get_drvdata(d);
1409 
1410 	if (!priv->prom_priv) {
1411 		IPW_ERROR("Attempting to set filter without "
1412 			  "rtap_iface enabled.\n");
1413 		return -EPERM;
1414 	}
1415 
1416 	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1417 
1418 	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1419 		       BIT_ARG16(priv->prom_priv->filter));
1420 
1421 	return count;
1422 }
1423 
show_rtap_filter(struct device * d,struct device_attribute * attr,char * buf)1424 static ssize_t show_rtap_filter(struct device *d,
1425 			struct device_attribute *attr,
1426 			char *buf)
1427 {
1428 	struct ipw_priv *priv = dev_get_drvdata(d);
1429 	return sprintf(buf, "0x%04X",
1430 		       priv->prom_priv ? priv->prom_priv->filter : 0);
1431 }
1432 
1433 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1434 		   store_rtap_filter);
1435 #endif
1436 
show_scan_age(struct device * d,struct device_attribute * attr,char * buf)1437 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1438 			     char *buf)
1439 {
1440 	struct ipw_priv *priv = dev_get_drvdata(d);
1441 	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1442 }
1443 
store_scan_age(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1444 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1445 			      const char *buf, size_t count)
1446 {
1447 	struct ipw_priv *priv = dev_get_drvdata(d);
1448 	struct net_device *dev = priv->net_dev;
1449 	char buffer[] = "00000000";
1450 	unsigned long len =
1451 	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1452 	unsigned long val;
1453 	char *p = buffer;
1454 
1455 	IPW_DEBUG_INFO("enter\n");
1456 
1457 	strncpy(buffer, buf, len);
1458 	buffer[len] = 0;
1459 
1460 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1461 		p++;
1462 		if (p[0] == 'x' || p[0] == 'X')
1463 			p++;
1464 		val = simple_strtoul(p, &p, 16);
1465 	} else
1466 		val = simple_strtoul(p, &p, 10);
1467 	if (p == buffer) {
1468 		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1469 	} else {
1470 		priv->ieee->scan_age = val;
1471 		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1472 	}
1473 
1474 	IPW_DEBUG_INFO("exit\n");
1475 	return len;
1476 }
1477 
1478 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1479 
show_led(struct device * d,struct device_attribute * attr,char * buf)1480 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1481 			char *buf)
1482 {
1483 	struct ipw_priv *priv = dev_get_drvdata(d);
1484 	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1485 }
1486 
store_led(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1487 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1488 			 const char *buf, size_t count)
1489 {
1490 	struct ipw_priv *priv = dev_get_drvdata(d);
1491 
1492 	IPW_DEBUG_INFO("enter\n");
1493 
1494 	if (count == 0)
1495 		return 0;
1496 
1497 	if (*buf == 0) {
1498 		IPW_DEBUG_LED("Disabling LED control.\n");
1499 		priv->config |= CFG_NO_LED;
1500 		ipw_led_shutdown(priv);
1501 	} else {
1502 		IPW_DEBUG_LED("Enabling LED control.\n");
1503 		priv->config &= ~CFG_NO_LED;
1504 		ipw_led_init(priv);
1505 	}
1506 
1507 	IPW_DEBUG_INFO("exit\n");
1508 	return count;
1509 }
1510 
1511 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1512 
show_status(struct device * d,struct device_attribute * attr,char * buf)1513 static ssize_t show_status(struct device *d,
1514 			   struct device_attribute *attr, char *buf)
1515 {
1516 	struct ipw_priv *p = d->driver_data;
1517 	return sprintf(buf, "0x%08x\n", (int)p->status);
1518 }
1519 
1520 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1521 
show_cfg(struct device * d,struct device_attribute * attr,char * buf)1522 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1523 			char *buf)
1524 {
1525 	struct ipw_priv *p = d->driver_data;
1526 	return sprintf(buf, "0x%08x\n", (int)p->config);
1527 }
1528 
1529 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1530 
show_nic_type(struct device * d,struct device_attribute * attr,char * buf)1531 static ssize_t show_nic_type(struct device *d,
1532 			     struct device_attribute *attr, char *buf)
1533 {
1534 	struct ipw_priv *priv = d->driver_data;
1535 	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1536 }
1537 
1538 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1539 
show_ucode_version(struct device * d,struct device_attribute * attr,char * buf)1540 static ssize_t show_ucode_version(struct device *d,
1541 				  struct device_attribute *attr, char *buf)
1542 {
1543 	u32 len = sizeof(u32), tmp = 0;
1544 	struct ipw_priv *p = d->driver_data;
1545 
1546 	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1547 		return 0;
1548 
1549 	return sprintf(buf, "0x%08x\n", tmp);
1550 }
1551 
1552 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1553 
show_rtc(struct device * d,struct device_attribute * attr,char * buf)1554 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1555 			char *buf)
1556 {
1557 	u32 len = sizeof(u32), tmp = 0;
1558 	struct ipw_priv *p = d->driver_data;
1559 
1560 	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1561 		return 0;
1562 
1563 	return sprintf(buf, "0x%08x\n", tmp);
1564 }
1565 
1566 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1567 
1568 /*
1569  * Add a device attribute to view/control the delay between eeprom
1570  * operations.
1571  */
show_eeprom_delay(struct device * d,struct device_attribute * attr,char * buf)1572 static ssize_t show_eeprom_delay(struct device *d,
1573 				 struct device_attribute *attr, char *buf)
1574 {
1575 	int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1576 	return sprintf(buf, "%i\n", n);
1577 }
store_eeprom_delay(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1578 static ssize_t store_eeprom_delay(struct device *d,
1579 				  struct device_attribute *attr,
1580 				  const char *buf, size_t count)
1581 {
1582 	struct ipw_priv *p = d->driver_data;
1583 	sscanf(buf, "%i", &p->eeprom_delay);
1584 	return strnlen(buf, count);
1585 }
1586 
1587 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1588 		   show_eeprom_delay, store_eeprom_delay);
1589 
show_command_event_reg(struct device * d,struct device_attribute * attr,char * buf)1590 static ssize_t show_command_event_reg(struct device *d,
1591 				      struct device_attribute *attr, char *buf)
1592 {
1593 	u32 reg = 0;
1594 	struct ipw_priv *p = d->driver_data;
1595 
1596 	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1597 	return sprintf(buf, "0x%08x\n", reg);
1598 }
store_command_event_reg(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1599 static ssize_t store_command_event_reg(struct device *d,
1600 				       struct device_attribute *attr,
1601 				       const char *buf, size_t count)
1602 {
1603 	u32 reg;
1604 	struct ipw_priv *p = d->driver_data;
1605 
1606 	sscanf(buf, "%x", &reg);
1607 	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1608 	return strnlen(buf, count);
1609 }
1610 
1611 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1612 		   show_command_event_reg, store_command_event_reg);
1613 
show_mem_gpio_reg(struct device * d,struct device_attribute * attr,char * buf)1614 static ssize_t show_mem_gpio_reg(struct device *d,
1615 				 struct device_attribute *attr, char *buf)
1616 {
1617 	u32 reg = 0;
1618 	struct ipw_priv *p = d->driver_data;
1619 
1620 	reg = ipw_read_reg32(p, 0x301100);
1621 	return sprintf(buf, "0x%08x\n", reg);
1622 }
store_mem_gpio_reg(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1623 static ssize_t store_mem_gpio_reg(struct device *d,
1624 				  struct device_attribute *attr,
1625 				  const char *buf, size_t count)
1626 {
1627 	u32 reg;
1628 	struct ipw_priv *p = d->driver_data;
1629 
1630 	sscanf(buf, "%x", &reg);
1631 	ipw_write_reg32(p, 0x301100, reg);
1632 	return strnlen(buf, count);
1633 }
1634 
1635 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1636 		   show_mem_gpio_reg, store_mem_gpio_reg);
1637 
show_indirect_dword(struct device * d,struct device_attribute * attr,char * buf)1638 static ssize_t show_indirect_dword(struct device *d,
1639 				   struct device_attribute *attr, char *buf)
1640 {
1641 	u32 reg = 0;
1642 	struct ipw_priv *priv = d->driver_data;
1643 
1644 	if (priv->status & STATUS_INDIRECT_DWORD)
1645 		reg = ipw_read_reg32(priv, priv->indirect_dword);
1646 	else
1647 		reg = 0;
1648 
1649 	return sprintf(buf, "0x%08x\n", reg);
1650 }
store_indirect_dword(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1651 static ssize_t store_indirect_dword(struct device *d,
1652 				    struct device_attribute *attr,
1653 				    const char *buf, size_t count)
1654 {
1655 	struct ipw_priv *priv = d->driver_data;
1656 
1657 	sscanf(buf, "%x", &priv->indirect_dword);
1658 	priv->status |= STATUS_INDIRECT_DWORD;
1659 	return strnlen(buf, count);
1660 }
1661 
1662 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1663 		   show_indirect_dword, store_indirect_dword);
1664 
show_indirect_byte(struct device * d,struct device_attribute * attr,char * buf)1665 static ssize_t show_indirect_byte(struct device *d,
1666 				  struct device_attribute *attr, char *buf)
1667 {
1668 	u8 reg = 0;
1669 	struct ipw_priv *priv = d->driver_data;
1670 
1671 	if (priv->status & STATUS_INDIRECT_BYTE)
1672 		reg = ipw_read_reg8(priv, priv->indirect_byte);
1673 	else
1674 		reg = 0;
1675 
1676 	return sprintf(buf, "0x%02x\n", reg);
1677 }
store_indirect_byte(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1678 static ssize_t store_indirect_byte(struct device *d,
1679 				   struct device_attribute *attr,
1680 				   const char *buf, size_t count)
1681 {
1682 	struct ipw_priv *priv = d->driver_data;
1683 
1684 	sscanf(buf, "%x", &priv->indirect_byte);
1685 	priv->status |= STATUS_INDIRECT_BYTE;
1686 	return strnlen(buf, count);
1687 }
1688 
1689 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1690 		   show_indirect_byte, store_indirect_byte);
1691 
show_direct_dword(struct device * d,struct device_attribute * attr,char * buf)1692 static ssize_t show_direct_dword(struct device *d,
1693 				 struct device_attribute *attr, char *buf)
1694 {
1695 	u32 reg = 0;
1696 	struct ipw_priv *priv = d->driver_data;
1697 
1698 	if (priv->status & STATUS_DIRECT_DWORD)
1699 		reg = ipw_read32(priv, priv->direct_dword);
1700 	else
1701 		reg = 0;
1702 
1703 	return sprintf(buf, "0x%08x\n", reg);
1704 }
store_direct_dword(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1705 static ssize_t store_direct_dword(struct device *d,
1706 				  struct device_attribute *attr,
1707 				  const char *buf, size_t count)
1708 {
1709 	struct ipw_priv *priv = d->driver_data;
1710 
1711 	sscanf(buf, "%x", &priv->direct_dword);
1712 	priv->status |= STATUS_DIRECT_DWORD;
1713 	return strnlen(buf, count);
1714 }
1715 
1716 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1717 		   show_direct_dword, store_direct_dword);
1718 
rf_kill_active(struct ipw_priv * priv)1719 static int rf_kill_active(struct ipw_priv *priv)
1720 {
1721 	if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1722 		priv->status |= STATUS_RF_KILL_HW;
1723 	else
1724 		priv->status &= ~STATUS_RF_KILL_HW;
1725 
1726 	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1727 }
1728 
show_rf_kill(struct device * d,struct device_attribute * attr,char * buf)1729 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1730 			    char *buf)
1731 {
1732 	/* 0 - RF kill not enabled
1733 	   1 - SW based RF kill active (sysfs)
1734 	   2 - HW based RF kill active
1735 	   3 - Both HW and SW baed RF kill active */
1736 	struct ipw_priv *priv = d->driver_data;
1737 	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1738 	    (rf_kill_active(priv) ? 0x2 : 0x0);
1739 	return sprintf(buf, "%i\n", val);
1740 }
1741 
ipw_radio_kill_sw(struct ipw_priv * priv,int disable_radio)1742 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1743 {
1744 	if ((disable_radio ? 1 : 0) ==
1745 	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1746 		return 0;
1747 
1748 	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1749 			  disable_radio ? "OFF" : "ON");
1750 
1751 	if (disable_radio) {
1752 		priv->status |= STATUS_RF_KILL_SW;
1753 
1754 		if (priv->workqueue) {
1755 			cancel_delayed_work(&priv->request_scan);
1756 			cancel_delayed_work(&priv->request_direct_scan);
1757 			cancel_delayed_work(&priv->request_passive_scan);
1758 			cancel_delayed_work(&priv->scan_event);
1759 		}
1760 		queue_work(priv->workqueue, &priv->down);
1761 	} else {
1762 		priv->status &= ~STATUS_RF_KILL_SW;
1763 		if (rf_kill_active(priv)) {
1764 			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1765 					  "disabled by HW switch\n");
1766 			/* Make sure the RF_KILL check timer is running */
1767 			cancel_delayed_work(&priv->rf_kill);
1768 			queue_delayed_work(priv->workqueue, &priv->rf_kill,
1769 					   round_jiffies_relative(2 * HZ));
1770 		} else
1771 			queue_work(priv->workqueue, &priv->up);
1772 	}
1773 
1774 	return 1;
1775 }
1776 
store_rf_kill(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1777 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1778 			     const char *buf, size_t count)
1779 {
1780 	struct ipw_priv *priv = d->driver_data;
1781 
1782 	ipw_radio_kill_sw(priv, buf[0] == '1');
1783 
1784 	return count;
1785 }
1786 
1787 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1788 
show_speed_scan(struct device * d,struct device_attribute * attr,char * buf)1789 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1790 			       char *buf)
1791 {
1792 	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1793 	int pos = 0, len = 0;
1794 	if (priv->config & CFG_SPEED_SCAN) {
1795 		while (priv->speed_scan[pos] != 0)
1796 			len += sprintf(&buf[len], "%d ",
1797 				       priv->speed_scan[pos++]);
1798 		return len + sprintf(&buf[len], "\n");
1799 	}
1800 
1801 	return sprintf(buf, "0\n");
1802 }
1803 
store_speed_scan(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1804 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1805 				const char *buf, size_t count)
1806 {
1807 	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1808 	int channel, pos = 0;
1809 	const char *p = buf;
1810 
1811 	/* list of space separated channels to scan, optionally ending with 0 */
1812 	while ((channel = simple_strtol(p, NULL, 0))) {
1813 		if (pos == MAX_SPEED_SCAN - 1) {
1814 			priv->speed_scan[pos] = 0;
1815 			break;
1816 		}
1817 
1818 		if (ieee80211_is_valid_channel(priv->ieee, channel))
1819 			priv->speed_scan[pos++] = channel;
1820 		else
1821 			IPW_WARNING("Skipping invalid channel request: %d\n",
1822 				    channel);
1823 		p = strchr(p, ' ');
1824 		if (!p)
1825 			break;
1826 		while (*p == ' ' || *p == '\t')
1827 			p++;
1828 	}
1829 
1830 	if (pos == 0)
1831 		priv->config &= ~CFG_SPEED_SCAN;
1832 	else {
1833 		priv->speed_scan_pos = 0;
1834 		priv->config |= CFG_SPEED_SCAN;
1835 	}
1836 
1837 	return count;
1838 }
1839 
1840 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1841 		   store_speed_scan);
1842 
show_net_stats(struct device * d,struct device_attribute * attr,char * buf)1843 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1844 			      char *buf)
1845 {
1846 	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1847 	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1848 }
1849 
store_net_stats(struct device * d,struct device_attribute * attr,const char * buf,size_t count)1850 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1851 			       const char *buf, size_t count)
1852 {
1853 	struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1854 	if (buf[0] == '1')
1855 		priv->config |= CFG_NET_STATS;
1856 	else
1857 		priv->config &= ~CFG_NET_STATS;
1858 
1859 	return count;
1860 }
1861 
1862 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1863 		   show_net_stats, store_net_stats);
1864 
show_channels(struct device * d,struct device_attribute * attr,char * buf)1865 static ssize_t show_channels(struct device *d,
1866 			     struct device_attribute *attr,
1867 			     char *buf)
1868 {
1869 	struct ipw_priv *priv = dev_get_drvdata(d);
1870 	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1871 	int len = 0, i;
1872 
1873 	len = sprintf(&buf[len],
1874 		      "Displaying %d channels in 2.4Ghz band "
1875 		      "(802.11bg):\n", geo->bg_channels);
1876 
1877 	for (i = 0; i < geo->bg_channels; i++) {
1878 		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1879 			       geo->bg[i].channel,
1880 			       geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1881 			       " (radar spectrum)" : "",
1882 			       ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1883 				(geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1884 			       ? "" : ", IBSS",
1885 			       geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1886 			       "passive only" : "active/passive",
1887 			       geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
1888 			       "B" : "B/G");
1889 	}
1890 
1891 	len += sprintf(&buf[len],
1892 		       "Displaying %d channels in 5.2Ghz band "
1893 		       "(802.11a):\n", geo->a_channels);
1894 	for (i = 0; i < geo->a_channels; i++) {
1895 		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1896 			       geo->a[i].channel,
1897 			       geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1898 			       " (radar spectrum)" : "",
1899 			       ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1900 				(geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1901 			       ? "" : ", IBSS",
1902 			       geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1903 			       "passive only" : "active/passive");
1904 	}
1905 
1906 	return len;
1907 }
1908 
1909 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1910 
notify_wx_assoc_event(struct ipw_priv * priv)1911 static void notify_wx_assoc_event(struct ipw_priv *priv)
1912 {
1913 	union iwreq_data wrqu;
1914 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1915 	if (priv->status & STATUS_ASSOCIATED)
1916 		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1917 	else
1918 		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1919 	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1920 }
1921 
ipw_irq_tasklet(struct ipw_priv * priv)1922 static void ipw_irq_tasklet(struct ipw_priv *priv)
1923 {
1924 	u32 inta, inta_mask, handled = 0;
1925 	unsigned long flags;
1926 	int rc = 0;
1927 
1928 	spin_lock_irqsave(&priv->irq_lock, flags);
1929 
1930 	inta = ipw_read32(priv, IPW_INTA_RW);
1931 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1932 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1933 
1934 	/* Add any cached INTA values that need to be handled */
1935 	inta |= priv->isr_inta;
1936 
1937 	spin_unlock_irqrestore(&priv->irq_lock, flags);
1938 
1939 	spin_lock_irqsave(&priv->lock, flags);
1940 
1941 	/* handle all the justifications for the interrupt */
1942 	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1943 		ipw_rx(priv);
1944 		handled |= IPW_INTA_BIT_RX_TRANSFER;
1945 	}
1946 
1947 	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1948 		IPW_DEBUG_HC("Command completed.\n");
1949 		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1950 		priv->status &= ~STATUS_HCMD_ACTIVE;
1951 		wake_up_interruptible(&priv->wait_command_queue);
1952 		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1953 	}
1954 
1955 	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1956 		IPW_DEBUG_TX("TX_QUEUE_1\n");
1957 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1958 		handled |= IPW_INTA_BIT_TX_QUEUE_1;
1959 	}
1960 
1961 	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1962 		IPW_DEBUG_TX("TX_QUEUE_2\n");
1963 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1964 		handled |= IPW_INTA_BIT_TX_QUEUE_2;
1965 	}
1966 
1967 	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1968 		IPW_DEBUG_TX("TX_QUEUE_3\n");
1969 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1970 		handled |= IPW_INTA_BIT_TX_QUEUE_3;
1971 	}
1972 
1973 	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1974 		IPW_DEBUG_TX("TX_QUEUE_4\n");
1975 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1976 		handled |= IPW_INTA_BIT_TX_QUEUE_4;
1977 	}
1978 
1979 	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1980 		IPW_WARNING("STATUS_CHANGE\n");
1981 		handled |= IPW_INTA_BIT_STATUS_CHANGE;
1982 	}
1983 
1984 	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1985 		IPW_WARNING("TX_PERIOD_EXPIRED\n");
1986 		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1987 	}
1988 
1989 	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1990 		IPW_WARNING("HOST_CMD_DONE\n");
1991 		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1992 	}
1993 
1994 	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1995 		IPW_WARNING("FW_INITIALIZATION_DONE\n");
1996 		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1997 	}
1998 
1999 	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2000 		IPW_WARNING("PHY_OFF_DONE\n");
2001 		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2002 	}
2003 
2004 	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2005 		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2006 		priv->status |= STATUS_RF_KILL_HW;
2007 		wake_up_interruptible(&priv->wait_command_queue);
2008 		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2009 		cancel_delayed_work(&priv->request_scan);
2010 		cancel_delayed_work(&priv->request_direct_scan);
2011 		cancel_delayed_work(&priv->request_passive_scan);
2012 		cancel_delayed_work(&priv->scan_event);
2013 		schedule_work(&priv->link_down);
2014 		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2015 		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2016 	}
2017 
2018 	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2019 		IPW_WARNING("Firmware error detected.  Restarting.\n");
2020 		if (priv->error) {
2021 			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2022 			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2023 				struct ipw_fw_error *error =
2024 				    ipw_alloc_error_log(priv);
2025 				ipw_dump_error_log(priv, error);
2026 				kfree(error);
2027 			}
2028 		} else {
2029 			priv->error = ipw_alloc_error_log(priv);
2030 			if (priv->error)
2031 				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2032 			else
2033 				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2034 					     "log.\n");
2035 			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2036 				ipw_dump_error_log(priv, priv->error);
2037 		}
2038 
2039 		/* XXX: If hardware encryption is for WPA/WPA2,
2040 		 * we have to notify the supplicant. */
2041 		if (priv->ieee->sec.encrypt) {
2042 			priv->status &= ~STATUS_ASSOCIATED;
2043 			notify_wx_assoc_event(priv);
2044 		}
2045 
2046 		/* Keep the restart process from trying to send host
2047 		 * commands by clearing the INIT status bit */
2048 		priv->status &= ~STATUS_INIT;
2049 
2050 		/* Cancel currently queued command. */
2051 		priv->status &= ~STATUS_HCMD_ACTIVE;
2052 		wake_up_interruptible(&priv->wait_command_queue);
2053 
2054 		queue_work(priv->workqueue, &priv->adapter_restart);
2055 		handled |= IPW_INTA_BIT_FATAL_ERROR;
2056 	}
2057 
2058 	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2059 		IPW_ERROR("Parity error\n");
2060 		handled |= IPW_INTA_BIT_PARITY_ERROR;
2061 	}
2062 
2063 	if (handled != inta) {
2064 		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2065 	}
2066 
2067 	spin_unlock_irqrestore(&priv->lock, flags);
2068 
2069 	/* enable all interrupts */
2070 	ipw_enable_interrupts(priv);
2071 }
2072 
2073 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
get_cmd_string(u8 cmd)2074 static char *get_cmd_string(u8 cmd)
2075 {
2076 	switch (cmd) {
2077 		IPW_CMD(HOST_COMPLETE);
2078 		IPW_CMD(POWER_DOWN);
2079 		IPW_CMD(SYSTEM_CONFIG);
2080 		IPW_CMD(MULTICAST_ADDRESS);
2081 		IPW_CMD(SSID);
2082 		IPW_CMD(ADAPTER_ADDRESS);
2083 		IPW_CMD(PORT_TYPE);
2084 		IPW_CMD(RTS_THRESHOLD);
2085 		IPW_CMD(FRAG_THRESHOLD);
2086 		IPW_CMD(POWER_MODE);
2087 		IPW_CMD(WEP_KEY);
2088 		IPW_CMD(TGI_TX_KEY);
2089 		IPW_CMD(SCAN_REQUEST);
2090 		IPW_CMD(SCAN_REQUEST_EXT);
2091 		IPW_CMD(ASSOCIATE);
2092 		IPW_CMD(SUPPORTED_RATES);
2093 		IPW_CMD(SCAN_ABORT);
2094 		IPW_CMD(TX_FLUSH);
2095 		IPW_CMD(QOS_PARAMETERS);
2096 		IPW_CMD(DINO_CONFIG);
2097 		IPW_CMD(RSN_CAPABILITIES);
2098 		IPW_CMD(RX_KEY);
2099 		IPW_CMD(CARD_DISABLE);
2100 		IPW_CMD(SEED_NUMBER);
2101 		IPW_CMD(TX_POWER);
2102 		IPW_CMD(COUNTRY_INFO);
2103 		IPW_CMD(AIRONET_INFO);
2104 		IPW_CMD(AP_TX_POWER);
2105 		IPW_CMD(CCKM_INFO);
2106 		IPW_CMD(CCX_VER_INFO);
2107 		IPW_CMD(SET_CALIBRATION);
2108 		IPW_CMD(SENSITIVITY_CALIB);
2109 		IPW_CMD(RETRY_LIMIT);
2110 		IPW_CMD(IPW_PRE_POWER_DOWN);
2111 		IPW_CMD(VAP_BEACON_TEMPLATE);
2112 		IPW_CMD(VAP_DTIM_PERIOD);
2113 		IPW_CMD(EXT_SUPPORTED_RATES);
2114 		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2115 		IPW_CMD(VAP_QUIET_INTERVALS);
2116 		IPW_CMD(VAP_CHANNEL_SWITCH);
2117 		IPW_CMD(VAP_MANDATORY_CHANNELS);
2118 		IPW_CMD(VAP_CELL_PWR_LIMIT);
2119 		IPW_CMD(VAP_CF_PARAM_SET);
2120 		IPW_CMD(VAP_SET_BEACONING_STATE);
2121 		IPW_CMD(MEASUREMENT);
2122 		IPW_CMD(POWER_CAPABILITY);
2123 		IPW_CMD(SUPPORTED_CHANNELS);
2124 		IPW_CMD(TPC_REPORT);
2125 		IPW_CMD(WME_INFO);
2126 		IPW_CMD(PRODUCTION_COMMAND);
2127 	default:
2128 		return "UNKNOWN";
2129 	}
2130 }
2131 
2132 #define HOST_COMPLETE_TIMEOUT HZ
2133 
__ipw_send_cmd(struct ipw_priv * priv,struct host_cmd * cmd)2134 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2135 {
2136 	int rc = 0;
2137 	unsigned long flags;
2138 
2139 	spin_lock_irqsave(&priv->lock, flags);
2140 	if (priv->status & STATUS_HCMD_ACTIVE) {
2141 		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2142 			  get_cmd_string(cmd->cmd));
2143 		spin_unlock_irqrestore(&priv->lock, flags);
2144 		return -EAGAIN;
2145 	}
2146 
2147 	priv->status |= STATUS_HCMD_ACTIVE;
2148 
2149 	if (priv->cmdlog) {
2150 		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2151 		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2152 		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2153 		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2154 		       cmd->len);
2155 		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2156 	}
2157 
2158 	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2159 		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2160 		     priv->status);
2161 
2162 #ifndef DEBUG_CMD_WEP_KEY
2163 	if (cmd->cmd == IPW_CMD_WEP_KEY)
2164 		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2165 	else
2166 #endif
2167 		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2168 
2169 	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2170 	if (rc) {
2171 		priv->status &= ~STATUS_HCMD_ACTIVE;
2172 		IPW_ERROR("Failed to send %s: Reason %d\n",
2173 			  get_cmd_string(cmd->cmd), rc);
2174 		spin_unlock_irqrestore(&priv->lock, flags);
2175 		goto exit;
2176 	}
2177 	spin_unlock_irqrestore(&priv->lock, flags);
2178 
2179 	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2180 					      !(priv->
2181 						status & STATUS_HCMD_ACTIVE),
2182 					      HOST_COMPLETE_TIMEOUT);
2183 	if (rc == 0) {
2184 		spin_lock_irqsave(&priv->lock, flags);
2185 		if (priv->status & STATUS_HCMD_ACTIVE) {
2186 			IPW_ERROR("Failed to send %s: Command timed out.\n",
2187 				  get_cmd_string(cmd->cmd));
2188 			priv->status &= ~STATUS_HCMD_ACTIVE;
2189 			spin_unlock_irqrestore(&priv->lock, flags);
2190 			rc = -EIO;
2191 			goto exit;
2192 		}
2193 		spin_unlock_irqrestore(&priv->lock, flags);
2194 	} else
2195 		rc = 0;
2196 
2197 	if (priv->status & STATUS_RF_KILL_HW) {
2198 		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2199 			  get_cmd_string(cmd->cmd));
2200 		rc = -EIO;
2201 		goto exit;
2202 	}
2203 
2204       exit:
2205 	if (priv->cmdlog) {
2206 		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2207 		priv->cmdlog_pos %= priv->cmdlog_len;
2208 	}
2209 	return rc;
2210 }
2211 
ipw_send_cmd_simple(struct ipw_priv * priv,u8 command)2212 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2213 {
2214 	struct host_cmd cmd = {
2215 		.cmd = command,
2216 	};
2217 
2218 	return __ipw_send_cmd(priv, &cmd);
2219 }
2220 
ipw_send_cmd_pdu(struct ipw_priv * priv,u8 command,u8 len,void * data)2221 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2222 			    void *data)
2223 {
2224 	struct host_cmd cmd = {
2225 		.cmd = command,
2226 		.len = len,
2227 		.param = data,
2228 	};
2229 
2230 	return __ipw_send_cmd(priv, &cmd);
2231 }
2232 
ipw_send_host_complete(struct ipw_priv * priv)2233 static int ipw_send_host_complete(struct ipw_priv *priv)
2234 {
2235 	if (!priv) {
2236 		IPW_ERROR("Invalid args\n");
2237 		return -1;
2238 	}
2239 
2240 	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2241 }
2242 
ipw_send_system_config(struct ipw_priv * priv)2243 static int ipw_send_system_config(struct ipw_priv *priv)
2244 {
2245 	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2246 				sizeof(priv->sys_config),
2247 				&priv->sys_config);
2248 }
2249 
ipw_send_ssid(struct ipw_priv * priv,u8 * ssid,int len)2250 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2251 {
2252 	if (!priv || !ssid) {
2253 		IPW_ERROR("Invalid args\n");
2254 		return -1;
2255 	}
2256 
2257 	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2258 				ssid);
2259 }
2260 
ipw_send_adapter_address(struct ipw_priv * priv,u8 * mac)2261 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2262 {
2263 	if (!priv || !mac) {
2264 		IPW_ERROR("Invalid args\n");
2265 		return -1;
2266 	}
2267 
2268 	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2269 		       priv->net_dev->name, mac);
2270 
2271 	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2272 }
2273 
2274 /*
2275  * NOTE: This must be executed from our workqueue as it results in udelay
2276  * being called which may corrupt the keyboard if executed on default
2277  * workqueue
2278  */
ipw_adapter_restart(void * adapter)2279 static void ipw_adapter_restart(void *adapter)
2280 {
2281 	struct ipw_priv *priv = adapter;
2282 
2283 	if (priv->status & STATUS_RF_KILL_MASK)
2284 		return;
2285 
2286 	ipw_down(priv);
2287 
2288 	if (priv->assoc_network &&
2289 	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2290 		ipw_remove_current_network(priv);
2291 
2292 	if (ipw_up(priv)) {
2293 		IPW_ERROR("Failed to up device\n");
2294 		return;
2295 	}
2296 }
2297 
ipw_bg_adapter_restart(struct work_struct * work)2298 static void ipw_bg_adapter_restart(struct work_struct *work)
2299 {
2300 	struct ipw_priv *priv =
2301 		container_of(work, struct ipw_priv, adapter_restart);
2302 	mutex_lock(&priv->mutex);
2303 	ipw_adapter_restart(priv);
2304 	mutex_unlock(&priv->mutex);
2305 }
2306 
2307 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2308 
ipw_scan_check(void * data)2309 static void ipw_scan_check(void *data)
2310 {
2311 	struct ipw_priv *priv = data;
2312 	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2313 		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2314 			       "adapter after (%dms).\n",
2315 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2316 		queue_work(priv->workqueue, &priv->adapter_restart);
2317 	}
2318 }
2319 
ipw_bg_scan_check(struct work_struct * work)2320 static void ipw_bg_scan_check(struct work_struct *work)
2321 {
2322 	struct ipw_priv *priv =
2323 		container_of(work, struct ipw_priv, scan_check.work);
2324 	mutex_lock(&priv->mutex);
2325 	ipw_scan_check(priv);
2326 	mutex_unlock(&priv->mutex);
2327 }
2328 
ipw_send_scan_request_ext(struct ipw_priv * priv,struct ipw_scan_request_ext * request)2329 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2330 				     struct ipw_scan_request_ext *request)
2331 {
2332 	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2333 				sizeof(*request), request);
2334 }
2335 
ipw_send_scan_abort(struct ipw_priv * priv)2336 static int ipw_send_scan_abort(struct ipw_priv *priv)
2337 {
2338 	if (!priv) {
2339 		IPW_ERROR("Invalid args\n");
2340 		return -1;
2341 	}
2342 
2343 	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2344 }
2345 
ipw_set_sensitivity(struct ipw_priv * priv,u16 sens)2346 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2347 {
2348 	struct ipw_sensitivity_calib calib = {
2349 		.beacon_rssi_raw = cpu_to_le16(sens),
2350 	};
2351 
2352 	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2353 				&calib);
2354 }
2355 
ipw_send_associate(struct ipw_priv * priv,struct ipw_associate * associate)2356 static int ipw_send_associate(struct ipw_priv *priv,
2357 			      struct ipw_associate *associate)
2358 {
2359 	if (!priv || !associate) {
2360 		IPW_ERROR("Invalid args\n");
2361 		return -1;
2362 	}
2363 
2364 	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2365 				associate);
2366 }
2367 
ipw_send_supported_rates(struct ipw_priv * priv,struct ipw_supported_rates * rates)2368 static int ipw_send_supported_rates(struct ipw_priv *priv,
2369 				    struct ipw_supported_rates *rates)
2370 {
2371 	if (!priv || !rates) {
2372 		IPW_ERROR("Invalid args\n");
2373 		return -1;
2374 	}
2375 
2376 	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2377 				rates);
2378 }
2379 
ipw_set_random_seed(struct ipw_priv * priv)2380 static int ipw_set_random_seed(struct ipw_priv *priv)
2381 {
2382 	u32 val;
2383 
2384 	if (!priv) {
2385 		IPW_ERROR("Invalid args\n");
2386 		return -1;
2387 	}
2388 
2389 	get_random_bytes(&val, sizeof(val));
2390 
2391 	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2392 }
2393 
ipw_send_card_disable(struct ipw_priv * priv,u32 phy_off)2394 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2395 {
2396 	__le32 v = cpu_to_le32(phy_off);
2397 	if (!priv) {
2398 		IPW_ERROR("Invalid args\n");
2399 		return -1;
2400 	}
2401 
2402 	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2403 }
2404 
ipw_send_tx_power(struct ipw_priv * priv,struct ipw_tx_power * power)2405 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2406 {
2407 	if (!priv || !power) {
2408 		IPW_ERROR("Invalid args\n");
2409 		return -1;
2410 	}
2411 
2412 	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2413 }
2414 
ipw_set_tx_power(struct ipw_priv * priv)2415 static int ipw_set_tx_power(struct ipw_priv *priv)
2416 {
2417 	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2418 	struct ipw_tx_power tx_power;
2419 	s8 max_power;
2420 	int i;
2421 
2422 	memset(&tx_power, 0, sizeof(tx_power));
2423 
2424 	/* configure device for 'G' band */
2425 	tx_power.ieee_mode = IPW_G_MODE;
2426 	tx_power.num_channels = geo->bg_channels;
2427 	for (i = 0; i < geo->bg_channels; i++) {
2428 		max_power = geo->bg[i].max_power;
2429 		tx_power.channels_tx_power[i].channel_number =
2430 		    geo->bg[i].channel;
2431 		tx_power.channels_tx_power[i].tx_power = max_power ?
2432 		    min(max_power, priv->tx_power) : priv->tx_power;
2433 	}
2434 	if (ipw_send_tx_power(priv, &tx_power))
2435 		return -EIO;
2436 
2437 	/* configure device to also handle 'B' band */
2438 	tx_power.ieee_mode = IPW_B_MODE;
2439 	if (ipw_send_tx_power(priv, &tx_power))
2440 		return -EIO;
2441 
2442 	/* configure device to also handle 'A' band */
2443 	if (priv->ieee->abg_true) {
2444 		tx_power.ieee_mode = IPW_A_MODE;
2445 		tx_power.num_channels = geo->a_channels;
2446 		for (i = 0; i < tx_power.num_channels; i++) {
2447 			max_power = geo->a[i].max_power;
2448 			tx_power.channels_tx_power[i].channel_number =
2449 			    geo->a[i].channel;
2450 			tx_power.channels_tx_power[i].tx_power = max_power ?
2451 			    min(max_power, priv->tx_power) : priv->tx_power;
2452 		}
2453 		if (ipw_send_tx_power(priv, &tx_power))
2454 			return -EIO;
2455 	}
2456 	return 0;
2457 }
2458 
ipw_send_rts_threshold(struct ipw_priv * priv,u16 rts)2459 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2460 {
2461 	struct ipw_rts_threshold rts_threshold = {
2462 		.rts_threshold = cpu_to_le16(rts),
2463 	};
2464 
2465 	if (!priv) {
2466 		IPW_ERROR("Invalid args\n");
2467 		return -1;
2468 	}
2469 
2470 	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2471 				sizeof(rts_threshold), &rts_threshold);
2472 }
2473 
ipw_send_frag_threshold(struct ipw_priv * priv,u16 frag)2474 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2475 {
2476 	struct ipw_frag_threshold frag_threshold = {
2477 		.frag_threshold = cpu_to_le16(frag),
2478 	};
2479 
2480 	if (!priv) {
2481 		IPW_ERROR("Invalid args\n");
2482 		return -1;
2483 	}
2484 
2485 	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2486 				sizeof(frag_threshold), &frag_threshold);
2487 }
2488 
ipw_send_power_mode(struct ipw_priv * priv,u32 mode)2489 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2490 {
2491 	__le32 param;
2492 
2493 	if (!priv) {
2494 		IPW_ERROR("Invalid args\n");
2495 		return -1;
2496 	}
2497 
2498 	/* If on battery, set to 3, if AC set to CAM, else user
2499 	 * level */
2500 	switch (mode) {
2501 	case IPW_POWER_BATTERY:
2502 		param = cpu_to_le32(IPW_POWER_INDEX_3);
2503 		break;
2504 	case IPW_POWER_AC:
2505 		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2506 		break;
2507 	default:
2508 		param = cpu_to_le32(mode);
2509 		break;
2510 	}
2511 
2512 	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2513 				&param);
2514 }
2515 
ipw_send_retry_limit(struct ipw_priv * priv,u8 slimit,u8 llimit)2516 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2517 {
2518 	struct ipw_retry_limit retry_limit = {
2519 		.short_retry_limit = slimit,
2520 		.long_retry_limit = llimit
2521 	};
2522 
2523 	if (!priv) {
2524 		IPW_ERROR("Invalid args\n");
2525 		return -1;
2526 	}
2527 
2528 	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2529 				&retry_limit);
2530 }
2531 
2532 /*
2533  * The IPW device contains a Microwire compatible EEPROM that stores
2534  * various data like the MAC address.  Usually the firmware has exclusive
2535  * access to the eeprom, but during device initialization (before the
2536  * device driver has sent the HostComplete command to the firmware) the
2537  * device driver has read access to the EEPROM by way of indirect addressing
2538  * through a couple of memory mapped registers.
2539  *
2540  * The following is a simplified implementation for pulling data out of the
2541  * the eeprom, along with some helper functions to find information in
2542  * the per device private data's copy of the eeprom.
2543  *
2544  * NOTE: To better understand how these functions work (i.e what is a chip
2545  *       select and why do have to keep driving the eeprom clock?), read
2546  *       just about any data sheet for a Microwire compatible EEPROM.
2547  */
2548 
2549 /* write a 32 bit value into the indirect accessor register */
eeprom_write_reg(struct ipw_priv * p,u32 data)2550 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2551 {
2552 	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2553 
2554 	/* the eeprom requires some time to complete the operation */
2555 	udelay(p->eeprom_delay);
2556 
2557 	return;
2558 }
2559 
2560 /* perform a chip select operation */
eeprom_cs(struct ipw_priv * priv)2561 static void eeprom_cs(struct ipw_priv *priv)
2562 {
2563 	eeprom_write_reg(priv, 0);
2564 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2565 	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2566 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2567 }
2568 
2569 /* perform a chip select operation */
eeprom_disable_cs(struct ipw_priv * priv)2570 static void eeprom_disable_cs(struct ipw_priv *priv)
2571 {
2572 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2573 	eeprom_write_reg(priv, 0);
2574 	eeprom_write_reg(priv, EEPROM_BIT_SK);
2575 }
2576 
2577 /* push a single bit down to the eeprom */
eeprom_write_bit(struct ipw_priv * p,u8 bit)2578 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2579 {
2580 	int d = (bit ? EEPROM_BIT_DI : 0);
2581 	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2582 	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2583 }
2584 
2585 /* push an opcode followed by an address down to the eeprom */
eeprom_op(struct ipw_priv * priv,u8 op,u8 addr)2586 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2587 {
2588 	int i;
2589 
2590 	eeprom_cs(priv);
2591 	eeprom_write_bit(priv, 1);
2592 	eeprom_write_bit(priv, op & 2);
2593 	eeprom_write_bit(priv, op & 1);
2594 	for (i = 7; i >= 0; i--) {
2595 		eeprom_write_bit(priv, addr & (1 << i));
2596 	}
2597 }
2598 
2599 /* pull 16 bits off the eeprom, one bit at a time */
eeprom_read_u16(struct ipw_priv * priv,u8 addr)2600 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2601 {
2602 	int i;
2603 	u16 r = 0;
2604 
2605 	/* Send READ Opcode */
2606 	eeprom_op(priv, EEPROM_CMD_READ, addr);
2607 
2608 	/* Send dummy bit */
2609 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2610 
2611 	/* Read the byte off the eeprom one bit at a time */
2612 	for (i = 0; i < 16; i++) {
2613 		u32 data = 0;
2614 		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2615 		eeprom_write_reg(priv, EEPROM_BIT_CS);
2616 		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2617 		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2618 	}
2619 
2620 	/* Send another dummy bit */
2621 	eeprom_write_reg(priv, 0);
2622 	eeprom_disable_cs(priv);
2623 
2624 	return r;
2625 }
2626 
2627 /* helper function for pulling the mac address out of the private */
2628 /* data's copy of the eeprom data                                 */
eeprom_parse_mac(struct ipw_priv * priv,u8 * mac)2629 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2630 {
2631 	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2632 }
2633 
2634 /*
2635  * Either the device driver (i.e. the host) or the firmware can
2636  * load eeprom data into the designated region in SRAM.  If neither
2637  * happens then the FW will shutdown with a fatal error.
2638  *
2639  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2640  * bit needs region of shared SRAM needs to be non-zero.
2641  */
ipw_eeprom_init_sram(struct ipw_priv * priv)2642 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2643 {
2644 	int i;
2645 	__le16 *eeprom = (__le16 *) priv->eeprom;
2646 
2647 	IPW_DEBUG_TRACE(">>\n");
2648 
2649 	/* read entire contents of eeprom into private buffer */
2650 	for (i = 0; i < 128; i++)
2651 		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2652 
2653 	/*
2654 	   If the data looks correct, then copy it to our private
2655 	   copy.  Otherwise let the firmware know to perform the operation
2656 	   on its own.
2657 	 */
2658 	if (priv->eeprom[EEPROM_VERSION] != 0) {
2659 		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2660 
2661 		/* write the eeprom data to sram */
2662 		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2663 			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2664 
2665 		/* Do not load eeprom data on fatal error or suspend */
2666 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2667 	} else {
2668 		IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2669 
2670 		/* Load eeprom data on fatal error or suspend */
2671 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2672 	}
2673 
2674 	IPW_DEBUG_TRACE("<<\n");
2675 }
2676 
ipw_zero_memory(struct ipw_priv * priv,u32 start,u32 count)2677 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2678 {
2679 	count >>= 2;
2680 	if (!count)
2681 		return;
2682 	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2683 	while (count--)
2684 		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2685 }
2686 
ipw_fw_dma_reset_command_blocks(struct ipw_priv * priv)2687 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2688 {
2689 	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2690 			CB_NUMBER_OF_ELEMENTS_SMALL *
2691 			sizeof(struct command_block));
2692 }
2693 
ipw_fw_dma_enable(struct ipw_priv * priv)2694 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2695 {				/* start dma engine but no transfers yet */
2696 
2697 	IPW_DEBUG_FW(">> : \n");
2698 
2699 	/* Start the dma */
2700 	ipw_fw_dma_reset_command_blocks(priv);
2701 
2702 	/* Write CB base address */
2703 	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2704 
2705 	IPW_DEBUG_FW("<< : \n");
2706 	return 0;
2707 }
2708 
ipw_fw_dma_abort(struct ipw_priv * priv)2709 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2710 {
2711 	u32 control = 0;
2712 
2713 	IPW_DEBUG_FW(">> :\n");
2714 
2715 	/* set the Stop and Abort bit */
2716 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2717 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2718 	priv->sram_desc.last_cb_index = 0;
2719 
2720 	IPW_DEBUG_FW("<< \n");
2721 }
2722 
ipw_fw_dma_write_command_block(struct ipw_priv * priv,int index,struct command_block * cb)2723 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2724 					  struct command_block *cb)
2725 {
2726 	u32 address =
2727 	    IPW_SHARED_SRAM_DMA_CONTROL +
2728 	    (sizeof(struct command_block) * index);
2729 	IPW_DEBUG_FW(">> :\n");
2730 
2731 	ipw_write_indirect(priv, address, (u8 *) cb,
2732 			   (int)sizeof(struct command_block));
2733 
2734 	IPW_DEBUG_FW("<< :\n");
2735 	return 0;
2736 
2737 }
2738 
ipw_fw_dma_kick(struct ipw_priv * priv)2739 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2740 {
2741 	u32 control = 0;
2742 	u32 index = 0;
2743 
2744 	IPW_DEBUG_FW(">> :\n");
2745 
2746 	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2747 		ipw_fw_dma_write_command_block(priv, index,
2748 					       &priv->sram_desc.cb_list[index]);
2749 
2750 	/* Enable the DMA in the CSR register */
2751 	ipw_clear_bit(priv, IPW_RESET_REG,
2752 		      IPW_RESET_REG_MASTER_DISABLED |
2753 		      IPW_RESET_REG_STOP_MASTER);
2754 
2755 	/* Set the Start bit. */
2756 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2757 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2758 
2759 	IPW_DEBUG_FW("<< :\n");
2760 	return 0;
2761 }
2762 
ipw_fw_dma_dump_command_block(struct ipw_priv * priv)2763 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2764 {
2765 	u32 address;
2766 	u32 register_value = 0;
2767 	u32 cb_fields_address = 0;
2768 
2769 	IPW_DEBUG_FW(">> :\n");
2770 	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2771 	IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2772 
2773 	/* Read the DMA Controlor register */
2774 	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2775 	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2776 
2777 	/* Print the CB values */
2778 	cb_fields_address = address;
2779 	register_value = ipw_read_reg32(priv, cb_fields_address);
2780 	IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2781 
2782 	cb_fields_address += sizeof(u32);
2783 	register_value = ipw_read_reg32(priv, cb_fields_address);
2784 	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2785 
2786 	cb_fields_address += sizeof(u32);
2787 	register_value = ipw_read_reg32(priv, cb_fields_address);
2788 	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2789 			  register_value);
2790 
2791 	cb_fields_address += sizeof(u32);
2792 	register_value = ipw_read_reg32(priv, cb_fields_address);
2793 	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2794 
2795 	IPW_DEBUG_FW(">> :\n");
2796 }
2797 
ipw_fw_dma_command_block_index(struct ipw_priv * priv)2798 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2799 {
2800 	u32 current_cb_address = 0;
2801 	u32 current_cb_index = 0;
2802 
2803 	IPW_DEBUG_FW("<< :\n");
2804 	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2805 
2806 	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2807 	    sizeof(struct command_block);
2808 
2809 	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2810 			  current_cb_index, current_cb_address);
2811 
2812 	IPW_DEBUG_FW(">> :\n");
2813 	return current_cb_index;
2814 
2815 }
2816 
ipw_fw_dma_add_command_block(struct ipw_priv * priv,u32 src_address,u32 dest_address,u32 length,int interrupt_enabled,int is_last)2817 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2818 					u32 src_address,
2819 					u32 dest_address,
2820 					u32 length,
2821 					int interrupt_enabled, int is_last)
2822 {
2823 
2824 	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2825 	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2826 	    CB_DEST_SIZE_LONG;
2827 	struct command_block *cb;
2828 	u32 last_cb_element = 0;
2829 
2830 	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2831 			  src_address, dest_address, length);
2832 
2833 	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2834 		return -1;
2835 
2836 	last_cb_element = priv->sram_desc.last_cb_index;
2837 	cb = &priv->sram_desc.cb_list[last_cb_element];
2838 	priv->sram_desc.last_cb_index++;
2839 
2840 	/* Calculate the new CB control word */
2841 	if (interrupt_enabled)
2842 		control |= CB_INT_ENABLED;
2843 
2844 	if (is_last)
2845 		control |= CB_LAST_VALID;
2846 
2847 	control |= length;
2848 
2849 	/* Calculate the CB Element's checksum value */
2850 	cb->status = control ^ src_address ^ dest_address;
2851 
2852 	/* Copy the Source and Destination addresses */
2853 	cb->dest_addr = dest_address;
2854 	cb->source_addr = src_address;
2855 
2856 	/* Copy the Control Word last */
2857 	cb->control = control;
2858 
2859 	return 0;
2860 }
2861 
ipw_fw_dma_add_buffer(struct ipw_priv * priv,u32 src_phys,u32 dest_address,u32 length)2862 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2863 				 u32 src_phys, u32 dest_address, u32 length)
2864 {
2865 	u32 bytes_left = length;
2866 	u32 src_offset = 0;
2867 	u32 dest_offset = 0;
2868 	int status = 0;
2869 	IPW_DEBUG_FW(">> \n");
2870 	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2871 			  src_phys, dest_address, length);
2872 	while (bytes_left > CB_MAX_LENGTH) {
2873 		status = ipw_fw_dma_add_command_block(priv,
2874 						      src_phys + src_offset,
2875 						      dest_address +
2876 						      dest_offset,
2877 						      CB_MAX_LENGTH, 0, 0);
2878 		if (status) {
2879 			IPW_DEBUG_FW_INFO(": Failed\n");
2880 			return -1;
2881 		} else
2882 			IPW_DEBUG_FW_INFO(": Added new cb\n");
2883 
2884 		src_offset += CB_MAX_LENGTH;
2885 		dest_offset += CB_MAX_LENGTH;
2886 		bytes_left -= CB_MAX_LENGTH;
2887 	}
2888 
2889 	/* add the buffer tail */
2890 	if (bytes_left > 0) {
2891 		status =
2892 		    ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2893 						 dest_address + dest_offset,
2894 						 bytes_left, 0, 0);
2895 		if (status) {
2896 			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2897 			return -1;
2898 		} else
2899 			IPW_DEBUG_FW_INFO
2900 			    (": Adding new cb - the buffer tail\n");
2901 	}
2902 
2903 	IPW_DEBUG_FW("<< \n");
2904 	return 0;
2905 }
2906 
ipw_fw_dma_wait(struct ipw_priv * priv)2907 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2908 {
2909 	u32 current_index = 0, previous_index;
2910 	u32 watchdog = 0;
2911 
2912 	IPW_DEBUG_FW(">> : \n");
2913 
2914 	current_index = ipw_fw_dma_command_block_index(priv);
2915 	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2916 			  (int)priv->sram_desc.last_cb_index);
2917 
2918 	while (current_index < priv->sram_desc.last_cb_index) {
2919 		udelay(50);
2920 		previous_index = current_index;
2921 		current_index = ipw_fw_dma_command_block_index(priv);
2922 
2923 		if (previous_index < current_index) {
2924 			watchdog = 0;
2925 			continue;
2926 		}
2927 		if (++watchdog > 400) {
2928 			IPW_DEBUG_FW_INFO("Timeout\n");
2929 			ipw_fw_dma_dump_command_block(priv);
2930 			ipw_fw_dma_abort(priv);
2931 			return -1;
2932 		}
2933 	}
2934 
2935 	ipw_fw_dma_abort(priv);
2936 
2937 	/*Disable the DMA in the CSR register */
2938 	ipw_set_bit(priv, IPW_RESET_REG,
2939 		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2940 
2941 	IPW_DEBUG_FW("<< dmaWaitSync \n");
2942 	return 0;
2943 }
2944 
ipw_remove_current_network(struct ipw_priv * priv)2945 static void ipw_remove_current_network(struct ipw_priv *priv)
2946 {
2947 	struct list_head *element, *safe;
2948 	struct ieee80211_network *network = NULL;
2949 	unsigned long flags;
2950 
2951 	spin_lock_irqsave(&priv->ieee->lock, flags);
2952 	list_for_each_safe(element, safe, &priv->ieee->network_list) {
2953 		network = list_entry(element, struct ieee80211_network, list);
2954 		if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2955 			list_del(element);
2956 			list_add_tail(&network->list,
2957 				      &priv->ieee->network_free_list);
2958 		}
2959 	}
2960 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
2961 }
2962 
2963 /**
2964  * Check that card is still alive.
2965  * Reads debug register from domain0.
2966  * If card is present, pre-defined value should
2967  * be found there.
2968  *
2969  * @param priv
2970  * @return 1 if card is present, 0 otherwise
2971  */
ipw_alive(struct ipw_priv * priv)2972 static inline int ipw_alive(struct ipw_priv *priv)
2973 {
2974 	return ipw_read32(priv, 0x90) == 0xd55555d5;
2975 }
2976 
2977 /* timeout in msec, attempted in 10-msec quanta */
ipw_poll_bit(struct ipw_priv * priv,u32 addr,u32 mask,int timeout)2978 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2979 			       int timeout)
2980 {
2981 	int i = 0;
2982 
2983 	do {
2984 		if ((ipw_read32(priv, addr) & mask) == mask)
2985 			return i;
2986 		mdelay(10);
2987 		i += 10;
2988 	} while (i < timeout);
2989 
2990 	return -ETIME;
2991 }
2992 
2993 /* These functions load the firmware and micro code for the operation of
2994  * the ipw hardware.  It assumes the buffer has all the bits for the
2995  * image and the caller is handling the memory allocation and clean up.
2996  */
2997 
ipw_stop_master(struct ipw_priv * priv)2998 static int ipw_stop_master(struct ipw_priv *priv)
2999 {
3000 	int rc;
3001 
3002 	IPW_DEBUG_TRACE(">> \n");
3003 	/* stop master. typical delay - 0 */
3004 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3005 
3006 	/* timeout is in msec, polled in 10-msec quanta */
3007 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3008 			  IPW_RESET_REG_MASTER_DISABLED, 100);
3009 	if (rc < 0) {
3010 		IPW_ERROR("wait for stop master failed after 100ms\n");
3011 		return -1;
3012 	}
3013 
3014 	IPW_DEBUG_INFO("stop master %dms\n", rc);
3015 
3016 	return rc;
3017 }
3018 
ipw_arc_release(struct ipw_priv * priv)3019 static void ipw_arc_release(struct ipw_priv *priv)
3020 {
3021 	IPW_DEBUG_TRACE(">> \n");
3022 	mdelay(5);
3023 
3024 	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3025 
3026 	/* no one knows timing, for safety add some delay */
3027 	mdelay(5);
3028 }
3029 
3030 struct fw_chunk {
3031 	__le32 address;
3032 	__le32 length;
3033 };
3034 
ipw_load_ucode(struct ipw_priv * priv,u8 * data,size_t len)3035 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3036 {
3037 	int rc = 0, i, addr;
3038 	u8 cr = 0;
3039 	__le16 *image;
3040 
3041 	image = (__le16 *) data;
3042 
3043 	IPW_DEBUG_TRACE(">> \n");
3044 
3045 	rc = ipw_stop_master(priv);
3046 
3047 	if (rc < 0)
3048 		return rc;
3049 
3050 	for (addr = IPW_SHARED_LOWER_BOUND;
3051 	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3052 		ipw_write32(priv, addr, 0);
3053 	}
3054 
3055 	/* no ucode (yet) */
3056 	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3057 	/* destroy DMA queues */
3058 	/* reset sequence */
3059 
3060 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3061 	ipw_arc_release(priv);
3062 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3063 	mdelay(1);
3064 
3065 	/* reset PHY */
3066 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3067 	mdelay(1);
3068 
3069 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3070 	mdelay(1);
3071 
3072 	/* enable ucode store */
3073 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3074 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3075 	mdelay(1);
3076 
3077 	/* write ucode */
3078 	/**
3079 	 * @bug
3080 	 * Do NOT set indirect address register once and then
3081 	 * store data to indirect data register in the loop.
3082 	 * It seems very reasonable, but in this case DINO do not
3083 	 * accept ucode. It is essential to set address each time.
3084 	 */
3085 	/* load new ipw uCode */
3086 	for (i = 0; i < len / 2; i++)
3087 		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3088 				le16_to_cpu(image[i]));
3089 
3090 	/* enable DINO */
3091 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3092 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3093 
3094 	/* this is where the igx / win driver deveates from the VAP driver. */
3095 
3096 	/* wait for alive response */
3097 	for (i = 0; i < 100; i++) {
3098 		/* poll for incoming data */
3099 		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3100 		if (cr & DINO_RXFIFO_DATA)
3101 			break;
3102 		mdelay(1);
3103 	}
3104 
3105 	if (cr & DINO_RXFIFO_DATA) {
3106 		/* alive_command_responce size is NOT multiple of 4 */
3107 		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3108 
3109 		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3110 			response_buffer[i] =
3111 			    cpu_to_le32(ipw_read_reg32(priv,
3112 						       IPW_BASEBAND_RX_FIFO_READ));
3113 		memcpy(&priv->dino_alive, response_buffer,
3114 		       sizeof(priv->dino_alive));
3115 		if (priv->dino_alive.alive_command == 1
3116 		    && priv->dino_alive.ucode_valid == 1) {
3117 			rc = 0;
3118 			IPW_DEBUG_INFO
3119 			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3120 			     "of %02d/%02d/%02d %02d:%02d\n",
3121 			     priv->dino_alive.software_revision,
3122 			     priv->dino_alive.software_revision,
3123 			     priv->dino_alive.device_identifier,
3124 			     priv->dino_alive.device_identifier,
3125 			     priv->dino_alive.time_stamp[0],
3126 			     priv->dino_alive.time_stamp[1],
3127 			     priv->dino_alive.time_stamp[2],
3128 			     priv->dino_alive.time_stamp[3],
3129 			     priv->dino_alive.time_stamp[4]);
3130 		} else {
3131 			IPW_DEBUG_INFO("Microcode is not alive\n");
3132 			rc = -EINVAL;
3133 		}
3134 	} else {
3135 		IPW_DEBUG_INFO("No alive response from DINO\n");
3136 		rc = -ETIME;
3137 	}
3138 
3139 	/* disable DINO, otherwise for some reason
3140 	   firmware have problem getting alive resp. */
3141 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3142 
3143 	return rc;
3144 }
3145 
ipw_load_firmware(struct ipw_priv * priv,u8 * data,size_t len)3146 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3147 {
3148 	int rc = -1;
3149 	int offset = 0;
3150 	struct fw_chunk *chunk;
3151 	dma_addr_t shared_phys;
3152 	u8 *shared_virt;
3153 
3154 	IPW_DEBUG_TRACE("<< : \n");
3155 	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3156 
3157 	if (!shared_virt)
3158 		return -ENOMEM;
3159 
3160 	memmove(shared_virt, data, len);
3161 
3162 	/* Start the Dma */
3163 	rc = ipw_fw_dma_enable(priv);
3164 
3165 	if (priv->sram_desc.last_cb_index > 0) {
3166 		/* the DMA is already ready this would be a bug. */
3167 		BUG();
3168 		goto out;
3169 	}
3170 
3171 	do {
3172 		chunk = (struct fw_chunk *)(data + offset);
3173 		offset += sizeof(struct fw_chunk);
3174 		/* build DMA packet and queue up for sending */
3175 		/* dma to chunk->address, the chunk->length bytes from data +
3176 		 * offeset*/
3177 		/* Dma loading */
3178 		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3179 					   le32_to_cpu(chunk->address),
3180 					   le32_to_cpu(chunk->length));
3181 		if (rc) {
3182 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3183 			goto out;
3184 		}
3185 
3186 		offset += le32_to_cpu(chunk->length);
3187 	} while (offset < len);
3188 
3189 	/* Run the DMA and wait for the answer */
3190 	rc = ipw_fw_dma_kick(priv);
3191 	if (rc) {
3192 		IPW_ERROR("dmaKick Failed\n");
3193 		goto out;
3194 	}
3195 
3196 	rc = ipw_fw_dma_wait(priv);
3197 	if (rc) {
3198 		IPW_ERROR("dmaWaitSync Failed\n");
3199 		goto out;
3200 	}
3201       out:
3202 	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3203 	return rc;
3204 }
3205 
3206 /* stop nic */
ipw_stop_nic(struct ipw_priv * priv)3207 static int ipw_stop_nic(struct ipw_priv *priv)
3208 {
3209 	int rc = 0;
3210 
3211 	/* stop */
3212 	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3213 
3214 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3215 			  IPW_RESET_REG_MASTER_DISABLED, 500);
3216 	if (rc < 0) {
3217 		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3218 		return rc;
3219 	}
3220 
3221 	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3222 
3223 	return rc;
3224 }
3225 
ipw_start_nic(struct ipw_priv * priv)3226 static void ipw_start_nic(struct ipw_priv *priv)
3227 {
3228 	IPW_DEBUG_TRACE(">>\n");
3229 
3230 	/* prvHwStartNic  release ARC */
3231 	ipw_clear_bit(priv, IPW_RESET_REG,
3232 		      IPW_RESET_REG_MASTER_DISABLED |
3233 		      IPW_RESET_REG_STOP_MASTER |
3234 		      CBD_RESET_REG_PRINCETON_RESET);
3235 
3236 	/* enable power management */
3237 	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3238 		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3239 
3240 	IPW_DEBUG_TRACE("<<\n");
3241 }
3242 
ipw_init_nic(struct ipw_priv * priv)3243 static int ipw_init_nic(struct ipw_priv *priv)
3244 {
3245 	int rc;
3246 
3247 	IPW_DEBUG_TRACE(">>\n");
3248 	/* reset */
3249 	/*prvHwInitNic */
3250 	/* set "initialization complete" bit to move adapter to D0 state */
3251 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3252 
3253 	/* low-level PLL activation */
3254 	ipw_write32(priv, IPW_READ_INT_REGISTER,
3255 		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3256 
3257 	/* wait for clock stabilization */
3258 	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3259 			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3260 	if (rc < 0)
3261 		IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3262 
3263 	/* assert SW reset */
3264 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3265 
3266 	udelay(10);
3267 
3268 	/* set "initialization complete" bit to move adapter to D0 state */
3269 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3270 
3271 	IPW_DEBUG_TRACE(">>\n");
3272 	return 0;
3273 }
3274 
3275 /* Call this function from process context, it will sleep in request_firmware.
3276  * Probe is an ok place to call this from.
3277  */
ipw_reset_nic(struct ipw_priv * priv)3278 static int ipw_reset_nic(struct ipw_priv *priv)
3279 {
3280 	int rc = 0;
3281 	unsigned long flags;
3282 
3283 	IPW_DEBUG_TRACE(">>\n");
3284 
3285 	rc = ipw_init_nic(priv);
3286 
3287 	spin_lock_irqsave(&priv->lock, flags);
3288 	/* Clear the 'host command active' bit... */
3289 	priv->status &= ~STATUS_HCMD_ACTIVE;
3290 	wake_up_interruptible(&priv->wait_command_queue);
3291 	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3292 	wake_up_interruptible(&priv->wait_state);
3293 	spin_unlock_irqrestore(&priv->lock, flags);
3294 
3295 	IPW_DEBUG_TRACE("<<\n");
3296 	return rc;
3297 }
3298 
3299 
3300 struct ipw_fw {
3301 	__le32 ver;
3302 	__le32 boot_size;
3303 	__le32 ucode_size;
3304 	__le32 fw_size;
3305 	u8 data[0];
3306 };
3307 
ipw_get_fw(struct ipw_priv * priv,const struct firmware ** raw,const char * name)3308 static int ipw_get_fw(struct ipw_priv *priv,
3309 		      const struct firmware **raw, const char *name)
3310 {
3311 	struct ipw_fw *fw;
3312 	int rc;
3313 
3314 	/* ask firmware_class module to get the boot firmware off disk */
3315 	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3316 	if (rc < 0) {
3317 		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3318 		return rc;
3319 	}
3320 
3321 	if ((*raw)->size < sizeof(*fw)) {
3322 		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3323 		return -EINVAL;
3324 	}
3325 
3326 	fw = (void *)(*raw)->data;
3327 
3328 	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3329 	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3330 		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3331 			  name, (*raw)->size);
3332 		return -EINVAL;
3333 	}
3334 
3335 	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3336 		       name,
3337 		       le32_to_cpu(fw->ver) >> 16,
3338 		       le32_to_cpu(fw->ver) & 0xff,
3339 		       (*raw)->size - sizeof(*fw));
3340 	return 0;
3341 }
3342 
3343 #define IPW_RX_BUF_SIZE (3000)
3344 
ipw_rx_queue_reset(struct ipw_priv * priv,struct ipw_rx_queue * rxq)3345 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3346 				      struct ipw_rx_queue *rxq)
3347 {
3348 	unsigned long flags;
3349 	int i;
3350 
3351 	spin_lock_irqsave(&rxq->lock, flags);
3352 
3353 	INIT_LIST_HEAD(&rxq->rx_free);
3354 	INIT_LIST_HEAD(&rxq->rx_used);
3355 
3356 	/* Fill the rx_used queue with _all_ of the Rx buffers */
3357 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3358 		/* In the reset function, these buffers may have been allocated
3359 		 * to an SKB, so we need to unmap and free potential storage */
3360 		if (rxq->pool[i].skb != NULL) {
3361 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3362 					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3363 			dev_kfree_skb(rxq->pool[i].skb);
3364 			rxq->pool[i].skb = NULL;
3365 		}
3366 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3367 	}
3368 
3369 	/* Set us so that we have processed and used all buffers, but have
3370 	 * not restocked the Rx queue with fresh buffers */
3371 	rxq->read = rxq->write = 0;
3372 	rxq->free_count = 0;
3373 	spin_unlock_irqrestore(&rxq->lock, flags);
3374 }
3375 
3376 #ifdef CONFIG_PM
3377 static int fw_loaded = 0;
3378 static const struct firmware *raw = NULL;
3379 
free_firmware(void)3380 static void free_firmware(void)
3381 {
3382 	if (fw_loaded) {
3383 		release_firmware(raw);
3384 		raw = NULL;
3385 		fw_loaded = 0;
3386 	}
3387 }
3388 #else
3389 #define free_firmware() do {} while (0)
3390 #endif
3391 
ipw_load(struct ipw_priv * priv)3392 static int ipw_load(struct ipw_priv *priv)
3393 {
3394 #ifndef CONFIG_PM
3395 	const struct firmware *raw = NULL;
3396 #endif
3397 	struct ipw_fw *fw;
3398 	u8 *boot_img, *ucode_img, *fw_img;
3399 	u8 *name = NULL;
3400 	int rc = 0, retries = 3;
3401 
3402 	switch (priv->ieee->iw_mode) {
3403 	case IW_MODE_ADHOC:
3404 		name = "ipw2200-ibss.fw";
3405 		break;
3406 #ifdef CONFIG_IPW2200_MONITOR
3407 	case IW_MODE_MONITOR:
3408 		name = "ipw2200-sniffer.fw";
3409 		break;
3410 #endif
3411 	case IW_MODE_INFRA:
3412 		name = "ipw2200-bss.fw";
3413 		break;
3414 	}
3415 
3416 	if (!name) {
3417 		rc = -EINVAL;
3418 		goto error;
3419 	}
3420 
3421 #ifdef CONFIG_PM
3422 	if (!fw_loaded) {
3423 #endif
3424 		rc = ipw_get_fw(priv, &raw, name);
3425 		if (rc < 0)
3426 			goto error;
3427 #ifdef CONFIG_PM
3428 	}
3429 #endif
3430 
3431 	fw = (void *)raw->data;
3432 	boot_img = &fw->data[0];
3433 	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3434 	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3435 			   le32_to_cpu(fw->ucode_size)];
3436 
3437 	if (rc < 0)
3438 		goto error;
3439 
3440 	if (!priv->rxq)
3441 		priv->rxq = ipw_rx_queue_alloc(priv);
3442 	else
3443 		ipw_rx_queue_reset(priv, priv->rxq);
3444 	if (!priv->rxq) {
3445 		IPW_ERROR("Unable to initialize Rx queue\n");
3446 		goto error;
3447 	}
3448 
3449       retry:
3450 	/* Ensure interrupts are disabled */
3451 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3452 	priv->status &= ~STATUS_INT_ENABLED;
3453 
3454 	/* ack pending interrupts */
3455 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3456 
3457 	ipw_stop_nic(priv);
3458 
3459 	rc = ipw_reset_nic(priv);
3460 	if (rc < 0) {
3461 		IPW_ERROR("Unable to reset NIC\n");
3462 		goto error;
3463 	}
3464 
3465 	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3466 			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3467 
3468 	/* DMA the initial boot firmware into the device */
3469 	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3470 	if (rc < 0) {
3471 		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3472 		goto error;
3473 	}
3474 
3475 	/* kick start the device */
3476 	ipw_start_nic(priv);
3477 
3478 	/* wait for the device to finish its initial startup sequence */
3479 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3480 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3481 	if (rc < 0) {
3482 		IPW_ERROR("device failed to boot initial fw image\n");
3483 		goto error;
3484 	}
3485 	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3486 
3487 	/* ack fw init done interrupt */
3488 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3489 
3490 	/* DMA the ucode into the device */
3491 	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3492 	if (rc < 0) {
3493 		IPW_ERROR("Unable to load ucode: %d\n", rc);
3494 		goto error;
3495 	}
3496 
3497 	/* stop nic */
3498 	ipw_stop_nic(priv);
3499 
3500 	/* DMA bss firmware into the device */
3501 	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3502 	if (rc < 0) {
3503 		IPW_ERROR("Unable to load firmware: %d\n", rc);
3504 		goto error;
3505 	}
3506 #ifdef CONFIG_PM
3507 	fw_loaded = 1;
3508 #endif
3509 
3510 	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3511 
3512 	rc = ipw_queue_reset(priv);
3513 	if (rc < 0) {
3514 		IPW_ERROR("Unable to initialize queues\n");
3515 		goto error;
3516 	}
3517 
3518 	/* Ensure interrupts are disabled */
3519 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3520 	/* ack pending interrupts */
3521 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3522 
3523 	/* kick start the device */
3524 	ipw_start_nic(priv);
3525 
3526 	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3527 		if (retries > 0) {
3528 			IPW_WARNING("Parity error.  Retrying init.\n");
3529 			retries--;
3530 			goto retry;
3531 		}
3532 
3533 		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3534 		rc = -EIO;
3535 		goto error;
3536 	}
3537 
3538 	/* wait for the device */
3539 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3540 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3541 	if (rc < 0) {
3542 		IPW_ERROR("device failed to start within 500ms\n");
3543 		goto error;
3544 	}
3545 	IPW_DEBUG_INFO("device response after %dms\n", rc);
3546 
3547 	/* ack fw init done interrupt */
3548 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3549 
3550 	/* read eeprom data and initialize the eeprom region of sram */
3551 	priv->eeprom_delay = 1;
3552 	ipw_eeprom_init_sram(priv);
3553 
3554 	/* enable interrupts */
3555 	ipw_enable_interrupts(priv);
3556 
3557 	/* Ensure our queue has valid packets */
3558 	ipw_rx_queue_replenish(priv);
3559 
3560 	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3561 
3562 	/* ack pending interrupts */
3563 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3564 
3565 #ifndef CONFIG_PM
3566 	release_firmware(raw);
3567 #endif
3568 	return 0;
3569 
3570       error:
3571 	if (priv->rxq) {
3572 		ipw_rx_queue_free(priv, priv->rxq);
3573 		priv->rxq = NULL;
3574 	}
3575 	ipw_tx_queue_free(priv);
3576 	if (raw)
3577 		release_firmware(raw);
3578 #ifdef CONFIG_PM
3579 	fw_loaded = 0;
3580 	raw = NULL;
3581 #endif
3582 
3583 	return rc;
3584 }
3585 
3586 /**
3587  * DMA services
3588  *
3589  * Theory of operation
3590  *
3591  * A queue is a circular buffers with 'Read' and 'Write' pointers.
3592  * 2 empty entries always kept in the buffer to protect from overflow.
3593  *
3594  * For Tx queue, there are low mark and high mark limits. If, after queuing
3595  * the packet for Tx, free space become < low mark, Tx queue stopped. When
3596  * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
3597  * Tx queue resumed.
3598  *
3599  * The IPW operates with six queues, one receive queue in the device's
3600  * sram, one transmit queue for sending commands to the device firmware,
3601  * and four transmit queues for data.
3602  *
3603  * The four transmit queues allow for performing quality of service (qos)
3604  * transmissions as per the 802.11 protocol.  Currently Linux does not
3605  * provide a mechanism to the user for utilizing prioritized queues, so
3606  * we only utilize the first data transmit queue (queue1).
3607  */
3608 
3609 /**
3610  * Driver allocates buffers of this size for Rx
3611  */
3612 
3613 /**
3614  * ipw_rx_queue_space - Return number of free slots available in queue.
3615  */
ipw_rx_queue_space(const struct ipw_rx_queue * q)3616 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3617 {
3618 	int s = q->read - q->write;
3619 	if (s <= 0)
3620 		s += RX_QUEUE_SIZE;
3621 	/* keep some buffer to not confuse full and empty queue */
3622 	s -= 2;
3623 	if (s < 0)
3624 		s = 0;
3625 	return s;
3626 }
3627 
ipw_tx_queue_space(const struct clx2_queue * q)3628 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3629 {
3630 	int s = q->last_used - q->first_empty;
3631 	if (s <= 0)
3632 		s += q->n_bd;
3633 	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3634 	if (s < 0)
3635 		s = 0;
3636 	return s;
3637 }
3638 
ipw_queue_inc_wrap(int index,int n_bd)3639 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3640 {
3641 	return (++index == n_bd) ? 0 : index;
3642 }
3643 
3644 /**
3645  * Initialize common DMA queue structure
3646  *
3647  * @param q                queue to init
3648  * @param count            Number of BD's to allocate. Should be power of 2
3649  * @param read_register    Address for 'read' register
3650  *                         (not offset within BAR, full address)
3651  * @param write_register   Address for 'write' register
3652  *                         (not offset within BAR, full address)
3653  * @param base_register    Address for 'base' register
3654  *                         (not offset within BAR, full address)
3655  * @param size             Address for 'size' register
3656  *                         (not offset within BAR, full address)
3657  */
ipw_queue_init(struct ipw_priv * priv,struct clx2_queue * q,int count,u32 read,u32 write,u32 base,u32 size)3658 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3659 			   int count, u32 read, u32 write, u32 base, u32 size)
3660 {
3661 	q->n_bd = count;
3662 
3663 	q->low_mark = q->n_bd / 4;
3664 	if (q->low_mark < 4)
3665 		q->low_mark = 4;
3666 
3667 	q->high_mark = q->n_bd / 8;
3668 	if (q->high_mark < 2)
3669 		q->high_mark = 2;
3670 
3671 	q->first_empty = q->last_used = 0;
3672 	q->reg_r = read;
3673 	q->reg_w = write;
3674 
3675 	ipw_write32(priv, base, q->dma_addr);
3676 	ipw_write32(priv, size, count);
3677 	ipw_write32(priv, read, 0);
3678 	ipw_write32(priv, write, 0);
3679 
3680 	_ipw_read32(priv, 0x90);
3681 }
3682 
ipw_queue_tx_init(struct ipw_priv * priv,struct clx2_tx_queue * q,int count,u32 read,u32 write,u32 base,u32 size)3683 static int ipw_queue_tx_init(struct ipw_priv *priv,
3684 			     struct clx2_tx_queue *q,
3685 			     int count, u32 read, u32 write, u32 base, u32 size)
3686 {
3687 	struct pci_dev *dev = priv->pci_dev;
3688 
3689 	q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3690 	if (!q->txb) {
3691 		IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3692 		return -ENOMEM;
3693 	}
3694 
3695 	q->bd =
3696 	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3697 	if (!q->bd) {
3698 		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3699 			  sizeof(q->bd[0]) * count);
3700 		kfree(q->txb);
3701 		q->txb = NULL;
3702 		return -ENOMEM;
3703 	}
3704 
3705 	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3706 	return 0;
3707 }
3708 
3709 /**
3710  * Free one TFD, those at index [txq->q.last_used].
3711  * Do NOT advance any indexes
3712  *
3713  * @param dev
3714  * @param txq
3715  */
ipw_queue_tx_free_tfd(struct ipw_priv * priv,struct clx2_tx_queue * txq)3716 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3717 				  struct clx2_tx_queue *txq)
3718 {
3719 	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3720 	struct pci_dev *dev = priv->pci_dev;
3721 	int i;
3722 
3723 	/* classify bd */
3724 	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3725 		/* nothing to cleanup after for host commands */
3726 		return;
3727 
3728 	/* sanity check */
3729 	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3730 		IPW_ERROR("Too many chunks: %i\n",
3731 			  le32_to_cpu(bd->u.data.num_chunks));
3732 		/** @todo issue fatal error, it is quite serious situation */
3733 		return;
3734 	}
3735 
3736 	/* unmap chunks if any */
3737 	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3738 		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3739 				 le16_to_cpu(bd->u.data.chunk_len[i]),
3740 				 PCI_DMA_TODEVICE);
3741 		if (txq->txb[txq->q.last_used]) {
3742 			ieee80211_txb_free(txq->txb[txq->q.last_used]);
3743 			txq->txb[txq->q.last_used] = NULL;
3744 		}
3745 	}
3746 }
3747 
3748 /**
3749  * Deallocate DMA queue.
3750  *
3751  * Empty queue by removing and destroying all BD's.
3752  * Free all buffers.
3753  *
3754  * @param dev
3755  * @param q
3756  */
ipw_queue_tx_free(struct ipw_priv * priv,struct clx2_tx_queue * txq)3757 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3758 {
3759 	struct clx2_queue *q = &txq->q;
3760 	struct pci_dev *dev = priv->pci_dev;
3761 
3762 	if (q->n_bd == 0)
3763 		return;
3764 
3765 	/* first, empty all BD's */
3766 	for (; q->first_empty != q->last_used;
3767 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3768 		ipw_queue_tx_free_tfd(priv, txq);
3769 	}
3770 
3771 	/* free buffers belonging to queue itself */
3772 	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3773 			    q->dma_addr);
3774 	kfree(txq->txb);
3775 
3776 	/* 0 fill whole structure */
3777 	memset(txq, 0, sizeof(*txq));
3778 }
3779 
3780 /**
3781  * Destroy all DMA queues and structures
3782  *
3783  * @param priv
3784  */
ipw_tx_queue_free(struct ipw_priv * priv)3785 static void ipw_tx_queue_free(struct ipw_priv *priv)
3786 {
3787 	/* Tx CMD queue */
3788 	ipw_queue_tx_free(priv, &priv->txq_cmd);
3789 
3790 	/* Tx queues */
3791 	ipw_queue_tx_free(priv, &priv->txq[0]);
3792 	ipw_queue_tx_free(priv, &priv->txq[1]);
3793 	ipw_queue_tx_free(priv, &priv->txq[2]);
3794 	ipw_queue_tx_free(priv, &priv->txq[3]);
3795 }
3796 
ipw_create_bssid(struct ipw_priv * priv,u8 * bssid)3797 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3798 {
3799 	/* First 3 bytes are manufacturer */
3800 	bssid[0] = priv->mac_addr[0];
3801 	bssid[1] = priv->mac_addr[1];
3802 	bssid[2] = priv->mac_addr[2];
3803 
3804 	/* Last bytes are random */
3805 	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3806 
3807 	bssid[0] &= 0xfe;	/* clear multicast bit */
3808 	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3809 }
3810 
ipw_add_station(struct ipw_priv * priv,u8 * bssid)3811 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3812 {
3813 	struct ipw_station_entry entry;
3814 	int i;
3815 
3816 	for (i = 0; i < priv->num_stations; i++) {
3817 		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3818 			/* Another node is active in network */
3819 			priv->missed_adhoc_beacons = 0;
3820 			if (!(priv->config & CFG_STATIC_CHANNEL))
3821 				/* when other nodes drop out, we drop out */
3822 				priv->config &= ~CFG_ADHOC_PERSIST;
3823 
3824 			return i;
3825 		}
3826 	}
3827 
3828 	if (i == MAX_STATIONS)
3829 		return IPW_INVALID_STATION;
3830 
3831 	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3832 
3833 	entry.reserved = 0;
3834 	entry.support_mode = 0;
3835 	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3836 	memcpy(priv->stations[i], bssid, ETH_ALEN);
3837 	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3838 			 &entry, sizeof(entry));
3839 	priv->num_stations++;
3840 
3841 	return i;
3842 }
3843 
ipw_find_station(struct ipw_priv * priv,u8 * bssid)3844 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3845 {
3846 	int i;
3847 
3848 	for (i = 0; i < priv->num_stations; i++)
3849 		if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3850 			return i;
3851 
3852 	return IPW_INVALID_STATION;
3853 }
3854 
ipw_send_disassociate(struct ipw_priv * priv,int quiet)3855 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3856 {
3857 	int err;
3858 
3859 	if (priv->status & STATUS_ASSOCIATING) {
3860 		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3861 		queue_work(priv->workqueue, &priv->disassociate);
3862 		return;
3863 	}
3864 
3865 	if (!(priv->status & STATUS_ASSOCIATED)) {
3866 		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3867 		return;
3868 	}
3869 
3870 	IPW_DEBUG_ASSOC("Disassocation attempt from %pM "
3871 			"on channel %d.\n",
3872 			priv->assoc_request.bssid,
3873 			priv->assoc_request.channel);
3874 
3875 	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3876 	priv->status |= STATUS_DISASSOCIATING;
3877 
3878 	if (quiet)
3879 		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3880 	else
3881 		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3882 
3883 	err = ipw_send_associate(priv, &priv->assoc_request);
3884 	if (err) {
3885 		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3886 			     "failed.\n");
3887 		return;
3888 	}
3889 
3890 }
3891 
ipw_disassociate(void * data)3892 static int ipw_disassociate(void *data)
3893 {
3894 	struct ipw_priv *priv = data;
3895 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3896 		return 0;
3897 	ipw_send_disassociate(data, 0);
3898 	netif_carrier_off(priv->net_dev);
3899 	return 1;
3900 }
3901 
ipw_bg_disassociate(struct work_struct * work)3902 static void ipw_bg_disassociate(struct work_struct *work)
3903 {
3904 	struct ipw_priv *priv =
3905 		container_of(work, struct ipw_priv, disassociate);
3906 	mutex_lock(&priv->mutex);
3907 	ipw_disassociate(priv);
3908 	mutex_unlock(&priv->mutex);
3909 }
3910 
ipw_system_config(struct work_struct * work)3911 static void ipw_system_config(struct work_struct *work)
3912 {
3913 	struct ipw_priv *priv =
3914 		container_of(work, struct ipw_priv, system_config);
3915 
3916 #ifdef CONFIG_IPW2200_PROMISCUOUS
3917 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3918 		priv->sys_config.accept_all_data_frames = 1;
3919 		priv->sys_config.accept_non_directed_frames = 1;
3920 		priv->sys_config.accept_all_mgmt_bcpr = 1;
3921 		priv->sys_config.accept_all_mgmt_frames = 1;
3922 	}
3923 #endif
3924 
3925 	ipw_send_system_config(priv);
3926 }
3927 
3928 struct ipw_status_code {
3929 	u16 status;
3930 	const char *reason;
3931 };
3932 
3933 static const struct ipw_status_code ipw_status_codes[] = {
3934 	{0x00, "Successful"},
3935 	{0x01, "Unspecified failure"},
3936 	{0x0A, "Cannot support all requested capabilities in the "
3937 	 "Capability information field"},
3938 	{0x0B, "Reassociation denied due to inability to confirm that "
3939 	 "association exists"},
3940 	{0x0C, "Association denied due to reason outside the scope of this "
3941 	 "standard"},
3942 	{0x0D,
3943 	 "Responding station does not support the specified authentication "
3944 	 "algorithm"},
3945 	{0x0E,
3946 	 "Received an Authentication frame with authentication sequence "
3947 	 "transaction sequence number out of expected sequence"},
3948 	{0x0F, "Authentication rejected because of challenge failure"},
3949 	{0x10, "Authentication rejected due to timeout waiting for next "
3950 	 "frame in sequence"},
3951 	{0x11, "Association denied because AP is unable to handle additional "
3952 	 "associated stations"},
3953 	{0x12,
3954 	 "Association denied due to requesting station not supporting all "
3955 	 "of the datarates in the BSSBasicServiceSet Parameter"},
3956 	{0x13,
3957 	 "Association denied due to requesting station not supporting "
3958 	 "short preamble operation"},
3959 	{0x14,
3960 	 "Association denied due to requesting station not supporting "
3961 	 "PBCC encoding"},
3962 	{0x15,
3963 	 "Association denied due to requesting station not supporting "
3964 	 "channel agility"},
3965 	{0x19,
3966 	 "Association denied due to requesting station not supporting "
3967 	 "short slot operation"},
3968 	{0x1A,
3969 	 "Association denied due to requesting station not supporting "
3970 	 "DSSS-OFDM operation"},
3971 	{0x28, "Invalid Information Element"},
3972 	{0x29, "Group Cipher is not valid"},
3973 	{0x2A, "Pairwise Cipher is not valid"},
3974 	{0x2B, "AKMP is not valid"},
3975 	{0x2C, "Unsupported RSN IE version"},
3976 	{0x2D, "Invalid RSN IE Capabilities"},
3977 	{0x2E, "Cipher suite is rejected per security policy"},
3978 };
3979 
ipw_get_status_code(u16 status)3980 static const char *ipw_get_status_code(u16 status)
3981 {
3982 	int i;
3983 	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3984 		if (ipw_status_codes[i].status == (status & 0xff))
3985 			return ipw_status_codes[i].reason;
3986 	return "Unknown status value.";
3987 }
3988 
average_init(struct average * avg)3989 static void inline average_init(struct average *avg)
3990 {
3991 	memset(avg, 0, sizeof(*avg));
3992 }
3993 
3994 #define DEPTH_RSSI 8
3995 #define DEPTH_NOISE 16
exponential_average(s16 prev_avg,s16 val,u8 depth)3996 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3997 {
3998 	return ((depth-1)*prev_avg +  val)/depth;
3999 }
4000 
average_add(struct average * avg,s16 val)4001 static void average_add(struct average *avg, s16 val)
4002 {
4003 	avg->sum -= avg->entries[avg->pos];
4004 	avg->sum += val;
4005 	avg->entries[avg->pos++] = val;
4006 	if (unlikely(avg->pos == AVG_ENTRIES)) {
4007 		avg->init = 1;
4008 		avg->pos = 0;
4009 	}
4010 }
4011 
average_value(struct average * avg)4012 static s16 average_value(struct average *avg)
4013 {
4014 	if (!unlikely(avg->init)) {
4015 		if (avg->pos)
4016 			return avg->sum / avg->pos;
4017 		return 0;
4018 	}
4019 
4020 	return avg->sum / AVG_ENTRIES;
4021 }
4022 
ipw_reset_stats(struct ipw_priv * priv)4023 static void ipw_reset_stats(struct ipw_priv *priv)
4024 {
4025 	u32 len = sizeof(u32);
4026 
4027 	priv->quality = 0;
4028 
4029 	average_init(&priv->average_missed_beacons);
4030 	priv->exp_avg_rssi = -60;
4031 	priv->exp_avg_noise = -85 + 0x100;
4032 
4033 	priv->last_rate = 0;
4034 	priv->last_missed_beacons = 0;
4035 	priv->last_rx_packets = 0;
4036 	priv->last_tx_packets = 0;
4037 	priv->last_tx_failures = 0;
4038 
4039 	/* Firmware managed, reset only when NIC is restarted, so we have to
4040 	 * normalize on the current value */
4041 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4042 			&priv->last_rx_err, &len);
4043 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4044 			&priv->last_tx_failures, &len);
4045 
4046 	/* Driver managed, reset with each association */
4047 	priv->missed_adhoc_beacons = 0;
4048 	priv->missed_beacons = 0;
4049 	priv->tx_packets = 0;
4050 	priv->rx_packets = 0;
4051 
4052 }
4053 
ipw_get_max_rate(struct ipw_priv * priv)4054 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4055 {
4056 	u32 i = 0x80000000;
4057 	u32 mask = priv->rates_mask;
4058 	/* If currently associated in B mode, restrict the maximum
4059 	 * rate match to B rates */
4060 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4061 		mask &= IEEE80211_CCK_RATES_MASK;
4062 
4063 	/* TODO: Verify that the rate is supported by the current rates
4064 	 * list. */
4065 
4066 	while (i && !(mask & i))
4067 		i >>= 1;
4068 	switch (i) {
4069 	case IEEE80211_CCK_RATE_1MB_MASK:
4070 		return 1000000;
4071 	case IEEE80211_CCK_RATE_2MB_MASK:
4072 		return 2000000;
4073 	case IEEE80211_CCK_RATE_5MB_MASK:
4074 		return 5500000;
4075 	case IEEE80211_OFDM_RATE_6MB_MASK:
4076 		return 6000000;
4077 	case IEEE80211_OFDM_RATE_9MB_MASK:
4078 		return 9000000;
4079 	case IEEE80211_CCK_RATE_11MB_MASK:
4080 		return 11000000;
4081 	case IEEE80211_OFDM_RATE_12MB_MASK:
4082 		return 12000000;
4083 	case IEEE80211_OFDM_RATE_18MB_MASK:
4084 		return 18000000;
4085 	case IEEE80211_OFDM_RATE_24MB_MASK:
4086 		return 24000000;
4087 	case IEEE80211_OFDM_RATE_36MB_MASK:
4088 		return 36000000;
4089 	case IEEE80211_OFDM_RATE_48MB_MASK:
4090 		return 48000000;
4091 	case IEEE80211_OFDM_RATE_54MB_MASK:
4092 		return 54000000;
4093 	}
4094 
4095 	if (priv->ieee->mode == IEEE_B)
4096 		return 11000000;
4097 	else
4098 		return 54000000;
4099 }
4100 
ipw_get_current_rate(struct ipw_priv * priv)4101 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4102 {
4103 	u32 rate, len = sizeof(rate);
4104 	int err;
4105 
4106 	if (!(priv->status & STATUS_ASSOCIATED))
4107 		return 0;
4108 
4109 	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4110 		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4111 				      &len);
4112 		if (err) {
4113 			IPW_DEBUG_INFO("failed querying ordinals.\n");
4114 			return 0;
4115 		}
4116 	} else
4117 		return ipw_get_max_rate(priv);
4118 
4119 	switch (rate) {
4120 	case IPW_TX_RATE_1MB:
4121 		return 1000000;
4122 	case IPW_TX_RATE_2MB:
4123 		return 2000000;
4124 	case IPW_TX_RATE_5MB:
4125 		return 5500000;
4126 	case IPW_TX_RATE_6MB:
4127 		return 6000000;
4128 	case IPW_TX_RATE_9MB:
4129 		return 9000000;
4130 	case IPW_TX_RATE_11MB:
4131 		return 11000000;
4132 	case IPW_TX_RATE_12MB:
4133 		return 12000000;
4134 	case IPW_TX_RATE_18MB:
4135 		return 18000000;
4136 	case IPW_TX_RATE_24MB:
4137 		return 24000000;
4138 	case IPW_TX_RATE_36MB:
4139 		return 36000000;
4140 	case IPW_TX_RATE_48MB:
4141 		return 48000000;
4142 	case IPW_TX_RATE_54MB:
4143 		return 54000000;
4144 	}
4145 
4146 	return 0;
4147 }
4148 
4149 #define IPW_STATS_INTERVAL (2 * HZ)
ipw_gather_stats(struct ipw_priv * priv)4150 static void ipw_gather_stats(struct ipw_priv *priv)
4151 {
4152 	u32 rx_err, rx_err_delta, rx_packets_delta;
4153 	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4154 	u32 missed_beacons_percent, missed_beacons_delta;
4155 	u32 quality = 0;
4156 	u32 len = sizeof(u32);
4157 	s16 rssi;
4158 	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4159 	    rate_quality;
4160 	u32 max_rate;
4161 
4162 	if (!(priv->status & STATUS_ASSOCIATED)) {
4163 		priv->quality = 0;
4164 		return;
4165 	}
4166 
4167 	/* Update the statistics */
4168 	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4169 			&priv->missed_beacons, &len);
4170 	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4171 	priv->last_missed_beacons = priv->missed_beacons;
4172 	if (priv->assoc_request.beacon_interval) {
4173 		missed_beacons_percent = missed_beacons_delta *
4174 		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4175 		    (IPW_STATS_INTERVAL * 10);
4176 	} else {
4177 		missed_beacons_percent = 0;
4178 	}
4179 	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4180 
4181 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4182 	rx_err_delta = rx_err - priv->last_rx_err;
4183 	priv->last_rx_err = rx_err;
4184 
4185 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4186 	tx_failures_delta = tx_failures - priv->last_tx_failures;
4187 	priv->last_tx_failures = tx_failures;
4188 
4189 	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4190 	priv->last_rx_packets = priv->rx_packets;
4191 
4192 	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4193 	priv->last_tx_packets = priv->tx_packets;
4194 
4195 	/* Calculate quality based on the following:
4196 	 *
4197 	 * Missed beacon: 100% = 0, 0% = 70% missed
4198 	 * Rate: 60% = 1Mbs, 100% = Max
4199 	 * Rx and Tx errors represent a straight % of total Rx/Tx
4200 	 * RSSI: 100% = > -50,  0% = < -80
4201 	 * Rx errors: 100% = 0, 0% = 50% missed
4202 	 *
4203 	 * The lowest computed quality is used.
4204 	 *
4205 	 */
4206 #define BEACON_THRESHOLD 5
4207 	beacon_quality = 100 - missed_beacons_percent;
4208 	if (beacon_quality < BEACON_THRESHOLD)
4209 		beacon_quality = 0;
4210 	else
4211 		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4212 		    (100 - BEACON_THRESHOLD);
4213 	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4214 			beacon_quality, missed_beacons_percent);
4215 
4216 	priv->last_rate = ipw_get_current_rate(priv);
4217 	max_rate = ipw_get_max_rate(priv);
4218 	rate_quality = priv->last_rate * 40 / max_rate + 60;
4219 	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4220 			rate_quality, priv->last_rate / 1000000);
4221 
4222 	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4223 		rx_quality = 100 - (rx_err_delta * 100) /
4224 		    (rx_packets_delta + rx_err_delta);
4225 	else
4226 		rx_quality = 100;
4227 	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4228 			rx_quality, rx_err_delta, rx_packets_delta);
4229 
4230 	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4231 		tx_quality = 100 - (tx_failures_delta * 100) /
4232 		    (tx_packets_delta + tx_failures_delta);
4233 	else
4234 		tx_quality = 100;
4235 	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4236 			tx_quality, tx_failures_delta, tx_packets_delta);
4237 
4238 	rssi = priv->exp_avg_rssi;
4239 	signal_quality =
4240 	    (100 *
4241 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4242 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4243 	     (priv->ieee->perfect_rssi - rssi) *
4244 	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4245 	      62 * (priv->ieee->perfect_rssi - rssi))) /
4246 	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4247 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4248 	if (signal_quality > 100)
4249 		signal_quality = 100;
4250 	else if (signal_quality < 1)
4251 		signal_quality = 0;
4252 
4253 	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4254 			signal_quality, rssi);
4255 
4256 	quality = min(beacon_quality,
4257 		      min(rate_quality,
4258 			  min(tx_quality, min(rx_quality, signal_quality))));
4259 	if (quality == beacon_quality)
4260 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4261 				quality);
4262 	if (quality == rate_quality)
4263 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4264 				quality);
4265 	if (quality == tx_quality)
4266 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4267 				quality);
4268 	if (quality == rx_quality)
4269 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4270 				quality);
4271 	if (quality == signal_quality)
4272 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4273 				quality);
4274 
4275 	priv->quality = quality;
4276 
4277 	queue_delayed_work(priv->workqueue, &priv->gather_stats,
4278 			   IPW_STATS_INTERVAL);
4279 }
4280 
ipw_bg_gather_stats(struct work_struct * work)4281 static void ipw_bg_gather_stats(struct work_struct *work)
4282 {
4283 	struct ipw_priv *priv =
4284 		container_of(work, struct ipw_priv, gather_stats.work);
4285 	mutex_lock(&priv->mutex);
4286 	ipw_gather_stats(priv);
4287 	mutex_unlock(&priv->mutex);
4288 }
4289 
4290 /* Missed beacon behavior:
4291  * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4292  * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4293  * Above disassociate threshold, give up and stop scanning.
4294  * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
ipw_handle_missed_beacon(struct ipw_priv * priv,int missed_count)4295 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4296 					    int missed_count)
4297 {
4298 	priv->notif_missed_beacons = missed_count;
4299 
4300 	if (missed_count > priv->disassociate_threshold &&
4301 	    priv->status & STATUS_ASSOCIATED) {
4302 		/* If associated and we've hit the missed
4303 		 * beacon threshold, disassociate, turn
4304 		 * off roaming, and abort any active scans */
4305 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4306 			  IPW_DL_STATE | IPW_DL_ASSOC,
4307 			  "Missed beacon: %d - disassociate\n", missed_count);
4308 		priv->status &= ~STATUS_ROAMING;
4309 		if (priv->status & STATUS_SCANNING) {
4310 			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4311 				  IPW_DL_STATE,
4312 				  "Aborting scan with missed beacon.\n");
4313 			queue_work(priv->workqueue, &priv->abort_scan);
4314 		}
4315 
4316 		queue_work(priv->workqueue, &priv->disassociate);
4317 		return;
4318 	}
4319 
4320 	if (priv->status & STATUS_ROAMING) {
4321 		/* If we are currently roaming, then just
4322 		 * print a debug statement... */
4323 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4324 			  "Missed beacon: %d - roam in progress\n",
4325 			  missed_count);
4326 		return;
4327 	}
4328 
4329 	if (roaming &&
4330 	    (missed_count > priv->roaming_threshold &&
4331 	     missed_count <= priv->disassociate_threshold)) {
4332 		/* If we are not already roaming, set the ROAM
4333 		 * bit in the status and kick off a scan.
4334 		 * This can happen several times before we reach
4335 		 * disassociate_threshold. */
4336 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4337 			  "Missed beacon: %d - initiate "
4338 			  "roaming\n", missed_count);
4339 		if (!(priv->status & STATUS_ROAMING)) {
4340 			priv->status |= STATUS_ROAMING;
4341 			if (!(priv->status & STATUS_SCANNING))
4342 				queue_delayed_work(priv->workqueue,
4343 						   &priv->request_scan, 0);
4344 		}
4345 		return;
4346 	}
4347 
4348 	if (priv->status & STATUS_SCANNING &&
4349 	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4350 		/* Stop scan to keep fw from getting
4351 		 * stuck (only if we aren't roaming --
4352 		 * otherwise we'll never scan more than 2 or 3
4353 		 * channels..) */
4354 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4355 			  "Aborting scan with missed beacon.\n");
4356 		queue_work(priv->workqueue, &priv->abort_scan);
4357 	}
4358 
4359 	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4360 }
4361 
ipw_scan_event(struct work_struct * work)4362 static void ipw_scan_event(struct work_struct *work)
4363 {
4364 	union iwreq_data wrqu;
4365 
4366 	struct ipw_priv *priv =
4367 		container_of(work, struct ipw_priv, scan_event.work);
4368 
4369 	wrqu.data.length = 0;
4370 	wrqu.data.flags = 0;
4371 	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4372 }
4373 
handle_scan_event(struct ipw_priv * priv)4374 static void handle_scan_event(struct ipw_priv *priv)
4375 {
4376 	/* Only userspace-requested scan completion events go out immediately */
4377 	if (!priv->user_requested_scan) {
4378 		if (!delayed_work_pending(&priv->scan_event))
4379 			queue_delayed_work(priv->workqueue, &priv->scan_event,
4380 					 round_jiffies_relative(msecs_to_jiffies(4000)));
4381 	} else {
4382 		union iwreq_data wrqu;
4383 
4384 		priv->user_requested_scan = 0;
4385 		cancel_delayed_work(&priv->scan_event);
4386 
4387 		wrqu.data.length = 0;
4388 		wrqu.data.flags = 0;
4389 		wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4390 	}
4391 }
4392 
4393 /**
4394  * Handle host notification packet.
4395  * Called from interrupt routine
4396  */
ipw_rx_notification(struct ipw_priv * priv,struct ipw_rx_notification * notif)4397 static void ipw_rx_notification(struct ipw_priv *priv,
4398 				       struct ipw_rx_notification *notif)
4399 {
4400 	DECLARE_SSID_BUF(ssid);
4401 	u16 size = le16_to_cpu(notif->size);
4402 	notif->size = le16_to_cpu(notif->size);
4403 
4404 	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4405 
4406 	switch (notif->subtype) {
4407 	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4408 			struct notif_association *assoc = &notif->u.assoc;
4409 
4410 			switch (assoc->state) {
4411 			case CMAS_ASSOCIATED:{
4412 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4413 						  IPW_DL_ASSOC,
4414 						  "associated: '%s' %pM \n",
4415 						  print_ssid(ssid, priv->essid,
4416 							     priv->essid_len),
4417 						  priv->bssid);
4418 
4419 					switch (priv->ieee->iw_mode) {
4420 					case IW_MODE_INFRA:
4421 						memcpy(priv->ieee->bssid,
4422 						       priv->bssid, ETH_ALEN);
4423 						break;
4424 
4425 					case IW_MODE_ADHOC:
4426 						memcpy(priv->ieee->bssid,
4427 						       priv->bssid, ETH_ALEN);
4428 
4429 						/* clear out the station table */
4430 						priv->num_stations = 0;
4431 
4432 						IPW_DEBUG_ASSOC
4433 						    ("queueing adhoc check\n");
4434 						queue_delayed_work(priv->
4435 								   workqueue,
4436 								   &priv->
4437 								   adhoc_check,
4438 								   le16_to_cpu(priv->
4439 								   assoc_request.
4440 								   beacon_interval));
4441 						break;
4442 					}
4443 
4444 					priv->status &= ~STATUS_ASSOCIATING;
4445 					priv->status |= STATUS_ASSOCIATED;
4446 					queue_work(priv->workqueue,
4447 						   &priv->system_config);
4448 
4449 #ifdef CONFIG_IPW2200_QOS
4450 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4451 			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4452 					if ((priv->status & STATUS_AUTH) &&
4453 					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4454 					     == IEEE80211_STYPE_ASSOC_RESP)) {
4455 						if ((sizeof
4456 						     (struct
4457 						      ieee80211_assoc_response)
4458 						     <= size)
4459 						    && (size <= 2314)) {
4460 							struct
4461 							ieee80211_rx_stats
4462 							    stats = {
4463 								.len = size - 1,
4464 							};
4465 
4466 							IPW_DEBUG_QOS
4467 							    ("QoS Associate "
4468 							     "size %d\n", size);
4469 							ieee80211_rx_mgt(priv->
4470 									 ieee,
4471 									 (struct
4472 									  ieee80211_hdr_4addr
4473 									  *)
4474 									 &notif->u.raw, &stats);
4475 						}
4476 					}
4477 #endif
4478 
4479 					schedule_work(&priv->link_up);
4480 
4481 					break;
4482 				}
4483 
4484 			case CMAS_AUTHENTICATED:{
4485 					if (priv->
4486 					    status & (STATUS_ASSOCIATED |
4487 						      STATUS_AUTH)) {
4488 						struct notif_authenticate *auth
4489 						    = &notif->u.auth;
4490 						IPW_DEBUG(IPW_DL_NOTIF |
4491 							  IPW_DL_STATE |
4492 							  IPW_DL_ASSOC,
4493 							  "deauthenticated: '%s' "
4494 							  "%pM"
4495 							  ": (0x%04X) - %s \n",
4496 							  print_ssid(ssid,
4497 								     priv->
4498 								     essid,
4499 								     priv->
4500 								     essid_len),
4501 							  priv->bssid,
4502 							  le16_to_cpu(auth->status),
4503 							  ipw_get_status_code
4504 							  (le16_to_cpu
4505 							   (auth->status)));
4506 
4507 						priv->status &=
4508 						    ~(STATUS_ASSOCIATING |
4509 						      STATUS_AUTH |
4510 						      STATUS_ASSOCIATED);
4511 
4512 						schedule_work(&priv->link_down);
4513 						break;
4514 					}
4515 
4516 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4517 						  IPW_DL_ASSOC,
4518 						  "authenticated: '%s' %pM\n",
4519 						  print_ssid(ssid, priv->essid,
4520 							     priv->essid_len),
4521 						  priv->bssid);
4522 					break;
4523 				}
4524 
4525 			case CMAS_INIT:{
4526 					if (priv->status & STATUS_AUTH) {
4527 						struct
4528 						    ieee80211_assoc_response
4529 						*resp;
4530 						resp =
4531 						    (struct
4532 						     ieee80211_assoc_response
4533 						     *)&notif->u.raw;
4534 						IPW_DEBUG(IPW_DL_NOTIF |
4535 							  IPW_DL_STATE |
4536 							  IPW_DL_ASSOC,
4537 							  "association failed (0x%04X): %s\n",
4538 							  le16_to_cpu(resp->status),
4539 							  ipw_get_status_code
4540 							  (le16_to_cpu
4541 							   (resp->status)));
4542 					}
4543 
4544 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4545 						  IPW_DL_ASSOC,
4546 						  "disassociated: '%s' %pM \n",
4547 						  print_ssid(ssid, priv->essid,
4548 							     priv->essid_len),
4549 						  priv->bssid);
4550 
4551 					priv->status &=
4552 					    ~(STATUS_DISASSOCIATING |
4553 					      STATUS_ASSOCIATING |
4554 					      STATUS_ASSOCIATED | STATUS_AUTH);
4555 					if (priv->assoc_network
4556 					    && (priv->assoc_network->
4557 						capability &
4558 						WLAN_CAPABILITY_IBSS))
4559 						ipw_remove_current_network
4560 						    (priv);
4561 
4562 					schedule_work(&priv->link_down);
4563 
4564 					break;
4565 				}
4566 
4567 			case CMAS_RX_ASSOC_RESP:
4568 				break;
4569 
4570 			default:
4571 				IPW_ERROR("assoc: unknown (%d)\n",
4572 					  assoc->state);
4573 				break;
4574 			}
4575 
4576 			break;
4577 		}
4578 
4579 	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4580 			struct notif_authenticate *auth = &notif->u.auth;
4581 			switch (auth->state) {
4582 			case CMAS_AUTHENTICATED:
4583 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4584 					  "authenticated: '%s' %pM \n",
4585 					  print_ssid(ssid, priv->essid,
4586 						     priv->essid_len),
4587 					  priv->bssid);
4588 				priv->status |= STATUS_AUTH;
4589 				break;
4590 
4591 			case CMAS_INIT:
4592 				if (priv->status & STATUS_AUTH) {
4593 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4594 						  IPW_DL_ASSOC,
4595 						  "authentication failed (0x%04X): %s\n",
4596 						  le16_to_cpu(auth->status),
4597 						  ipw_get_status_code(le16_to_cpu
4598 								      (auth->
4599 								       status)));
4600 				}
4601 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4602 					  IPW_DL_ASSOC,
4603 					  "deauthenticated: '%s' %pM\n",
4604 					  print_ssid(ssid, priv->essid,
4605 						     priv->essid_len),
4606 					  priv->bssid);
4607 
4608 				priv->status &= ~(STATUS_ASSOCIATING |
4609 						  STATUS_AUTH |
4610 						  STATUS_ASSOCIATED);
4611 
4612 				schedule_work(&priv->link_down);
4613 				break;
4614 
4615 			case CMAS_TX_AUTH_SEQ_1:
4616 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4617 					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4618 				break;
4619 			case CMAS_RX_AUTH_SEQ_2:
4620 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4621 					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4622 				break;
4623 			case CMAS_AUTH_SEQ_1_PASS:
4624 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4625 					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4626 				break;
4627 			case CMAS_AUTH_SEQ_1_FAIL:
4628 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4629 					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4630 				break;
4631 			case CMAS_TX_AUTH_SEQ_3:
4632 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4633 					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4634 				break;
4635 			case CMAS_RX_AUTH_SEQ_4:
4636 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4637 					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4638 				break;
4639 			case CMAS_AUTH_SEQ_2_PASS:
4640 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4641 					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4642 				break;
4643 			case CMAS_AUTH_SEQ_2_FAIL:
4644 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4645 					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4646 				break;
4647 			case CMAS_TX_ASSOC:
4648 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4649 					  IPW_DL_ASSOC, "TX_ASSOC\n");
4650 				break;
4651 			case CMAS_RX_ASSOC_RESP:
4652 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4653 					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4654 
4655 				break;
4656 			case CMAS_ASSOCIATED:
4657 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4658 					  IPW_DL_ASSOC, "ASSOCIATED\n");
4659 				break;
4660 			default:
4661 				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4662 						auth->state);
4663 				break;
4664 			}
4665 			break;
4666 		}
4667 
4668 	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4669 			struct notif_channel_result *x =
4670 			    &notif->u.channel_result;
4671 
4672 			if (size == sizeof(*x)) {
4673 				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4674 					       x->channel_num);
4675 			} else {
4676 				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4677 					       "(should be %zd)\n",
4678 					       size, sizeof(*x));
4679 			}
4680 			break;
4681 		}
4682 
4683 	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4684 			struct notif_scan_complete *x = &notif->u.scan_complete;
4685 			if (size == sizeof(*x)) {
4686 				IPW_DEBUG_SCAN
4687 				    ("Scan completed: type %d, %d channels, "
4688 				     "%d status\n", x->scan_type,
4689 				     x->num_channels, x->status);
4690 			} else {
4691 				IPW_ERROR("Scan completed of wrong size %d "
4692 					  "(should be %zd)\n",
4693 					  size, sizeof(*x));
4694 			}
4695 
4696 			priv->status &=
4697 			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4698 
4699 			wake_up_interruptible(&priv->wait_state);
4700 			cancel_delayed_work(&priv->scan_check);
4701 
4702 			if (priv->status & STATUS_EXIT_PENDING)
4703 				break;
4704 
4705 			priv->ieee->scans++;
4706 
4707 #ifdef CONFIG_IPW2200_MONITOR
4708 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4709 				priv->status |= STATUS_SCAN_FORCED;
4710 				queue_delayed_work(priv->workqueue,
4711 						   &priv->request_scan, 0);
4712 				break;
4713 			}
4714 			priv->status &= ~STATUS_SCAN_FORCED;
4715 #endif				/* CONFIG_IPW2200_MONITOR */
4716 
4717 			/* Do queued direct scans first */
4718 			if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4719 				queue_delayed_work(priv->workqueue,
4720 						   &priv->request_direct_scan, 0);
4721 			}
4722 
4723 			if (!(priv->status & (STATUS_ASSOCIATED |
4724 					      STATUS_ASSOCIATING |
4725 					      STATUS_ROAMING |
4726 					      STATUS_DISASSOCIATING)))
4727 				queue_work(priv->workqueue, &priv->associate);
4728 			else if (priv->status & STATUS_ROAMING) {
4729 				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4730 					/* If a scan completed and we are in roam mode, then
4731 					 * the scan that completed was the one requested as a
4732 					 * result of entering roam... so, schedule the
4733 					 * roam work */
4734 					queue_work(priv->workqueue,
4735 						   &priv->roam);
4736 				else
4737 					/* Don't schedule if we aborted the scan */
4738 					priv->status &= ~STATUS_ROAMING;
4739 			} else if (priv->status & STATUS_SCAN_PENDING)
4740 				queue_delayed_work(priv->workqueue,
4741 						   &priv->request_scan, 0);
4742 			else if (priv->config & CFG_BACKGROUND_SCAN
4743 				 && priv->status & STATUS_ASSOCIATED)
4744 				queue_delayed_work(priv->workqueue,
4745 						   &priv->request_scan,
4746 						   round_jiffies_relative(HZ));
4747 
4748 			/* Send an empty event to user space.
4749 			 * We don't send the received data on the event because
4750 			 * it would require us to do complex transcoding, and
4751 			 * we want to minimise the work done in the irq handler
4752 			 * Use a request to extract the data.
4753 			 * Also, we generate this even for any scan, regardless
4754 			 * on how the scan was initiated. User space can just
4755 			 * sync on periodic scan to get fresh data...
4756 			 * Jean II */
4757 			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4758 				handle_scan_event(priv);
4759 			break;
4760 		}
4761 
4762 	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4763 			struct notif_frag_length *x = &notif->u.frag_len;
4764 
4765 			if (size == sizeof(*x))
4766 				IPW_ERROR("Frag length: %d\n",
4767 					  le16_to_cpu(x->frag_length));
4768 			else
4769 				IPW_ERROR("Frag length of wrong size %d "
4770 					  "(should be %zd)\n",
4771 					  size, sizeof(*x));
4772 			break;
4773 		}
4774 
4775 	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4776 			struct notif_link_deterioration *x =
4777 			    &notif->u.link_deterioration;
4778 
4779 			if (size == sizeof(*x)) {
4780 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4781 					"link deterioration: type %d, cnt %d\n",
4782 					x->silence_notification_type,
4783 					x->silence_count);
4784 				memcpy(&priv->last_link_deterioration, x,
4785 				       sizeof(*x));
4786 			} else {
4787 				IPW_ERROR("Link Deterioration of wrong size %d "
4788 					  "(should be %zd)\n",
4789 					  size, sizeof(*x));
4790 			}
4791 			break;
4792 		}
4793 
4794 	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4795 			IPW_ERROR("Dino config\n");
4796 			if (priv->hcmd
4797 			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4798 				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4799 
4800 			break;
4801 		}
4802 
4803 	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4804 			struct notif_beacon_state *x = &notif->u.beacon_state;
4805 			if (size != sizeof(*x)) {
4806 				IPW_ERROR
4807 				    ("Beacon state of wrong size %d (should "
4808 				     "be %zd)\n", size, sizeof(*x));
4809 				break;
4810 			}
4811 
4812 			if (le32_to_cpu(x->state) ==
4813 			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4814 				ipw_handle_missed_beacon(priv,
4815 							 le32_to_cpu(x->
4816 								     number));
4817 
4818 			break;
4819 		}
4820 
4821 	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4822 			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4823 			if (size == sizeof(*x)) {
4824 				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4825 					  "0x%02x station %d\n",
4826 					  x->key_state, x->security_type,
4827 					  x->station_index);
4828 				break;
4829 			}
4830 
4831 			IPW_ERROR
4832 			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4833 			     size, sizeof(*x));
4834 			break;
4835 		}
4836 
4837 	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4838 			struct notif_calibration *x = &notif->u.calibration;
4839 
4840 			if (size == sizeof(*x)) {
4841 				memcpy(&priv->calib, x, sizeof(*x));
4842 				IPW_DEBUG_INFO("TODO: Calibration\n");
4843 				break;
4844 			}
4845 
4846 			IPW_ERROR
4847 			    ("Calibration of wrong size %d (should be %zd)\n",
4848 			     size, sizeof(*x));
4849 			break;
4850 		}
4851 
4852 	case HOST_NOTIFICATION_NOISE_STATS:{
4853 			if (size == sizeof(u32)) {
4854 				priv->exp_avg_noise =
4855 				    exponential_average(priv->exp_avg_noise,
4856 				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4857 				    DEPTH_NOISE);
4858 				break;
4859 			}
4860 
4861 			IPW_ERROR
4862 			    ("Noise stat is wrong size %d (should be %zd)\n",
4863 			     size, sizeof(u32));
4864 			break;
4865 		}
4866 
4867 	default:
4868 		IPW_DEBUG_NOTIF("Unknown notification: "
4869 				"subtype=%d,flags=0x%2x,size=%d\n",
4870 				notif->subtype, notif->flags, size);
4871 	}
4872 }
4873 
4874 /**
4875  * Destroys all DMA structures and initialise them again
4876  *
4877  * @param priv
4878  * @return error code
4879  */
ipw_queue_reset(struct ipw_priv * priv)4880 static int ipw_queue_reset(struct ipw_priv *priv)
4881 {
4882 	int rc = 0;
4883 	/** @todo customize queue sizes */
4884 	int nTx = 64, nTxCmd = 8;
4885 	ipw_tx_queue_free(priv);
4886 	/* Tx CMD queue */
4887 	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4888 			       IPW_TX_CMD_QUEUE_READ_INDEX,
4889 			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4890 			       IPW_TX_CMD_QUEUE_BD_BASE,
4891 			       IPW_TX_CMD_QUEUE_BD_SIZE);
4892 	if (rc) {
4893 		IPW_ERROR("Tx Cmd queue init failed\n");
4894 		goto error;
4895 	}
4896 	/* Tx queue(s) */
4897 	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4898 			       IPW_TX_QUEUE_0_READ_INDEX,
4899 			       IPW_TX_QUEUE_0_WRITE_INDEX,
4900 			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4901 	if (rc) {
4902 		IPW_ERROR("Tx 0 queue init failed\n");
4903 		goto error;
4904 	}
4905 	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4906 			       IPW_TX_QUEUE_1_READ_INDEX,
4907 			       IPW_TX_QUEUE_1_WRITE_INDEX,
4908 			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4909 	if (rc) {
4910 		IPW_ERROR("Tx 1 queue init failed\n");
4911 		goto error;
4912 	}
4913 	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4914 			       IPW_TX_QUEUE_2_READ_INDEX,
4915 			       IPW_TX_QUEUE_2_WRITE_INDEX,
4916 			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4917 	if (rc) {
4918 		IPW_ERROR("Tx 2 queue init failed\n");
4919 		goto error;
4920 	}
4921 	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4922 			       IPW_TX_QUEUE_3_READ_INDEX,
4923 			       IPW_TX_QUEUE_3_WRITE_INDEX,
4924 			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4925 	if (rc) {
4926 		IPW_ERROR("Tx 3 queue init failed\n");
4927 		goto error;
4928 	}
4929 	/* statistics */
4930 	priv->rx_bufs_min = 0;
4931 	priv->rx_pend_max = 0;
4932 	return rc;
4933 
4934       error:
4935 	ipw_tx_queue_free(priv);
4936 	return rc;
4937 }
4938 
4939 /**
4940  * Reclaim Tx queue entries no more used by NIC.
4941  *
4942  * When FW advances 'R' index, all entries between old and
4943  * new 'R' index need to be reclaimed. As result, some free space
4944  * forms. If there is enough free space (> low mark), wake Tx queue.
4945  *
4946  * @note Need to protect against garbage in 'R' index
4947  * @param priv
4948  * @param txq
4949  * @param qindex
4950  * @return Number of used entries remains in the queue
4951  */
ipw_queue_tx_reclaim(struct ipw_priv * priv,struct clx2_tx_queue * txq,int qindex)4952 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4953 				struct clx2_tx_queue *txq, int qindex)
4954 {
4955 	u32 hw_tail;
4956 	int used;
4957 	struct clx2_queue *q = &txq->q;
4958 
4959 	hw_tail = ipw_read32(priv, q->reg_r);
4960 	if (hw_tail >= q->n_bd) {
4961 		IPW_ERROR
4962 		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4963 		     hw_tail, q->n_bd);
4964 		goto done;
4965 	}
4966 	for (; q->last_used != hw_tail;
4967 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4968 		ipw_queue_tx_free_tfd(priv, txq);
4969 		priv->tx_packets++;
4970 	}
4971       done:
4972 	if ((ipw_tx_queue_space(q) > q->low_mark) &&
4973 	    (qindex >= 0))
4974 		netif_wake_queue(priv->net_dev);
4975 	used = q->first_empty - q->last_used;
4976 	if (used < 0)
4977 		used += q->n_bd;
4978 
4979 	return used;
4980 }
4981 
ipw_queue_tx_hcmd(struct ipw_priv * priv,int hcmd,void * buf,int len,int sync)4982 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4983 			     int len, int sync)
4984 {
4985 	struct clx2_tx_queue *txq = &priv->txq_cmd;
4986 	struct clx2_queue *q = &txq->q;
4987 	struct tfd_frame *tfd;
4988 
4989 	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
4990 		IPW_ERROR("No space for Tx\n");
4991 		return -EBUSY;
4992 	}
4993 
4994 	tfd = &txq->bd[q->first_empty];
4995 	txq->txb[q->first_empty] = NULL;
4996 
4997 	memset(tfd, 0, sizeof(*tfd));
4998 	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4999 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5000 	priv->hcmd_seq++;
5001 	tfd->u.cmd.index = hcmd;
5002 	tfd->u.cmd.length = len;
5003 	memcpy(tfd->u.cmd.payload, buf, len);
5004 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5005 	ipw_write32(priv, q->reg_w, q->first_empty);
5006 	_ipw_read32(priv, 0x90);
5007 
5008 	return 0;
5009 }
5010 
5011 /*
5012  * Rx theory of operation
5013  *
5014  * The host allocates 32 DMA target addresses and passes the host address
5015  * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5016  * 0 to 31
5017  *
5018  * Rx Queue Indexes
5019  * The host/firmware share two index registers for managing the Rx buffers.
5020  *
5021  * The READ index maps to the first position that the firmware may be writing
5022  * to -- the driver can read up to (but not including) this position and get
5023  * good data.
5024  * The READ index is managed by the firmware once the card is enabled.
5025  *
5026  * The WRITE index maps to the last position the driver has read from -- the
5027  * position preceding WRITE is the last slot the firmware can place a packet.
5028  *
5029  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5030  * WRITE = READ.
5031  *
5032  * During initialization the host sets up the READ queue position to the first
5033  * INDEX position, and WRITE to the last (READ - 1 wrapped)
5034  *
5035  * When the firmware places a packet in a buffer it will advance the READ index
5036  * and fire the RX interrupt.  The driver can then query the READ index and
5037  * process as many packets as possible, moving the WRITE index forward as it
5038  * resets the Rx queue buffers with new memory.
5039  *
5040  * The management in the driver is as follows:
5041  * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5042  *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5043  *   to replensish the ipw->rxq->rx_free.
5044  * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5045  *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5046  *   'processed' and 'read' driver indexes as well)
5047  * + A received packet is processed and handed to the kernel network stack,
5048  *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5049  * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5050  *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5051  *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5052  *   were enough free buffers and RX_STALLED is set it is cleared.
5053  *
5054  *
5055  * Driver sequence:
5056  *
5057  * ipw_rx_queue_alloc()       Allocates rx_free
5058  * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5059  *                            ipw_rx_queue_restock
5060  * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5061  *                            queue, updates firmware pointers, and updates
5062  *                            the WRITE index.  If insufficient rx_free buffers
5063  *                            are available, schedules ipw_rx_queue_replenish
5064  *
5065  * -- enable interrupts --
5066  * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5067  *                            READ INDEX, detaching the SKB from the pool.
5068  *                            Moves the packet buffer from queue to rx_used.
5069  *                            Calls ipw_rx_queue_restock to refill any empty
5070  *                            slots.
5071  * ...
5072  *
5073  */
5074 
5075 /*
5076  * If there are slots in the RX queue that  need to be restocked,
5077  * and we have free pre-allocated buffers, fill the ranks as much
5078  * as we can pulling from rx_free.
5079  *
5080  * This moves the 'write' index forward to catch up with 'processed', and
5081  * also updates the memory address in the firmware to reference the new
5082  * target buffer.
5083  */
ipw_rx_queue_restock(struct ipw_priv * priv)5084 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5085 {
5086 	struct ipw_rx_queue *rxq = priv->rxq;
5087 	struct list_head *element;
5088 	struct ipw_rx_mem_buffer *rxb;
5089 	unsigned long flags;
5090 	int write;
5091 
5092 	spin_lock_irqsave(&rxq->lock, flags);
5093 	write = rxq->write;
5094 	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5095 		element = rxq->rx_free.next;
5096 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5097 		list_del(element);
5098 
5099 		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5100 			    rxb->dma_addr);
5101 		rxq->queue[rxq->write] = rxb;
5102 		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5103 		rxq->free_count--;
5104 	}
5105 	spin_unlock_irqrestore(&rxq->lock, flags);
5106 
5107 	/* If the pre-allocated buffer pool is dropping low, schedule to
5108 	 * refill it */
5109 	if (rxq->free_count <= RX_LOW_WATERMARK)
5110 		queue_work(priv->workqueue, &priv->rx_replenish);
5111 
5112 	/* If we've added more space for the firmware to place data, tell it */
5113 	if (write != rxq->write)
5114 		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5115 }
5116 
5117 /*
5118  * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5119  * Also restock the Rx queue via ipw_rx_queue_restock.
5120  *
5121  * This is called as a scheduled work item (except for during intialization)
5122  */
ipw_rx_queue_replenish(void * data)5123 static void ipw_rx_queue_replenish(void *data)
5124 {
5125 	struct ipw_priv *priv = data;
5126 	struct ipw_rx_queue *rxq = priv->rxq;
5127 	struct list_head *element;
5128 	struct ipw_rx_mem_buffer *rxb;
5129 	unsigned long flags;
5130 
5131 	spin_lock_irqsave(&rxq->lock, flags);
5132 	while (!list_empty(&rxq->rx_used)) {
5133 		element = rxq->rx_used.next;
5134 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5135 		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5136 		if (!rxb->skb) {
5137 			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5138 			       priv->net_dev->name);
5139 			/* We don't reschedule replenish work here -- we will
5140 			 * call the restock method and if it still needs
5141 			 * more buffers it will schedule replenish */
5142 			break;
5143 		}
5144 		list_del(element);
5145 
5146 		rxb->dma_addr =
5147 		    pci_map_single(priv->pci_dev, rxb->skb->data,
5148 				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5149 
5150 		list_add_tail(&rxb->list, &rxq->rx_free);
5151 		rxq->free_count++;
5152 	}
5153 	spin_unlock_irqrestore(&rxq->lock, flags);
5154 
5155 	ipw_rx_queue_restock(priv);
5156 }
5157 
ipw_bg_rx_queue_replenish(struct work_struct * work)5158 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5159 {
5160 	struct ipw_priv *priv =
5161 		container_of(work, struct ipw_priv, rx_replenish);
5162 	mutex_lock(&priv->mutex);
5163 	ipw_rx_queue_replenish(priv);
5164 	mutex_unlock(&priv->mutex);
5165 }
5166 
5167 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5168  * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5169  * This free routine walks the list of POOL entries and if SKB is set to
5170  * non NULL it is unmapped and freed
5171  */
ipw_rx_queue_free(struct ipw_priv * priv,struct ipw_rx_queue * rxq)5172 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5173 {
5174 	int i;
5175 
5176 	if (!rxq)
5177 		return;
5178 
5179 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5180 		if (rxq->pool[i].skb != NULL) {
5181 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5182 					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5183 			dev_kfree_skb(rxq->pool[i].skb);
5184 		}
5185 	}
5186 
5187 	kfree(rxq);
5188 }
5189 
ipw_rx_queue_alloc(struct ipw_priv * priv)5190 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5191 {
5192 	struct ipw_rx_queue *rxq;
5193 	int i;
5194 
5195 	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5196 	if (unlikely(!rxq)) {
5197 		IPW_ERROR("memory allocation failed\n");
5198 		return NULL;
5199 	}
5200 	spin_lock_init(&rxq->lock);
5201 	INIT_LIST_HEAD(&rxq->rx_free);
5202 	INIT_LIST_HEAD(&rxq->rx_used);
5203 
5204 	/* Fill the rx_used queue with _all_ of the Rx buffers */
5205 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5206 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5207 
5208 	/* Set us so that we have processed and used all buffers, but have
5209 	 * not restocked the Rx queue with fresh buffers */
5210 	rxq->read = rxq->write = 0;
5211 	rxq->free_count = 0;
5212 
5213 	return rxq;
5214 }
5215 
ipw_is_rate_in_mask(struct ipw_priv * priv,int ieee_mode,u8 rate)5216 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5217 {
5218 	rate &= ~IEEE80211_BASIC_RATE_MASK;
5219 	if (ieee_mode == IEEE_A) {
5220 		switch (rate) {
5221 		case IEEE80211_OFDM_RATE_6MB:
5222 			return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5223 			    1 : 0;
5224 		case IEEE80211_OFDM_RATE_9MB:
5225 			return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5226 			    1 : 0;
5227 		case IEEE80211_OFDM_RATE_12MB:
5228 			return priv->
5229 			    rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5230 		case IEEE80211_OFDM_RATE_18MB:
5231 			return priv->
5232 			    rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5233 		case IEEE80211_OFDM_RATE_24MB:
5234 			return priv->
5235 			    rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5236 		case IEEE80211_OFDM_RATE_36MB:
5237 			return priv->
5238 			    rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5239 		case IEEE80211_OFDM_RATE_48MB:
5240 			return priv->
5241 			    rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5242 		case IEEE80211_OFDM_RATE_54MB:
5243 			return priv->
5244 			    rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5245 		default:
5246 			return 0;
5247 		}
5248 	}
5249 
5250 	/* B and G mixed */
5251 	switch (rate) {
5252 	case IEEE80211_CCK_RATE_1MB:
5253 		return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5254 	case IEEE80211_CCK_RATE_2MB:
5255 		return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5256 	case IEEE80211_CCK_RATE_5MB:
5257 		return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5258 	case IEEE80211_CCK_RATE_11MB:
5259 		return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5260 	}
5261 
5262 	/* If we are limited to B modulations, bail at this point */
5263 	if (ieee_mode == IEEE_B)
5264 		return 0;
5265 
5266 	/* G */
5267 	switch (rate) {
5268 	case IEEE80211_OFDM_RATE_6MB:
5269 		return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5270 	case IEEE80211_OFDM_RATE_9MB:
5271 		return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5272 	case IEEE80211_OFDM_RATE_12MB:
5273 		return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5274 	case IEEE80211_OFDM_RATE_18MB:
5275 		return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5276 	case IEEE80211_OFDM_RATE_24MB:
5277 		return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5278 	case IEEE80211_OFDM_RATE_36MB:
5279 		return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5280 	case IEEE80211_OFDM_RATE_48MB:
5281 		return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5282 	case IEEE80211_OFDM_RATE_54MB:
5283 		return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5284 	}
5285 
5286 	return 0;
5287 }
5288 
ipw_compatible_rates(struct ipw_priv * priv,const struct ieee80211_network * network,struct ipw_supported_rates * rates)5289 static int ipw_compatible_rates(struct ipw_priv *priv,
5290 				const struct ieee80211_network *network,
5291 				struct ipw_supported_rates *rates)
5292 {
5293 	int num_rates, i;
5294 
5295 	memset(rates, 0, sizeof(*rates));
5296 	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5297 	rates->num_rates = 0;
5298 	for (i = 0; i < num_rates; i++) {
5299 		if (!ipw_is_rate_in_mask(priv, network->mode,
5300 					 network->rates[i])) {
5301 
5302 			if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5303 				IPW_DEBUG_SCAN("Adding masked mandatory "
5304 					       "rate %02X\n",
5305 					       network->rates[i]);
5306 				rates->supported_rates[rates->num_rates++] =
5307 				    network->rates[i];
5308 				continue;
5309 			}
5310 
5311 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5312 				       network->rates[i], priv->rates_mask);
5313 			continue;
5314 		}
5315 
5316 		rates->supported_rates[rates->num_rates++] = network->rates[i];
5317 	}
5318 
5319 	num_rates = min(network->rates_ex_len,
5320 			(u8) (IPW_MAX_RATES - num_rates));
5321 	for (i = 0; i < num_rates; i++) {
5322 		if (!ipw_is_rate_in_mask(priv, network->mode,
5323 					 network->rates_ex[i])) {
5324 			if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5325 				IPW_DEBUG_SCAN("Adding masked mandatory "
5326 					       "rate %02X\n",
5327 					       network->rates_ex[i]);
5328 				rates->supported_rates[rates->num_rates++] =
5329 				    network->rates[i];
5330 				continue;
5331 			}
5332 
5333 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5334 				       network->rates_ex[i], priv->rates_mask);
5335 			continue;
5336 		}
5337 
5338 		rates->supported_rates[rates->num_rates++] =
5339 		    network->rates_ex[i];
5340 	}
5341 
5342 	return 1;
5343 }
5344 
ipw_copy_rates(struct ipw_supported_rates * dest,const struct ipw_supported_rates * src)5345 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5346 				  const struct ipw_supported_rates *src)
5347 {
5348 	u8 i;
5349 	for (i = 0; i < src->num_rates; i++)
5350 		dest->supported_rates[i] = src->supported_rates[i];
5351 	dest->num_rates = src->num_rates;
5352 }
5353 
5354 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5355  * mask should ever be used -- right now all callers to add the scan rates are
5356  * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
ipw_add_cck_scan_rates(struct ipw_supported_rates * rates,u8 modulation,u32 rate_mask)5357 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5358 				   u8 modulation, u32 rate_mask)
5359 {
5360 	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5361 	    IEEE80211_BASIC_RATE_MASK : 0;
5362 
5363 	if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5364 		rates->supported_rates[rates->num_rates++] =
5365 		    IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5366 
5367 	if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5368 		rates->supported_rates[rates->num_rates++] =
5369 		    IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5370 
5371 	if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5372 		rates->supported_rates[rates->num_rates++] = basic_mask |
5373 		    IEEE80211_CCK_RATE_5MB;
5374 
5375 	if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5376 		rates->supported_rates[rates->num_rates++] = basic_mask |
5377 		    IEEE80211_CCK_RATE_11MB;
5378 }
5379 
ipw_add_ofdm_scan_rates(struct ipw_supported_rates * rates,u8 modulation,u32 rate_mask)5380 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5381 				    u8 modulation, u32 rate_mask)
5382 {
5383 	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5384 	    IEEE80211_BASIC_RATE_MASK : 0;
5385 
5386 	if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5387 		rates->supported_rates[rates->num_rates++] = basic_mask |
5388 		    IEEE80211_OFDM_RATE_6MB;
5389 
5390 	if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5391 		rates->supported_rates[rates->num_rates++] =
5392 		    IEEE80211_OFDM_RATE_9MB;
5393 
5394 	if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5395 		rates->supported_rates[rates->num_rates++] = basic_mask |
5396 		    IEEE80211_OFDM_RATE_12MB;
5397 
5398 	if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5399 		rates->supported_rates[rates->num_rates++] =
5400 		    IEEE80211_OFDM_RATE_18MB;
5401 
5402 	if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5403 		rates->supported_rates[rates->num_rates++] = basic_mask |
5404 		    IEEE80211_OFDM_RATE_24MB;
5405 
5406 	if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5407 		rates->supported_rates[rates->num_rates++] =
5408 		    IEEE80211_OFDM_RATE_36MB;
5409 
5410 	if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5411 		rates->supported_rates[rates->num_rates++] =
5412 		    IEEE80211_OFDM_RATE_48MB;
5413 
5414 	if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5415 		rates->supported_rates[rates->num_rates++] =
5416 		    IEEE80211_OFDM_RATE_54MB;
5417 }
5418 
5419 struct ipw_network_match {
5420 	struct ieee80211_network *network;
5421 	struct ipw_supported_rates rates;
5422 };
5423 
ipw_find_adhoc_network(struct ipw_priv * priv,struct ipw_network_match * match,struct ieee80211_network * network,int roaming)5424 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5425 				  struct ipw_network_match *match,
5426 				  struct ieee80211_network *network,
5427 				  int roaming)
5428 {
5429 	struct ipw_supported_rates rates;
5430 	DECLARE_SSID_BUF(ssid);
5431 
5432 	/* Verify that this network's capability is compatible with the
5433 	 * current mode (AdHoc or Infrastructure) */
5434 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5435 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5436 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5437 				"capability mismatch.\n",
5438 				print_ssid(ssid, network->ssid,
5439 					   network->ssid_len),
5440 				network->bssid);
5441 		return 0;
5442 	}
5443 
5444 	if (unlikely(roaming)) {
5445 		/* If we are roaming, then ensure check if this is a valid
5446 		 * network to try and roam to */
5447 		if ((network->ssid_len != match->network->ssid_len) ||
5448 		    memcmp(network->ssid, match->network->ssid,
5449 			   network->ssid_len)) {
5450 			IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5451 					"because of non-network ESSID.\n",
5452 					print_ssid(ssid, network->ssid,
5453 						   network->ssid_len),
5454 					network->bssid);
5455 			return 0;
5456 		}
5457 	} else {
5458 		/* If an ESSID has been configured then compare the broadcast
5459 		 * ESSID to ours */
5460 		if ((priv->config & CFG_STATIC_ESSID) &&
5461 		    ((network->ssid_len != priv->essid_len) ||
5462 		     memcmp(network->ssid, priv->essid,
5463 			    min(network->ssid_len, priv->essid_len)))) {
5464 			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5465 
5466 			strncpy(escaped,
5467 				print_ssid(ssid, network->ssid,
5468 					   network->ssid_len),
5469 				sizeof(escaped));
5470 			IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5471 					"because of ESSID mismatch: '%s'.\n",
5472 					escaped, network->bssid,
5473 					print_ssid(ssid, priv->essid,
5474 						   priv->essid_len));
5475 			return 0;
5476 		}
5477 	}
5478 
5479 	/* If the old network rate is better than this one, don't bother
5480 	 * testing everything else. */
5481 
5482 	if (network->time_stamp[0] < match->network->time_stamp[0]) {
5483 		IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5484 				"current network.\n",
5485 				print_ssid(ssid, match->network->ssid,
5486 					   match->network->ssid_len));
5487 		return 0;
5488 	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5489 		IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5490 				"current network.\n",
5491 				print_ssid(ssid, match->network->ssid,
5492 					   match->network->ssid_len));
5493 		return 0;
5494 	}
5495 
5496 	/* Now go through and see if the requested network is valid... */
5497 	if (priv->ieee->scan_age != 0 &&
5498 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5499 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5500 				"because of age: %ums.\n",
5501 				print_ssid(ssid, network->ssid,
5502 					   network->ssid_len),
5503 				network->bssid,
5504 				jiffies_to_msecs(jiffies -
5505 						 network->last_scanned));
5506 		return 0;
5507 	}
5508 
5509 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5510 	    (network->channel != priv->channel)) {
5511 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5512 				"because of channel mismatch: %d != %d.\n",
5513 				print_ssid(ssid, network->ssid,
5514 					   network->ssid_len),
5515 				network->bssid,
5516 				network->channel, priv->channel);
5517 		return 0;
5518 	}
5519 
5520 	/* Verify privacy compatability */
5521 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5522 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5523 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5524 				"because of privacy mismatch: %s != %s.\n",
5525 				print_ssid(ssid, network->ssid,
5526 					   network->ssid_len),
5527 				network->bssid,
5528 				priv->
5529 				capability & CAP_PRIVACY_ON ? "on" : "off",
5530 				network->
5531 				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5532 				"off");
5533 		return 0;
5534 	}
5535 
5536 	if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5537 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5538 				"because of the same BSSID match: %pM"
5539 				".\n", print_ssid(ssid, network->ssid,
5540 						  network->ssid_len),
5541 				network->bssid,
5542 				priv->bssid);
5543 		return 0;
5544 	}
5545 
5546 	/* Filter out any incompatible freq / mode combinations */
5547 	if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5548 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5549 				"because of invalid frequency/mode "
5550 				"combination.\n",
5551 				print_ssid(ssid, network->ssid,
5552 					   network->ssid_len),
5553 				network->bssid);
5554 		return 0;
5555 	}
5556 
5557 	/* Ensure that the rates supported by the driver are compatible with
5558 	 * this AP, including verification of basic rates (mandatory) */
5559 	if (!ipw_compatible_rates(priv, network, &rates)) {
5560 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5561 				"because configured rate mask excludes "
5562 				"AP mandatory rate.\n",
5563 				print_ssid(ssid, network->ssid,
5564 					   network->ssid_len),
5565 				network->bssid);
5566 		return 0;
5567 	}
5568 
5569 	if (rates.num_rates == 0) {
5570 		IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5571 				"because of no compatible rates.\n",
5572 				print_ssid(ssid, network->ssid,
5573 					   network->ssid_len),
5574 				network->bssid);
5575 		return 0;
5576 	}
5577 
5578 	/* TODO: Perform any further minimal comparititive tests.  We do not
5579 	 * want to put too much policy logic here; intelligent scan selection
5580 	 * should occur within a generic IEEE 802.11 user space tool.  */
5581 
5582 	/* Set up 'new' AP to this network */
5583 	ipw_copy_rates(&match->rates, &rates);
5584 	match->network = network;
5585 	IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5586 			print_ssid(ssid, network->ssid, network->ssid_len),
5587 			network->bssid);
5588 
5589 	return 1;
5590 }
5591 
ipw_merge_adhoc_network(struct work_struct * work)5592 static void ipw_merge_adhoc_network(struct work_struct *work)
5593 {
5594 	DECLARE_SSID_BUF(ssid);
5595 	struct ipw_priv *priv =
5596 		container_of(work, struct ipw_priv, merge_networks);
5597 	struct ieee80211_network *network = NULL;
5598 	struct ipw_network_match match = {
5599 		.network = priv->assoc_network
5600 	};
5601 
5602 	if ((priv->status & STATUS_ASSOCIATED) &&
5603 	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5604 		/* First pass through ROAM process -- look for a better
5605 		 * network */
5606 		unsigned long flags;
5607 
5608 		spin_lock_irqsave(&priv->ieee->lock, flags);
5609 		list_for_each_entry(network, &priv->ieee->network_list, list) {
5610 			if (network != priv->assoc_network)
5611 				ipw_find_adhoc_network(priv, &match, network,
5612 						       1);
5613 		}
5614 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5615 
5616 		if (match.network == priv->assoc_network) {
5617 			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5618 					"merge to.\n");
5619 			return;
5620 		}
5621 
5622 		mutex_lock(&priv->mutex);
5623 		if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5624 			IPW_DEBUG_MERGE("remove network %s\n",
5625 					print_ssid(ssid, priv->essid,
5626 						   priv->essid_len));
5627 			ipw_remove_current_network(priv);
5628 		}
5629 
5630 		ipw_disassociate(priv);
5631 		priv->assoc_network = match.network;
5632 		mutex_unlock(&priv->mutex);
5633 		return;
5634 	}
5635 }
5636 
ipw_best_network(struct ipw_priv * priv,struct ipw_network_match * match,struct ieee80211_network * network,int roaming)5637 static int ipw_best_network(struct ipw_priv *priv,
5638 			    struct ipw_network_match *match,
5639 			    struct ieee80211_network *network, int roaming)
5640 {
5641 	struct ipw_supported_rates rates;
5642 	DECLARE_SSID_BUF(ssid);
5643 
5644 	/* Verify that this network's capability is compatible with the
5645 	 * current mode (AdHoc or Infrastructure) */
5646 	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5647 	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5648 	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5649 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5650 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5651 				"capability mismatch.\n",
5652 				print_ssid(ssid, network->ssid,
5653 					   network->ssid_len),
5654 				network->bssid);
5655 		return 0;
5656 	}
5657 
5658 	if (unlikely(roaming)) {
5659 		/* If we are roaming, then ensure check if this is a valid
5660 		 * network to try and roam to */
5661 		if ((network->ssid_len != match->network->ssid_len) ||
5662 		    memcmp(network->ssid, match->network->ssid,
5663 			   network->ssid_len)) {
5664 			IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5665 					"because of non-network ESSID.\n",
5666 					print_ssid(ssid, network->ssid,
5667 						   network->ssid_len),
5668 					network->bssid);
5669 			return 0;
5670 		}
5671 	} else {
5672 		/* If an ESSID has been configured then compare the broadcast
5673 		 * ESSID to ours */
5674 		if ((priv->config & CFG_STATIC_ESSID) &&
5675 		    ((network->ssid_len != priv->essid_len) ||
5676 		     memcmp(network->ssid, priv->essid,
5677 			    min(network->ssid_len, priv->essid_len)))) {
5678 			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5679 			strncpy(escaped,
5680 				print_ssid(ssid, network->ssid,
5681 					   network->ssid_len),
5682 				sizeof(escaped));
5683 			IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5684 					"because of ESSID mismatch: '%s'.\n",
5685 					escaped, network->bssid,
5686 					print_ssid(ssid, priv->essid,
5687 						   priv->essid_len));
5688 			return 0;
5689 		}
5690 	}
5691 
5692 	/* If the old network rate is better than this one, don't bother
5693 	 * testing everything else. */
5694 	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5695 		char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5696 		strncpy(escaped,
5697 			print_ssid(ssid, network->ssid, network->ssid_len),
5698 			sizeof(escaped));
5699 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5700 				"'%s (%pM)' has a stronger signal.\n",
5701 				escaped, network->bssid,
5702 				print_ssid(ssid, match->network->ssid,
5703 					   match->network->ssid_len),
5704 				match->network->bssid);
5705 		return 0;
5706 	}
5707 
5708 	/* If this network has already had an association attempt within the
5709 	 * last 3 seconds, do not try and associate again... */
5710 	if (network->last_associate &&
5711 	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5712 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5713 				"because of storming (%ums since last "
5714 				"assoc attempt).\n",
5715 				print_ssid(ssid, network->ssid,
5716 					   network->ssid_len),
5717 				network->bssid,
5718 				jiffies_to_msecs(jiffies -
5719 						 network->last_associate));
5720 		return 0;
5721 	}
5722 
5723 	/* Now go through and see if the requested network is valid... */
5724 	if (priv->ieee->scan_age != 0 &&
5725 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5726 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5727 				"because of age: %ums.\n",
5728 				print_ssid(ssid, network->ssid,
5729 					   network->ssid_len),
5730 				network->bssid,
5731 				jiffies_to_msecs(jiffies -
5732 						 network->last_scanned));
5733 		return 0;
5734 	}
5735 
5736 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5737 	    (network->channel != priv->channel)) {
5738 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5739 				"because of channel mismatch: %d != %d.\n",
5740 				print_ssid(ssid, network->ssid,
5741 					   network->ssid_len),
5742 				network->bssid,
5743 				network->channel, priv->channel);
5744 		return 0;
5745 	}
5746 
5747 	/* Verify privacy compatability */
5748 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5749 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5750 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5751 				"because of privacy mismatch: %s != %s.\n",
5752 				print_ssid(ssid, network->ssid,
5753 					   network->ssid_len),
5754 				network->bssid,
5755 				priv->capability & CAP_PRIVACY_ON ? "on" :
5756 				"off",
5757 				network->capability &
5758 				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5759 		return 0;
5760 	}
5761 
5762 	if ((priv->config & CFG_STATIC_BSSID) &&
5763 	    memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5764 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5765 				"because of BSSID mismatch: %pM.\n",
5766 				print_ssid(ssid, network->ssid,
5767 					   network->ssid_len),
5768 				network->bssid, priv->bssid);
5769 		return 0;
5770 	}
5771 
5772 	/* Filter out any incompatible freq / mode combinations */
5773 	if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5774 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5775 				"because of invalid frequency/mode "
5776 				"combination.\n",
5777 				print_ssid(ssid, network->ssid,
5778 					   network->ssid_len),
5779 				network->bssid);
5780 		return 0;
5781 	}
5782 
5783 	/* Filter out invalid channel in current GEO */
5784 	if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5785 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5786 				"because of invalid channel in current GEO\n",
5787 				print_ssid(ssid, network->ssid,
5788 					   network->ssid_len),
5789 				network->bssid);
5790 		return 0;
5791 	}
5792 
5793 	/* Ensure that the rates supported by the driver are compatible with
5794 	 * this AP, including verification of basic rates (mandatory) */
5795 	if (!ipw_compatible_rates(priv, network, &rates)) {
5796 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5797 				"because configured rate mask excludes "
5798 				"AP mandatory rate.\n",
5799 				print_ssid(ssid, network->ssid,
5800 					   network->ssid_len),
5801 				network->bssid);
5802 		return 0;
5803 	}
5804 
5805 	if (rates.num_rates == 0) {
5806 		IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5807 				"because of no compatible rates.\n",
5808 				print_ssid(ssid, network->ssid,
5809 					   network->ssid_len),
5810 				network->bssid);
5811 		return 0;
5812 	}
5813 
5814 	/* TODO: Perform any further minimal comparititive tests.  We do not
5815 	 * want to put too much policy logic here; intelligent scan selection
5816 	 * should occur within a generic IEEE 802.11 user space tool.  */
5817 
5818 	/* Set up 'new' AP to this network */
5819 	ipw_copy_rates(&match->rates, &rates);
5820 	match->network = network;
5821 
5822 	IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5823 			print_ssid(ssid, network->ssid, network->ssid_len),
5824 			network->bssid);
5825 
5826 	return 1;
5827 }
5828 
ipw_adhoc_create(struct ipw_priv * priv,struct ieee80211_network * network)5829 static void ipw_adhoc_create(struct ipw_priv *priv,
5830 			     struct ieee80211_network *network)
5831 {
5832 	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5833 	int i;
5834 
5835 	/*
5836 	 * For the purposes of scanning, we can set our wireless mode
5837 	 * to trigger scans across combinations of bands, but when it
5838 	 * comes to creating a new ad-hoc network, we have tell the FW
5839 	 * exactly which band to use.
5840 	 *
5841 	 * We also have the possibility of an invalid channel for the
5842 	 * chossen band.  Attempting to create a new ad-hoc network
5843 	 * with an invalid channel for wireless mode will trigger a
5844 	 * FW fatal error.
5845 	 *
5846 	 */
5847 	switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5848 	case IEEE80211_52GHZ_BAND:
5849 		network->mode = IEEE_A;
5850 		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5851 		BUG_ON(i == -1);
5852 		if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5853 			IPW_WARNING("Overriding invalid channel\n");
5854 			priv->channel = geo->a[0].channel;
5855 		}
5856 		break;
5857 
5858 	case IEEE80211_24GHZ_BAND:
5859 		if (priv->ieee->mode & IEEE_G)
5860 			network->mode = IEEE_G;
5861 		else
5862 			network->mode = IEEE_B;
5863 		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5864 		BUG_ON(i == -1);
5865 		if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5866 			IPW_WARNING("Overriding invalid channel\n");
5867 			priv->channel = geo->bg[0].channel;
5868 		}
5869 		break;
5870 
5871 	default:
5872 		IPW_WARNING("Overriding invalid channel\n");
5873 		if (priv->ieee->mode & IEEE_A) {
5874 			network->mode = IEEE_A;
5875 			priv->channel = geo->a[0].channel;
5876 		} else if (priv->ieee->mode & IEEE_G) {
5877 			network->mode = IEEE_G;
5878 			priv->channel = geo->bg[0].channel;
5879 		} else {
5880 			network->mode = IEEE_B;
5881 			priv->channel = geo->bg[0].channel;
5882 		}
5883 		break;
5884 	}
5885 
5886 	network->channel = priv->channel;
5887 	priv->config |= CFG_ADHOC_PERSIST;
5888 	ipw_create_bssid(priv, network->bssid);
5889 	network->ssid_len = priv->essid_len;
5890 	memcpy(network->ssid, priv->essid, priv->essid_len);
5891 	memset(&network->stats, 0, sizeof(network->stats));
5892 	network->capability = WLAN_CAPABILITY_IBSS;
5893 	if (!(priv->config & CFG_PREAMBLE_LONG))
5894 		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5895 	if (priv->capability & CAP_PRIVACY_ON)
5896 		network->capability |= WLAN_CAPABILITY_PRIVACY;
5897 	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5898 	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5899 	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5900 	memcpy(network->rates_ex,
5901 	       &priv->rates.supported_rates[network->rates_len],
5902 	       network->rates_ex_len);
5903 	network->last_scanned = 0;
5904 	network->flags = 0;
5905 	network->last_associate = 0;
5906 	network->time_stamp[0] = 0;
5907 	network->time_stamp[1] = 0;
5908 	network->beacon_interval = 100;	/* Default */
5909 	network->listen_interval = 10;	/* Default */
5910 	network->atim_window = 0;	/* Default */
5911 	network->wpa_ie_len = 0;
5912 	network->rsn_ie_len = 0;
5913 }
5914 
ipw_send_tgi_tx_key(struct ipw_priv * priv,int type,int index)5915 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5916 {
5917 	struct ipw_tgi_tx_key key;
5918 
5919 	if (!(priv->ieee->sec.flags & (1 << index)))
5920 		return;
5921 
5922 	key.key_id = index;
5923 	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5924 	key.security_type = type;
5925 	key.station_index = 0;	/* always 0 for BSS */
5926 	key.flags = 0;
5927 	/* 0 for new key; previous value of counter (after fatal error) */
5928 	key.tx_counter[0] = cpu_to_le32(0);
5929 	key.tx_counter[1] = cpu_to_le32(0);
5930 
5931 	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5932 }
5933 
ipw_send_wep_keys(struct ipw_priv * priv,int type)5934 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5935 {
5936 	struct ipw_wep_key key;
5937 	int i;
5938 
5939 	key.cmd_id = DINO_CMD_WEP_KEY;
5940 	key.seq_num = 0;
5941 
5942 	/* Note: AES keys cannot be set for multiple times.
5943 	 * Only set it at the first time. */
5944 	for (i = 0; i < 4; i++) {
5945 		key.key_index = i | type;
5946 		if (!(priv->ieee->sec.flags & (1 << i))) {
5947 			key.key_size = 0;
5948 			continue;
5949 		}
5950 
5951 		key.key_size = priv->ieee->sec.key_sizes[i];
5952 		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5953 
5954 		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5955 	}
5956 }
5957 
ipw_set_hw_decrypt_unicast(struct ipw_priv * priv,int level)5958 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5959 {
5960 	if (priv->ieee->host_encrypt)
5961 		return;
5962 
5963 	switch (level) {
5964 	case SEC_LEVEL_3:
5965 		priv->sys_config.disable_unicast_decryption = 0;
5966 		priv->ieee->host_decrypt = 0;
5967 		break;
5968 	case SEC_LEVEL_2:
5969 		priv->sys_config.disable_unicast_decryption = 1;
5970 		priv->ieee->host_decrypt = 1;
5971 		break;
5972 	case SEC_LEVEL_1:
5973 		priv->sys_config.disable_unicast_decryption = 0;
5974 		priv->ieee->host_decrypt = 0;
5975 		break;
5976 	case SEC_LEVEL_0:
5977 		priv->sys_config.disable_unicast_decryption = 1;
5978 		break;
5979 	default:
5980 		break;
5981 	}
5982 }
5983 
ipw_set_hw_decrypt_multicast(struct ipw_priv * priv,int level)5984 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5985 {
5986 	if (priv->ieee->host_encrypt)
5987 		return;
5988 
5989 	switch (level) {
5990 	case SEC_LEVEL_3:
5991 		priv->sys_config.disable_multicast_decryption = 0;
5992 		break;
5993 	case SEC_LEVEL_2:
5994 		priv->sys_config.disable_multicast_decryption = 1;
5995 		break;
5996 	case SEC_LEVEL_1:
5997 		priv->sys_config.disable_multicast_decryption = 0;
5998 		break;
5999 	case SEC_LEVEL_0:
6000 		priv->sys_config.disable_multicast_decryption = 1;
6001 		break;
6002 	default:
6003 		break;
6004 	}
6005 }
6006 
ipw_set_hwcrypto_keys(struct ipw_priv * priv)6007 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6008 {
6009 	switch (priv->ieee->sec.level) {
6010 	case SEC_LEVEL_3:
6011 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6012 			ipw_send_tgi_tx_key(priv,
6013 					    DCT_FLAG_EXT_SECURITY_CCM,
6014 					    priv->ieee->sec.active_key);
6015 
6016 		if (!priv->ieee->host_mc_decrypt)
6017 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6018 		break;
6019 	case SEC_LEVEL_2:
6020 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6021 			ipw_send_tgi_tx_key(priv,
6022 					    DCT_FLAG_EXT_SECURITY_TKIP,
6023 					    priv->ieee->sec.active_key);
6024 		break;
6025 	case SEC_LEVEL_1:
6026 		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6027 		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6028 		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6029 		break;
6030 	case SEC_LEVEL_0:
6031 	default:
6032 		break;
6033 	}
6034 }
6035 
ipw_adhoc_check(void * data)6036 static void ipw_adhoc_check(void *data)
6037 {
6038 	struct ipw_priv *priv = data;
6039 
6040 	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6041 	    !(priv->config & CFG_ADHOC_PERSIST)) {
6042 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6043 			  IPW_DL_STATE | IPW_DL_ASSOC,
6044 			  "Missed beacon: %d - disassociate\n",
6045 			  priv->missed_adhoc_beacons);
6046 		ipw_remove_current_network(priv);
6047 		ipw_disassociate(priv);
6048 		return;
6049 	}
6050 
6051 	queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6052 			   le16_to_cpu(priv->assoc_request.beacon_interval));
6053 }
6054 
ipw_bg_adhoc_check(struct work_struct * work)6055 static void ipw_bg_adhoc_check(struct work_struct *work)
6056 {
6057 	struct ipw_priv *priv =
6058 		container_of(work, struct ipw_priv, adhoc_check.work);
6059 	mutex_lock(&priv->mutex);
6060 	ipw_adhoc_check(priv);
6061 	mutex_unlock(&priv->mutex);
6062 }
6063 
ipw_debug_config(struct ipw_priv * priv)6064 static void ipw_debug_config(struct ipw_priv *priv)
6065 {
6066 	DECLARE_SSID_BUF(ssid);
6067 	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6068 		       "[CFG 0x%08X]\n", priv->config);
6069 	if (priv->config & CFG_STATIC_CHANNEL)
6070 		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6071 	else
6072 		IPW_DEBUG_INFO("Channel unlocked.\n");
6073 	if (priv->config & CFG_STATIC_ESSID)
6074 		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6075 			       print_ssid(ssid, priv->essid, priv->essid_len));
6076 	else
6077 		IPW_DEBUG_INFO("ESSID unlocked.\n");
6078 	if (priv->config & CFG_STATIC_BSSID)
6079 		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6080 	else
6081 		IPW_DEBUG_INFO("BSSID unlocked.\n");
6082 	if (priv->capability & CAP_PRIVACY_ON)
6083 		IPW_DEBUG_INFO("PRIVACY on\n");
6084 	else
6085 		IPW_DEBUG_INFO("PRIVACY off\n");
6086 	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6087 }
6088 
ipw_set_fixed_rate(struct ipw_priv * priv,int mode)6089 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6090 {
6091 	/* TODO: Verify that this works... */
6092 	struct ipw_fixed_rate fr = {
6093 		.tx_rates = priv->rates_mask
6094 	};
6095 	u32 reg;
6096 	u16 mask = 0;
6097 
6098 	/* Identify 'current FW band' and match it with the fixed
6099 	 * Tx rates */
6100 
6101 	switch (priv->ieee->freq_band) {
6102 	case IEEE80211_52GHZ_BAND:	/* A only */
6103 		/* IEEE_A */
6104 		if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6105 			/* Invalid fixed rate mask */
6106 			IPW_DEBUG_WX
6107 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6108 			fr.tx_rates = 0;
6109 			break;
6110 		}
6111 
6112 		fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6113 		break;
6114 
6115 	default:		/* 2.4Ghz or Mixed */
6116 		/* IEEE_B */
6117 		if (mode == IEEE_B) {
6118 			if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6119 				/* Invalid fixed rate mask */
6120 				IPW_DEBUG_WX
6121 				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6122 				fr.tx_rates = 0;
6123 			}
6124 			break;
6125 		}
6126 
6127 		/* IEEE_G */
6128 		if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6129 				    IEEE80211_OFDM_RATES_MASK)) {
6130 			/* Invalid fixed rate mask */
6131 			IPW_DEBUG_WX
6132 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6133 			fr.tx_rates = 0;
6134 			break;
6135 		}
6136 
6137 		if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6138 			mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6139 			fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6140 		}
6141 
6142 		if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6143 			mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6144 			fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6145 		}
6146 
6147 		if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6148 			mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6149 			fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6150 		}
6151 
6152 		fr.tx_rates |= mask;
6153 		break;
6154 	}
6155 
6156 	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6157 	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6158 }
6159 
ipw_abort_scan(struct ipw_priv * priv)6160 static void ipw_abort_scan(struct ipw_priv *priv)
6161 {
6162 	int err;
6163 
6164 	if (priv->status & STATUS_SCAN_ABORTING) {
6165 		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6166 		return;
6167 	}
6168 	priv->status |= STATUS_SCAN_ABORTING;
6169 
6170 	err = ipw_send_scan_abort(priv);
6171 	if (err)
6172 		IPW_DEBUG_HC("Request to abort scan failed.\n");
6173 }
6174 
ipw_add_scan_channels(struct ipw_priv * priv,struct ipw_scan_request_ext * scan,int scan_type)6175 static void ipw_add_scan_channels(struct ipw_priv *priv,
6176 				  struct ipw_scan_request_ext *scan,
6177 				  int scan_type)
6178 {
6179 	int channel_index = 0;
6180 	const struct ieee80211_geo *geo;
6181 	int i;
6182 
6183 	geo = ieee80211_get_geo(priv->ieee);
6184 
6185 	if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6186 		int start = channel_index;
6187 		for (i = 0; i < geo->a_channels; i++) {
6188 			if ((priv->status & STATUS_ASSOCIATED) &&
6189 			    geo->a[i].channel == priv->channel)
6190 				continue;
6191 			channel_index++;
6192 			scan->channels_list[channel_index] = geo->a[i].channel;
6193 			ipw_set_scan_type(scan, channel_index,
6194 					  geo->a[i].
6195 					  flags & IEEE80211_CH_PASSIVE_ONLY ?
6196 					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6197 					  scan_type);
6198 		}
6199 
6200 		if (start != channel_index) {
6201 			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6202 			    (channel_index - start);
6203 			channel_index++;
6204 		}
6205 	}
6206 
6207 	if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6208 		int start = channel_index;
6209 		if (priv->config & CFG_SPEED_SCAN) {
6210 			int index;
6211 			u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6212 				/* nop out the list */
6213 				[0] = 0
6214 			};
6215 
6216 			u8 channel;
6217 			while (channel_index < IPW_SCAN_CHANNELS) {
6218 				channel =
6219 				    priv->speed_scan[priv->speed_scan_pos];
6220 				if (channel == 0) {
6221 					priv->speed_scan_pos = 0;
6222 					channel = priv->speed_scan[0];
6223 				}
6224 				if ((priv->status & STATUS_ASSOCIATED) &&
6225 				    channel == priv->channel) {
6226 					priv->speed_scan_pos++;
6227 					continue;
6228 				}
6229 
6230 				/* If this channel has already been
6231 				 * added in scan, break from loop
6232 				 * and this will be the first channel
6233 				 * in the next scan.
6234 				 */
6235 				if (channels[channel - 1] != 0)
6236 					break;
6237 
6238 				channels[channel - 1] = 1;
6239 				priv->speed_scan_pos++;
6240 				channel_index++;
6241 				scan->channels_list[channel_index] = channel;
6242 				index =
6243 				    ieee80211_channel_to_index(priv->ieee, channel);
6244 				ipw_set_scan_type(scan, channel_index,
6245 						  geo->bg[index].
6246 						  flags &
6247 						  IEEE80211_CH_PASSIVE_ONLY ?
6248 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6249 						  : scan_type);
6250 			}
6251 		} else {
6252 			for (i = 0; i < geo->bg_channels; i++) {
6253 				if ((priv->status & STATUS_ASSOCIATED) &&
6254 				    geo->bg[i].channel == priv->channel)
6255 					continue;
6256 				channel_index++;
6257 				scan->channels_list[channel_index] =
6258 				    geo->bg[i].channel;
6259 				ipw_set_scan_type(scan, channel_index,
6260 						  geo->bg[i].
6261 						  flags &
6262 						  IEEE80211_CH_PASSIVE_ONLY ?
6263 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6264 						  : scan_type);
6265 			}
6266 		}
6267 
6268 		if (start != channel_index) {
6269 			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6270 			    (channel_index - start);
6271 		}
6272 	}
6273 }
6274 
ipw_passive_dwell_time(struct ipw_priv * priv)6275 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6276 {
6277 	/* staying on passive channels longer than the DTIM interval during a
6278 	 * scan, while associated, causes the firmware to cancel the scan
6279 	 * without notification. Hence, don't stay on passive channels longer
6280 	 * than the beacon interval.
6281 	 */
6282 	if (priv->status & STATUS_ASSOCIATED
6283 	    && priv->assoc_network->beacon_interval > 10)
6284 		return priv->assoc_network->beacon_interval - 10;
6285 	else
6286 		return 120;
6287 }
6288 
ipw_request_scan_helper(struct ipw_priv * priv,int type,int direct)6289 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6290 {
6291 	struct ipw_scan_request_ext scan;
6292 	int err = 0, scan_type;
6293 
6294 	if (!(priv->status & STATUS_INIT) ||
6295 	    (priv->status & STATUS_EXIT_PENDING))
6296 		return 0;
6297 
6298 	mutex_lock(&priv->mutex);
6299 
6300 	if (direct && (priv->direct_scan_ssid_len == 0)) {
6301 		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6302 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6303 		goto done;
6304 	}
6305 
6306 	if (priv->status & STATUS_SCANNING) {
6307 		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
6308 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6309 					STATUS_SCAN_PENDING;
6310 		goto done;
6311 	}
6312 
6313 	if (!(priv->status & STATUS_SCAN_FORCED) &&
6314 	    priv->status & STATUS_SCAN_ABORTING) {
6315 		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6316 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6317 					STATUS_SCAN_PENDING;
6318 		goto done;
6319 	}
6320 
6321 	if (priv->status & STATUS_RF_KILL_MASK) {
6322 		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6323 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6324 					STATUS_SCAN_PENDING;
6325 		goto done;
6326 	}
6327 
6328 	memset(&scan, 0, sizeof(scan));
6329 	scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6330 
6331 	if (type == IW_SCAN_TYPE_PASSIVE) {
6332 		IPW_DEBUG_WX("use passive scanning\n");
6333 		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6334 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6335 			cpu_to_le16(ipw_passive_dwell_time(priv));
6336 		ipw_add_scan_channels(priv, &scan, scan_type);
6337 		goto send_request;
6338 	}
6339 
6340 	/* Use active scan by default. */
6341 	if (priv->config & CFG_SPEED_SCAN)
6342 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6343 			cpu_to_le16(30);
6344 	else
6345 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6346 			cpu_to_le16(20);
6347 
6348 	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6349 		cpu_to_le16(20);
6350 
6351 	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6352 		cpu_to_le16(ipw_passive_dwell_time(priv));
6353 	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6354 
6355 #ifdef CONFIG_IPW2200_MONITOR
6356 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6357 		u8 channel;
6358 		u8 band = 0;
6359 
6360 		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6361 		case IEEE80211_52GHZ_BAND:
6362 			band = (u8) (IPW_A_MODE << 6) | 1;
6363 			channel = priv->channel;
6364 			break;
6365 
6366 		case IEEE80211_24GHZ_BAND:
6367 			band = (u8) (IPW_B_MODE << 6) | 1;
6368 			channel = priv->channel;
6369 			break;
6370 
6371 		default:
6372 			band = (u8) (IPW_B_MODE << 6) | 1;
6373 			channel = 9;
6374 			break;
6375 		}
6376 
6377 		scan.channels_list[0] = band;
6378 		scan.channels_list[1] = channel;
6379 		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6380 
6381 		/* NOTE:  The card will sit on this channel for this time
6382 		 * period.  Scan aborts are timing sensitive and frequently
6383 		 * result in firmware restarts.  As such, it is best to
6384 		 * set a small dwell_time here and just keep re-issuing
6385 		 * scans.  Otherwise fast channel hopping will not actually
6386 		 * hop channels.
6387 		 *
6388 		 * TODO: Move SPEED SCAN support to all modes and bands */
6389 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6390 			cpu_to_le16(2000);
6391 	} else {
6392 #endif				/* CONFIG_IPW2200_MONITOR */
6393 		/* Honor direct scans first, otherwise if we are roaming make
6394 		 * this a direct scan for the current network.  Finally,
6395 		 * ensure that every other scan is a fast channel hop scan */
6396 		if (direct) {
6397 			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6398 			                    priv->direct_scan_ssid_len);
6399 			if (err) {
6400 				IPW_DEBUG_HC("Attempt to send SSID command  "
6401 					     "failed\n");
6402 				goto done;
6403 			}
6404 
6405 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6406 		} else if ((priv->status & STATUS_ROAMING)
6407 			   || (!(priv->status & STATUS_ASSOCIATED)
6408 			       && (priv->config & CFG_STATIC_ESSID)
6409 			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
6410 			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6411 			if (err) {
6412 				IPW_DEBUG_HC("Attempt to send SSID command "
6413 					     "failed.\n");
6414 				goto done;
6415 			}
6416 
6417 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6418 		} else
6419 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6420 
6421 		ipw_add_scan_channels(priv, &scan, scan_type);
6422 #ifdef CONFIG_IPW2200_MONITOR
6423 	}
6424 #endif
6425 
6426 send_request:
6427 	err = ipw_send_scan_request_ext(priv, &scan);
6428 	if (err) {
6429 		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6430 		goto done;
6431 	}
6432 
6433 	priv->status |= STATUS_SCANNING;
6434 	if (direct) {
6435 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6436 		priv->direct_scan_ssid_len = 0;
6437 	} else
6438 		priv->status &= ~STATUS_SCAN_PENDING;
6439 
6440 	queue_delayed_work(priv->workqueue, &priv->scan_check,
6441 			   IPW_SCAN_CHECK_WATCHDOG);
6442 done:
6443 	mutex_unlock(&priv->mutex);
6444 	return err;
6445 }
6446 
ipw_request_passive_scan(struct work_struct * work)6447 static void ipw_request_passive_scan(struct work_struct *work)
6448 {
6449 	struct ipw_priv *priv =
6450 		container_of(work, struct ipw_priv, request_passive_scan.work);
6451 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6452 }
6453 
ipw_request_scan(struct work_struct * work)6454 static void ipw_request_scan(struct work_struct *work)
6455 {
6456 	struct ipw_priv *priv =
6457 		container_of(work, struct ipw_priv, request_scan.work);
6458 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6459 }
6460 
ipw_request_direct_scan(struct work_struct * work)6461 static void ipw_request_direct_scan(struct work_struct *work)
6462 {
6463 	struct ipw_priv *priv =
6464 		container_of(work, struct ipw_priv, request_direct_scan.work);
6465 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6466 }
6467 
ipw_bg_abort_scan(struct work_struct * work)6468 static void ipw_bg_abort_scan(struct work_struct *work)
6469 {
6470 	struct ipw_priv *priv =
6471 		container_of(work, struct ipw_priv, abort_scan);
6472 	mutex_lock(&priv->mutex);
6473 	ipw_abort_scan(priv);
6474 	mutex_unlock(&priv->mutex);
6475 }
6476 
ipw_wpa_enable(struct ipw_priv * priv,int value)6477 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6478 {
6479 	/* This is called when wpa_supplicant loads and closes the driver
6480 	 * interface. */
6481 	priv->ieee->wpa_enabled = value;
6482 	return 0;
6483 }
6484 
ipw_wpa_set_auth_algs(struct ipw_priv * priv,int value)6485 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6486 {
6487 	struct ieee80211_device *ieee = priv->ieee;
6488 	struct ieee80211_security sec = {
6489 		.flags = SEC_AUTH_MODE,
6490 	};
6491 	int ret = 0;
6492 
6493 	if (value & IW_AUTH_ALG_SHARED_KEY) {
6494 		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6495 		ieee->open_wep = 0;
6496 	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6497 		sec.auth_mode = WLAN_AUTH_OPEN;
6498 		ieee->open_wep = 1;
6499 	} else if (value & IW_AUTH_ALG_LEAP) {
6500 		sec.auth_mode = WLAN_AUTH_LEAP;
6501 		ieee->open_wep = 1;
6502 	} else
6503 		return -EINVAL;
6504 
6505 	if (ieee->set_security)
6506 		ieee->set_security(ieee->dev, &sec);
6507 	else
6508 		ret = -EOPNOTSUPP;
6509 
6510 	return ret;
6511 }
6512 
ipw_wpa_assoc_frame(struct ipw_priv * priv,char * wpa_ie,int wpa_ie_len)6513 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6514 				int wpa_ie_len)
6515 {
6516 	/* make sure WPA is enabled */
6517 	ipw_wpa_enable(priv, 1);
6518 }
6519 
ipw_set_rsn_capa(struct ipw_priv * priv,char * capabilities,int length)6520 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6521 			    char *capabilities, int length)
6522 {
6523 	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6524 
6525 	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6526 				capabilities);
6527 }
6528 
6529 /*
6530  * WE-18 support
6531  */
6532 
6533 /* SIOCSIWGENIE */
ipw_wx_set_genie(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)6534 static int ipw_wx_set_genie(struct net_device *dev,
6535 			    struct iw_request_info *info,
6536 			    union iwreq_data *wrqu, char *extra)
6537 {
6538 	struct ipw_priv *priv = ieee80211_priv(dev);
6539 	struct ieee80211_device *ieee = priv->ieee;
6540 	u8 *buf;
6541 	int err = 0;
6542 
6543 	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6544 	    (wrqu->data.length && extra == NULL))
6545 		return -EINVAL;
6546 
6547 	if (wrqu->data.length) {
6548 		buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6549 		if (buf == NULL) {
6550 			err = -ENOMEM;
6551 			goto out;
6552 		}
6553 
6554 		memcpy(buf, extra, wrqu->data.length);
6555 		kfree(ieee->wpa_ie);
6556 		ieee->wpa_ie = buf;
6557 		ieee->wpa_ie_len = wrqu->data.length;
6558 	} else {
6559 		kfree(ieee->wpa_ie);
6560 		ieee->wpa_ie = NULL;
6561 		ieee->wpa_ie_len = 0;
6562 	}
6563 
6564 	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6565       out:
6566 	return err;
6567 }
6568 
6569 /* SIOCGIWGENIE */
ipw_wx_get_genie(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)6570 static int ipw_wx_get_genie(struct net_device *dev,
6571 			    struct iw_request_info *info,
6572 			    union iwreq_data *wrqu, char *extra)
6573 {
6574 	struct ipw_priv *priv = ieee80211_priv(dev);
6575 	struct ieee80211_device *ieee = priv->ieee;
6576 	int err = 0;
6577 
6578 	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6579 		wrqu->data.length = 0;
6580 		goto out;
6581 	}
6582 
6583 	if (wrqu->data.length < ieee->wpa_ie_len) {
6584 		err = -E2BIG;
6585 		goto out;
6586 	}
6587 
6588 	wrqu->data.length = ieee->wpa_ie_len;
6589 	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6590 
6591       out:
6592 	return err;
6593 }
6594 
wext_cipher2level(int cipher)6595 static int wext_cipher2level(int cipher)
6596 {
6597 	switch (cipher) {
6598 	case IW_AUTH_CIPHER_NONE:
6599 		return SEC_LEVEL_0;
6600 	case IW_AUTH_CIPHER_WEP40:
6601 	case IW_AUTH_CIPHER_WEP104:
6602 		return SEC_LEVEL_1;
6603 	case IW_AUTH_CIPHER_TKIP:
6604 		return SEC_LEVEL_2;
6605 	case IW_AUTH_CIPHER_CCMP:
6606 		return SEC_LEVEL_3;
6607 	default:
6608 		return -1;
6609 	}
6610 }
6611 
6612 /* SIOCSIWAUTH */
ipw_wx_set_auth(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)6613 static int ipw_wx_set_auth(struct net_device *dev,
6614 			   struct iw_request_info *info,
6615 			   union iwreq_data *wrqu, char *extra)
6616 {
6617 	struct ipw_priv *priv = ieee80211_priv(dev);
6618 	struct ieee80211_device *ieee = priv->ieee;
6619 	struct iw_param *param = &wrqu->param;
6620 	struct lib80211_crypt_data *crypt;
6621 	unsigned long flags;
6622 	int ret = 0;
6623 
6624 	switch (param->flags & IW_AUTH_INDEX) {
6625 	case IW_AUTH_WPA_VERSION:
6626 		break;
6627 	case IW_AUTH_CIPHER_PAIRWISE:
6628 		ipw_set_hw_decrypt_unicast(priv,
6629 					   wext_cipher2level(param->value));
6630 		break;
6631 	case IW_AUTH_CIPHER_GROUP:
6632 		ipw_set_hw_decrypt_multicast(priv,
6633 					     wext_cipher2level(param->value));
6634 		break;
6635 	case IW_AUTH_KEY_MGMT:
6636 		/*
6637 		 * ipw2200 does not use these parameters
6638 		 */
6639 		break;
6640 
6641 	case IW_AUTH_TKIP_COUNTERMEASURES:
6642 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6643 		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6644 			break;
6645 
6646 		flags = crypt->ops->get_flags(crypt->priv);
6647 
6648 		if (param->value)
6649 			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6650 		else
6651 			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6652 
6653 		crypt->ops->set_flags(flags, crypt->priv);
6654 
6655 		break;
6656 
6657 	case IW_AUTH_DROP_UNENCRYPTED:{
6658 			/* HACK:
6659 			 *
6660 			 * wpa_supplicant calls set_wpa_enabled when the driver
6661 			 * is loaded and unloaded, regardless of if WPA is being
6662 			 * used.  No other calls are made which can be used to
6663 			 * determine if encryption will be used or not prior to
6664 			 * association being expected.  If encryption is not being
6665 			 * used, drop_unencrypted is set to false, else true -- we
6666 			 * can use this to determine if the CAP_PRIVACY_ON bit should
6667 			 * be set.
6668 			 */
6669 			struct ieee80211_security sec = {
6670 				.flags = SEC_ENABLED,
6671 				.enabled = param->value,
6672 			};
6673 			priv->ieee->drop_unencrypted = param->value;
6674 			/* We only change SEC_LEVEL for open mode. Others
6675 			 * are set by ipw_wpa_set_encryption.
6676 			 */
6677 			if (!param->value) {
6678 				sec.flags |= SEC_LEVEL;
6679 				sec.level = SEC_LEVEL_0;
6680 			} else {
6681 				sec.flags |= SEC_LEVEL;
6682 				sec.level = SEC_LEVEL_1;
6683 			}
6684 			if (priv->ieee->set_security)
6685 				priv->ieee->set_security(priv->ieee->dev, &sec);
6686 			break;
6687 		}
6688 
6689 	case IW_AUTH_80211_AUTH_ALG:
6690 		ret = ipw_wpa_set_auth_algs(priv, param->value);
6691 		break;
6692 
6693 	case IW_AUTH_WPA_ENABLED:
6694 		ret = ipw_wpa_enable(priv, param->value);
6695 		ipw_disassociate(priv);
6696 		break;
6697 
6698 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6699 		ieee->ieee802_1x = param->value;
6700 		break;
6701 
6702 	case IW_AUTH_PRIVACY_INVOKED:
6703 		ieee->privacy_invoked = param->value;
6704 		break;
6705 
6706 	default:
6707 		return -EOPNOTSUPP;
6708 	}
6709 	return ret;
6710 }
6711 
6712 /* SIOCGIWAUTH */
ipw_wx_get_auth(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)6713 static int ipw_wx_get_auth(struct net_device *dev,
6714 			   struct iw_request_info *info,
6715 			   union iwreq_data *wrqu, char *extra)
6716 {
6717 	struct ipw_priv *priv = ieee80211_priv(dev);
6718 	struct ieee80211_device *ieee = priv->ieee;
6719 	struct lib80211_crypt_data *crypt;
6720 	struct iw_param *param = &wrqu->param;
6721 	int ret = 0;
6722 
6723 	switch (param->flags & IW_AUTH_INDEX) {
6724 	case IW_AUTH_WPA_VERSION:
6725 	case IW_AUTH_CIPHER_PAIRWISE:
6726 	case IW_AUTH_CIPHER_GROUP:
6727 	case IW_AUTH_KEY_MGMT:
6728 		/*
6729 		 * wpa_supplicant will control these internally
6730 		 */
6731 		ret = -EOPNOTSUPP;
6732 		break;
6733 
6734 	case IW_AUTH_TKIP_COUNTERMEASURES:
6735 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6736 		if (!crypt || !crypt->ops->get_flags)
6737 			break;
6738 
6739 		param->value = (crypt->ops->get_flags(crypt->priv) &
6740 				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6741 
6742 		break;
6743 
6744 	case IW_AUTH_DROP_UNENCRYPTED:
6745 		param->value = ieee->drop_unencrypted;
6746 		break;
6747 
6748 	case IW_AUTH_80211_AUTH_ALG:
6749 		param->value = ieee->sec.auth_mode;
6750 		break;
6751 
6752 	case IW_AUTH_WPA_ENABLED:
6753 		param->value = ieee->wpa_enabled;
6754 		break;
6755 
6756 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6757 		param->value = ieee->ieee802_1x;
6758 		break;
6759 
6760 	case IW_AUTH_ROAMING_CONTROL:
6761 	case IW_AUTH_PRIVACY_INVOKED:
6762 		param->value = ieee->privacy_invoked;
6763 		break;
6764 
6765 	default:
6766 		return -EOPNOTSUPP;
6767 	}
6768 	return 0;
6769 }
6770 
6771 /* SIOCSIWENCODEEXT */
ipw_wx_set_encodeext(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)6772 static int ipw_wx_set_encodeext(struct net_device *dev,
6773 				struct iw_request_info *info,
6774 				union iwreq_data *wrqu, char *extra)
6775 {
6776 	struct ipw_priv *priv = ieee80211_priv(dev);
6777 	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6778 
6779 	if (hwcrypto) {
6780 		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6781 			/* IPW HW can't build TKIP MIC,
6782 			   host decryption still needed */
6783 			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6784 				priv->ieee->host_mc_decrypt = 1;
6785 			else {
6786 				priv->ieee->host_encrypt = 0;
6787 				priv->ieee->host_encrypt_msdu = 1;
6788 				priv->ieee->host_decrypt = 1;
6789 			}
6790 		} else {
6791 			priv->ieee->host_encrypt = 0;
6792 			priv->ieee->host_encrypt_msdu = 0;
6793 			priv->ieee->host_decrypt = 0;
6794 			priv->ieee->host_mc_decrypt = 0;
6795 		}
6796 	}
6797 
6798 	return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6799 }
6800 
6801 /* SIOCGIWENCODEEXT */
ipw_wx_get_encodeext(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)6802 static int ipw_wx_get_encodeext(struct net_device *dev,
6803 				struct iw_request_info *info,
6804 				union iwreq_data *wrqu, char *extra)
6805 {
6806 	struct ipw_priv *priv = ieee80211_priv(dev);
6807 	return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6808 }
6809 
6810 /* SIOCSIWMLME */
ipw_wx_set_mlme(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)6811 static int ipw_wx_set_mlme(struct net_device *dev,
6812 			   struct iw_request_info *info,
6813 			   union iwreq_data *wrqu, char *extra)
6814 {
6815 	struct ipw_priv *priv = ieee80211_priv(dev);
6816 	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6817 	__le16 reason;
6818 
6819 	reason = cpu_to_le16(mlme->reason_code);
6820 
6821 	switch (mlme->cmd) {
6822 	case IW_MLME_DEAUTH:
6823 		/* silently ignore */
6824 		break;
6825 
6826 	case IW_MLME_DISASSOC:
6827 		ipw_disassociate(priv);
6828 		break;
6829 
6830 	default:
6831 		return -EOPNOTSUPP;
6832 	}
6833 	return 0;
6834 }
6835 
6836 #ifdef CONFIG_IPW2200_QOS
6837 
6838 /* QoS */
6839 /*
6840 * get the modulation type of the current network or
6841 * the card current mode
6842 */
ipw_qos_current_mode(struct ipw_priv * priv)6843 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6844 {
6845 	u8 mode = 0;
6846 
6847 	if (priv->status & STATUS_ASSOCIATED) {
6848 		unsigned long flags;
6849 
6850 		spin_lock_irqsave(&priv->ieee->lock, flags);
6851 		mode = priv->assoc_network->mode;
6852 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6853 	} else {
6854 		mode = priv->ieee->mode;
6855 	}
6856 	IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6857 	return mode;
6858 }
6859 
6860 /*
6861 * Handle management frame beacon and probe response
6862 */
ipw_qos_handle_probe_response(struct ipw_priv * priv,int active_network,struct ieee80211_network * network)6863 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6864 					 int active_network,
6865 					 struct ieee80211_network *network)
6866 {
6867 	u32 size = sizeof(struct ieee80211_qos_parameters);
6868 
6869 	if (network->capability & WLAN_CAPABILITY_IBSS)
6870 		network->qos_data.active = network->qos_data.supported;
6871 
6872 	if (network->flags & NETWORK_HAS_QOS_MASK) {
6873 		if (active_network &&
6874 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6875 			network->qos_data.active = network->qos_data.supported;
6876 
6877 		if ((network->qos_data.active == 1) && (active_network == 1) &&
6878 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6879 		    (network->qos_data.old_param_count !=
6880 		     network->qos_data.param_count)) {
6881 			network->qos_data.old_param_count =
6882 			    network->qos_data.param_count;
6883 			schedule_work(&priv->qos_activate);
6884 			IPW_DEBUG_QOS("QoS parameters change call "
6885 				      "qos_activate\n");
6886 		}
6887 	} else {
6888 		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6889 			memcpy(&network->qos_data.parameters,
6890 			       &def_parameters_CCK, size);
6891 		else
6892 			memcpy(&network->qos_data.parameters,
6893 			       &def_parameters_OFDM, size);
6894 
6895 		if ((network->qos_data.active == 1) && (active_network == 1)) {
6896 			IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6897 			schedule_work(&priv->qos_activate);
6898 		}
6899 
6900 		network->qos_data.active = 0;
6901 		network->qos_data.supported = 0;
6902 	}
6903 	if ((priv->status & STATUS_ASSOCIATED) &&
6904 	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6905 		if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6906 			if (network->capability & WLAN_CAPABILITY_IBSS)
6907 				if ((network->ssid_len ==
6908 				     priv->assoc_network->ssid_len) &&
6909 				    !memcmp(network->ssid,
6910 					    priv->assoc_network->ssid,
6911 					    network->ssid_len)) {
6912 					queue_work(priv->workqueue,
6913 						   &priv->merge_networks);
6914 				}
6915 	}
6916 
6917 	return 0;
6918 }
6919 
6920 /*
6921 * This function set up the firmware to support QoS. It sends
6922 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6923 */
ipw_qos_activate(struct ipw_priv * priv,struct ieee80211_qos_data * qos_network_data)6924 static int ipw_qos_activate(struct ipw_priv *priv,
6925 			    struct ieee80211_qos_data *qos_network_data)
6926 {
6927 	int err;
6928 	struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6929 	struct ieee80211_qos_parameters *active_one = NULL;
6930 	u32 size = sizeof(struct ieee80211_qos_parameters);
6931 	u32 burst_duration;
6932 	int i;
6933 	u8 type;
6934 
6935 	type = ipw_qos_current_mode(priv);
6936 
6937 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6938 	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6939 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6940 	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6941 
6942 	if (qos_network_data == NULL) {
6943 		if (type == IEEE_B) {
6944 			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6945 			active_one = &def_parameters_CCK;
6946 		} else
6947 			active_one = &def_parameters_OFDM;
6948 
6949 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6950 		burst_duration = ipw_qos_get_burst_duration(priv);
6951 		for (i = 0; i < QOS_QUEUE_NUM; i++)
6952 			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6953 			    cpu_to_le16(burst_duration);
6954 	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6955 		if (type == IEEE_B) {
6956 			IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
6957 				      type);
6958 			if (priv->qos_data.qos_enable == 0)
6959 				active_one = &def_parameters_CCK;
6960 			else
6961 				active_one = priv->qos_data.def_qos_parm_CCK;
6962 		} else {
6963 			if (priv->qos_data.qos_enable == 0)
6964 				active_one = &def_parameters_OFDM;
6965 			else
6966 				active_one = priv->qos_data.def_qos_parm_OFDM;
6967 		}
6968 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6969 	} else {
6970 		unsigned long flags;
6971 		int active;
6972 
6973 		spin_lock_irqsave(&priv->ieee->lock, flags);
6974 		active_one = &(qos_network_data->parameters);
6975 		qos_network_data->old_param_count =
6976 		    qos_network_data->param_count;
6977 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6978 		active = qos_network_data->supported;
6979 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6980 
6981 		if (active == 0) {
6982 			burst_duration = ipw_qos_get_burst_duration(priv);
6983 			for (i = 0; i < QOS_QUEUE_NUM; i++)
6984 				qos_parameters[QOS_PARAM_SET_ACTIVE].
6985 				    tx_op_limit[i] = cpu_to_le16(burst_duration);
6986 		}
6987 	}
6988 
6989 	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6990 	err = ipw_send_qos_params_command(priv,
6991 					  (struct ieee80211_qos_parameters *)
6992 					  &(qos_parameters[0]));
6993 	if (err)
6994 		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6995 
6996 	return err;
6997 }
6998 
6999 /*
7000 * send IPW_CMD_WME_INFO to the firmware
7001 */
ipw_qos_set_info_element(struct ipw_priv * priv)7002 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7003 {
7004 	int ret = 0;
7005 	struct ieee80211_qos_information_element qos_info;
7006 
7007 	if (priv == NULL)
7008 		return -1;
7009 
7010 	qos_info.elementID = QOS_ELEMENT_ID;
7011 	qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
7012 
7013 	qos_info.version = QOS_VERSION_1;
7014 	qos_info.ac_info = 0;
7015 
7016 	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7017 	qos_info.qui_type = QOS_OUI_TYPE;
7018 	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7019 
7020 	ret = ipw_send_qos_info_command(priv, &qos_info);
7021 	if (ret != 0) {
7022 		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7023 	}
7024 	return ret;
7025 }
7026 
7027 /*
7028 * Set the QoS parameter with the association request structure
7029 */
ipw_qos_association(struct ipw_priv * priv,struct ieee80211_network * network)7030 static int ipw_qos_association(struct ipw_priv *priv,
7031 			       struct ieee80211_network *network)
7032 {
7033 	int err = 0;
7034 	struct ieee80211_qos_data *qos_data = NULL;
7035 	struct ieee80211_qos_data ibss_data = {
7036 		.supported = 1,
7037 		.active = 1,
7038 	};
7039 
7040 	switch (priv->ieee->iw_mode) {
7041 	case IW_MODE_ADHOC:
7042 		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7043 
7044 		qos_data = &ibss_data;
7045 		break;
7046 
7047 	case IW_MODE_INFRA:
7048 		qos_data = &network->qos_data;
7049 		break;
7050 
7051 	default:
7052 		BUG();
7053 		break;
7054 	}
7055 
7056 	err = ipw_qos_activate(priv, qos_data);
7057 	if (err) {
7058 		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7059 		return err;
7060 	}
7061 
7062 	if (priv->qos_data.qos_enable && qos_data->supported) {
7063 		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7064 		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7065 		return ipw_qos_set_info_element(priv);
7066 	}
7067 
7068 	return 0;
7069 }
7070 
7071 /*
7072 * handling the beaconing responses. if we get different QoS setting
7073 * off the network from the associated setting, adjust the QoS
7074 * setting
7075 */
ipw_qos_association_resp(struct ipw_priv * priv,struct ieee80211_network * network)7076 static int ipw_qos_association_resp(struct ipw_priv *priv,
7077 				    struct ieee80211_network *network)
7078 {
7079 	int ret = 0;
7080 	unsigned long flags;
7081 	u32 size = sizeof(struct ieee80211_qos_parameters);
7082 	int set_qos_param = 0;
7083 
7084 	if ((priv == NULL) || (network == NULL) ||
7085 	    (priv->assoc_network == NULL))
7086 		return ret;
7087 
7088 	if (!(priv->status & STATUS_ASSOCIATED))
7089 		return ret;
7090 
7091 	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7092 		return ret;
7093 
7094 	spin_lock_irqsave(&priv->ieee->lock, flags);
7095 	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7096 		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7097 		       sizeof(struct ieee80211_qos_data));
7098 		priv->assoc_network->qos_data.active = 1;
7099 		if ((network->qos_data.old_param_count !=
7100 		     network->qos_data.param_count)) {
7101 			set_qos_param = 1;
7102 			network->qos_data.old_param_count =
7103 			    network->qos_data.param_count;
7104 		}
7105 
7106 	} else {
7107 		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7108 			memcpy(&priv->assoc_network->qos_data.parameters,
7109 			       &def_parameters_CCK, size);
7110 		else
7111 			memcpy(&priv->assoc_network->qos_data.parameters,
7112 			       &def_parameters_OFDM, size);
7113 		priv->assoc_network->qos_data.active = 0;
7114 		priv->assoc_network->qos_data.supported = 0;
7115 		set_qos_param = 1;
7116 	}
7117 
7118 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7119 
7120 	if (set_qos_param == 1)
7121 		schedule_work(&priv->qos_activate);
7122 
7123 	return ret;
7124 }
7125 
ipw_qos_get_burst_duration(struct ipw_priv * priv)7126 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7127 {
7128 	u32 ret = 0;
7129 
7130 	if ((priv == NULL))
7131 		return 0;
7132 
7133 	if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7134 		ret = priv->qos_data.burst_duration_CCK;
7135 	else
7136 		ret = priv->qos_data.burst_duration_OFDM;
7137 
7138 	return ret;
7139 }
7140 
7141 /*
7142 * Initialize the setting of QoS global
7143 */
ipw_qos_init(struct ipw_priv * priv,int enable,int burst_enable,u32 burst_duration_CCK,u32 burst_duration_OFDM)7144 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7145 			 int burst_enable, u32 burst_duration_CCK,
7146 			 u32 burst_duration_OFDM)
7147 {
7148 	priv->qos_data.qos_enable = enable;
7149 
7150 	if (priv->qos_data.qos_enable) {
7151 		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7152 		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7153 		IPW_DEBUG_QOS("QoS is enabled\n");
7154 	} else {
7155 		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7156 		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7157 		IPW_DEBUG_QOS("QoS is not enabled\n");
7158 	}
7159 
7160 	priv->qos_data.burst_enable = burst_enable;
7161 
7162 	if (burst_enable) {
7163 		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7164 		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7165 	} else {
7166 		priv->qos_data.burst_duration_CCK = 0;
7167 		priv->qos_data.burst_duration_OFDM = 0;
7168 	}
7169 }
7170 
7171 /*
7172 * map the packet priority to the right TX Queue
7173 */
ipw_get_tx_queue_number(struct ipw_priv * priv,u16 priority)7174 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7175 {
7176 	if (priority > 7 || !priv->qos_data.qos_enable)
7177 		priority = 0;
7178 
7179 	return from_priority_to_tx_queue[priority] - 1;
7180 }
7181 
ipw_is_qos_active(struct net_device * dev,struct sk_buff * skb)7182 static int ipw_is_qos_active(struct net_device *dev,
7183 			     struct sk_buff *skb)
7184 {
7185 	struct ipw_priv *priv = ieee80211_priv(dev);
7186 	struct ieee80211_qos_data *qos_data = NULL;
7187 	int active, supported;
7188 	u8 *daddr = skb->data + ETH_ALEN;
7189 	int unicast = !is_multicast_ether_addr(daddr);
7190 
7191 	if (!(priv->status & STATUS_ASSOCIATED))
7192 		return 0;
7193 
7194 	qos_data = &priv->assoc_network->qos_data;
7195 
7196 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7197 		if (unicast == 0)
7198 			qos_data->active = 0;
7199 		else
7200 			qos_data->active = qos_data->supported;
7201 	}
7202 	active = qos_data->active;
7203 	supported = qos_data->supported;
7204 	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7205 		      "unicast %d\n",
7206 		      priv->qos_data.qos_enable, active, supported, unicast);
7207 	if (active && priv->qos_data.qos_enable)
7208 		return 1;
7209 
7210 	return 0;
7211 
7212 }
7213 /*
7214 * add QoS parameter to the TX command
7215 */
ipw_qos_set_tx_queue_command(struct ipw_priv * priv,u16 priority,struct tfd_data * tfd)7216 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7217 					u16 priority,
7218 					struct tfd_data *tfd)
7219 {
7220 	int tx_queue_id = 0;
7221 
7222 
7223 	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7224 	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7225 
7226 	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7227 		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7228 		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7229 	}
7230 	return 0;
7231 }
7232 
7233 /*
7234 * background support to run QoS activate functionality
7235 */
ipw_bg_qos_activate(struct work_struct * work)7236 static void ipw_bg_qos_activate(struct work_struct *work)
7237 {
7238 	struct ipw_priv *priv =
7239 		container_of(work, struct ipw_priv, qos_activate);
7240 
7241 	if (priv == NULL)
7242 		return;
7243 
7244 	mutex_lock(&priv->mutex);
7245 
7246 	if (priv->status & STATUS_ASSOCIATED)
7247 		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7248 
7249 	mutex_unlock(&priv->mutex);
7250 }
7251 
ipw_handle_probe_response(struct net_device * dev,struct ieee80211_probe_response * resp,struct ieee80211_network * network)7252 static int ipw_handle_probe_response(struct net_device *dev,
7253 				     struct ieee80211_probe_response *resp,
7254 				     struct ieee80211_network *network)
7255 {
7256 	struct ipw_priv *priv = ieee80211_priv(dev);
7257 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7258 			      (network == priv->assoc_network));
7259 
7260 	ipw_qos_handle_probe_response(priv, active_network, network);
7261 
7262 	return 0;
7263 }
7264 
ipw_handle_beacon(struct net_device * dev,struct ieee80211_beacon * resp,struct ieee80211_network * network)7265 static int ipw_handle_beacon(struct net_device *dev,
7266 			     struct ieee80211_beacon *resp,
7267 			     struct ieee80211_network *network)
7268 {
7269 	struct ipw_priv *priv = ieee80211_priv(dev);
7270 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7271 			      (network == priv->assoc_network));
7272 
7273 	ipw_qos_handle_probe_response(priv, active_network, network);
7274 
7275 	return 0;
7276 }
7277 
ipw_handle_assoc_response(struct net_device * dev,struct ieee80211_assoc_response * resp,struct ieee80211_network * network)7278 static int ipw_handle_assoc_response(struct net_device *dev,
7279 				     struct ieee80211_assoc_response *resp,
7280 				     struct ieee80211_network *network)
7281 {
7282 	struct ipw_priv *priv = ieee80211_priv(dev);
7283 	ipw_qos_association_resp(priv, network);
7284 	return 0;
7285 }
7286 
ipw_send_qos_params_command(struct ipw_priv * priv,struct ieee80211_qos_parameters * qos_param)7287 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7288 				       *qos_param)
7289 {
7290 	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7291 				sizeof(*qos_param) * 3, qos_param);
7292 }
7293 
ipw_send_qos_info_command(struct ipw_priv * priv,struct ieee80211_qos_information_element * qos_param)7294 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7295 				     *qos_param)
7296 {
7297 	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7298 				qos_param);
7299 }
7300 
7301 #endif				/* CONFIG_IPW2200_QOS */
7302 
ipw_associate_network(struct ipw_priv * priv,struct ieee80211_network * network,struct ipw_supported_rates * rates,int roaming)7303 static int ipw_associate_network(struct ipw_priv *priv,
7304 				 struct ieee80211_network *network,
7305 				 struct ipw_supported_rates *rates, int roaming)
7306 {
7307 	int err;
7308 	DECLARE_SSID_BUF(ssid);
7309 
7310 	if (priv->config & CFG_FIXED_RATE)
7311 		ipw_set_fixed_rate(priv, network->mode);
7312 
7313 	if (!(priv->config & CFG_STATIC_ESSID)) {
7314 		priv->essid_len = min(network->ssid_len,
7315 				      (u8) IW_ESSID_MAX_SIZE);
7316 		memcpy(priv->essid, network->ssid, priv->essid_len);
7317 	}
7318 
7319 	network->last_associate = jiffies;
7320 
7321 	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7322 	priv->assoc_request.channel = network->channel;
7323 	priv->assoc_request.auth_key = 0;
7324 
7325 	if ((priv->capability & CAP_PRIVACY_ON) &&
7326 	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7327 		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7328 		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7329 
7330 		if (priv->ieee->sec.level == SEC_LEVEL_1)
7331 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7332 
7333 	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7334 		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7335 		priv->assoc_request.auth_type = AUTH_LEAP;
7336 	else
7337 		priv->assoc_request.auth_type = AUTH_OPEN;
7338 
7339 	if (priv->ieee->wpa_ie_len) {
7340 		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7341 		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7342 				 priv->ieee->wpa_ie_len);
7343 	}
7344 
7345 	/*
7346 	 * It is valid for our ieee device to support multiple modes, but
7347 	 * when it comes to associating to a given network we have to choose
7348 	 * just one mode.
7349 	 */
7350 	if (network->mode & priv->ieee->mode & IEEE_A)
7351 		priv->assoc_request.ieee_mode = IPW_A_MODE;
7352 	else if (network->mode & priv->ieee->mode & IEEE_G)
7353 		priv->assoc_request.ieee_mode = IPW_G_MODE;
7354 	else if (network->mode & priv->ieee->mode & IEEE_B)
7355 		priv->assoc_request.ieee_mode = IPW_B_MODE;
7356 
7357 	priv->assoc_request.capability = cpu_to_le16(network->capability);
7358 	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7359 	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7360 		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7361 	} else {
7362 		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7363 
7364 		/* Clear the short preamble if we won't be supporting it */
7365 		priv->assoc_request.capability &=
7366 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7367 	}
7368 
7369 	/* Clear capability bits that aren't used in Ad Hoc */
7370 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7371 		priv->assoc_request.capability &=
7372 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7373 
7374 	IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7375 			"802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7376 			roaming ? "Rea" : "A",
7377 			print_ssid(ssid, priv->essid, priv->essid_len),
7378 			network->channel,
7379 			ipw_modes[priv->assoc_request.ieee_mode],
7380 			rates->num_rates,
7381 			(priv->assoc_request.preamble_length ==
7382 			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7383 			network->capability &
7384 			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7385 			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7386 			priv->capability & CAP_PRIVACY_ON ?
7387 			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7388 			 "(open)") : "",
7389 			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7390 			priv->capability & CAP_PRIVACY_ON ?
7391 			'1' + priv->ieee->sec.active_key : '.',
7392 			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7393 
7394 	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7395 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7396 	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7397 		priv->assoc_request.assoc_type = HC_IBSS_START;
7398 		priv->assoc_request.assoc_tsf_msw = 0;
7399 		priv->assoc_request.assoc_tsf_lsw = 0;
7400 	} else {
7401 		if (unlikely(roaming))
7402 			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7403 		else
7404 			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7405 		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7406 		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7407 	}
7408 
7409 	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7410 
7411 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7412 		memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7413 		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7414 	} else {
7415 		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7416 		priv->assoc_request.atim_window = 0;
7417 	}
7418 
7419 	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7420 
7421 	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7422 	if (err) {
7423 		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7424 		return err;
7425 	}
7426 
7427 	rates->ieee_mode = priv->assoc_request.ieee_mode;
7428 	rates->purpose = IPW_RATE_CONNECT;
7429 	ipw_send_supported_rates(priv, rates);
7430 
7431 	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7432 		priv->sys_config.dot11g_auto_detection = 1;
7433 	else
7434 		priv->sys_config.dot11g_auto_detection = 0;
7435 
7436 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7437 		priv->sys_config.answer_broadcast_ssid_probe = 1;
7438 	else
7439 		priv->sys_config.answer_broadcast_ssid_probe = 0;
7440 
7441 	err = ipw_send_system_config(priv);
7442 	if (err) {
7443 		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7444 		return err;
7445 	}
7446 
7447 	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7448 	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7449 	if (err) {
7450 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7451 		return err;
7452 	}
7453 
7454 	/*
7455 	 * If preemption is enabled, it is possible for the association
7456 	 * to complete before we return from ipw_send_associate.  Therefore
7457 	 * we have to be sure and update our priviate data first.
7458 	 */
7459 	priv->channel = network->channel;
7460 	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7461 	priv->status |= STATUS_ASSOCIATING;
7462 	priv->status &= ~STATUS_SECURITY_UPDATED;
7463 
7464 	priv->assoc_network = network;
7465 
7466 #ifdef CONFIG_IPW2200_QOS
7467 	ipw_qos_association(priv, network);
7468 #endif
7469 
7470 	err = ipw_send_associate(priv, &priv->assoc_request);
7471 	if (err) {
7472 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7473 		return err;
7474 	}
7475 
7476 	IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
7477 		  print_ssid(ssid, priv->essid, priv->essid_len),
7478 		  priv->bssid);
7479 
7480 	return 0;
7481 }
7482 
ipw_roam(void * data)7483 static void ipw_roam(void *data)
7484 {
7485 	struct ipw_priv *priv = data;
7486 	struct ieee80211_network *network = NULL;
7487 	struct ipw_network_match match = {
7488 		.network = priv->assoc_network
7489 	};
7490 
7491 	/* The roaming process is as follows:
7492 	 *
7493 	 * 1.  Missed beacon threshold triggers the roaming process by
7494 	 *     setting the status ROAM bit and requesting a scan.
7495 	 * 2.  When the scan completes, it schedules the ROAM work
7496 	 * 3.  The ROAM work looks at all of the known networks for one that
7497 	 *     is a better network than the currently associated.  If none
7498 	 *     found, the ROAM process is over (ROAM bit cleared)
7499 	 * 4.  If a better network is found, a disassociation request is
7500 	 *     sent.
7501 	 * 5.  When the disassociation completes, the roam work is again
7502 	 *     scheduled.  The second time through, the driver is no longer
7503 	 *     associated, and the newly selected network is sent an
7504 	 *     association request.
7505 	 * 6.  At this point ,the roaming process is complete and the ROAM
7506 	 *     status bit is cleared.
7507 	 */
7508 
7509 	/* If we are no longer associated, and the roaming bit is no longer
7510 	 * set, then we are not actively roaming, so just return */
7511 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7512 		return;
7513 
7514 	if (priv->status & STATUS_ASSOCIATED) {
7515 		/* First pass through ROAM process -- look for a better
7516 		 * network */
7517 		unsigned long flags;
7518 		u8 rssi = priv->assoc_network->stats.rssi;
7519 		priv->assoc_network->stats.rssi = -128;
7520 		spin_lock_irqsave(&priv->ieee->lock, flags);
7521 		list_for_each_entry(network, &priv->ieee->network_list, list) {
7522 			if (network != priv->assoc_network)
7523 				ipw_best_network(priv, &match, network, 1);
7524 		}
7525 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7526 		priv->assoc_network->stats.rssi = rssi;
7527 
7528 		if (match.network == priv->assoc_network) {
7529 			IPW_DEBUG_ASSOC("No better APs in this network to "
7530 					"roam to.\n");
7531 			priv->status &= ~STATUS_ROAMING;
7532 			ipw_debug_config(priv);
7533 			return;
7534 		}
7535 
7536 		ipw_send_disassociate(priv, 1);
7537 		priv->assoc_network = match.network;
7538 
7539 		return;
7540 	}
7541 
7542 	/* Second pass through ROAM process -- request association */
7543 	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7544 	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7545 	priv->status &= ~STATUS_ROAMING;
7546 }
7547 
ipw_bg_roam(struct work_struct * work)7548 static void ipw_bg_roam(struct work_struct *work)
7549 {
7550 	struct ipw_priv *priv =
7551 		container_of(work, struct ipw_priv, roam);
7552 	mutex_lock(&priv->mutex);
7553 	ipw_roam(priv);
7554 	mutex_unlock(&priv->mutex);
7555 }
7556 
ipw_associate(void * data)7557 static int ipw_associate(void *data)
7558 {
7559 	struct ipw_priv *priv = data;
7560 
7561 	struct ieee80211_network *network = NULL;
7562 	struct ipw_network_match match = {
7563 		.network = NULL
7564 	};
7565 	struct ipw_supported_rates *rates;
7566 	struct list_head *element;
7567 	unsigned long flags;
7568 	DECLARE_SSID_BUF(ssid);
7569 
7570 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7571 		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7572 		return 0;
7573 	}
7574 
7575 	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7576 		IPW_DEBUG_ASSOC("Not attempting association (already in "
7577 				"progress)\n");
7578 		return 0;
7579 	}
7580 
7581 	if (priv->status & STATUS_DISASSOCIATING) {
7582 		IPW_DEBUG_ASSOC("Not attempting association (in "
7583 				"disassociating)\n ");
7584 		queue_work(priv->workqueue, &priv->associate);
7585 		return 0;
7586 	}
7587 
7588 	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7589 		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7590 				"initialized)\n");
7591 		return 0;
7592 	}
7593 
7594 	if (!(priv->config & CFG_ASSOCIATE) &&
7595 	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7596 		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7597 		return 0;
7598 	}
7599 
7600 	/* Protect our use of the network_list */
7601 	spin_lock_irqsave(&priv->ieee->lock, flags);
7602 	list_for_each_entry(network, &priv->ieee->network_list, list)
7603 	    ipw_best_network(priv, &match, network, 0);
7604 
7605 	network = match.network;
7606 	rates = &match.rates;
7607 
7608 	if (network == NULL &&
7609 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7610 	    priv->config & CFG_ADHOC_CREATE &&
7611 	    priv->config & CFG_STATIC_ESSID &&
7612 	    priv->config & CFG_STATIC_CHANNEL) {
7613 		/* Use oldest network if the free list is empty */
7614 		if (list_empty(&priv->ieee->network_free_list)) {
7615 			struct ieee80211_network *oldest = NULL;
7616 			struct ieee80211_network *target;
7617 
7618 			list_for_each_entry(target, &priv->ieee->network_list, list) {
7619 				if ((oldest == NULL) ||
7620 				    (target->last_scanned < oldest->last_scanned))
7621 					oldest = target;
7622 			}
7623 
7624 			/* If there are no more slots, expire the oldest */
7625 			list_del(&oldest->list);
7626 			target = oldest;
7627 			IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7628 					"network list.\n",
7629 					print_ssid(ssid, target->ssid,
7630 						   target->ssid_len),
7631 					target->bssid);
7632 			list_add_tail(&target->list,
7633 				      &priv->ieee->network_free_list);
7634 		}
7635 
7636 		element = priv->ieee->network_free_list.next;
7637 		network = list_entry(element, struct ieee80211_network, list);
7638 		ipw_adhoc_create(priv, network);
7639 		rates = &priv->rates;
7640 		list_del(element);
7641 		list_add_tail(&network->list, &priv->ieee->network_list);
7642 	}
7643 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7644 
7645 	/* If we reached the end of the list, then we don't have any valid
7646 	 * matching APs */
7647 	if (!network) {
7648 		ipw_debug_config(priv);
7649 
7650 		if (!(priv->status & STATUS_SCANNING)) {
7651 			if (!(priv->config & CFG_SPEED_SCAN))
7652 				queue_delayed_work(priv->workqueue,
7653 						   &priv->request_scan,
7654 						   SCAN_INTERVAL);
7655 			else
7656 				queue_delayed_work(priv->workqueue,
7657 						   &priv->request_scan, 0);
7658 		}
7659 
7660 		return 0;
7661 	}
7662 
7663 	ipw_associate_network(priv, network, rates, 0);
7664 
7665 	return 1;
7666 }
7667 
ipw_bg_associate(struct work_struct * work)7668 static void ipw_bg_associate(struct work_struct *work)
7669 {
7670 	struct ipw_priv *priv =
7671 		container_of(work, struct ipw_priv, associate);
7672 	mutex_lock(&priv->mutex);
7673 	ipw_associate(priv);
7674 	mutex_unlock(&priv->mutex);
7675 }
7676 
ipw_rebuild_decrypted_skb(struct ipw_priv * priv,struct sk_buff * skb)7677 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7678 				      struct sk_buff *skb)
7679 {
7680 	struct ieee80211_hdr *hdr;
7681 	u16 fc;
7682 
7683 	hdr = (struct ieee80211_hdr *)skb->data;
7684 	fc = le16_to_cpu(hdr->frame_control);
7685 	if (!(fc & IEEE80211_FCTL_PROTECTED))
7686 		return;
7687 
7688 	fc &= ~IEEE80211_FCTL_PROTECTED;
7689 	hdr->frame_control = cpu_to_le16(fc);
7690 	switch (priv->ieee->sec.level) {
7691 	case SEC_LEVEL_3:
7692 		/* Remove CCMP HDR */
7693 		memmove(skb->data + IEEE80211_3ADDR_LEN,
7694 			skb->data + IEEE80211_3ADDR_LEN + 8,
7695 			skb->len - IEEE80211_3ADDR_LEN - 8);
7696 		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7697 		break;
7698 	case SEC_LEVEL_2:
7699 		break;
7700 	case SEC_LEVEL_1:
7701 		/* Remove IV */
7702 		memmove(skb->data + IEEE80211_3ADDR_LEN,
7703 			skb->data + IEEE80211_3ADDR_LEN + 4,
7704 			skb->len - IEEE80211_3ADDR_LEN - 4);
7705 		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7706 		break;
7707 	case SEC_LEVEL_0:
7708 		break;
7709 	default:
7710 		printk(KERN_ERR "Unknow security level %d\n",
7711 		       priv->ieee->sec.level);
7712 		break;
7713 	}
7714 }
7715 
ipw_handle_data_packet(struct ipw_priv * priv,struct ipw_rx_mem_buffer * rxb,struct ieee80211_rx_stats * stats)7716 static void ipw_handle_data_packet(struct ipw_priv *priv,
7717 				   struct ipw_rx_mem_buffer *rxb,
7718 				   struct ieee80211_rx_stats *stats)
7719 {
7720 	struct ieee80211_hdr_4addr *hdr;
7721 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7722 
7723 	/* We received data from the HW, so stop the watchdog */
7724 	priv->net_dev->trans_start = jiffies;
7725 
7726 	/* We only process data packets if the
7727 	 * interface is open */
7728 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7729 		     skb_tailroom(rxb->skb))) {
7730 		priv->ieee->stats.rx_errors++;
7731 		priv->wstats.discard.misc++;
7732 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7733 		return;
7734 	} else if (unlikely(!netif_running(priv->net_dev))) {
7735 		priv->ieee->stats.rx_dropped++;
7736 		priv->wstats.discard.misc++;
7737 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7738 		return;
7739 	}
7740 
7741 	/* Advance skb->data to the start of the actual payload */
7742 	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7743 
7744 	/* Set the size of the skb to the size of the frame */
7745 	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7746 
7747 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7748 
7749 	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7750 	hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7751 	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7752 	    (is_multicast_ether_addr(hdr->addr1) ?
7753 	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7754 		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7755 
7756 	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7757 		priv->ieee->stats.rx_errors++;
7758 	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
7759 		rxb->skb = NULL;
7760 		__ipw_led_activity_on(priv);
7761 	}
7762 }
7763 
7764 #ifdef CONFIG_IPW2200_RADIOTAP
ipw_handle_data_packet_monitor(struct ipw_priv * priv,struct ipw_rx_mem_buffer * rxb,struct ieee80211_rx_stats * stats)7765 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7766 					   struct ipw_rx_mem_buffer *rxb,
7767 					   struct ieee80211_rx_stats *stats)
7768 {
7769 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7770 	struct ipw_rx_frame *frame = &pkt->u.frame;
7771 
7772 	/* initial pull of some data */
7773 	u16 received_channel = frame->received_channel;
7774 	u8 antennaAndPhy = frame->antennaAndPhy;
7775 	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7776 	u16 pktrate = frame->rate;
7777 
7778 	/* Magic struct that slots into the radiotap header -- no reason
7779 	 * to build this manually element by element, we can write it much
7780 	 * more efficiently than we can parse it. ORDER MATTERS HERE */
7781 	struct ipw_rt_hdr *ipw_rt;
7782 
7783 	short len = le16_to_cpu(pkt->u.frame.length);
7784 
7785 	/* We received data from the HW, so stop the watchdog */
7786 	priv->net_dev->trans_start = jiffies;
7787 
7788 	/* We only process data packets if the
7789 	 * interface is open */
7790 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7791 		     skb_tailroom(rxb->skb))) {
7792 		priv->ieee->stats.rx_errors++;
7793 		priv->wstats.discard.misc++;
7794 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7795 		return;
7796 	} else if (unlikely(!netif_running(priv->net_dev))) {
7797 		priv->ieee->stats.rx_dropped++;
7798 		priv->wstats.discard.misc++;
7799 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7800 		return;
7801 	}
7802 
7803 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7804 	 * that now */
7805 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7806 		/* FIXME: Should alloc bigger skb instead */
7807 		priv->ieee->stats.rx_dropped++;
7808 		priv->wstats.discard.misc++;
7809 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7810 		return;
7811 	}
7812 
7813 	/* copy the frame itself */
7814 	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7815 		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7816 
7817 	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7818 
7819 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7820 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7821 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */
7822 
7823 	/* Big bitfield of all the fields we provide in radiotap */
7824 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7825 	     (1 << IEEE80211_RADIOTAP_TSFT) |
7826 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7827 	     (1 << IEEE80211_RADIOTAP_RATE) |
7828 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7829 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7830 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7831 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7832 
7833 	/* Zero the flags, we'll add to them as we go */
7834 	ipw_rt->rt_flags = 0;
7835 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7836 			       frame->parent_tsf[2] << 16 |
7837 			       frame->parent_tsf[1] << 8  |
7838 			       frame->parent_tsf[0]);
7839 
7840 	/* Convert signal to DBM */
7841 	ipw_rt->rt_dbmsignal = antsignal;
7842 	ipw_rt->rt_dbmnoise = frame->noise;
7843 
7844 	/* Convert the channel data and set the flags */
7845 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7846 	if (received_channel > 14) {	/* 802.11a */
7847 		ipw_rt->rt_chbitmask =
7848 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7849 	} else if (antennaAndPhy & 32) {	/* 802.11b */
7850 		ipw_rt->rt_chbitmask =
7851 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7852 	} else {		/* 802.11g */
7853 		ipw_rt->rt_chbitmask =
7854 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7855 	}
7856 
7857 	/* set the rate in multiples of 500k/s */
7858 	switch (pktrate) {
7859 	case IPW_TX_RATE_1MB:
7860 		ipw_rt->rt_rate = 2;
7861 		break;
7862 	case IPW_TX_RATE_2MB:
7863 		ipw_rt->rt_rate = 4;
7864 		break;
7865 	case IPW_TX_RATE_5MB:
7866 		ipw_rt->rt_rate = 10;
7867 		break;
7868 	case IPW_TX_RATE_6MB:
7869 		ipw_rt->rt_rate = 12;
7870 		break;
7871 	case IPW_TX_RATE_9MB:
7872 		ipw_rt->rt_rate = 18;
7873 		break;
7874 	case IPW_TX_RATE_11MB:
7875 		ipw_rt->rt_rate = 22;
7876 		break;
7877 	case IPW_TX_RATE_12MB:
7878 		ipw_rt->rt_rate = 24;
7879 		break;
7880 	case IPW_TX_RATE_18MB:
7881 		ipw_rt->rt_rate = 36;
7882 		break;
7883 	case IPW_TX_RATE_24MB:
7884 		ipw_rt->rt_rate = 48;
7885 		break;
7886 	case IPW_TX_RATE_36MB:
7887 		ipw_rt->rt_rate = 72;
7888 		break;
7889 	case IPW_TX_RATE_48MB:
7890 		ipw_rt->rt_rate = 96;
7891 		break;
7892 	case IPW_TX_RATE_54MB:
7893 		ipw_rt->rt_rate = 108;
7894 		break;
7895 	default:
7896 		ipw_rt->rt_rate = 0;
7897 		break;
7898 	}
7899 
7900 	/* antenna number */
7901 	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7902 
7903 	/* set the preamble flag if we have it */
7904 	if ((antennaAndPhy & 64))
7905 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7906 
7907 	/* Set the size of the skb to the size of the frame */
7908 	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7909 
7910 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7911 
7912 	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7913 		priv->ieee->stats.rx_errors++;
7914 	else {			/* ieee80211_rx succeeded, so it now owns the SKB */
7915 		rxb->skb = NULL;
7916 		/* no LED during capture */
7917 	}
7918 }
7919 #endif
7920 
7921 #ifdef CONFIG_IPW2200_PROMISCUOUS
7922 #define ieee80211_is_probe_response(fc) \
7923    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7924     (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7925 
7926 #define ieee80211_is_management(fc) \
7927    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7928 
7929 #define ieee80211_is_control(fc) \
7930    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7931 
7932 #define ieee80211_is_data(fc) \
7933    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7934 
7935 #define ieee80211_is_assoc_request(fc) \
7936    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7937 
7938 #define ieee80211_is_reassoc_request(fc) \
7939    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7940 
ipw_handle_promiscuous_rx(struct ipw_priv * priv,struct ipw_rx_mem_buffer * rxb,struct ieee80211_rx_stats * stats)7941 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7942 				      struct ipw_rx_mem_buffer *rxb,
7943 				      struct ieee80211_rx_stats *stats)
7944 {
7945 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7946 	struct ipw_rx_frame *frame = &pkt->u.frame;
7947 	struct ipw_rt_hdr *ipw_rt;
7948 
7949 	/* First cache any information we need before we overwrite
7950 	 * the information provided in the skb from the hardware */
7951 	struct ieee80211_hdr *hdr;
7952 	u16 channel = frame->received_channel;
7953 	u8 phy_flags = frame->antennaAndPhy;
7954 	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7955 	s8 noise = frame->noise;
7956 	u8 rate = frame->rate;
7957 	short len = le16_to_cpu(pkt->u.frame.length);
7958 	struct sk_buff *skb;
7959 	int hdr_only = 0;
7960 	u16 filter = priv->prom_priv->filter;
7961 
7962 	/* If the filter is set to not include Rx frames then return */
7963 	if (filter & IPW_PROM_NO_RX)
7964 		return;
7965 
7966 	/* We received data from the HW, so stop the watchdog */
7967 	priv->prom_net_dev->trans_start = jiffies;
7968 
7969 	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7970 		priv->prom_priv->ieee->stats.rx_errors++;
7971 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7972 		return;
7973 	}
7974 
7975 	/* We only process data packets if the interface is open */
7976 	if (unlikely(!netif_running(priv->prom_net_dev))) {
7977 		priv->prom_priv->ieee->stats.rx_dropped++;
7978 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7979 		return;
7980 	}
7981 
7982 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7983 	 * that now */
7984 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7985 		/* FIXME: Should alloc bigger skb instead */
7986 		priv->prom_priv->ieee->stats.rx_dropped++;
7987 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7988 		return;
7989 	}
7990 
7991 	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7992 	if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
7993 		if (filter & IPW_PROM_NO_MGMT)
7994 			return;
7995 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7996 			hdr_only = 1;
7997 	} else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
7998 		if (filter & IPW_PROM_NO_CTL)
7999 			return;
8000 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
8001 			hdr_only = 1;
8002 	} else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
8003 		if (filter & IPW_PROM_NO_DATA)
8004 			return;
8005 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
8006 			hdr_only = 1;
8007 	}
8008 
8009 	/* Copy the SKB since this is for the promiscuous side */
8010 	skb = skb_copy(rxb->skb, GFP_ATOMIC);
8011 	if (skb == NULL) {
8012 		IPW_ERROR("skb_clone failed for promiscuous copy.\n");
8013 		return;
8014 	}
8015 
8016 	/* copy the frame data to write after where the radiotap header goes */
8017 	ipw_rt = (void *)skb->data;
8018 
8019 	if (hdr_only)
8020 		len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
8021 
8022 	memcpy(ipw_rt->payload, hdr, len);
8023 
8024 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8025 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
8026 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* total header+data */
8027 
8028 	/* Set the size of the skb to the size of the frame */
8029 	skb_put(skb, sizeof(*ipw_rt) + len);
8030 
8031 	/* Big bitfield of all the fields we provide in radiotap */
8032 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
8033 	     (1 << IEEE80211_RADIOTAP_TSFT) |
8034 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
8035 	     (1 << IEEE80211_RADIOTAP_RATE) |
8036 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
8037 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8038 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8039 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
8040 
8041 	/* Zero the flags, we'll add to them as we go */
8042 	ipw_rt->rt_flags = 0;
8043 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8044 			       frame->parent_tsf[2] << 16 |
8045 			       frame->parent_tsf[1] << 8  |
8046 			       frame->parent_tsf[0]);
8047 
8048 	/* Convert to DBM */
8049 	ipw_rt->rt_dbmsignal = signal;
8050 	ipw_rt->rt_dbmnoise = noise;
8051 
8052 	/* Convert the channel data and set the flags */
8053 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8054 	if (channel > 14) {	/* 802.11a */
8055 		ipw_rt->rt_chbitmask =
8056 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8057 	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
8058 		ipw_rt->rt_chbitmask =
8059 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8060 	} else {		/* 802.11g */
8061 		ipw_rt->rt_chbitmask =
8062 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8063 	}
8064 
8065 	/* set the rate in multiples of 500k/s */
8066 	switch (rate) {
8067 	case IPW_TX_RATE_1MB:
8068 		ipw_rt->rt_rate = 2;
8069 		break;
8070 	case IPW_TX_RATE_2MB:
8071 		ipw_rt->rt_rate = 4;
8072 		break;
8073 	case IPW_TX_RATE_5MB:
8074 		ipw_rt->rt_rate = 10;
8075 		break;
8076 	case IPW_TX_RATE_6MB:
8077 		ipw_rt->rt_rate = 12;
8078 		break;
8079 	case IPW_TX_RATE_9MB:
8080 		ipw_rt->rt_rate = 18;
8081 		break;
8082 	case IPW_TX_RATE_11MB:
8083 		ipw_rt->rt_rate = 22;
8084 		break;
8085 	case IPW_TX_RATE_12MB:
8086 		ipw_rt->rt_rate = 24;
8087 		break;
8088 	case IPW_TX_RATE_18MB:
8089 		ipw_rt->rt_rate = 36;
8090 		break;
8091 	case IPW_TX_RATE_24MB:
8092 		ipw_rt->rt_rate = 48;
8093 		break;
8094 	case IPW_TX_RATE_36MB:
8095 		ipw_rt->rt_rate = 72;
8096 		break;
8097 	case IPW_TX_RATE_48MB:
8098 		ipw_rt->rt_rate = 96;
8099 		break;
8100 	case IPW_TX_RATE_54MB:
8101 		ipw_rt->rt_rate = 108;
8102 		break;
8103 	default:
8104 		ipw_rt->rt_rate = 0;
8105 		break;
8106 	}
8107 
8108 	/* antenna number */
8109 	ipw_rt->rt_antenna = (phy_flags & 3);
8110 
8111 	/* set the preamble flag if we have it */
8112 	if (phy_flags & (1 << 6))
8113 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8114 
8115 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8116 
8117 	if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8118 		priv->prom_priv->ieee->stats.rx_errors++;
8119 		dev_kfree_skb_any(skb);
8120 	}
8121 }
8122 #endif
8123 
is_network_packet(struct ipw_priv * priv,struct ieee80211_hdr_4addr * header)8124 static int is_network_packet(struct ipw_priv *priv,
8125 				    struct ieee80211_hdr_4addr *header)
8126 {
8127 	/* Filter incoming packets to determine if they are targetted toward
8128 	 * this network, discarding packets coming from ourselves */
8129 	switch (priv->ieee->iw_mode) {
8130 	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8131 		/* packets from our adapter are dropped (echo) */
8132 		if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8133 			return 0;
8134 
8135 		/* {broad,multi}cast packets to our BSSID go through */
8136 		if (is_multicast_ether_addr(header->addr1))
8137 			return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8138 
8139 		/* packets to our adapter go through */
8140 		return !memcmp(header->addr1, priv->net_dev->dev_addr,
8141 			       ETH_ALEN);
8142 
8143 	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8144 		/* packets from our adapter are dropped (echo) */
8145 		if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8146 			return 0;
8147 
8148 		/* {broad,multi}cast packets to our BSS go through */
8149 		if (is_multicast_ether_addr(header->addr1))
8150 			return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8151 
8152 		/* packets to our adapter go through */
8153 		return !memcmp(header->addr1, priv->net_dev->dev_addr,
8154 			       ETH_ALEN);
8155 	}
8156 
8157 	return 1;
8158 }
8159 
8160 #define IPW_PACKET_RETRY_TIME HZ
8161 
is_duplicate_packet(struct ipw_priv * priv,struct ieee80211_hdr_4addr * header)8162 static  int is_duplicate_packet(struct ipw_priv *priv,
8163 				      struct ieee80211_hdr_4addr *header)
8164 {
8165 	u16 sc = le16_to_cpu(header->seq_ctl);
8166 	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8167 	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8168 	u16 *last_seq, *last_frag;
8169 	unsigned long *last_time;
8170 
8171 	switch (priv->ieee->iw_mode) {
8172 	case IW_MODE_ADHOC:
8173 		{
8174 			struct list_head *p;
8175 			struct ipw_ibss_seq *entry = NULL;
8176 			u8 *mac = header->addr2;
8177 			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8178 
8179 			__list_for_each(p, &priv->ibss_mac_hash[index]) {
8180 				entry =
8181 				    list_entry(p, struct ipw_ibss_seq, list);
8182 				if (!memcmp(entry->mac, mac, ETH_ALEN))
8183 					break;
8184 			}
8185 			if (p == &priv->ibss_mac_hash[index]) {
8186 				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8187 				if (!entry) {
8188 					IPW_ERROR
8189 					    ("Cannot malloc new mac entry\n");
8190 					return 0;
8191 				}
8192 				memcpy(entry->mac, mac, ETH_ALEN);
8193 				entry->seq_num = seq;
8194 				entry->frag_num = frag;
8195 				entry->packet_time = jiffies;
8196 				list_add(&entry->list,
8197 					 &priv->ibss_mac_hash[index]);
8198 				return 0;
8199 			}
8200 			last_seq = &entry->seq_num;
8201 			last_frag = &entry->frag_num;
8202 			last_time = &entry->packet_time;
8203 			break;
8204 		}
8205 	case IW_MODE_INFRA:
8206 		last_seq = &priv->last_seq_num;
8207 		last_frag = &priv->last_frag_num;
8208 		last_time = &priv->last_packet_time;
8209 		break;
8210 	default:
8211 		return 0;
8212 	}
8213 	if ((*last_seq == seq) &&
8214 	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8215 		if (*last_frag == frag)
8216 			goto drop;
8217 		if (*last_frag + 1 != frag)
8218 			/* out-of-order fragment */
8219 			goto drop;
8220 	} else
8221 		*last_seq = seq;
8222 
8223 	*last_frag = frag;
8224 	*last_time = jiffies;
8225 	return 0;
8226 
8227       drop:
8228 	/* Comment this line now since we observed the card receives
8229 	 * duplicate packets but the FCTL_RETRY bit is not set in the
8230 	 * IBSS mode with fragmentation enabled.
8231 	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8232 	return 1;
8233 }
8234 
ipw_handle_mgmt_packet(struct ipw_priv * priv,struct ipw_rx_mem_buffer * rxb,struct ieee80211_rx_stats * stats)8235 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8236 				   struct ipw_rx_mem_buffer *rxb,
8237 				   struct ieee80211_rx_stats *stats)
8238 {
8239 	struct sk_buff *skb = rxb->skb;
8240 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8241 	struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8242 	    (skb->data + IPW_RX_FRAME_SIZE);
8243 
8244 	ieee80211_rx_mgt(priv->ieee, header, stats);
8245 
8246 	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8247 	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8248 	      IEEE80211_STYPE_PROBE_RESP) ||
8249 	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8250 	      IEEE80211_STYPE_BEACON))) {
8251 		if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8252 			ipw_add_station(priv, header->addr2);
8253 	}
8254 
8255 	if (priv->config & CFG_NET_STATS) {
8256 		IPW_DEBUG_HC("sending stat packet\n");
8257 
8258 		/* Set the size of the skb to the size of the full
8259 		 * ipw header and 802.11 frame */
8260 		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8261 			IPW_RX_FRAME_SIZE);
8262 
8263 		/* Advance past the ipw packet header to the 802.11 frame */
8264 		skb_pull(skb, IPW_RX_FRAME_SIZE);
8265 
8266 		/* Push the ieee80211_rx_stats before the 802.11 frame */
8267 		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8268 
8269 		skb->dev = priv->ieee->dev;
8270 
8271 		/* Point raw at the ieee80211_stats */
8272 		skb_reset_mac_header(skb);
8273 
8274 		skb->pkt_type = PACKET_OTHERHOST;
8275 		skb->protocol = __constant_htons(ETH_P_80211_STATS);
8276 		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8277 		netif_rx(skb);
8278 		rxb->skb = NULL;
8279 	}
8280 }
8281 
8282 /*
8283  * Main entry function for recieving a packet with 80211 headers.  This
8284  * should be called when ever the FW has notified us that there is a new
8285  * skb in the recieve queue.
8286  */
ipw_rx(struct ipw_priv * priv)8287 static void ipw_rx(struct ipw_priv *priv)
8288 {
8289 	struct ipw_rx_mem_buffer *rxb;
8290 	struct ipw_rx_packet *pkt;
8291 	struct ieee80211_hdr_4addr *header;
8292 	u32 r, w, i;
8293 	u8 network_packet;
8294 	u8 fill_rx = 0;
8295 
8296 	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8297 	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8298 	i = priv->rxq->read;
8299 
8300 	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8301 		fill_rx = 1;
8302 
8303 	while (i != r) {
8304 		rxb = priv->rxq->queue[i];
8305 		if (unlikely(rxb == NULL)) {
8306 			printk(KERN_CRIT "Queue not allocated!\n");
8307 			break;
8308 		}
8309 		priv->rxq->queue[i] = NULL;
8310 
8311 		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8312 					    IPW_RX_BUF_SIZE,
8313 					    PCI_DMA_FROMDEVICE);
8314 
8315 		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8316 		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8317 			     pkt->header.message_type,
8318 			     pkt->header.rx_seq_num, pkt->header.control_bits);
8319 
8320 		switch (pkt->header.message_type) {
8321 		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8322 				struct ieee80211_rx_stats stats = {
8323 					.rssi = pkt->u.frame.rssi_dbm -
8324 					    IPW_RSSI_TO_DBM,
8325 					.signal =
8326 					    le16_to_cpu(pkt->u.frame.rssi_dbm) -
8327 					    IPW_RSSI_TO_DBM + 0x100,
8328 					.noise =
8329 					    le16_to_cpu(pkt->u.frame.noise),
8330 					.rate = pkt->u.frame.rate,
8331 					.mac_time = jiffies,
8332 					.received_channel =
8333 					    pkt->u.frame.received_channel,
8334 					.freq =
8335 					    (pkt->u.frame.
8336 					     control & (1 << 0)) ?
8337 					    IEEE80211_24GHZ_BAND :
8338 					    IEEE80211_52GHZ_BAND,
8339 					.len = le16_to_cpu(pkt->u.frame.length),
8340 				};
8341 
8342 				if (stats.rssi != 0)
8343 					stats.mask |= IEEE80211_STATMASK_RSSI;
8344 				if (stats.signal != 0)
8345 					stats.mask |= IEEE80211_STATMASK_SIGNAL;
8346 				if (stats.noise != 0)
8347 					stats.mask |= IEEE80211_STATMASK_NOISE;
8348 				if (stats.rate != 0)
8349 					stats.mask |= IEEE80211_STATMASK_RATE;
8350 
8351 				priv->rx_packets++;
8352 
8353 #ifdef CONFIG_IPW2200_PROMISCUOUS
8354 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8355 		ipw_handle_promiscuous_rx(priv, rxb, &stats);
8356 #endif
8357 
8358 #ifdef CONFIG_IPW2200_MONITOR
8359 				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8360 #ifdef CONFIG_IPW2200_RADIOTAP
8361 
8362                 ipw_handle_data_packet_monitor(priv,
8363 					       rxb,
8364 					       &stats);
8365 #else
8366 		ipw_handle_data_packet(priv, rxb,
8367 				       &stats);
8368 #endif
8369 					break;
8370 				}
8371 #endif
8372 
8373 				header =
8374 				    (struct ieee80211_hdr_4addr *)(rxb->skb->
8375 								   data +
8376 								   IPW_RX_FRAME_SIZE);
8377 				/* TODO: Check Ad-Hoc dest/source and make sure
8378 				 * that we are actually parsing these packets
8379 				 * correctly -- we should probably use the
8380 				 * frame control of the packet and disregard
8381 				 * the current iw_mode */
8382 
8383 				network_packet =
8384 				    is_network_packet(priv, header);
8385 				if (network_packet && priv->assoc_network) {
8386 					priv->assoc_network->stats.rssi =
8387 					    stats.rssi;
8388 					priv->exp_avg_rssi =
8389 					    exponential_average(priv->exp_avg_rssi,
8390 					    stats.rssi, DEPTH_RSSI);
8391 				}
8392 
8393 				IPW_DEBUG_RX("Frame: len=%u\n",
8394 					     le16_to_cpu(pkt->u.frame.length));
8395 
8396 				if (le16_to_cpu(pkt->u.frame.length) <
8397 				    ieee80211_get_hdrlen(le16_to_cpu(
8398 						    header->frame_ctl))) {
8399 					IPW_DEBUG_DROP
8400 					    ("Received packet is too small. "
8401 					     "Dropping.\n");
8402 					priv->ieee->stats.rx_errors++;
8403 					priv->wstats.discard.misc++;
8404 					break;
8405 				}
8406 
8407 				switch (WLAN_FC_GET_TYPE
8408 					(le16_to_cpu(header->frame_ctl))) {
8409 
8410 				case IEEE80211_FTYPE_MGMT:
8411 					ipw_handle_mgmt_packet(priv, rxb,
8412 							       &stats);
8413 					break;
8414 
8415 				case IEEE80211_FTYPE_CTL:
8416 					break;
8417 
8418 				case IEEE80211_FTYPE_DATA:
8419 					if (unlikely(!network_packet ||
8420 						     is_duplicate_packet(priv,
8421 									 header)))
8422 					{
8423 						IPW_DEBUG_DROP("Dropping: "
8424 							       "%pM, "
8425 							       "%pM, "
8426 							       "%pM\n",
8427 							       header->addr1,
8428 							       header->addr2,
8429 							       header->addr3);
8430 						break;
8431 					}
8432 
8433 					ipw_handle_data_packet(priv, rxb,
8434 							       &stats);
8435 
8436 					break;
8437 				}
8438 				break;
8439 			}
8440 
8441 		case RX_HOST_NOTIFICATION_TYPE:{
8442 				IPW_DEBUG_RX
8443 				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8444 				     pkt->u.notification.subtype,
8445 				     pkt->u.notification.flags,
8446 				     le16_to_cpu(pkt->u.notification.size));
8447 				ipw_rx_notification(priv, &pkt->u.notification);
8448 				break;
8449 			}
8450 
8451 		default:
8452 			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8453 				     pkt->header.message_type);
8454 			break;
8455 		}
8456 
8457 		/* For now we just don't re-use anything.  We can tweak this
8458 		 * later to try and re-use notification packets and SKBs that
8459 		 * fail to Rx correctly */
8460 		if (rxb->skb != NULL) {
8461 			dev_kfree_skb_any(rxb->skb);
8462 			rxb->skb = NULL;
8463 		}
8464 
8465 		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8466 				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8467 		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8468 
8469 		i = (i + 1) % RX_QUEUE_SIZE;
8470 
8471 		/* If there are a lot of unsued frames, restock the Rx queue
8472 		 * so the ucode won't assert */
8473 		if (fill_rx) {
8474 			priv->rxq->read = i;
8475 			ipw_rx_queue_replenish(priv);
8476 		}
8477 	}
8478 
8479 	/* Backtrack one entry */
8480 	priv->rxq->read = i;
8481 	ipw_rx_queue_restock(priv);
8482 }
8483 
8484 #define DEFAULT_RTS_THRESHOLD     2304U
8485 #define MIN_RTS_THRESHOLD         1U
8486 #define MAX_RTS_THRESHOLD         2304U
8487 #define DEFAULT_BEACON_INTERVAL   100U
8488 #define	DEFAULT_SHORT_RETRY_LIMIT 7U
8489 #define	DEFAULT_LONG_RETRY_LIMIT  4U
8490 
8491 /**
8492  * ipw_sw_reset
8493  * @option: options to control different reset behaviour
8494  * 	    0 = reset everything except the 'disable' module_param
8495  * 	    1 = reset everything and print out driver info (for probe only)
8496  * 	    2 = reset everything
8497  */
ipw_sw_reset(struct ipw_priv * priv,int option)8498 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8499 {
8500 	int band, modulation;
8501 	int old_mode = priv->ieee->iw_mode;
8502 
8503 	/* Initialize module parameter values here */
8504 	priv->config = 0;
8505 
8506 	/* We default to disabling the LED code as right now it causes
8507 	 * too many systems to lock up... */
8508 	if (!led)
8509 		priv->config |= CFG_NO_LED;
8510 
8511 	if (associate)
8512 		priv->config |= CFG_ASSOCIATE;
8513 	else
8514 		IPW_DEBUG_INFO("Auto associate disabled.\n");
8515 
8516 	if (auto_create)
8517 		priv->config |= CFG_ADHOC_CREATE;
8518 	else
8519 		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8520 
8521 	priv->config &= ~CFG_STATIC_ESSID;
8522 	priv->essid_len = 0;
8523 	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8524 
8525 	if (disable && option) {
8526 		priv->status |= STATUS_RF_KILL_SW;
8527 		IPW_DEBUG_INFO("Radio disabled.\n");
8528 	}
8529 
8530 	if (channel != 0) {
8531 		priv->config |= CFG_STATIC_CHANNEL;
8532 		priv->channel = channel;
8533 		IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8534 		/* TODO: Validate that provided channel is in range */
8535 	}
8536 #ifdef CONFIG_IPW2200_QOS
8537 	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8538 		     burst_duration_CCK, burst_duration_OFDM);
8539 #endif				/* CONFIG_IPW2200_QOS */
8540 
8541 	switch (mode) {
8542 	case 1:
8543 		priv->ieee->iw_mode = IW_MODE_ADHOC;
8544 		priv->net_dev->type = ARPHRD_ETHER;
8545 
8546 		break;
8547 #ifdef CONFIG_IPW2200_MONITOR
8548 	case 2:
8549 		priv->ieee->iw_mode = IW_MODE_MONITOR;
8550 #ifdef CONFIG_IPW2200_RADIOTAP
8551 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8552 #else
8553 		priv->net_dev->type = ARPHRD_IEEE80211;
8554 #endif
8555 		break;
8556 #endif
8557 	default:
8558 	case 0:
8559 		priv->net_dev->type = ARPHRD_ETHER;
8560 		priv->ieee->iw_mode = IW_MODE_INFRA;
8561 		break;
8562 	}
8563 
8564 	if (hwcrypto) {
8565 		priv->ieee->host_encrypt = 0;
8566 		priv->ieee->host_encrypt_msdu = 0;
8567 		priv->ieee->host_decrypt = 0;
8568 		priv->ieee->host_mc_decrypt = 0;
8569 	}
8570 	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8571 
8572 	/* IPW2200/2915 is abled to do hardware fragmentation. */
8573 	priv->ieee->host_open_frag = 0;
8574 
8575 	if ((priv->pci_dev->device == 0x4223) ||
8576 	    (priv->pci_dev->device == 0x4224)) {
8577 		if (option == 1)
8578 			printk(KERN_INFO DRV_NAME
8579 			       ": Detected Intel PRO/Wireless 2915ABG Network "
8580 			       "Connection\n");
8581 		priv->ieee->abg_true = 1;
8582 		band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8583 		modulation = IEEE80211_OFDM_MODULATION |
8584 		    IEEE80211_CCK_MODULATION;
8585 		priv->adapter = IPW_2915ABG;
8586 		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8587 	} else {
8588 		if (option == 1)
8589 			printk(KERN_INFO DRV_NAME
8590 			       ": Detected Intel PRO/Wireless 2200BG Network "
8591 			       "Connection\n");
8592 
8593 		priv->ieee->abg_true = 0;
8594 		band = IEEE80211_24GHZ_BAND;
8595 		modulation = IEEE80211_OFDM_MODULATION |
8596 		    IEEE80211_CCK_MODULATION;
8597 		priv->adapter = IPW_2200BG;
8598 		priv->ieee->mode = IEEE_G | IEEE_B;
8599 	}
8600 
8601 	priv->ieee->freq_band = band;
8602 	priv->ieee->modulation = modulation;
8603 
8604 	priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8605 
8606 	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8607 	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8608 
8609 	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8610 	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8611 	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8612 
8613 	/* If power management is turned on, default to AC mode */
8614 	priv->power_mode = IPW_POWER_AC;
8615 	priv->tx_power = IPW_TX_POWER_DEFAULT;
8616 
8617 	return old_mode == priv->ieee->iw_mode;
8618 }
8619 
8620 /*
8621  * This file defines the Wireless Extension handlers.  It does not
8622  * define any methods of hardware manipulation and relies on the
8623  * functions defined in ipw_main to provide the HW interaction.
8624  *
8625  * The exception to this is the use of the ipw_get_ordinal()
8626  * function used to poll the hardware vs. making unecessary calls.
8627  *
8628  */
8629 
ipw_wx_get_name(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)8630 static int ipw_wx_get_name(struct net_device *dev,
8631 			   struct iw_request_info *info,
8632 			   union iwreq_data *wrqu, char *extra)
8633 {
8634 	struct ipw_priv *priv = ieee80211_priv(dev);
8635 	mutex_lock(&priv->mutex);
8636 	if (priv->status & STATUS_RF_KILL_MASK)
8637 		strcpy(wrqu->name, "radio off");
8638 	else if (!(priv->status & STATUS_ASSOCIATED))
8639 		strcpy(wrqu->name, "unassociated");
8640 	else
8641 		snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8642 			 ipw_modes[priv->assoc_request.ieee_mode]);
8643 	IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8644 	mutex_unlock(&priv->mutex);
8645 	return 0;
8646 }
8647 
ipw_set_channel(struct ipw_priv * priv,u8 channel)8648 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8649 {
8650 	if (channel == 0) {
8651 		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8652 		priv->config &= ~CFG_STATIC_CHANNEL;
8653 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8654 				"parameters.\n");
8655 		ipw_associate(priv);
8656 		return 0;
8657 	}
8658 
8659 	priv->config |= CFG_STATIC_CHANNEL;
8660 
8661 	if (priv->channel == channel) {
8662 		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8663 			       channel);
8664 		return 0;
8665 	}
8666 
8667 	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8668 	priv->channel = channel;
8669 
8670 #ifdef CONFIG_IPW2200_MONITOR
8671 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8672 		int i;
8673 		if (priv->status & STATUS_SCANNING) {
8674 			IPW_DEBUG_SCAN("Scan abort triggered due to "
8675 				       "channel change.\n");
8676 			ipw_abort_scan(priv);
8677 		}
8678 
8679 		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8680 			udelay(10);
8681 
8682 		if (priv->status & STATUS_SCANNING)
8683 			IPW_DEBUG_SCAN("Still scanning...\n");
8684 		else
8685 			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8686 				       1000 - i);
8687 
8688 		return 0;
8689 	}
8690 #endif				/* CONFIG_IPW2200_MONITOR */
8691 
8692 	/* Network configuration changed -- force [re]association */
8693 	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8694 	if (!ipw_disassociate(priv))
8695 		ipw_associate(priv);
8696 
8697 	return 0;
8698 }
8699 
ipw_wx_set_freq(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)8700 static int ipw_wx_set_freq(struct net_device *dev,
8701 			   struct iw_request_info *info,
8702 			   union iwreq_data *wrqu, char *extra)
8703 {
8704 	struct ipw_priv *priv = ieee80211_priv(dev);
8705 	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8706 	struct iw_freq *fwrq = &wrqu->freq;
8707 	int ret = 0, i;
8708 	u8 channel, flags;
8709 	int band;
8710 
8711 	if (fwrq->m == 0) {
8712 		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8713 		mutex_lock(&priv->mutex);
8714 		ret = ipw_set_channel(priv, 0);
8715 		mutex_unlock(&priv->mutex);
8716 		return ret;
8717 	}
8718 	/* if setting by freq convert to channel */
8719 	if (fwrq->e == 1) {
8720 		channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8721 		if (channel == 0)
8722 			return -EINVAL;
8723 	} else
8724 		channel = fwrq->m;
8725 
8726 	if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8727 		return -EINVAL;
8728 
8729 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8730 		i = ieee80211_channel_to_index(priv->ieee, channel);
8731 		if (i == -1)
8732 			return -EINVAL;
8733 
8734 		flags = (band == IEEE80211_24GHZ_BAND) ?
8735 		    geo->bg[i].flags : geo->a[i].flags;
8736 		if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8737 			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8738 			return -EINVAL;
8739 		}
8740 	}
8741 
8742 	IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8743 	mutex_lock(&priv->mutex);
8744 	ret = ipw_set_channel(priv, channel);
8745 	mutex_unlock(&priv->mutex);
8746 	return ret;
8747 }
8748 
ipw_wx_get_freq(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)8749 static int ipw_wx_get_freq(struct net_device *dev,
8750 			   struct iw_request_info *info,
8751 			   union iwreq_data *wrqu, char *extra)
8752 {
8753 	struct ipw_priv *priv = ieee80211_priv(dev);
8754 
8755 	wrqu->freq.e = 0;
8756 
8757 	/* If we are associated, trying to associate, or have a statically
8758 	 * configured CHANNEL then return that; otherwise return ANY */
8759 	mutex_lock(&priv->mutex);
8760 	if (priv->config & CFG_STATIC_CHANNEL ||
8761 	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8762 		int i;
8763 
8764 		i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8765 		BUG_ON(i == -1);
8766 		wrqu->freq.e = 1;
8767 
8768 		switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8769 		case IEEE80211_52GHZ_BAND:
8770 			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8771 			break;
8772 
8773 		case IEEE80211_24GHZ_BAND:
8774 			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8775 			break;
8776 
8777 		default:
8778 			BUG();
8779 		}
8780 	} else
8781 		wrqu->freq.m = 0;
8782 
8783 	mutex_unlock(&priv->mutex);
8784 	IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8785 	return 0;
8786 }
8787 
ipw_wx_set_mode(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)8788 static int ipw_wx_set_mode(struct net_device *dev,
8789 			   struct iw_request_info *info,
8790 			   union iwreq_data *wrqu, char *extra)
8791 {
8792 	struct ipw_priv *priv = ieee80211_priv(dev);
8793 	int err = 0;
8794 
8795 	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8796 
8797 	switch (wrqu->mode) {
8798 #ifdef CONFIG_IPW2200_MONITOR
8799 	case IW_MODE_MONITOR:
8800 #endif
8801 	case IW_MODE_ADHOC:
8802 	case IW_MODE_INFRA:
8803 		break;
8804 	case IW_MODE_AUTO:
8805 		wrqu->mode = IW_MODE_INFRA;
8806 		break;
8807 	default:
8808 		return -EINVAL;
8809 	}
8810 	if (wrqu->mode == priv->ieee->iw_mode)
8811 		return 0;
8812 
8813 	mutex_lock(&priv->mutex);
8814 
8815 	ipw_sw_reset(priv, 0);
8816 
8817 #ifdef CONFIG_IPW2200_MONITOR
8818 	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8819 		priv->net_dev->type = ARPHRD_ETHER;
8820 
8821 	if (wrqu->mode == IW_MODE_MONITOR)
8822 #ifdef CONFIG_IPW2200_RADIOTAP
8823 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8824 #else
8825 		priv->net_dev->type = ARPHRD_IEEE80211;
8826 #endif
8827 #endif				/* CONFIG_IPW2200_MONITOR */
8828 
8829 	/* Free the existing firmware and reset the fw_loaded
8830 	 * flag so ipw_load() will bring in the new firmawre */
8831 	free_firmware();
8832 
8833 	priv->ieee->iw_mode = wrqu->mode;
8834 
8835 	queue_work(priv->workqueue, &priv->adapter_restart);
8836 	mutex_unlock(&priv->mutex);
8837 	return err;
8838 }
8839 
ipw_wx_get_mode(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)8840 static int ipw_wx_get_mode(struct net_device *dev,
8841 			   struct iw_request_info *info,
8842 			   union iwreq_data *wrqu, char *extra)
8843 {
8844 	struct ipw_priv *priv = ieee80211_priv(dev);
8845 	mutex_lock(&priv->mutex);
8846 	wrqu->mode = priv->ieee->iw_mode;
8847 	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8848 	mutex_unlock(&priv->mutex);
8849 	return 0;
8850 }
8851 
8852 /* Values are in microsecond */
8853 static const s32 timeout_duration[] = {
8854 	350000,
8855 	250000,
8856 	75000,
8857 	37000,
8858 	25000,
8859 };
8860 
8861 static const s32 period_duration[] = {
8862 	400000,
8863 	700000,
8864 	1000000,
8865 	1000000,
8866 	1000000
8867 };
8868 
ipw_wx_get_range(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)8869 static int ipw_wx_get_range(struct net_device *dev,
8870 			    struct iw_request_info *info,
8871 			    union iwreq_data *wrqu, char *extra)
8872 {
8873 	struct ipw_priv *priv = ieee80211_priv(dev);
8874 	struct iw_range *range = (struct iw_range *)extra;
8875 	const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8876 	int i = 0, j;
8877 
8878 	wrqu->data.length = sizeof(*range);
8879 	memset(range, 0, sizeof(*range));
8880 
8881 	/* 54Mbs == ~27 Mb/s real (802.11g) */
8882 	range->throughput = 27 * 1000 * 1000;
8883 
8884 	range->max_qual.qual = 100;
8885 	/* TODO: Find real max RSSI and stick here */
8886 	range->max_qual.level = 0;
8887 	range->max_qual.noise = 0;
8888 	range->max_qual.updated = 7;	/* Updated all three */
8889 
8890 	range->avg_qual.qual = 70;
8891 	/* TODO: Find real 'good' to 'bad' threshol value for RSSI */
8892 	range->avg_qual.level = 0;	/* FIXME to real average level */
8893 	range->avg_qual.noise = 0;
8894 	range->avg_qual.updated = 7;	/* Updated all three */
8895 	mutex_lock(&priv->mutex);
8896 	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8897 
8898 	for (i = 0; i < range->num_bitrates; i++)
8899 		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8900 		    500000;
8901 
8902 	range->max_rts = DEFAULT_RTS_THRESHOLD;
8903 	range->min_frag = MIN_FRAG_THRESHOLD;
8904 	range->max_frag = MAX_FRAG_THRESHOLD;
8905 
8906 	range->encoding_size[0] = 5;
8907 	range->encoding_size[1] = 13;
8908 	range->num_encoding_sizes = 2;
8909 	range->max_encoding_tokens = WEP_KEYS;
8910 
8911 	/* Set the Wireless Extension versions */
8912 	range->we_version_compiled = WIRELESS_EXT;
8913 	range->we_version_source = 18;
8914 
8915 	i = 0;
8916 	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8917 		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8918 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8919 			    (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8920 				continue;
8921 
8922 			range->freq[i].i = geo->bg[j].channel;
8923 			range->freq[i].m = geo->bg[j].freq * 100000;
8924 			range->freq[i].e = 1;
8925 			i++;
8926 		}
8927 	}
8928 
8929 	if (priv->ieee->mode & IEEE_A) {
8930 		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8931 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8932 			    (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8933 				continue;
8934 
8935 			range->freq[i].i = geo->a[j].channel;
8936 			range->freq[i].m = geo->a[j].freq * 100000;
8937 			range->freq[i].e = 1;
8938 			i++;
8939 		}
8940 	}
8941 
8942 	range->num_channels = i;
8943 	range->num_frequency = i;
8944 
8945 	mutex_unlock(&priv->mutex);
8946 
8947 	/* Event capability (kernel + driver) */
8948 	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8949 				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8950 				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8951 				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8952 	range->event_capa[1] = IW_EVENT_CAPA_K_1;
8953 
8954 	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8955 		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8956 
8957 	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8958 
8959 	IPW_DEBUG_WX("GET Range\n");
8960 	return 0;
8961 }
8962 
ipw_wx_set_wap(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)8963 static int ipw_wx_set_wap(struct net_device *dev,
8964 			  struct iw_request_info *info,
8965 			  union iwreq_data *wrqu, char *extra)
8966 {
8967 	struct ipw_priv *priv = ieee80211_priv(dev);
8968 
8969 	static const unsigned char any[] = {
8970 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8971 	};
8972 	static const unsigned char off[] = {
8973 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8974 	};
8975 
8976 	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8977 		return -EINVAL;
8978 	mutex_lock(&priv->mutex);
8979 	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8980 	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8981 		/* we disable mandatory BSSID association */
8982 		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8983 		priv->config &= ~CFG_STATIC_BSSID;
8984 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8985 				"parameters.\n");
8986 		ipw_associate(priv);
8987 		mutex_unlock(&priv->mutex);
8988 		return 0;
8989 	}
8990 
8991 	priv->config |= CFG_STATIC_BSSID;
8992 	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8993 		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8994 		mutex_unlock(&priv->mutex);
8995 		return 0;
8996 	}
8997 
8998 	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8999 		     wrqu->ap_addr.sa_data);
9000 
9001 	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9002 
9003 	/* Network configuration changed -- force [re]association */
9004 	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9005 	if (!ipw_disassociate(priv))
9006 		ipw_associate(priv);
9007 
9008 	mutex_unlock(&priv->mutex);
9009 	return 0;
9010 }
9011 
ipw_wx_get_wap(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9012 static int ipw_wx_get_wap(struct net_device *dev,
9013 			  struct iw_request_info *info,
9014 			  union iwreq_data *wrqu, char *extra)
9015 {
9016 	struct ipw_priv *priv = ieee80211_priv(dev);
9017 
9018 	/* If we are associated, trying to associate, or have a statically
9019 	 * configured BSSID then return that; otherwise return ANY */
9020 	mutex_lock(&priv->mutex);
9021 	if (priv->config & CFG_STATIC_BSSID ||
9022 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9023 		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9024 		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9025 	} else
9026 		memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9027 
9028 	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9029 		     wrqu->ap_addr.sa_data);
9030 	mutex_unlock(&priv->mutex);
9031 	return 0;
9032 }
9033 
ipw_wx_set_essid(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9034 static int ipw_wx_set_essid(struct net_device *dev,
9035 			    struct iw_request_info *info,
9036 			    union iwreq_data *wrqu, char *extra)
9037 {
9038 	struct ipw_priv *priv = ieee80211_priv(dev);
9039         int length;
9040 	DECLARE_SSID_BUF(ssid);
9041 
9042         mutex_lock(&priv->mutex);
9043 
9044         if (!wrqu->essid.flags)
9045         {
9046                 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9047                 ipw_disassociate(priv);
9048                 priv->config &= ~CFG_STATIC_ESSID;
9049                 ipw_associate(priv);
9050                 mutex_unlock(&priv->mutex);
9051                 return 0;
9052         }
9053 
9054 	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9055 
9056 	priv->config |= CFG_STATIC_ESSID;
9057 
9058 	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9059 	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9060 		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9061 		mutex_unlock(&priv->mutex);
9062 		return 0;
9063 	}
9064 
9065 	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9066 		     print_ssid(ssid, extra, length), length);
9067 
9068 	priv->essid_len = length;
9069 	memcpy(priv->essid, extra, priv->essid_len);
9070 
9071 	/* Network configuration changed -- force [re]association */
9072 	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9073 	if (!ipw_disassociate(priv))
9074 		ipw_associate(priv);
9075 
9076 	mutex_unlock(&priv->mutex);
9077 	return 0;
9078 }
9079 
ipw_wx_get_essid(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9080 static int ipw_wx_get_essid(struct net_device *dev,
9081 			    struct iw_request_info *info,
9082 			    union iwreq_data *wrqu, char *extra)
9083 {
9084 	struct ipw_priv *priv = ieee80211_priv(dev);
9085 	DECLARE_SSID_BUF(ssid);
9086 
9087 	/* If we are associated, trying to associate, or have a statically
9088 	 * configured ESSID then return that; otherwise return ANY */
9089 	mutex_lock(&priv->mutex);
9090 	if (priv->config & CFG_STATIC_ESSID ||
9091 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9092 		IPW_DEBUG_WX("Getting essid: '%s'\n",
9093 			     print_ssid(ssid, priv->essid, priv->essid_len));
9094 		memcpy(extra, priv->essid, priv->essid_len);
9095 		wrqu->essid.length = priv->essid_len;
9096 		wrqu->essid.flags = 1;	/* active */
9097 	} else {
9098 		IPW_DEBUG_WX("Getting essid: ANY\n");
9099 		wrqu->essid.length = 0;
9100 		wrqu->essid.flags = 0;	/* active */
9101 	}
9102 	mutex_unlock(&priv->mutex);
9103 	return 0;
9104 }
9105 
ipw_wx_set_nick(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9106 static int ipw_wx_set_nick(struct net_device *dev,
9107 			   struct iw_request_info *info,
9108 			   union iwreq_data *wrqu, char *extra)
9109 {
9110 	struct ipw_priv *priv = ieee80211_priv(dev);
9111 
9112 	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9113 	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9114 		return -E2BIG;
9115 	mutex_lock(&priv->mutex);
9116 	wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9117 	memset(priv->nick, 0, sizeof(priv->nick));
9118 	memcpy(priv->nick, extra, wrqu->data.length);
9119 	IPW_DEBUG_TRACE("<<\n");
9120 	mutex_unlock(&priv->mutex);
9121 	return 0;
9122 
9123 }
9124 
ipw_wx_get_nick(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9125 static int ipw_wx_get_nick(struct net_device *dev,
9126 			   struct iw_request_info *info,
9127 			   union iwreq_data *wrqu, char *extra)
9128 {
9129 	struct ipw_priv *priv = ieee80211_priv(dev);
9130 	IPW_DEBUG_WX("Getting nick\n");
9131 	mutex_lock(&priv->mutex);
9132 	wrqu->data.length = strlen(priv->nick);
9133 	memcpy(extra, priv->nick, wrqu->data.length);
9134 	wrqu->data.flags = 1;	/* active */
9135 	mutex_unlock(&priv->mutex);
9136 	return 0;
9137 }
9138 
ipw_wx_set_sens(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9139 static int ipw_wx_set_sens(struct net_device *dev,
9140 			    struct iw_request_info *info,
9141 			    union iwreq_data *wrqu, char *extra)
9142 {
9143 	struct ipw_priv *priv = ieee80211_priv(dev);
9144 	int err = 0;
9145 
9146 	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9147 	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9148 	mutex_lock(&priv->mutex);
9149 
9150 	if (wrqu->sens.fixed == 0)
9151 	{
9152 		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9153 		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9154 		goto out;
9155 	}
9156 	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9157 	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9158 		err = -EINVAL;
9159 		goto out;
9160 	}
9161 
9162 	priv->roaming_threshold = wrqu->sens.value;
9163 	priv->disassociate_threshold = 3*wrqu->sens.value;
9164       out:
9165 	mutex_unlock(&priv->mutex);
9166 	return err;
9167 }
9168 
ipw_wx_get_sens(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9169 static int ipw_wx_get_sens(struct net_device *dev,
9170 			    struct iw_request_info *info,
9171 			    union iwreq_data *wrqu, char *extra)
9172 {
9173 	struct ipw_priv *priv = ieee80211_priv(dev);
9174 	mutex_lock(&priv->mutex);
9175 	wrqu->sens.fixed = 1;
9176 	wrqu->sens.value = priv->roaming_threshold;
9177 	mutex_unlock(&priv->mutex);
9178 
9179 	IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9180 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9181 
9182 	return 0;
9183 }
9184 
ipw_wx_set_rate(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9185 static int ipw_wx_set_rate(struct net_device *dev,
9186 			   struct iw_request_info *info,
9187 			   union iwreq_data *wrqu, char *extra)
9188 {
9189 	/* TODO: We should use semaphores or locks for access to priv */
9190 	struct ipw_priv *priv = ieee80211_priv(dev);
9191 	u32 target_rate = wrqu->bitrate.value;
9192 	u32 fixed, mask;
9193 
9194 	/* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9195 	/* value = X, fixed = 1 means only rate X */
9196 	/* value = X, fixed = 0 means all rates lower equal X */
9197 
9198 	if (target_rate == -1) {
9199 		fixed = 0;
9200 		mask = IEEE80211_DEFAULT_RATES_MASK;
9201 		/* Now we should reassociate */
9202 		goto apply;
9203 	}
9204 
9205 	mask = 0;
9206 	fixed = wrqu->bitrate.fixed;
9207 
9208 	if (target_rate == 1000000 || !fixed)
9209 		mask |= IEEE80211_CCK_RATE_1MB_MASK;
9210 	if (target_rate == 1000000)
9211 		goto apply;
9212 
9213 	if (target_rate == 2000000 || !fixed)
9214 		mask |= IEEE80211_CCK_RATE_2MB_MASK;
9215 	if (target_rate == 2000000)
9216 		goto apply;
9217 
9218 	if (target_rate == 5500000 || !fixed)
9219 		mask |= IEEE80211_CCK_RATE_5MB_MASK;
9220 	if (target_rate == 5500000)
9221 		goto apply;
9222 
9223 	if (target_rate == 6000000 || !fixed)
9224 		mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9225 	if (target_rate == 6000000)
9226 		goto apply;
9227 
9228 	if (target_rate == 9000000 || !fixed)
9229 		mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9230 	if (target_rate == 9000000)
9231 		goto apply;
9232 
9233 	if (target_rate == 11000000 || !fixed)
9234 		mask |= IEEE80211_CCK_RATE_11MB_MASK;
9235 	if (target_rate == 11000000)
9236 		goto apply;
9237 
9238 	if (target_rate == 12000000 || !fixed)
9239 		mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9240 	if (target_rate == 12000000)
9241 		goto apply;
9242 
9243 	if (target_rate == 18000000 || !fixed)
9244 		mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9245 	if (target_rate == 18000000)
9246 		goto apply;
9247 
9248 	if (target_rate == 24000000 || !fixed)
9249 		mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9250 	if (target_rate == 24000000)
9251 		goto apply;
9252 
9253 	if (target_rate == 36000000 || !fixed)
9254 		mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9255 	if (target_rate == 36000000)
9256 		goto apply;
9257 
9258 	if (target_rate == 48000000 || !fixed)
9259 		mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9260 	if (target_rate == 48000000)
9261 		goto apply;
9262 
9263 	if (target_rate == 54000000 || !fixed)
9264 		mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9265 	if (target_rate == 54000000)
9266 		goto apply;
9267 
9268 	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9269 	return -EINVAL;
9270 
9271       apply:
9272 	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9273 		     mask, fixed ? "fixed" : "sub-rates");
9274 	mutex_lock(&priv->mutex);
9275 	if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9276 		priv->config &= ~CFG_FIXED_RATE;
9277 		ipw_set_fixed_rate(priv, priv->ieee->mode);
9278 	} else
9279 		priv->config |= CFG_FIXED_RATE;
9280 
9281 	if (priv->rates_mask == mask) {
9282 		IPW_DEBUG_WX("Mask set to current mask.\n");
9283 		mutex_unlock(&priv->mutex);
9284 		return 0;
9285 	}
9286 
9287 	priv->rates_mask = mask;
9288 
9289 	/* Network configuration changed -- force [re]association */
9290 	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9291 	if (!ipw_disassociate(priv))
9292 		ipw_associate(priv);
9293 
9294 	mutex_unlock(&priv->mutex);
9295 	return 0;
9296 }
9297 
ipw_wx_get_rate(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9298 static int ipw_wx_get_rate(struct net_device *dev,
9299 			   struct iw_request_info *info,
9300 			   union iwreq_data *wrqu, char *extra)
9301 {
9302 	struct ipw_priv *priv = ieee80211_priv(dev);
9303 	mutex_lock(&priv->mutex);
9304 	wrqu->bitrate.value = priv->last_rate;
9305 	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9306 	mutex_unlock(&priv->mutex);
9307 	IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9308 	return 0;
9309 }
9310 
ipw_wx_set_rts(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9311 static int ipw_wx_set_rts(struct net_device *dev,
9312 			  struct iw_request_info *info,
9313 			  union iwreq_data *wrqu, char *extra)
9314 {
9315 	struct ipw_priv *priv = ieee80211_priv(dev);
9316 	mutex_lock(&priv->mutex);
9317 	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9318 		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9319 	else {
9320 		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9321 		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9322 			mutex_unlock(&priv->mutex);
9323 			return -EINVAL;
9324 		}
9325 		priv->rts_threshold = wrqu->rts.value;
9326 	}
9327 
9328 	ipw_send_rts_threshold(priv, priv->rts_threshold);
9329 	mutex_unlock(&priv->mutex);
9330 	IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9331 	return 0;
9332 }
9333 
ipw_wx_get_rts(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9334 static int ipw_wx_get_rts(struct net_device *dev,
9335 			  struct iw_request_info *info,
9336 			  union iwreq_data *wrqu, char *extra)
9337 {
9338 	struct ipw_priv *priv = ieee80211_priv(dev);
9339 	mutex_lock(&priv->mutex);
9340 	wrqu->rts.value = priv->rts_threshold;
9341 	wrqu->rts.fixed = 0;	/* no auto select */
9342 	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9343 	mutex_unlock(&priv->mutex);
9344 	IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9345 	return 0;
9346 }
9347 
ipw_wx_set_txpow(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9348 static int ipw_wx_set_txpow(struct net_device *dev,
9349 			    struct iw_request_info *info,
9350 			    union iwreq_data *wrqu, char *extra)
9351 {
9352 	struct ipw_priv *priv = ieee80211_priv(dev);
9353 	int err = 0;
9354 
9355 	mutex_lock(&priv->mutex);
9356 	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9357 		err = -EINPROGRESS;
9358 		goto out;
9359 	}
9360 
9361 	if (!wrqu->power.fixed)
9362 		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9363 
9364 	if (wrqu->power.flags != IW_TXPOW_DBM) {
9365 		err = -EINVAL;
9366 		goto out;
9367 	}
9368 
9369 	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9370 	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9371 		err = -EINVAL;
9372 		goto out;
9373 	}
9374 
9375 	priv->tx_power = wrqu->power.value;
9376 	err = ipw_set_tx_power(priv);
9377       out:
9378 	mutex_unlock(&priv->mutex);
9379 	return err;
9380 }
9381 
ipw_wx_get_txpow(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9382 static int ipw_wx_get_txpow(struct net_device *dev,
9383 			    struct iw_request_info *info,
9384 			    union iwreq_data *wrqu, char *extra)
9385 {
9386 	struct ipw_priv *priv = ieee80211_priv(dev);
9387 	mutex_lock(&priv->mutex);
9388 	wrqu->power.value = priv->tx_power;
9389 	wrqu->power.fixed = 1;
9390 	wrqu->power.flags = IW_TXPOW_DBM;
9391 	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9392 	mutex_unlock(&priv->mutex);
9393 
9394 	IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9395 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9396 
9397 	return 0;
9398 }
9399 
ipw_wx_set_frag(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9400 static int ipw_wx_set_frag(struct net_device *dev,
9401 			   struct iw_request_info *info,
9402 			   union iwreq_data *wrqu, char *extra)
9403 {
9404 	struct ipw_priv *priv = ieee80211_priv(dev);
9405 	mutex_lock(&priv->mutex);
9406 	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9407 		priv->ieee->fts = DEFAULT_FTS;
9408 	else {
9409 		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9410 		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9411 			mutex_unlock(&priv->mutex);
9412 			return -EINVAL;
9413 		}
9414 
9415 		priv->ieee->fts = wrqu->frag.value & ~0x1;
9416 	}
9417 
9418 	ipw_send_frag_threshold(priv, wrqu->frag.value);
9419 	mutex_unlock(&priv->mutex);
9420 	IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9421 	return 0;
9422 }
9423 
ipw_wx_get_frag(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9424 static int ipw_wx_get_frag(struct net_device *dev,
9425 			   struct iw_request_info *info,
9426 			   union iwreq_data *wrqu, char *extra)
9427 {
9428 	struct ipw_priv *priv = ieee80211_priv(dev);
9429 	mutex_lock(&priv->mutex);
9430 	wrqu->frag.value = priv->ieee->fts;
9431 	wrqu->frag.fixed = 0;	/* no auto select */
9432 	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9433 	mutex_unlock(&priv->mutex);
9434 	IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9435 
9436 	return 0;
9437 }
9438 
ipw_wx_set_retry(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9439 static int ipw_wx_set_retry(struct net_device *dev,
9440 			    struct iw_request_info *info,
9441 			    union iwreq_data *wrqu, char *extra)
9442 {
9443 	struct ipw_priv *priv = ieee80211_priv(dev);
9444 
9445 	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9446 		return -EINVAL;
9447 
9448 	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9449 		return 0;
9450 
9451 	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9452 		return -EINVAL;
9453 
9454 	mutex_lock(&priv->mutex);
9455 	if (wrqu->retry.flags & IW_RETRY_SHORT)
9456 		priv->short_retry_limit = (u8) wrqu->retry.value;
9457 	else if (wrqu->retry.flags & IW_RETRY_LONG)
9458 		priv->long_retry_limit = (u8) wrqu->retry.value;
9459 	else {
9460 		priv->short_retry_limit = (u8) wrqu->retry.value;
9461 		priv->long_retry_limit = (u8) wrqu->retry.value;
9462 	}
9463 
9464 	ipw_send_retry_limit(priv, priv->short_retry_limit,
9465 			     priv->long_retry_limit);
9466 	mutex_unlock(&priv->mutex);
9467 	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9468 		     priv->short_retry_limit, priv->long_retry_limit);
9469 	return 0;
9470 }
9471 
ipw_wx_get_retry(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9472 static int ipw_wx_get_retry(struct net_device *dev,
9473 			    struct iw_request_info *info,
9474 			    union iwreq_data *wrqu, char *extra)
9475 {
9476 	struct ipw_priv *priv = ieee80211_priv(dev);
9477 
9478 	mutex_lock(&priv->mutex);
9479 	wrqu->retry.disabled = 0;
9480 
9481 	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9482 		mutex_unlock(&priv->mutex);
9483 		return -EINVAL;
9484 	}
9485 
9486 	if (wrqu->retry.flags & IW_RETRY_LONG) {
9487 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9488 		wrqu->retry.value = priv->long_retry_limit;
9489 	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9490 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9491 		wrqu->retry.value = priv->short_retry_limit;
9492 	} else {
9493 		wrqu->retry.flags = IW_RETRY_LIMIT;
9494 		wrqu->retry.value = priv->short_retry_limit;
9495 	}
9496 	mutex_unlock(&priv->mutex);
9497 
9498 	IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9499 
9500 	return 0;
9501 }
9502 
ipw_wx_set_scan(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9503 static int ipw_wx_set_scan(struct net_device *dev,
9504 			   struct iw_request_info *info,
9505 			   union iwreq_data *wrqu, char *extra)
9506 {
9507 	struct ipw_priv *priv = ieee80211_priv(dev);
9508 	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9509 	struct delayed_work *work = NULL;
9510 
9511 	mutex_lock(&priv->mutex);
9512 
9513 	priv->user_requested_scan = 1;
9514 
9515 	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9516 		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9517 			int len = min((int)req->essid_len,
9518 			              (int)sizeof(priv->direct_scan_ssid));
9519 			memcpy(priv->direct_scan_ssid, req->essid, len);
9520 			priv->direct_scan_ssid_len = len;
9521 			work = &priv->request_direct_scan;
9522 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9523 			work = &priv->request_passive_scan;
9524 		}
9525 	} else {
9526 		/* Normal active broadcast scan */
9527 		work = &priv->request_scan;
9528 	}
9529 
9530 	mutex_unlock(&priv->mutex);
9531 
9532 	IPW_DEBUG_WX("Start scan\n");
9533 
9534 	queue_delayed_work(priv->workqueue, work, 0);
9535 
9536 	return 0;
9537 }
9538 
ipw_wx_get_scan(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9539 static int ipw_wx_get_scan(struct net_device *dev,
9540 			   struct iw_request_info *info,
9541 			   union iwreq_data *wrqu, char *extra)
9542 {
9543 	struct ipw_priv *priv = ieee80211_priv(dev);
9544 	return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9545 }
9546 
ipw_wx_set_encode(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * key)9547 static int ipw_wx_set_encode(struct net_device *dev,
9548 			     struct iw_request_info *info,
9549 			     union iwreq_data *wrqu, char *key)
9550 {
9551 	struct ipw_priv *priv = ieee80211_priv(dev);
9552 	int ret;
9553 	u32 cap = priv->capability;
9554 
9555 	mutex_lock(&priv->mutex);
9556 	ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9557 
9558 	/* In IBSS mode, we need to notify the firmware to update
9559 	 * the beacon info after we changed the capability. */
9560 	if (cap != priv->capability &&
9561 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9562 	    priv->status & STATUS_ASSOCIATED)
9563 		ipw_disassociate(priv);
9564 
9565 	mutex_unlock(&priv->mutex);
9566 	return ret;
9567 }
9568 
ipw_wx_get_encode(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * key)9569 static int ipw_wx_get_encode(struct net_device *dev,
9570 			     struct iw_request_info *info,
9571 			     union iwreq_data *wrqu, char *key)
9572 {
9573 	struct ipw_priv *priv = ieee80211_priv(dev);
9574 	return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9575 }
9576 
ipw_wx_set_power(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9577 static int ipw_wx_set_power(struct net_device *dev,
9578 			    struct iw_request_info *info,
9579 			    union iwreq_data *wrqu, char *extra)
9580 {
9581 	struct ipw_priv *priv = ieee80211_priv(dev);
9582 	int err;
9583 	mutex_lock(&priv->mutex);
9584 	if (wrqu->power.disabled) {
9585 		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9586 		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9587 		if (err) {
9588 			IPW_DEBUG_WX("failed setting power mode.\n");
9589 			mutex_unlock(&priv->mutex);
9590 			return err;
9591 		}
9592 		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9593 		mutex_unlock(&priv->mutex);
9594 		return 0;
9595 	}
9596 
9597 	switch (wrqu->power.flags & IW_POWER_MODE) {
9598 	case IW_POWER_ON:	/* If not specified */
9599 	case IW_POWER_MODE:	/* If set all mask */
9600 	case IW_POWER_ALL_R:	/* If explicitly state all */
9601 		break;
9602 	default:		/* Otherwise we don't support it */
9603 		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9604 			     wrqu->power.flags);
9605 		mutex_unlock(&priv->mutex);
9606 		return -EOPNOTSUPP;
9607 	}
9608 
9609 	/* If the user hasn't specified a power management mode yet, default
9610 	 * to BATTERY */
9611 	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9612 		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9613 	else
9614 		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9615 
9616 	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9617 	if (err) {
9618 		IPW_DEBUG_WX("failed setting power mode.\n");
9619 		mutex_unlock(&priv->mutex);
9620 		return err;
9621 	}
9622 
9623 	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9624 	mutex_unlock(&priv->mutex);
9625 	return 0;
9626 }
9627 
ipw_wx_get_power(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9628 static int ipw_wx_get_power(struct net_device *dev,
9629 			    struct iw_request_info *info,
9630 			    union iwreq_data *wrqu, char *extra)
9631 {
9632 	struct ipw_priv *priv = ieee80211_priv(dev);
9633 	mutex_lock(&priv->mutex);
9634 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9635 		wrqu->power.disabled = 1;
9636 	else
9637 		wrqu->power.disabled = 0;
9638 
9639 	mutex_unlock(&priv->mutex);
9640 	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9641 
9642 	return 0;
9643 }
9644 
ipw_wx_set_powermode(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9645 static int ipw_wx_set_powermode(struct net_device *dev,
9646 				struct iw_request_info *info,
9647 				union iwreq_data *wrqu, char *extra)
9648 {
9649 	struct ipw_priv *priv = ieee80211_priv(dev);
9650 	int mode = *(int *)extra;
9651 	int err;
9652 
9653 	mutex_lock(&priv->mutex);
9654 	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9655 		mode = IPW_POWER_AC;
9656 
9657 	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9658 		err = ipw_send_power_mode(priv, mode);
9659 		if (err) {
9660 			IPW_DEBUG_WX("failed setting power mode.\n");
9661 			mutex_unlock(&priv->mutex);
9662 			return err;
9663 		}
9664 		priv->power_mode = IPW_POWER_ENABLED | mode;
9665 	}
9666 	mutex_unlock(&priv->mutex);
9667 	return 0;
9668 }
9669 
9670 #define MAX_WX_STRING 80
ipw_wx_get_powermode(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9671 static int ipw_wx_get_powermode(struct net_device *dev,
9672 				struct iw_request_info *info,
9673 				union iwreq_data *wrqu, char *extra)
9674 {
9675 	struct ipw_priv *priv = ieee80211_priv(dev);
9676 	int level = IPW_POWER_LEVEL(priv->power_mode);
9677 	char *p = extra;
9678 
9679 	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9680 
9681 	switch (level) {
9682 	case IPW_POWER_AC:
9683 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9684 		break;
9685 	case IPW_POWER_BATTERY:
9686 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9687 		break;
9688 	default:
9689 		p += snprintf(p, MAX_WX_STRING - (p - extra),
9690 			      "(Timeout %dms, Period %dms)",
9691 			      timeout_duration[level - 1] / 1000,
9692 			      period_duration[level - 1] / 1000);
9693 	}
9694 
9695 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9696 		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9697 
9698 	wrqu->data.length = p - extra + 1;
9699 
9700 	return 0;
9701 }
9702 
ipw_wx_set_wireless_mode(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9703 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9704 				    struct iw_request_info *info,
9705 				    union iwreq_data *wrqu, char *extra)
9706 {
9707 	struct ipw_priv *priv = ieee80211_priv(dev);
9708 	int mode = *(int *)extra;
9709 	u8 band = 0, modulation = 0;
9710 
9711 	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9712 		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9713 		return -EINVAL;
9714 	}
9715 	mutex_lock(&priv->mutex);
9716 	if (priv->adapter == IPW_2915ABG) {
9717 		priv->ieee->abg_true = 1;
9718 		if (mode & IEEE_A) {
9719 			band |= IEEE80211_52GHZ_BAND;
9720 			modulation |= IEEE80211_OFDM_MODULATION;
9721 		} else
9722 			priv->ieee->abg_true = 0;
9723 	} else {
9724 		if (mode & IEEE_A) {
9725 			IPW_WARNING("Attempt to set 2200BG into "
9726 				    "802.11a mode\n");
9727 			mutex_unlock(&priv->mutex);
9728 			return -EINVAL;
9729 		}
9730 
9731 		priv->ieee->abg_true = 0;
9732 	}
9733 
9734 	if (mode & IEEE_B) {
9735 		band |= IEEE80211_24GHZ_BAND;
9736 		modulation |= IEEE80211_CCK_MODULATION;
9737 	} else
9738 		priv->ieee->abg_true = 0;
9739 
9740 	if (mode & IEEE_G) {
9741 		band |= IEEE80211_24GHZ_BAND;
9742 		modulation |= IEEE80211_OFDM_MODULATION;
9743 	} else
9744 		priv->ieee->abg_true = 0;
9745 
9746 	priv->ieee->mode = mode;
9747 	priv->ieee->freq_band = band;
9748 	priv->ieee->modulation = modulation;
9749 	init_supported_rates(priv, &priv->rates);
9750 
9751 	/* Network configuration changed -- force [re]association */
9752 	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9753 	if (!ipw_disassociate(priv)) {
9754 		ipw_send_supported_rates(priv, &priv->rates);
9755 		ipw_associate(priv);
9756 	}
9757 
9758 	/* Update the band LEDs */
9759 	ipw_led_band_on(priv);
9760 
9761 	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9762 		     mode & IEEE_A ? 'a' : '.',
9763 		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9764 	mutex_unlock(&priv->mutex);
9765 	return 0;
9766 }
9767 
ipw_wx_get_wireless_mode(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9768 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9769 				    struct iw_request_info *info,
9770 				    union iwreq_data *wrqu, char *extra)
9771 {
9772 	struct ipw_priv *priv = ieee80211_priv(dev);
9773 	mutex_lock(&priv->mutex);
9774 	switch (priv->ieee->mode) {
9775 	case IEEE_A:
9776 		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9777 		break;
9778 	case IEEE_B:
9779 		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9780 		break;
9781 	case IEEE_A | IEEE_B:
9782 		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9783 		break;
9784 	case IEEE_G:
9785 		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9786 		break;
9787 	case IEEE_A | IEEE_G:
9788 		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9789 		break;
9790 	case IEEE_B | IEEE_G:
9791 		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9792 		break;
9793 	case IEEE_A | IEEE_B | IEEE_G:
9794 		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9795 		break;
9796 	default:
9797 		strncpy(extra, "unknown", MAX_WX_STRING);
9798 		break;
9799 	}
9800 
9801 	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9802 
9803 	wrqu->data.length = strlen(extra) + 1;
9804 	mutex_unlock(&priv->mutex);
9805 
9806 	return 0;
9807 }
9808 
ipw_wx_set_preamble(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9809 static int ipw_wx_set_preamble(struct net_device *dev,
9810 			       struct iw_request_info *info,
9811 			       union iwreq_data *wrqu, char *extra)
9812 {
9813 	struct ipw_priv *priv = ieee80211_priv(dev);
9814 	int mode = *(int *)extra;
9815 	mutex_lock(&priv->mutex);
9816 	/* Switching from SHORT -> LONG requires a disassociation */
9817 	if (mode == 1) {
9818 		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9819 			priv->config |= CFG_PREAMBLE_LONG;
9820 
9821 			/* Network configuration changed -- force [re]association */
9822 			IPW_DEBUG_ASSOC
9823 			    ("[re]association triggered due to preamble change.\n");
9824 			if (!ipw_disassociate(priv))
9825 				ipw_associate(priv);
9826 		}
9827 		goto done;
9828 	}
9829 
9830 	if (mode == 0) {
9831 		priv->config &= ~CFG_PREAMBLE_LONG;
9832 		goto done;
9833 	}
9834 	mutex_unlock(&priv->mutex);
9835 	return -EINVAL;
9836 
9837       done:
9838 	mutex_unlock(&priv->mutex);
9839 	return 0;
9840 }
9841 
ipw_wx_get_preamble(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9842 static int ipw_wx_get_preamble(struct net_device *dev,
9843 			       struct iw_request_info *info,
9844 			       union iwreq_data *wrqu, char *extra)
9845 {
9846 	struct ipw_priv *priv = ieee80211_priv(dev);
9847 	mutex_lock(&priv->mutex);
9848 	if (priv->config & CFG_PREAMBLE_LONG)
9849 		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9850 	else
9851 		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9852 	mutex_unlock(&priv->mutex);
9853 	return 0;
9854 }
9855 
9856 #ifdef CONFIG_IPW2200_MONITOR
ipw_wx_set_monitor(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9857 static int ipw_wx_set_monitor(struct net_device *dev,
9858 			      struct iw_request_info *info,
9859 			      union iwreq_data *wrqu, char *extra)
9860 {
9861 	struct ipw_priv *priv = ieee80211_priv(dev);
9862 	int *parms = (int *)extra;
9863 	int enable = (parms[0] > 0);
9864 	mutex_lock(&priv->mutex);
9865 	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9866 	if (enable) {
9867 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9868 #ifdef CONFIG_IPW2200_RADIOTAP
9869 			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9870 #else
9871 			priv->net_dev->type = ARPHRD_IEEE80211;
9872 #endif
9873 			queue_work(priv->workqueue, &priv->adapter_restart);
9874 		}
9875 
9876 		ipw_set_channel(priv, parms[1]);
9877 	} else {
9878 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9879 			mutex_unlock(&priv->mutex);
9880 			return 0;
9881 		}
9882 		priv->net_dev->type = ARPHRD_ETHER;
9883 		queue_work(priv->workqueue, &priv->adapter_restart);
9884 	}
9885 	mutex_unlock(&priv->mutex);
9886 	return 0;
9887 }
9888 
9889 #endif				/* CONFIG_IPW2200_MONITOR */
9890 
ipw_wx_reset(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9891 static int ipw_wx_reset(struct net_device *dev,
9892 			struct iw_request_info *info,
9893 			union iwreq_data *wrqu, char *extra)
9894 {
9895 	struct ipw_priv *priv = ieee80211_priv(dev);
9896 	IPW_DEBUG_WX("RESET\n");
9897 	queue_work(priv->workqueue, &priv->adapter_restart);
9898 	return 0;
9899 }
9900 
ipw_wx_sw_reset(struct net_device * dev,struct iw_request_info * info,union iwreq_data * wrqu,char * extra)9901 static int ipw_wx_sw_reset(struct net_device *dev,
9902 			   struct iw_request_info *info,
9903 			   union iwreq_data *wrqu, char *extra)
9904 {
9905 	struct ipw_priv *priv = ieee80211_priv(dev);
9906 	union iwreq_data wrqu_sec = {
9907 		.encoding = {
9908 			     .flags = IW_ENCODE_DISABLED,
9909 			     },
9910 	};
9911 	int ret;
9912 
9913 	IPW_DEBUG_WX("SW_RESET\n");
9914 
9915 	mutex_lock(&priv->mutex);
9916 
9917 	ret = ipw_sw_reset(priv, 2);
9918 	if (!ret) {
9919 		free_firmware();
9920 		ipw_adapter_restart(priv);
9921 	}
9922 
9923 	/* The SW reset bit might have been toggled on by the 'disable'
9924 	 * module parameter, so take appropriate action */
9925 	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9926 
9927 	mutex_unlock(&priv->mutex);
9928 	ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9929 	mutex_lock(&priv->mutex);
9930 
9931 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9932 		/* Configuration likely changed -- force [re]association */
9933 		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9934 				"reset.\n");
9935 		if (!ipw_disassociate(priv))
9936 			ipw_associate(priv);
9937 	}
9938 
9939 	mutex_unlock(&priv->mutex);
9940 
9941 	return 0;
9942 }
9943 
9944 /* Rebase the WE IOCTLs to zero for the handler array */
9945 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9946 static iw_handler ipw_wx_handlers[] = {
9947 	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9948 	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9949 	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9950 	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9951 	IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9952 	IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9953 	IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9954 	IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9955 	IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9956 	IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9957 	IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9958 	IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9959 	IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9960 	IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9961 	IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9962 	IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9963 	IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9964 	IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9965 	IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9966 	IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9967 	IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9968 	IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9969 	IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9970 	IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9971 	IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9972 	IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9973 	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9974 	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9975 	IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9976 	IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9977 	IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9978 	IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9979 	IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9980 	IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9981 	IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9982 	IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9983 	IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9984 	IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9985 	IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9986 	IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9987 	IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9988 };
9989 
9990 enum {
9991 	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9992 	IPW_PRIV_GET_POWER,
9993 	IPW_PRIV_SET_MODE,
9994 	IPW_PRIV_GET_MODE,
9995 	IPW_PRIV_SET_PREAMBLE,
9996 	IPW_PRIV_GET_PREAMBLE,
9997 	IPW_PRIV_RESET,
9998 	IPW_PRIV_SW_RESET,
9999 #ifdef CONFIG_IPW2200_MONITOR
10000 	IPW_PRIV_SET_MONITOR,
10001 #endif
10002 };
10003 
10004 static struct iw_priv_args ipw_priv_args[] = {
10005 	{
10006 	 .cmd = IPW_PRIV_SET_POWER,
10007 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10008 	 .name = "set_power"},
10009 	{
10010 	 .cmd = IPW_PRIV_GET_POWER,
10011 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10012 	 .name = "get_power"},
10013 	{
10014 	 .cmd = IPW_PRIV_SET_MODE,
10015 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10016 	 .name = "set_mode"},
10017 	{
10018 	 .cmd = IPW_PRIV_GET_MODE,
10019 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10020 	 .name = "get_mode"},
10021 	{
10022 	 .cmd = IPW_PRIV_SET_PREAMBLE,
10023 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10024 	 .name = "set_preamble"},
10025 	{
10026 	 .cmd = IPW_PRIV_GET_PREAMBLE,
10027 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10028 	 .name = "get_preamble"},
10029 	{
10030 	 IPW_PRIV_RESET,
10031 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10032 	{
10033 	 IPW_PRIV_SW_RESET,
10034 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10035 #ifdef CONFIG_IPW2200_MONITOR
10036 	{
10037 	 IPW_PRIV_SET_MONITOR,
10038 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10039 #endif				/* CONFIG_IPW2200_MONITOR */
10040 };
10041 
10042 static iw_handler ipw_priv_handler[] = {
10043 	ipw_wx_set_powermode,
10044 	ipw_wx_get_powermode,
10045 	ipw_wx_set_wireless_mode,
10046 	ipw_wx_get_wireless_mode,
10047 	ipw_wx_set_preamble,
10048 	ipw_wx_get_preamble,
10049 	ipw_wx_reset,
10050 	ipw_wx_sw_reset,
10051 #ifdef CONFIG_IPW2200_MONITOR
10052 	ipw_wx_set_monitor,
10053 #endif
10054 };
10055 
10056 static struct iw_handler_def ipw_wx_handler_def = {
10057 	.standard = ipw_wx_handlers,
10058 	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
10059 	.num_private = ARRAY_SIZE(ipw_priv_handler),
10060 	.num_private_args = ARRAY_SIZE(ipw_priv_args),
10061 	.private = ipw_priv_handler,
10062 	.private_args = ipw_priv_args,
10063 	.get_wireless_stats = ipw_get_wireless_stats,
10064 };
10065 
10066 /*
10067  * Get wireless statistics.
10068  * Called by /proc/net/wireless
10069  * Also called by SIOCGIWSTATS
10070  */
ipw_get_wireless_stats(struct net_device * dev)10071 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10072 {
10073 	struct ipw_priv *priv = ieee80211_priv(dev);
10074 	struct iw_statistics *wstats;
10075 
10076 	wstats = &priv->wstats;
10077 
10078 	/* if hw is disabled, then ipw_get_ordinal() can't be called.
10079 	 * netdev->get_wireless_stats seems to be called before fw is
10080 	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
10081 	 * and associated; if not associcated, the values are all meaningless
10082 	 * anyway, so set them all to NULL and INVALID */
10083 	if (!(priv->status & STATUS_ASSOCIATED)) {
10084 		wstats->miss.beacon = 0;
10085 		wstats->discard.retries = 0;
10086 		wstats->qual.qual = 0;
10087 		wstats->qual.level = 0;
10088 		wstats->qual.noise = 0;
10089 		wstats->qual.updated = 7;
10090 		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10091 		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10092 		return wstats;
10093 	}
10094 
10095 	wstats->qual.qual = priv->quality;
10096 	wstats->qual.level = priv->exp_avg_rssi;
10097 	wstats->qual.noise = priv->exp_avg_noise;
10098 	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10099 	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10100 
10101 	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10102 	wstats->discard.retries = priv->last_tx_failures;
10103 	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10104 
10105 /*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10106 	goto fail_get_ordinal;
10107 	wstats->discard.retries += tx_retry; */
10108 
10109 	return wstats;
10110 }
10111 
10112 /* net device stuff */
10113 
init_sys_config(struct ipw_sys_config * sys_config)10114 static  void init_sys_config(struct ipw_sys_config *sys_config)
10115 {
10116 	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10117 	sys_config->bt_coexistence = 0;
10118 	sys_config->answer_broadcast_ssid_probe = 0;
10119 	sys_config->accept_all_data_frames = 0;
10120 	sys_config->accept_non_directed_frames = 1;
10121 	sys_config->exclude_unicast_unencrypted = 0;
10122 	sys_config->disable_unicast_decryption = 1;
10123 	sys_config->exclude_multicast_unencrypted = 0;
10124 	sys_config->disable_multicast_decryption = 1;
10125 	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10126 		antenna = CFG_SYS_ANTENNA_BOTH;
10127 	sys_config->antenna_diversity = antenna;
10128 	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10129 	sys_config->dot11g_auto_detection = 0;
10130 	sys_config->enable_cts_to_self = 0;
10131 	sys_config->bt_coexist_collision_thr = 0;
10132 	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10133 	sys_config->silence_threshold = 0x1e;
10134 }
10135 
ipw_net_open(struct net_device * dev)10136 static int ipw_net_open(struct net_device *dev)
10137 {
10138 	IPW_DEBUG_INFO("dev->open\n");
10139 	netif_start_queue(dev);
10140 	return 0;
10141 }
10142 
ipw_net_stop(struct net_device * dev)10143 static int ipw_net_stop(struct net_device *dev)
10144 {
10145 	IPW_DEBUG_INFO("dev->close\n");
10146 	netif_stop_queue(dev);
10147 	return 0;
10148 }
10149 
10150 /*
10151 todo:
10152 
10153 modify to send one tfd per fragment instead of using chunking.  otherwise
10154 we need to heavily modify the ieee80211_skb_to_txb.
10155 */
10156 
ipw_tx_skb(struct ipw_priv * priv,struct ieee80211_txb * txb,int pri)10157 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10158 			     int pri)
10159 {
10160 	struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10161 	    txb->fragments[0]->data;
10162 	int i = 0;
10163 	struct tfd_frame *tfd;
10164 #ifdef CONFIG_IPW2200_QOS
10165 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10166 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10167 #else
10168 	struct clx2_tx_queue *txq = &priv->txq[0];
10169 #endif
10170 	struct clx2_queue *q = &txq->q;
10171 	u8 id, hdr_len, unicast;
10172 	u16 remaining_bytes;
10173 	int fc;
10174 
10175 	if (!(priv->status & STATUS_ASSOCIATED))
10176 		goto drop;
10177 
10178 	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10179 	switch (priv->ieee->iw_mode) {
10180 	case IW_MODE_ADHOC:
10181 		unicast = !is_multicast_ether_addr(hdr->addr1);
10182 		id = ipw_find_station(priv, hdr->addr1);
10183 		if (id == IPW_INVALID_STATION) {
10184 			id = ipw_add_station(priv, hdr->addr1);
10185 			if (id == IPW_INVALID_STATION) {
10186 				IPW_WARNING("Attempt to send data to "
10187 					    "invalid cell: %pM\n",
10188 					    hdr->addr1);
10189 				goto drop;
10190 			}
10191 		}
10192 		break;
10193 
10194 	case IW_MODE_INFRA:
10195 	default:
10196 		unicast = !is_multicast_ether_addr(hdr->addr3);
10197 		id = 0;
10198 		break;
10199 	}
10200 
10201 	tfd = &txq->bd[q->first_empty];
10202 	txq->txb[q->first_empty] = txb;
10203 	memset(tfd, 0, sizeof(*tfd));
10204 	tfd->u.data.station_number = id;
10205 
10206 	tfd->control_flags.message_type = TX_FRAME_TYPE;
10207 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10208 
10209 	tfd->u.data.cmd_id = DINO_CMD_TX;
10210 	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10211 	remaining_bytes = txb->payload_size;
10212 
10213 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10214 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10215 	else
10216 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10217 
10218 	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10219 		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10220 
10221 	fc = le16_to_cpu(hdr->frame_ctl);
10222 	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10223 
10224 	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10225 
10226 	if (likely(unicast))
10227 		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10228 
10229 	if (txb->encrypted && !priv->ieee->host_encrypt) {
10230 		switch (priv->ieee->sec.level) {
10231 		case SEC_LEVEL_3:
10232 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10233 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10234 			/* XXX: ACK flag must be set for CCMP even if it
10235 			 * is a multicast/broadcast packet, because CCMP
10236 			 * group communication encrypted by GTK is
10237 			 * actually done by the AP. */
10238 			if (!unicast)
10239 				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10240 
10241 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10242 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10243 			tfd->u.data.key_index = 0;
10244 			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10245 			break;
10246 		case SEC_LEVEL_2:
10247 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10248 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10249 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10250 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10251 			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10252 			break;
10253 		case SEC_LEVEL_1:
10254 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10255 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10256 			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10257 			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10258 			    40)
10259 				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10260 			else
10261 				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10262 			break;
10263 		case SEC_LEVEL_0:
10264 			break;
10265 		default:
10266 			printk(KERN_ERR "Unknow security level %d\n",
10267 			       priv->ieee->sec.level);
10268 			break;
10269 		}
10270 	} else
10271 		/* No hardware encryption */
10272 		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10273 
10274 #ifdef CONFIG_IPW2200_QOS
10275 	if (fc & IEEE80211_STYPE_QOS_DATA)
10276 		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10277 #endif				/* CONFIG_IPW2200_QOS */
10278 
10279 	/* payload */
10280 	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10281 						 txb->nr_frags));
10282 	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10283 		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10284 	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10285 		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10286 			       i, le32_to_cpu(tfd->u.data.num_chunks),
10287 			       txb->fragments[i]->len - hdr_len);
10288 		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10289 			     i, tfd->u.data.num_chunks,
10290 			     txb->fragments[i]->len - hdr_len);
10291 		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10292 			   txb->fragments[i]->len - hdr_len);
10293 
10294 		tfd->u.data.chunk_ptr[i] =
10295 		    cpu_to_le32(pci_map_single
10296 				(priv->pci_dev,
10297 				 txb->fragments[i]->data + hdr_len,
10298 				 txb->fragments[i]->len - hdr_len,
10299 				 PCI_DMA_TODEVICE));
10300 		tfd->u.data.chunk_len[i] =
10301 		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10302 	}
10303 
10304 	if (i != txb->nr_frags) {
10305 		struct sk_buff *skb;
10306 		u16 remaining_bytes = 0;
10307 		int j;
10308 
10309 		for (j = i; j < txb->nr_frags; j++)
10310 			remaining_bytes += txb->fragments[j]->len - hdr_len;
10311 
10312 		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10313 		       remaining_bytes);
10314 		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10315 		if (skb != NULL) {
10316 			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10317 			for (j = i; j < txb->nr_frags; j++) {
10318 				int size = txb->fragments[j]->len - hdr_len;
10319 
10320 				printk(KERN_INFO "Adding frag %d %d...\n",
10321 				       j, size);
10322 				memcpy(skb_put(skb, size),
10323 				       txb->fragments[j]->data + hdr_len, size);
10324 			}
10325 			dev_kfree_skb_any(txb->fragments[i]);
10326 			txb->fragments[i] = skb;
10327 			tfd->u.data.chunk_ptr[i] =
10328 			    cpu_to_le32(pci_map_single
10329 					(priv->pci_dev, skb->data,
10330 					 remaining_bytes,
10331 					 PCI_DMA_TODEVICE));
10332 
10333 			le32_add_cpu(&tfd->u.data.num_chunks, 1);
10334 		}
10335 	}
10336 
10337 	/* kick DMA */
10338 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10339 	ipw_write32(priv, q->reg_w, q->first_empty);
10340 
10341 	if (ipw_tx_queue_space(q) < q->high_mark)
10342 		netif_stop_queue(priv->net_dev);
10343 
10344 	return NETDEV_TX_OK;
10345 
10346       drop:
10347 	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10348 	ieee80211_txb_free(txb);
10349 	return NETDEV_TX_OK;
10350 }
10351 
ipw_net_is_queue_full(struct net_device * dev,int pri)10352 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10353 {
10354 	struct ipw_priv *priv = ieee80211_priv(dev);
10355 #ifdef CONFIG_IPW2200_QOS
10356 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10357 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10358 #else
10359 	struct clx2_tx_queue *txq = &priv->txq[0];
10360 #endif				/* CONFIG_IPW2200_QOS */
10361 
10362 	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10363 		return 1;
10364 
10365 	return 0;
10366 }
10367 
10368 #ifdef CONFIG_IPW2200_PROMISCUOUS
ipw_handle_promiscuous_tx(struct ipw_priv * priv,struct ieee80211_txb * txb)10369 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10370 				      struct ieee80211_txb *txb)
10371 {
10372 	struct ieee80211_rx_stats dummystats;
10373 	struct ieee80211_hdr *hdr;
10374 	u8 n;
10375 	u16 filter = priv->prom_priv->filter;
10376 	int hdr_only = 0;
10377 
10378 	if (filter & IPW_PROM_NO_TX)
10379 		return;
10380 
10381 	memset(&dummystats, 0, sizeof(dummystats));
10382 
10383 	/* Filtering of fragment chains is done agains the first fragment */
10384 	hdr = (void *)txb->fragments[0]->data;
10385 	if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
10386 		if (filter & IPW_PROM_NO_MGMT)
10387 			return;
10388 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10389 			hdr_only = 1;
10390 	} else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
10391 		if (filter & IPW_PROM_NO_CTL)
10392 			return;
10393 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10394 			hdr_only = 1;
10395 	} else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
10396 		if (filter & IPW_PROM_NO_DATA)
10397 			return;
10398 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10399 			hdr_only = 1;
10400 	}
10401 
10402 	for(n=0; n<txb->nr_frags; ++n) {
10403 		struct sk_buff *src = txb->fragments[n];
10404 		struct sk_buff *dst;
10405 		struct ieee80211_radiotap_header *rt_hdr;
10406 		int len;
10407 
10408 		if (hdr_only) {
10409 			hdr = (void *)src->data;
10410 			len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
10411 		} else
10412 			len = src->len;
10413 
10414 		dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10415 		if (!dst)
10416 			continue;
10417 
10418 		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10419 
10420 		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10421 		rt_hdr->it_pad = 0;
10422 		rt_hdr->it_present = 0; /* after all, it's just an idea */
10423 		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10424 
10425 		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10426 			ieee80211chan2mhz(priv->channel));
10427 		if (priv->channel > 14) 	/* 802.11a */
10428 			*(__le16*)skb_put(dst, sizeof(u16)) =
10429 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10430 					     IEEE80211_CHAN_5GHZ);
10431 		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10432 			*(__le16*)skb_put(dst, sizeof(u16)) =
10433 				cpu_to_le16(IEEE80211_CHAN_CCK |
10434 					     IEEE80211_CHAN_2GHZ);
10435 		else 		/* 802.11g */
10436 			*(__le16*)skb_put(dst, sizeof(u16)) =
10437 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10438 				 IEEE80211_CHAN_2GHZ);
10439 
10440 		rt_hdr->it_len = cpu_to_le16(dst->len);
10441 
10442 		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10443 
10444 		if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10445 			dev_kfree_skb_any(dst);
10446 	}
10447 }
10448 #endif
10449 
ipw_net_hard_start_xmit(struct ieee80211_txb * txb,struct net_device * dev,int pri)10450 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10451 				   struct net_device *dev, int pri)
10452 {
10453 	struct ipw_priv *priv = ieee80211_priv(dev);
10454 	unsigned long flags;
10455 	int ret;
10456 
10457 	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10458 	spin_lock_irqsave(&priv->lock, flags);
10459 
10460 #ifdef CONFIG_IPW2200_PROMISCUOUS
10461 	if (rtap_iface && netif_running(priv->prom_net_dev))
10462 		ipw_handle_promiscuous_tx(priv, txb);
10463 #endif
10464 
10465 	ret = ipw_tx_skb(priv, txb, pri);
10466 	if (ret == NETDEV_TX_OK)
10467 		__ipw_led_activity_on(priv);
10468 	spin_unlock_irqrestore(&priv->lock, flags);
10469 
10470 	return ret;
10471 }
10472 
ipw_net_get_stats(struct net_device * dev)10473 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10474 {
10475 	struct ipw_priv *priv = ieee80211_priv(dev);
10476 
10477 	priv->ieee->stats.tx_packets = priv->tx_packets;
10478 	priv->ieee->stats.rx_packets = priv->rx_packets;
10479 	return &priv->ieee->stats;
10480 }
10481 
ipw_net_set_multicast_list(struct net_device * dev)10482 static void ipw_net_set_multicast_list(struct net_device *dev)
10483 {
10484 
10485 }
10486 
ipw_net_set_mac_address(struct net_device * dev,void * p)10487 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10488 {
10489 	struct ipw_priv *priv = ieee80211_priv(dev);
10490 	struct sockaddr *addr = p;
10491 
10492 	if (!is_valid_ether_addr(addr->sa_data))
10493 		return -EADDRNOTAVAIL;
10494 	mutex_lock(&priv->mutex);
10495 	priv->config |= CFG_CUSTOM_MAC;
10496 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10497 	printk(KERN_INFO "%s: Setting MAC to %pM\n",
10498 	       priv->net_dev->name, priv->mac_addr);
10499 	queue_work(priv->workqueue, &priv->adapter_restart);
10500 	mutex_unlock(&priv->mutex);
10501 	return 0;
10502 }
10503 
ipw_ethtool_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)10504 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10505 				    struct ethtool_drvinfo *info)
10506 {
10507 	struct ipw_priv *p = ieee80211_priv(dev);
10508 	char vers[64];
10509 	char date[32];
10510 	u32 len;
10511 
10512 	strcpy(info->driver, DRV_NAME);
10513 	strcpy(info->version, DRV_VERSION);
10514 
10515 	len = sizeof(vers);
10516 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10517 	len = sizeof(date);
10518 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10519 
10520 	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10521 		 vers, date);
10522 	strcpy(info->bus_info, pci_name(p->pci_dev));
10523 	info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10524 }
10525 
ipw_ethtool_get_link(struct net_device * dev)10526 static u32 ipw_ethtool_get_link(struct net_device *dev)
10527 {
10528 	struct ipw_priv *priv = ieee80211_priv(dev);
10529 	return (priv->status & STATUS_ASSOCIATED) != 0;
10530 }
10531 
ipw_ethtool_get_eeprom_len(struct net_device * dev)10532 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10533 {
10534 	return IPW_EEPROM_IMAGE_SIZE;
10535 }
10536 
ipw_ethtool_get_eeprom(struct net_device * dev,struct ethtool_eeprom * eeprom,u8 * bytes)10537 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10538 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10539 {
10540 	struct ipw_priv *p = ieee80211_priv(dev);
10541 
10542 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10543 		return -EINVAL;
10544 	mutex_lock(&p->mutex);
10545 	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10546 	mutex_unlock(&p->mutex);
10547 	return 0;
10548 }
10549 
ipw_ethtool_set_eeprom(struct net_device * dev,struct ethtool_eeprom * eeprom,u8 * bytes)10550 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10551 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10552 {
10553 	struct ipw_priv *p = ieee80211_priv(dev);
10554 	int i;
10555 
10556 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10557 		return -EINVAL;
10558 	mutex_lock(&p->mutex);
10559 	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10560 	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10561 		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10562 	mutex_unlock(&p->mutex);
10563 	return 0;
10564 }
10565 
10566 static const struct ethtool_ops ipw_ethtool_ops = {
10567 	.get_link = ipw_ethtool_get_link,
10568 	.get_drvinfo = ipw_ethtool_get_drvinfo,
10569 	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10570 	.get_eeprom = ipw_ethtool_get_eeprom,
10571 	.set_eeprom = ipw_ethtool_set_eeprom,
10572 };
10573 
ipw_isr(int irq,void * data)10574 static irqreturn_t ipw_isr(int irq, void *data)
10575 {
10576 	struct ipw_priv *priv = data;
10577 	u32 inta, inta_mask;
10578 
10579 	if (!priv)
10580 		return IRQ_NONE;
10581 
10582 	spin_lock(&priv->irq_lock);
10583 
10584 	if (!(priv->status & STATUS_INT_ENABLED)) {
10585 		/* IRQ is disabled */
10586 		goto none;
10587 	}
10588 
10589 	inta = ipw_read32(priv, IPW_INTA_RW);
10590 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10591 
10592 	if (inta == 0xFFFFFFFF) {
10593 		/* Hardware disappeared */
10594 		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10595 		goto none;
10596 	}
10597 
10598 	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10599 		/* Shared interrupt */
10600 		goto none;
10601 	}
10602 
10603 	/* tell the device to stop sending interrupts */
10604 	__ipw_disable_interrupts(priv);
10605 
10606 	/* ack current interrupts */
10607 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10608 	ipw_write32(priv, IPW_INTA_RW, inta);
10609 
10610 	/* Cache INTA value for our tasklet */
10611 	priv->isr_inta = inta;
10612 
10613 	tasklet_schedule(&priv->irq_tasklet);
10614 
10615 	spin_unlock(&priv->irq_lock);
10616 
10617 	return IRQ_HANDLED;
10618       none:
10619 	spin_unlock(&priv->irq_lock);
10620 	return IRQ_NONE;
10621 }
10622 
ipw_rf_kill(void * adapter)10623 static void ipw_rf_kill(void *adapter)
10624 {
10625 	struct ipw_priv *priv = adapter;
10626 	unsigned long flags;
10627 
10628 	spin_lock_irqsave(&priv->lock, flags);
10629 
10630 	if (rf_kill_active(priv)) {
10631 		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10632 		if (priv->workqueue)
10633 			queue_delayed_work(priv->workqueue,
10634 					   &priv->rf_kill, 2 * HZ);
10635 		goto exit_unlock;
10636 	}
10637 
10638 	/* RF Kill is now disabled, so bring the device back up */
10639 
10640 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10641 		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10642 				  "device\n");
10643 
10644 		/* we can not do an adapter restart while inside an irq lock */
10645 		queue_work(priv->workqueue, &priv->adapter_restart);
10646 	} else
10647 		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10648 				  "enabled\n");
10649 
10650       exit_unlock:
10651 	spin_unlock_irqrestore(&priv->lock, flags);
10652 }
10653 
ipw_bg_rf_kill(struct work_struct * work)10654 static void ipw_bg_rf_kill(struct work_struct *work)
10655 {
10656 	struct ipw_priv *priv =
10657 		container_of(work, struct ipw_priv, rf_kill.work);
10658 	mutex_lock(&priv->mutex);
10659 	ipw_rf_kill(priv);
10660 	mutex_unlock(&priv->mutex);
10661 }
10662 
ipw_link_up(struct ipw_priv * priv)10663 static void ipw_link_up(struct ipw_priv *priv)
10664 {
10665 	priv->last_seq_num = -1;
10666 	priv->last_frag_num = -1;
10667 	priv->last_packet_time = 0;
10668 
10669 	netif_carrier_on(priv->net_dev);
10670 
10671 	cancel_delayed_work(&priv->request_scan);
10672 	cancel_delayed_work(&priv->request_direct_scan);
10673 	cancel_delayed_work(&priv->request_passive_scan);
10674 	cancel_delayed_work(&priv->scan_event);
10675 	ipw_reset_stats(priv);
10676 	/* Ensure the rate is updated immediately */
10677 	priv->last_rate = ipw_get_current_rate(priv);
10678 	ipw_gather_stats(priv);
10679 	ipw_led_link_up(priv);
10680 	notify_wx_assoc_event(priv);
10681 
10682 	if (priv->config & CFG_BACKGROUND_SCAN)
10683 		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10684 }
10685 
ipw_bg_link_up(struct work_struct * work)10686 static void ipw_bg_link_up(struct work_struct *work)
10687 {
10688 	struct ipw_priv *priv =
10689 		container_of(work, struct ipw_priv, link_up);
10690 	mutex_lock(&priv->mutex);
10691 	ipw_link_up(priv);
10692 	mutex_unlock(&priv->mutex);
10693 }
10694 
ipw_link_down(struct ipw_priv * priv)10695 static void ipw_link_down(struct ipw_priv *priv)
10696 {
10697 	ipw_led_link_down(priv);
10698 	netif_carrier_off(priv->net_dev);
10699 	notify_wx_assoc_event(priv);
10700 
10701 	/* Cancel any queued work ... */
10702 	cancel_delayed_work(&priv->request_scan);
10703 	cancel_delayed_work(&priv->request_direct_scan);
10704 	cancel_delayed_work(&priv->request_passive_scan);
10705 	cancel_delayed_work(&priv->adhoc_check);
10706 	cancel_delayed_work(&priv->gather_stats);
10707 
10708 	ipw_reset_stats(priv);
10709 
10710 	if (!(priv->status & STATUS_EXIT_PENDING)) {
10711 		/* Queue up another scan... */
10712 		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10713 	} else
10714 		cancel_delayed_work(&priv->scan_event);
10715 }
10716 
ipw_bg_link_down(struct work_struct * work)10717 static void ipw_bg_link_down(struct work_struct *work)
10718 {
10719 	struct ipw_priv *priv =
10720 		container_of(work, struct ipw_priv, link_down);
10721 	mutex_lock(&priv->mutex);
10722 	ipw_link_down(priv);
10723 	mutex_unlock(&priv->mutex);
10724 }
10725 
ipw_setup_deferred_work(struct ipw_priv * priv)10726 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10727 {
10728 	int ret = 0;
10729 
10730 	priv->workqueue = create_workqueue(DRV_NAME);
10731 	init_waitqueue_head(&priv->wait_command_queue);
10732 	init_waitqueue_head(&priv->wait_state);
10733 
10734 	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10735 	INIT_WORK(&priv->associate, ipw_bg_associate);
10736 	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10737 	INIT_WORK(&priv->system_config, ipw_system_config);
10738 	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10739 	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10740 	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10741 	INIT_WORK(&priv->up, ipw_bg_up);
10742 	INIT_WORK(&priv->down, ipw_bg_down);
10743 	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10744 	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10745 	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10746 	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10747 	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10748 	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10749 	INIT_WORK(&priv->roam, ipw_bg_roam);
10750 	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10751 	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10752 	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10753 	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10754 	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10755 	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10756 	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10757 
10758 #ifdef CONFIG_IPW2200_QOS
10759 	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10760 #endif				/* CONFIG_IPW2200_QOS */
10761 
10762 	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10763 		     ipw_irq_tasklet, (unsigned long)priv);
10764 
10765 	return ret;
10766 }
10767 
shim__set_security(struct net_device * dev,struct ieee80211_security * sec)10768 static void shim__set_security(struct net_device *dev,
10769 			       struct ieee80211_security *sec)
10770 {
10771 	struct ipw_priv *priv = ieee80211_priv(dev);
10772 	int i;
10773 	for (i = 0; i < 4; i++) {
10774 		if (sec->flags & (1 << i)) {
10775 			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10776 			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10777 			if (sec->key_sizes[i] == 0)
10778 				priv->ieee->sec.flags &= ~(1 << i);
10779 			else {
10780 				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10781 				       sec->key_sizes[i]);
10782 				priv->ieee->sec.flags |= (1 << i);
10783 			}
10784 			priv->status |= STATUS_SECURITY_UPDATED;
10785 		} else if (sec->level != SEC_LEVEL_1)
10786 			priv->ieee->sec.flags &= ~(1 << i);
10787 	}
10788 
10789 	if (sec->flags & SEC_ACTIVE_KEY) {
10790 		if (sec->active_key <= 3) {
10791 			priv->ieee->sec.active_key = sec->active_key;
10792 			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10793 		} else
10794 			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10795 		priv->status |= STATUS_SECURITY_UPDATED;
10796 	} else
10797 		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10798 
10799 	if ((sec->flags & SEC_AUTH_MODE) &&
10800 	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10801 		priv->ieee->sec.auth_mode = sec->auth_mode;
10802 		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10803 		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10804 			priv->capability |= CAP_SHARED_KEY;
10805 		else
10806 			priv->capability &= ~CAP_SHARED_KEY;
10807 		priv->status |= STATUS_SECURITY_UPDATED;
10808 	}
10809 
10810 	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10811 		priv->ieee->sec.flags |= SEC_ENABLED;
10812 		priv->ieee->sec.enabled = sec->enabled;
10813 		priv->status |= STATUS_SECURITY_UPDATED;
10814 		if (sec->enabled)
10815 			priv->capability |= CAP_PRIVACY_ON;
10816 		else
10817 			priv->capability &= ~CAP_PRIVACY_ON;
10818 	}
10819 
10820 	if (sec->flags & SEC_ENCRYPT)
10821 		priv->ieee->sec.encrypt = sec->encrypt;
10822 
10823 	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10824 		priv->ieee->sec.level = sec->level;
10825 		priv->ieee->sec.flags |= SEC_LEVEL;
10826 		priv->status |= STATUS_SECURITY_UPDATED;
10827 	}
10828 
10829 	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10830 		ipw_set_hwcrypto_keys(priv);
10831 
10832 	/* To match current functionality of ipw2100 (which works well w/
10833 	 * various supplicants, we don't force a disassociate if the
10834 	 * privacy capability changes ... */
10835 #if 0
10836 	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10837 	    (((priv->assoc_request.capability &
10838 	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10839 	     (!(priv->assoc_request.capability &
10840 		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10841 		IPW_DEBUG_ASSOC("Disassociating due to capability "
10842 				"change.\n");
10843 		ipw_disassociate(priv);
10844 	}
10845 #endif
10846 }
10847 
init_supported_rates(struct ipw_priv * priv,struct ipw_supported_rates * rates)10848 static int init_supported_rates(struct ipw_priv *priv,
10849 				struct ipw_supported_rates *rates)
10850 {
10851 	/* TODO: Mask out rates based on priv->rates_mask */
10852 
10853 	memset(rates, 0, sizeof(*rates));
10854 	/* configure supported rates */
10855 	switch (priv->ieee->freq_band) {
10856 	case IEEE80211_52GHZ_BAND:
10857 		rates->ieee_mode = IPW_A_MODE;
10858 		rates->purpose = IPW_RATE_CAPABILITIES;
10859 		ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10860 					IEEE80211_OFDM_DEFAULT_RATES_MASK);
10861 		break;
10862 
10863 	default:		/* Mixed or 2.4Ghz */
10864 		rates->ieee_mode = IPW_G_MODE;
10865 		rates->purpose = IPW_RATE_CAPABILITIES;
10866 		ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10867 				       IEEE80211_CCK_DEFAULT_RATES_MASK);
10868 		if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10869 			ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10870 						IEEE80211_OFDM_DEFAULT_RATES_MASK);
10871 		}
10872 		break;
10873 	}
10874 
10875 	return 0;
10876 }
10877 
ipw_config(struct ipw_priv * priv)10878 static int ipw_config(struct ipw_priv *priv)
10879 {
10880 	/* This is only called from ipw_up, which resets/reloads the firmware
10881 	   so, we don't need to first disable the card before we configure
10882 	   it */
10883 	if (ipw_set_tx_power(priv))
10884 		goto error;
10885 
10886 	/* initialize adapter address */
10887 	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10888 		goto error;
10889 
10890 	/* set basic system config settings */
10891 	init_sys_config(&priv->sys_config);
10892 
10893 	/* Support Bluetooth if we have BT h/w on board, and user wants to.
10894 	 * Does not support BT priority yet (don't abort or defer our Tx) */
10895 	if (bt_coexist) {
10896 		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10897 
10898 		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10899 			priv->sys_config.bt_coexistence
10900 			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10901 		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10902 			priv->sys_config.bt_coexistence
10903 			    |= CFG_BT_COEXISTENCE_OOB;
10904 	}
10905 
10906 #ifdef CONFIG_IPW2200_PROMISCUOUS
10907 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10908 		priv->sys_config.accept_all_data_frames = 1;
10909 		priv->sys_config.accept_non_directed_frames = 1;
10910 		priv->sys_config.accept_all_mgmt_bcpr = 1;
10911 		priv->sys_config.accept_all_mgmt_frames = 1;
10912 	}
10913 #endif
10914 
10915 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10916 		priv->sys_config.answer_broadcast_ssid_probe = 1;
10917 	else
10918 		priv->sys_config.answer_broadcast_ssid_probe = 0;
10919 
10920 	if (ipw_send_system_config(priv))
10921 		goto error;
10922 
10923 	init_supported_rates(priv, &priv->rates);
10924 	if (ipw_send_supported_rates(priv, &priv->rates))
10925 		goto error;
10926 
10927 	/* Set request-to-send threshold */
10928 	if (priv->rts_threshold) {
10929 		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10930 			goto error;
10931 	}
10932 #ifdef CONFIG_IPW2200_QOS
10933 	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10934 	ipw_qos_activate(priv, NULL);
10935 #endif				/* CONFIG_IPW2200_QOS */
10936 
10937 	if (ipw_set_random_seed(priv))
10938 		goto error;
10939 
10940 	/* final state transition to the RUN state */
10941 	if (ipw_send_host_complete(priv))
10942 		goto error;
10943 
10944 	priv->status |= STATUS_INIT;
10945 
10946 	ipw_led_init(priv);
10947 	ipw_led_radio_on(priv);
10948 	priv->notif_missed_beacons = 0;
10949 
10950 	/* Set hardware WEP key if it is configured. */
10951 	if ((priv->capability & CAP_PRIVACY_ON) &&
10952 	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
10953 	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10954 		ipw_set_hwcrypto_keys(priv);
10955 
10956 	return 0;
10957 
10958       error:
10959 	return -EIO;
10960 }
10961 
10962 /*
10963  * NOTE:
10964  *
10965  * These tables have been tested in conjunction with the
10966  * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10967  *
10968  * Altering this values, using it on other hardware, or in geographies
10969  * not intended for resale of the above mentioned Intel adapters has
10970  * not been tested.
10971  *
10972  * Remember to update the table in README.ipw2200 when changing this
10973  * table.
10974  *
10975  */
10976 static const struct ieee80211_geo ipw_geos[] = {
10977 	{			/* Restricted */
10978 	 "---",
10979 	 .bg_channels = 11,
10980 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10981 		{2427, 4}, {2432, 5}, {2437, 6},
10982 		{2442, 7}, {2447, 8}, {2452, 9},
10983 		{2457, 10}, {2462, 11}},
10984 	 },
10985 
10986 	{			/* Custom US/Canada */
10987 	 "ZZF",
10988 	 .bg_channels = 11,
10989 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10990 		{2427, 4}, {2432, 5}, {2437, 6},
10991 		{2442, 7}, {2447, 8}, {2452, 9},
10992 		{2457, 10}, {2462, 11}},
10993 	 .a_channels = 8,
10994 	 .a = {{5180, 36},
10995 	       {5200, 40},
10996 	       {5220, 44},
10997 	       {5240, 48},
10998 	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10999 	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11000 	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11001 	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
11002 	 },
11003 
11004 	{			/* Rest of World */
11005 	 "ZZD",
11006 	 .bg_channels = 13,
11007 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11008 		{2427, 4}, {2432, 5}, {2437, 6},
11009 		{2442, 7}, {2447, 8}, {2452, 9},
11010 		{2457, 10}, {2462, 11}, {2467, 12},
11011 		{2472, 13}},
11012 	 },
11013 
11014 	{			/* Custom USA & Europe & High */
11015 	 "ZZA",
11016 	 .bg_channels = 11,
11017 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11018 		{2427, 4}, {2432, 5}, {2437, 6},
11019 		{2442, 7}, {2447, 8}, {2452, 9},
11020 		{2457, 10}, {2462, 11}},
11021 	 .a_channels = 13,
11022 	 .a = {{5180, 36},
11023 	       {5200, 40},
11024 	       {5220, 44},
11025 	       {5240, 48},
11026 	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11027 	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11028 	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11029 	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11030 	       {5745, 149},
11031 	       {5765, 153},
11032 	       {5785, 157},
11033 	       {5805, 161},
11034 	       {5825, 165}},
11035 	 },
11036 
11037 	{			/* Custom NA & Europe */
11038 	 "ZZB",
11039 	 .bg_channels = 11,
11040 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11041 		{2427, 4}, {2432, 5}, {2437, 6},
11042 		{2442, 7}, {2447, 8}, {2452, 9},
11043 		{2457, 10}, {2462, 11}},
11044 	 .a_channels = 13,
11045 	 .a = {{5180, 36},
11046 	       {5200, 40},
11047 	       {5220, 44},
11048 	       {5240, 48},
11049 	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11050 	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11051 	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11052 	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11053 	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11054 	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11055 	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11056 	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11057 	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11058 	 },
11059 
11060 	{			/* Custom Japan */
11061 	 "ZZC",
11062 	 .bg_channels = 11,
11063 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11064 		{2427, 4}, {2432, 5}, {2437, 6},
11065 		{2442, 7}, {2447, 8}, {2452, 9},
11066 		{2457, 10}, {2462, 11}},
11067 	 .a_channels = 4,
11068 	 .a = {{5170, 34}, {5190, 38},
11069 	       {5210, 42}, {5230, 46}},
11070 	 },
11071 
11072 	{			/* Custom */
11073 	 "ZZM",
11074 	 .bg_channels = 11,
11075 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11076 		{2427, 4}, {2432, 5}, {2437, 6},
11077 		{2442, 7}, {2447, 8}, {2452, 9},
11078 		{2457, 10}, {2462, 11}},
11079 	 },
11080 
11081 	{			/* Europe */
11082 	 "ZZE",
11083 	 .bg_channels = 13,
11084 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11085 		{2427, 4}, {2432, 5}, {2437, 6},
11086 		{2442, 7}, {2447, 8}, {2452, 9},
11087 		{2457, 10}, {2462, 11}, {2467, 12},
11088 		{2472, 13}},
11089 	 .a_channels = 19,
11090 	 .a = {{5180, 36},
11091 	       {5200, 40},
11092 	       {5220, 44},
11093 	       {5240, 48},
11094 	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11095 	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11096 	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11097 	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11098 	       {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11099 	       {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11100 	       {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11101 	       {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11102 	       {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11103 	       {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11104 	       {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11105 	       {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11106 	       {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11107 	       {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11108 	       {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11109 	 },
11110 
11111 	{			/* Custom Japan */
11112 	 "ZZJ",
11113 	 .bg_channels = 14,
11114 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11115 		{2427, 4}, {2432, 5}, {2437, 6},
11116 		{2442, 7}, {2447, 8}, {2452, 9},
11117 		{2457, 10}, {2462, 11}, {2467, 12},
11118 		{2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11119 	 .a_channels = 4,
11120 	 .a = {{5170, 34}, {5190, 38},
11121 	       {5210, 42}, {5230, 46}},
11122 	 },
11123 
11124 	{			/* Rest of World */
11125 	 "ZZR",
11126 	 .bg_channels = 14,
11127 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11128 		{2427, 4}, {2432, 5}, {2437, 6},
11129 		{2442, 7}, {2447, 8}, {2452, 9},
11130 		{2457, 10}, {2462, 11}, {2467, 12},
11131 		{2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11132 			     IEEE80211_CH_PASSIVE_ONLY}},
11133 	 },
11134 
11135 	{			/* High Band */
11136 	 "ZZH",
11137 	 .bg_channels = 13,
11138 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11139 		{2427, 4}, {2432, 5}, {2437, 6},
11140 		{2442, 7}, {2447, 8}, {2452, 9},
11141 		{2457, 10}, {2462, 11},
11142 		{2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11143 		{2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11144 	 .a_channels = 4,
11145 	 .a = {{5745, 149}, {5765, 153},
11146 	       {5785, 157}, {5805, 161}},
11147 	 },
11148 
11149 	{			/* Custom Europe */
11150 	 "ZZG",
11151 	 .bg_channels = 13,
11152 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11153 		{2427, 4}, {2432, 5}, {2437, 6},
11154 		{2442, 7}, {2447, 8}, {2452, 9},
11155 		{2457, 10}, {2462, 11},
11156 		{2467, 12}, {2472, 13}},
11157 	 .a_channels = 4,
11158 	 .a = {{5180, 36}, {5200, 40},
11159 	       {5220, 44}, {5240, 48}},
11160 	 },
11161 
11162 	{			/* Europe */
11163 	 "ZZK",
11164 	 .bg_channels = 13,
11165 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11166 		{2427, 4}, {2432, 5}, {2437, 6},
11167 		{2442, 7}, {2447, 8}, {2452, 9},
11168 		{2457, 10}, {2462, 11},
11169 		{2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11170 		{2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11171 	 .a_channels = 24,
11172 	 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11173 	       {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11174 	       {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11175 	       {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11176 	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11177 	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11178 	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11179 	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11180 	       {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11181 	       {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11182 	       {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11183 	       {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11184 	       {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11185 	       {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11186 	       {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11187 	       {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11188 	       {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11189 	       {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11190 	       {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11191 	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11192 	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11193 	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11194 	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11195 	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11196 	 },
11197 
11198 	{			/* Europe */
11199 	 "ZZL",
11200 	 .bg_channels = 11,
11201 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11202 		{2427, 4}, {2432, 5}, {2437, 6},
11203 		{2442, 7}, {2447, 8}, {2452, 9},
11204 		{2457, 10}, {2462, 11}},
11205 	 .a_channels = 13,
11206 	 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11207 	       {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11208 	       {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11209 	       {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11210 	       {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11211 	       {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11212 	       {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11213 	       {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11214 	       {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11215 	       {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11216 	       {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11217 	       {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11218 	       {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11219 	 }
11220 };
11221 
11222 #define MAX_HW_RESTARTS 5
ipw_up(struct ipw_priv * priv)11223 static int ipw_up(struct ipw_priv *priv)
11224 {
11225 	int rc, i, j;
11226 
11227 	if (priv->status & STATUS_EXIT_PENDING)
11228 		return -EIO;
11229 
11230 	if (cmdlog && !priv->cmdlog) {
11231 		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11232 				       GFP_KERNEL);
11233 		if (priv->cmdlog == NULL) {
11234 			IPW_ERROR("Error allocating %d command log entries.\n",
11235 				  cmdlog);
11236 			return -ENOMEM;
11237 		} else {
11238 			priv->cmdlog_len = cmdlog;
11239 		}
11240 	}
11241 
11242 	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11243 		/* Load the microcode, firmware, and eeprom.
11244 		 * Also start the clocks. */
11245 		rc = ipw_load(priv);
11246 		if (rc) {
11247 			IPW_ERROR("Unable to load firmware: %d\n", rc);
11248 			return rc;
11249 		}
11250 
11251 		ipw_init_ordinals(priv);
11252 		if (!(priv->config & CFG_CUSTOM_MAC))
11253 			eeprom_parse_mac(priv, priv->mac_addr);
11254 		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11255 
11256 		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11257 			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11258 				    ipw_geos[j].name, 3))
11259 				break;
11260 		}
11261 		if (j == ARRAY_SIZE(ipw_geos)) {
11262 			IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11263 				    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11264 				    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11265 				    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11266 			j = 0;
11267 		}
11268 		if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11269 			IPW_WARNING("Could not set geography.");
11270 			return 0;
11271 		}
11272 
11273 		if (priv->status & STATUS_RF_KILL_SW) {
11274 			IPW_WARNING("Radio disabled by module parameter.\n");
11275 			return 0;
11276 		} else if (rf_kill_active(priv)) {
11277 			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11278 				    "Kill switch must be turned off for "
11279 				    "wireless networking to work.\n");
11280 			queue_delayed_work(priv->workqueue, &priv->rf_kill,
11281 					   2 * HZ);
11282 			return 0;
11283 		}
11284 
11285 		rc = ipw_config(priv);
11286 		if (!rc) {
11287 			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11288 
11289 			/* If configure to try and auto-associate, kick
11290 			 * off a scan. */
11291 			queue_delayed_work(priv->workqueue,
11292 					   &priv->request_scan, 0);
11293 
11294 			return 0;
11295 		}
11296 
11297 		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11298 		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11299 			       i, MAX_HW_RESTARTS);
11300 
11301 		/* We had an error bringing up the hardware, so take it
11302 		 * all the way back down so we can try again */
11303 		ipw_down(priv);
11304 	}
11305 
11306 	/* tried to restart and config the device for as long as our
11307 	 * patience could withstand */
11308 	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11309 
11310 	return -EIO;
11311 }
11312 
ipw_bg_up(struct work_struct * work)11313 static void ipw_bg_up(struct work_struct *work)
11314 {
11315 	struct ipw_priv *priv =
11316 		container_of(work, struct ipw_priv, up);
11317 	mutex_lock(&priv->mutex);
11318 	ipw_up(priv);
11319 	mutex_unlock(&priv->mutex);
11320 }
11321 
ipw_deinit(struct ipw_priv * priv)11322 static void ipw_deinit(struct ipw_priv *priv)
11323 {
11324 	int i;
11325 
11326 	if (priv->status & STATUS_SCANNING) {
11327 		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11328 		ipw_abort_scan(priv);
11329 	}
11330 
11331 	if (priv->status & STATUS_ASSOCIATED) {
11332 		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11333 		ipw_disassociate(priv);
11334 	}
11335 
11336 	ipw_led_shutdown(priv);
11337 
11338 	/* Wait up to 1s for status to change to not scanning and not
11339 	 * associated (disassociation can take a while for a ful 802.11
11340 	 * exchange */
11341 	for (i = 1000; i && (priv->status &
11342 			     (STATUS_DISASSOCIATING |
11343 			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11344 		udelay(10);
11345 
11346 	if (priv->status & (STATUS_DISASSOCIATING |
11347 			    STATUS_ASSOCIATED | STATUS_SCANNING))
11348 		IPW_DEBUG_INFO("Still associated or scanning...\n");
11349 	else
11350 		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11351 
11352 	/* Attempt to disable the card */
11353 	ipw_send_card_disable(priv, 0);
11354 
11355 	priv->status &= ~STATUS_INIT;
11356 }
11357 
ipw_down(struct ipw_priv * priv)11358 static void ipw_down(struct ipw_priv *priv)
11359 {
11360 	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11361 
11362 	priv->status |= STATUS_EXIT_PENDING;
11363 
11364 	if (ipw_is_init(priv))
11365 		ipw_deinit(priv);
11366 
11367 	/* Wipe out the EXIT_PENDING status bit if we are not actually
11368 	 * exiting the module */
11369 	if (!exit_pending)
11370 		priv->status &= ~STATUS_EXIT_PENDING;
11371 
11372 	/* tell the device to stop sending interrupts */
11373 	ipw_disable_interrupts(priv);
11374 
11375 	/* Clear all bits but the RF Kill */
11376 	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11377 	netif_carrier_off(priv->net_dev);
11378 
11379 	ipw_stop_nic(priv);
11380 
11381 	ipw_led_radio_off(priv);
11382 }
11383 
ipw_bg_down(struct work_struct * work)11384 static void ipw_bg_down(struct work_struct *work)
11385 {
11386 	struct ipw_priv *priv =
11387 		container_of(work, struct ipw_priv, down);
11388 	mutex_lock(&priv->mutex);
11389 	ipw_down(priv);
11390 	mutex_unlock(&priv->mutex);
11391 }
11392 
11393 /* Called by register_netdev() */
ipw_net_init(struct net_device * dev)11394 static int ipw_net_init(struct net_device *dev)
11395 {
11396 	struct ipw_priv *priv = ieee80211_priv(dev);
11397 	mutex_lock(&priv->mutex);
11398 
11399 	if (ipw_up(priv)) {
11400 		mutex_unlock(&priv->mutex);
11401 		return -EIO;
11402 	}
11403 
11404 	mutex_unlock(&priv->mutex);
11405 	return 0;
11406 }
11407 
11408 /* PCI driver stuff */
11409 static struct pci_device_id card_ids[] = {
11410 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11411 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11412 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11413 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11414 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11415 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11416 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11417 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11418 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11419 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11420 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11421 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11422 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11423 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11424 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11425 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11426 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11427 	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11428 	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
11429 	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
11430 	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
11431 	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
11432 
11433 	/* required last entry */
11434 	{0,}
11435 };
11436 
11437 MODULE_DEVICE_TABLE(pci, card_ids);
11438 
11439 static struct attribute *ipw_sysfs_entries[] = {
11440 	&dev_attr_rf_kill.attr,
11441 	&dev_attr_direct_dword.attr,
11442 	&dev_attr_indirect_byte.attr,
11443 	&dev_attr_indirect_dword.attr,
11444 	&dev_attr_mem_gpio_reg.attr,
11445 	&dev_attr_command_event_reg.attr,
11446 	&dev_attr_nic_type.attr,
11447 	&dev_attr_status.attr,
11448 	&dev_attr_cfg.attr,
11449 	&dev_attr_error.attr,
11450 	&dev_attr_event_log.attr,
11451 	&dev_attr_cmd_log.attr,
11452 	&dev_attr_eeprom_delay.attr,
11453 	&dev_attr_ucode_version.attr,
11454 	&dev_attr_rtc.attr,
11455 	&dev_attr_scan_age.attr,
11456 	&dev_attr_led.attr,
11457 	&dev_attr_speed_scan.attr,
11458 	&dev_attr_net_stats.attr,
11459 	&dev_attr_channels.attr,
11460 #ifdef CONFIG_IPW2200_PROMISCUOUS
11461 	&dev_attr_rtap_iface.attr,
11462 	&dev_attr_rtap_filter.attr,
11463 #endif
11464 	NULL
11465 };
11466 
11467 static struct attribute_group ipw_attribute_group = {
11468 	.name = NULL,		/* put in device directory */
11469 	.attrs = ipw_sysfs_entries,
11470 };
11471 
11472 #ifdef CONFIG_IPW2200_PROMISCUOUS
ipw_prom_open(struct net_device * dev)11473 static int ipw_prom_open(struct net_device *dev)
11474 {
11475 	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11476 	struct ipw_priv *priv = prom_priv->priv;
11477 
11478 	IPW_DEBUG_INFO("prom dev->open\n");
11479 	netif_carrier_off(dev);
11480 
11481 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11482 		priv->sys_config.accept_all_data_frames = 1;
11483 		priv->sys_config.accept_non_directed_frames = 1;
11484 		priv->sys_config.accept_all_mgmt_bcpr = 1;
11485 		priv->sys_config.accept_all_mgmt_frames = 1;
11486 
11487 		ipw_send_system_config(priv);
11488 	}
11489 
11490 	return 0;
11491 }
11492 
ipw_prom_stop(struct net_device * dev)11493 static int ipw_prom_stop(struct net_device *dev)
11494 {
11495 	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11496 	struct ipw_priv *priv = prom_priv->priv;
11497 
11498 	IPW_DEBUG_INFO("prom dev->stop\n");
11499 
11500 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11501 		priv->sys_config.accept_all_data_frames = 0;
11502 		priv->sys_config.accept_non_directed_frames = 0;
11503 		priv->sys_config.accept_all_mgmt_bcpr = 0;
11504 		priv->sys_config.accept_all_mgmt_frames = 0;
11505 
11506 		ipw_send_system_config(priv);
11507 	}
11508 
11509 	return 0;
11510 }
11511 
ipw_prom_hard_start_xmit(struct sk_buff * skb,struct net_device * dev)11512 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11513 {
11514 	IPW_DEBUG_INFO("prom dev->xmit\n");
11515 	return -EOPNOTSUPP;
11516 }
11517 
ipw_prom_get_stats(struct net_device * dev)11518 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11519 {
11520 	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11521 	return &prom_priv->ieee->stats;
11522 }
11523 
ipw_prom_alloc(struct ipw_priv * priv)11524 static int ipw_prom_alloc(struct ipw_priv *priv)
11525 {
11526 	int rc = 0;
11527 
11528 	if (priv->prom_net_dev)
11529 		return -EPERM;
11530 
11531 	priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11532 	if (priv->prom_net_dev == NULL)
11533 		return -ENOMEM;
11534 
11535 	priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11536 	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11537 	priv->prom_priv->priv = priv;
11538 
11539 	strcpy(priv->prom_net_dev->name, "rtap%d");
11540 	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11541 
11542 	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11543 	priv->prom_net_dev->open = ipw_prom_open;
11544 	priv->prom_net_dev->stop = ipw_prom_stop;
11545 	priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11546 	priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11547 
11548 	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11549 	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11550 
11551 	rc = register_netdev(priv->prom_net_dev);
11552 	if (rc) {
11553 		free_ieee80211(priv->prom_net_dev);
11554 		priv->prom_net_dev = NULL;
11555 		return rc;
11556 	}
11557 
11558 	return 0;
11559 }
11560 
ipw_prom_free(struct ipw_priv * priv)11561 static void ipw_prom_free(struct ipw_priv *priv)
11562 {
11563 	if (!priv->prom_net_dev)
11564 		return;
11565 
11566 	unregister_netdev(priv->prom_net_dev);
11567 	free_ieee80211(priv->prom_net_dev);
11568 
11569 	priv->prom_net_dev = NULL;
11570 }
11571 
11572 #endif
11573 
11574 
ipw_pci_probe(struct pci_dev * pdev,const struct pci_device_id * ent)11575 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11576 				   const struct pci_device_id *ent)
11577 {
11578 	int err = 0;
11579 	struct net_device *net_dev;
11580 	void __iomem *base;
11581 	u32 length, val;
11582 	struct ipw_priv *priv;
11583 	int i;
11584 
11585 	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11586 	if (net_dev == NULL) {
11587 		err = -ENOMEM;
11588 		goto out;
11589 	}
11590 
11591 	priv = ieee80211_priv(net_dev);
11592 	priv->ieee = netdev_priv(net_dev);
11593 
11594 	priv->net_dev = net_dev;
11595 	priv->pci_dev = pdev;
11596 	ipw_debug_level = debug;
11597 	spin_lock_init(&priv->irq_lock);
11598 	spin_lock_init(&priv->lock);
11599 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11600 		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11601 
11602 	mutex_init(&priv->mutex);
11603 	if (pci_enable_device(pdev)) {
11604 		err = -ENODEV;
11605 		goto out_free_ieee80211;
11606 	}
11607 
11608 	pci_set_master(pdev);
11609 
11610 	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11611 	if (!err)
11612 		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11613 	if (err) {
11614 		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11615 		goto out_pci_disable_device;
11616 	}
11617 
11618 	pci_set_drvdata(pdev, priv);
11619 
11620 	err = pci_request_regions(pdev, DRV_NAME);
11621 	if (err)
11622 		goto out_pci_disable_device;
11623 
11624 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11625 	 * PCI Tx retries from interfering with C3 CPU state */
11626 	pci_read_config_dword(pdev, 0x40, &val);
11627 	if ((val & 0x0000ff00) != 0)
11628 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11629 
11630 	length = pci_resource_len(pdev, 0);
11631 	priv->hw_len = length;
11632 
11633 	base = pci_ioremap_bar(pdev, 0);
11634 	if (!base) {
11635 		err = -ENODEV;
11636 		goto out_pci_release_regions;
11637 	}
11638 
11639 	priv->hw_base = base;
11640 	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11641 	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11642 
11643 	err = ipw_setup_deferred_work(priv);
11644 	if (err) {
11645 		IPW_ERROR("Unable to setup deferred work\n");
11646 		goto out_iounmap;
11647 	}
11648 
11649 	ipw_sw_reset(priv, 1);
11650 
11651 	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11652 	if (err) {
11653 		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11654 		goto out_destroy_workqueue;
11655 	}
11656 
11657 	SET_NETDEV_DEV(net_dev, &pdev->dev);
11658 
11659 	mutex_lock(&priv->mutex);
11660 
11661 	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11662 	priv->ieee->set_security = shim__set_security;
11663 	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11664 
11665 #ifdef CONFIG_IPW2200_QOS
11666 	priv->ieee->is_qos_active = ipw_is_qos_active;
11667 	priv->ieee->handle_probe_response = ipw_handle_beacon;
11668 	priv->ieee->handle_beacon = ipw_handle_probe_response;
11669 	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11670 #endif				/* CONFIG_IPW2200_QOS */
11671 
11672 	priv->ieee->perfect_rssi = -20;
11673 	priv->ieee->worst_rssi = -85;
11674 
11675 	net_dev->open = ipw_net_open;
11676 	net_dev->stop = ipw_net_stop;
11677 	net_dev->init = ipw_net_init;
11678 	net_dev->get_stats = ipw_net_get_stats;
11679 	net_dev->set_multicast_list = ipw_net_set_multicast_list;
11680 	net_dev->set_mac_address = ipw_net_set_mac_address;
11681 	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11682 	net_dev->wireless_data = &priv->wireless_data;
11683 	net_dev->wireless_handlers = &ipw_wx_handler_def;
11684 	net_dev->ethtool_ops = &ipw_ethtool_ops;
11685 	net_dev->irq = pdev->irq;
11686 	net_dev->base_addr = (unsigned long)priv->hw_base;
11687 	net_dev->mem_start = pci_resource_start(pdev, 0);
11688 	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11689 
11690 	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11691 	if (err) {
11692 		IPW_ERROR("failed to create sysfs device attributes\n");
11693 		mutex_unlock(&priv->mutex);
11694 		goto out_release_irq;
11695 	}
11696 
11697 	mutex_unlock(&priv->mutex);
11698 	err = register_netdev(net_dev);
11699 	if (err) {
11700 		IPW_ERROR("failed to register network device\n");
11701 		goto out_remove_sysfs;
11702 	}
11703 
11704 #ifdef CONFIG_IPW2200_PROMISCUOUS
11705 	if (rtap_iface) {
11706 	        err = ipw_prom_alloc(priv);
11707 		if (err) {
11708 			IPW_ERROR("Failed to register promiscuous network "
11709 				  "device (error %d).\n", err);
11710 			unregister_netdev(priv->net_dev);
11711 			goto out_remove_sysfs;
11712 		}
11713 	}
11714 #endif
11715 
11716 	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11717 	       "channels, %d 802.11a channels)\n",
11718 	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11719 	       priv->ieee->geo.a_channels);
11720 
11721 	return 0;
11722 
11723       out_remove_sysfs:
11724 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11725       out_release_irq:
11726 	free_irq(pdev->irq, priv);
11727       out_destroy_workqueue:
11728 	destroy_workqueue(priv->workqueue);
11729 	priv->workqueue = NULL;
11730       out_iounmap:
11731 	iounmap(priv->hw_base);
11732       out_pci_release_regions:
11733 	pci_release_regions(pdev);
11734       out_pci_disable_device:
11735 	pci_disable_device(pdev);
11736 	pci_set_drvdata(pdev, NULL);
11737       out_free_ieee80211:
11738 	free_ieee80211(priv->net_dev);
11739       out:
11740 	return err;
11741 }
11742 
ipw_pci_remove(struct pci_dev * pdev)11743 static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11744 {
11745 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11746 	struct list_head *p, *q;
11747 	int i;
11748 
11749 	if (!priv)
11750 		return;
11751 
11752 	mutex_lock(&priv->mutex);
11753 
11754 	priv->status |= STATUS_EXIT_PENDING;
11755 	ipw_down(priv);
11756 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11757 
11758 	mutex_unlock(&priv->mutex);
11759 
11760 	unregister_netdev(priv->net_dev);
11761 
11762 	if (priv->rxq) {
11763 		ipw_rx_queue_free(priv, priv->rxq);
11764 		priv->rxq = NULL;
11765 	}
11766 	ipw_tx_queue_free(priv);
11767 
11768 	if (priv->cmdlog) {
11769 		kfree(priv->cmdlog);
11770 		priv->cmdlog = NULL;
11771 	}
11772 	/* ipw_down will ensure that there is no more pending work
11773 	 * in the workqueue's, so we can safely remove them now. */
11774 	cancel_delayed_work(&priv->adhoc_check);
11775 	cancel_delayed_work(&priv->gather_stats);
11776 	cancel_delayed_work(&priv->request_scan);
11777 	cancel_delayed_work(&priv->request_direct_scan);
11778 	cancel_delayed_work(&priv->request_passive_scan);
11779 	cancel_delayed_work(&priv->scan_event);
11780 	cancel_delayed_work(&priv->rf_kill);
11781 	cancel_delayed_work(&priv->scan_check);
11782 	destroy_workqueue(priv->workqueue);
11783 	priv->workqueue = NULL;
11784 
11785 	/* Free MAC hash list for ADHOC */
11786 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11787 		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11788 			list_del(p);
11789 			kfree(list_entry(p, struct ipw_ibss_seq, list));
11790 		}
11791 	}
11792 
11793 	kfree(priv->error);
11794 	priv->error = NULL;
11795 
11796 #ifdef CONFIG_IPW2200_PROMISCUOUS
11797 	ipw_prom_free(priv);
11798 #endif
11799 
11800 	free_irq(pdev->irq, priv);
11801 	iounmap(priv->hw_base);
11802 	pci_release_regions(pdev);
11803 	pci_disable_device(pdev);
11804 	pci_set_drvdata(pdev, NULL);
11805 	free_ieee80211(priv->net_dev);
11806 	free_firmware();
11807 }
11808 
11809 #ifdef CONFIG_PM
ipw_pci_suspend(struct pci_dev * pdev,pm_message_t state)11810 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11811 {
11812 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11813 	struct net_device *dev = priv->net_dev;
11814 
11815 	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11816 
11817 	/* Take down the device; powers it off, etc. */
11818 	ipw_down(priv);
11819 
11820 	/* Remove the PRESENT state of the device */
11821 	netif_device_detach(dev);
11822 
11823 	pci_save_state(pdev);
11824 	pci_disable_device(pdev);
11825 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
11826 
11827 	return 0;
11828 }
11829 
ipw_pci_resume(struct pci_dev * pdev)11830 static int ipw_pci_resume(struct pci_dev *pdev)
11831 {
11832 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11833 	struct net_device *dev = priv->net_dev;
11834 	int err;
11835 	u32 val;
11836 
11837 	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11838 
11839 	pci_set_power_state(pdev, PCI_D0);
11840 	err = pci_enable_device(pdev);
11841 	if (err) {
11842 		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11843 		       dev->name);
11844 		return err;
11845 	}
11846 	pci_restore_state(pdev);
11847 
11848 	/*
11849 	 * Suspend/Resume resets the PCI configuration space, so we have to
11850 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11851 	 * from interfering with C3 CPU state. pci_restore_state won't help
11852 	 * here since it only restores the first 64 bytes pci config header.
11853 	 */
11854 	pci_read_config_dword(pdev, 0x40, &val);
11855 	if ((val & 0x0000ff00) != 0)
11856 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11857 
11858 	/* Set the device back into the PRESENT state; this will also wake
11859 	 * the queue of needed */
11860 	netif_device_attach(dev);
11861 
11862 	/* Bring the device back up */
11863 	queue_work(priv->workqueue, &priv->up);
11864 
11865 	return 0;
11866 }
11867 #endif
11868 
ipw_pci_shutdown(struct pci_dev * pdev)11869 static void ipw_pci_shutdown(struct pci_dev *pdev)
11870 {
11871 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11872 
11873 	/* Take down the device; powers it off, etc. */
11874 	ipw_down(priv);
11875 
11876 	pci_disable_device(pdev);
11877 }
11878 
11879 /* driver initialization stuff */
11880 static struct pci_driver ipw_driver = {
11881 	.name = DRV_NAME,
11882 	.id_table = card_ids,
11883 	.probe = ipw_pci_probe,
11884 	.remove = __devexit_p(ipw_pci_remove),
11885 #ifdef CONFIG_PM
11886 	.suspend = ipw_pci_suspend,
11887 	.resume = ipw_pci_resume,
11888 #endif
11889 	.shutdown = ipw_pci_shutdown,
11890 };
11891 
ipw_init(void)11892 static int __init ipw_init(void)
11893 {
11894 	int ret;
11895 
11896 	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11897 	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11898 
11899 	ret = pci_register_driver(&ipw_driver);
11900 	if (ret) {
11901 		IPW_ERROR("Unable to initialize PCI module\n");
11902 		return ret;
11903 	}
11904 
11905 	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11906 	if (ret) {
11907 		IPW_ERROR("Unable to create driver sysfs file\n");
11908 		pci_unregister_driver(&ipw_driver);
11909 		return ret;
11910 	}
11911 
11912 	return ret;
11913 }
11914 
ipw_exit(void)11915 static void __exit ipw_exit(void)
11916 {
11917 	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11918 	pci_unregister_driver(&ipw_driver);
11919 }
11920 
11921 module_param(disable, int, 0444);
11922 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11923 
11924 module_param(associate, int, 0444);
11925 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11926 
11927 module_param(auto_create, int, 0444);
11928 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11929 
11930 module_param(led, int, 0444);
11931 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
11932 
11933 module_param(debug, int, 0444);
11934 MODULE_PARM_DESC(debug, "debug output mask");
11935 
11936 module_param(channel, int, 0444);
11937 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11938 
11939 #ifdef CONFIG_IPW2200_PROMISCUOUS
11940 module_param(rtap_iface, int, 0444);
11941 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11942 #endif
11943 
11944 #ifdef CONFIG_IPW2200_QOS
11945 module_param(qos_enable, int, 0444);
11946 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis");
11947 
11948 module_param(qos_burst_enable, int, 0444);
11949 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11950 
11951 module_param(qos_no_ack_mask, int, 0444);
11952 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11953 
11954 module_param(burst_duration_CCK, int, 0444);
11955 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11956 
11957 module_param(burst_duration_OFDM, int, 0444);
11958 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11959 #endif				/* CONFIG_IPW2200_QOS */
11960 
11961 #ifdef CONFIG_IPW2200_MONITOR
11962 module_param(mode, int, 0444);
11963 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11964 #else
11965 module_param(mode, int, 0444);
11966 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11967 #endif
11968 
11969 module_param(bt_coexist, int, 0444);
11970 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11971 
11972 module_param(hwcrypto, int, 0444);
11973 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11974 
11975 module_param(cmdlog, int, 0444);
11976 MODULE_PARM_DESC(cmdlog,
11977 		 "allocate a ring buffer for logging firmware commands");
11978 
11979 module_param(roaming, int, 0444);
11980 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11981 
11982 module_param(antenna, int, 0444);
11983 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
11984 
11985 module_exit(ipw_exit);
11986 module_init(ipw_init);
11987