1 /******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 Intel Linux Wireless <ilw@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37 #include "ipw.h"
38
39
40 #ifndef KBUILD_EXTMOD
41 #define VK "k"
42 #else
43 #define VK
44 #endif
45
46 #ifdef CONFIG_IPW2200_DEBUG
47 #define VD "d"
48 #else
49 #define VD
50 #endif
51
52 #ifdef CONFIG_IPW2200_MONITOR
53 #define VM "m"
54 #else
55 #define VM
56 #endif
57
58 #ifdef CONFIG_IPW2200_PROMISCUOUS
59 #define VP "p"
60 #else
61 #define VP
62 #endif
63
64 #ifdef CONFIG_IPW2200_RADIOTAP
65 #define VR "r"
66 #else
67 #define VR
68 #endif
69
70 #ifdef CONFIG_IPW2200_QOS
71 #define VQ "q"
72 #else
73 #define VQ
74 #endif
75
76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
77 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
78 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
79 #define DRV_VERSION IPW2200_VERSION
80
81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82
83 MODULE_DESCRIPTION(DRV_DESCRIPTION);
84 MODULE_VERSION(DRV_VERSION);
85 MODULE_AUTHOR(DRV_COPYRIGHT);
86 MODULE_LICENSE("GPL");
87 MODULE_FIRMWARE("ipw2200-ibss.fw");
88 #ifdef CONFIG_IPW2200_MONITOR
89 MODULE_FIRMWARE("ipw2200-sniffer.fw");
90 #endif
91 MODULE_FIRMWARE("ipw2200-bss.fw");
92
93 static int cmdlog = 0;
94 static int debug = 0;
95 static int default_channel = 0;
96 static int network_mode = 0;
97
98 static u32 ipw_debug_level;
99 static int associate;
100 static int auto_create = 1;
101 static int led_support = 1;
102 static int disable = 0;
103 static int bt_coexist = 0;
104 static int hwcrypto = 0;
105 static int roaming = 1;
106 static const char ipw_modes[] = {
107 'a', 'b', 'g', '?'
108 };
109 static int antenna = CFG_SYS_ANTENNA_BOTH;
110
111 #ifdef CONFIG_IPW2200_PROMISCUOUS
112 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
113 #endif
114
115 static struct ieee80211_rate ipw2200_rates[] = {
116 { .bitrate = 10 },
117 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120 { .bitrate = 60 },
121 { .bitrate = 90 },
122 { .bitrate = 120 },
123 { .bitrate = 180 },
124 { .bitrate = 240 },
125 { .bitrate = 360 },
126 { .bitrate = 480 },
127 { .bitrate = 540 }
128 };
129
130 #define ipw2200_a_rates (ipw2200_rates + 4)
131 #define ipw2200_num_a_rates 8
132 #define ipw2200_bg_rates (ipw2200_rates + 0)
133 #define ipw2200_num_bg_rates 12
134
135 /* Ugly macro to convert literal channel numbers into their MHz equivalents.
136  * There are certainly some conditions that will break this (like feeding it '30'),
137  * but they shouldn't arise since nothing talks on channel 30. */
138 #define ieee80211chan2mhz(x) \
139 (((x) <= 14) ? \
140 (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141 ((x) + 1000) * 5)
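/* A few sample conversions as a sanity check for the macro above:
 *   channel  1 ->  1 * 5 + 2407 = 2412 MHz
 *   channel 14 ->  2484 MHz (special-cased)
 *   channel 36 -> (36 + 1000) * 5 = 5180 MHz
 */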
142
143 #ifdef CONFIG_IPW2200_QOS
144 static int qos_enable = 0;
145 static int qos_burst_enable = 0;
146 static int qos_no_ack_mask = 0;
147 static int burst_duration_CCK = 0;
148 static int burst_duration_OFDM = 0;
149
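/* Each libipw_qos_parameters initializer below supplies, for the four TX
 * queues in order: CWmin, CWmax, AIFS, ACM flag and TXOP limit (field order
 * inferred from the macro names used in the initializers). */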
150 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152 QOS_TX3_CW_MIN_OFDM},
153 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154 QOS_TX3_CW_MAX_OFDM},
155 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159 };
160
161 static struct libipw_qos_parameters def_qos_parameters_CCK = {
162 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163 QOS_TX3_CW_MIN_CCK},
164 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165 QOS_TX3_CW_MAX_CCK},
166 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169 QOS_TX3_TXOP_LIMIT_CCK}
170 };
171
172 static struct libipw_qos_parameters def_parameters_OFDM = {
173 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174 DEF_TX3_CW_MIN_OFDM},
175 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176 DEF_TX3_CW_MAX_OFDM},
177 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181 };
182
183 static struct libipw_qos_parameters def_parameters_CCK = {
184 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185 DEF_TX3_CW_MIN_CCK},
186 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187 DEF_TX3_CW_MAX_CCK},
188 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191 DEF_TX3_TXOP_LIMIT_CCK}
192 };
193
194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195
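/* Maps an 802.11e user priority (0..7) to one of the four hardware TX
 * queues; the grouping follows the usual access categories: priorities
 * 1/2 (background), 0/3 (best effort), 4/5 (video), 6/7 (voice). */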
196 static int from_priority_to_tx_queue[] = {
197 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199 };
200
201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202
203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204 *qos_param);
205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206 *qos_param);
207 #endif /* CONFIG_IPW2200_QOS */
208
209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210 static void ipw_remove_current_network(struct ipw_priv *priv);
211 static void ipw_rx(struct ipw_priv *priv);
212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213 struct clx2_tx_queue *txq, int qindex);
214 static int ipw_queue_reset(struct ipw_priv *priv);
215
216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217 int len, int sync);
218
219 static void ipw_tx_queue_free(struct ipw_priv *);
220
221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223 static void ipw_rx_queue_replenish(void *);
224 static int ipw_up(struct ipw_priv *);
225 static void ipw_bg_up(struct work_struct *work);
226 static void ipw_down(struct ipw_priv *);
227 static void ipw_bg_down(struct work_struct *work);
228 static int ipw_config(struct ipw_priv *);
229 static int init_supported_rates(struct ipw_priv *priv,
230 struct ipw_supported_rates *prates);
231 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232 static void ipw_send_wep_keys(struct ipw_priv *, int);
233
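/* snprint_line() formats a single hex-dump line into 'buf': an 8-digit hex
 * offset, up to 16 data bytes as two groups of hex values, then the same
 * bytes as printable ASCII with '.' substituted for non-printable chars. */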
234 static int snprint_line(char *buf, size_t count,
235 const u8 * data, u32 len, u32 ofs)
236 {
237 int out, i, j, l;
238 char c;
239
240 out = snprintf(buf, count, "%08X", ofs);
241
242 for (l = 0, i = 0; i < 2; i++) {
243 out += snprintf(buf + out, count - out, " ");
244 for (j = 0; j < 8 && l < len; j++, l++)
245 out += snprintf(buf + out, count - out, "%02X ",
246 data[(i * 8 + j)]);
247 for (; j < 8; j++)
248 out += snprintf(buf + out, count - out, " ");
249 }
250
251 out += snprintf(buf + out, count - out, " ");
252 for (l = 0, i = 0; i < 2; i++) {
253 out += snprintf(buf + out, count - out, " ");
254 for (j = 0; j < 8 && l < len; j++, l++) {
255 c = data[(i * 8 + j)];
256 if (!isascii(c) || !isprint(c))
257 c = '.';
258
259 out += snprintf(buf + out, count - out, "%c", c);
260 }
261
262 for (; j < 8; j++)
263 out += snprintf(buf + out, count - out, " ");
264 }
265
266 return out;
267 }
268
269 static void printk_buf(int level, const u8 * data, u32 len)
270 {
271 char line[81];
272 u32 ofs = 0;
273 if (!(ipw_debug_level & level))
274 return;
275
276 while (len) {
277 snprint_line(line, sizeof(line), &data[ofs],
278 min(len, 16U), ofs);
279 printk(KERN_DEBUG "%s\n", line);
280 ofs += 16;
281 len -= min(len, 16U);
282 }
283 }
284
285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286 {
287 size_t out = size;
288 u32 ofs = 0;
289 int total = 0;
290
291 while (size && len) {
292 out = snprint_line(output, size, &data[ofs],
293 min_t(size_t, len, 16U), ofs);
294
295 ofs += 16;
296 output += out;
297 size -= out;
298 len -= min_t(size_t, len, 16U);
299 total += out;
300 }
301 return total;
302 }
303
304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307
308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
309 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311
312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315 {
316 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317 __LINE__, (u32) (b), (u32) (c));
318 _ipw_write_reg8(a, b, c);
319 }
320
321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324 {
325 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326 __LINE__, (u32) (b), (u32) (c));
327 _ipw_write_reg16(a, b, c);
328 }
329
330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333 {
334 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335 __LINE__, (u32) (b), (u32) (c));
336 _ipw_write_reg32(a, b, c);
337 }
338
339 /* 8-bit direct write (low 4K) */
340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341 u8 val)
342 {
343 writeb(val, ipw->hw_base + ofs);
344 }
345
346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write8(ipw, ofs, val) do { \
348 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349 __LINE__, (u32)(ofs), (u32)(val)); \
350 _ipw_write8(ipw, ofs, val); \
351 } while (0)
352
353 /* 16-bit direct write (low 4K) */
354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355 u16 val)
356 {
357 writew(val, ipw->hw_base + ofs);
358 }
359
360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write16(ipw, ofs, val) do { \
362 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363 __LINE__, (u32)(ofs), (u32)(val)); \
364 _ipw_write16(ipw, ofs, val); \
365 } while (0)
366
367 /* 32-bit direct write (low 4K) */
368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369 u32 val)
370 {
371 writel(val, ipw->hw_base + ofs);
372 }
373
374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375 #define ipw_write32(ipw, ofs, val) do { \
376 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377 __LINE__, (u32)(ofs), (u32)(val)); \
378 _ipw_write32(ipw, ofs, val); \
379 } while (0)
380
381 /* 8-bit direct read (low 4K) */
382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383 {
384 return readb(ipw->hw_base + ofs);
385 }
386
387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388 #define ipw_read8(ipw, ofs) ({ \
389 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390 (u32)(ofs)); \
391 _ipw_read8(ipw, ofs); \
392 })
393
394 /* 16-bit direct read (low 4K) */
395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396 {
397 return readw(ipw->hw_base + ofs);
398 }
399
400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401 #define ipw_read16(ipw, ofs) ({ \
402 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403 (u32)(ofs)); \
404 _ipw_read16(ipw, ofs); \
405 })
406
407 /* 32-bit direct read (low 4K) */
408 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409 {
410 return readl(ipw->hw_base + ofs);
411 }
412
413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414 #define ipw_read32(ipw, ofs) ({ \
415 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416 (u32)(ofs)); \
417 _ipw_read32(ipw, ofs); \
418 })
419
420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422 #define ipw_read_indirect(a, b, c, d) ({ \
423 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424 __LINE__, (u32)(b), (u32)(d)); \
425 _ipw_read_indirect(a, b, c, d); \
426 })
427
428 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430 int num);
431 #define ipw_write_indirect(a, b, c, d) do { \
432 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433 __LINE__, (u32)(b), (u32)(d)); \
434 _ipw_write_indirect(a, b, c, d); \
435 } while (0)
436
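/* Indirect (windowed) access: the target SRAM/register address is written
 * to IPW_INDIRECT_ADDR and the data is then moved through IPW_INDIRECT_DATA;
 * IPW_AUTOINC_ADDR/IPW_AUTOINC_DATA provide the same window with the address
 * auto-incrementing after each dword, as used by the bulk helpers below. */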
437 /* 32-bit indirect write (above 4K) */
438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439 {
440 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
443 }
444
445 /* 8-bit indirect write (above 4K) */
446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447 {
448 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
449 u32 dif_len = reg - aligned_addr;
450
451 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
455
456 /* 16-bit indirect write (above 4K) */
457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458 {
459 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
460 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461
462 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465 }
466
467 /* 8-bit indirect read (above 4K) */
468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469 {
470 u32 word;
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
474 return (word >> ((reg & 0x3) * 8)) & 0xff;
475 }
476
477 /* 32-bit indirect read (above 4K) */
478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479 {
480 u32 value;
481
482 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483
484 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487 return value;
488 }
489
490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
491 /* for area above 1st 4K of SRAM/reg space */
492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493 int num)
494 {
495 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
496 u32 dif_len = addr - aligned_addr;
497 u32 i;
498
499 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500
501 if (num <= 0) {
502 return;
503 }
504
505 /* Read the first dword (or portion) byte by byte */
506 if (unlikely(dif_len)) {
507 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 /* Start reading at aligned_addr + dif_len */
509 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511 aligned_addr += 4;
512 }
513
514 /* Read all of the middle dwords as dwords, with auto-increment */
515 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518
519 /* Read the last dword (or portion) byte by byte */
520 if (unlikely(num)) {
521 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522 for (i = 0; num > 0; i++, num--)
523 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524 }
525 }
526
527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
528 /* for area above 1st 4K of SRAM/reg space */
529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530 int num)
531 {
532 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
533 u32 dif_len = addr - aligned_addr;
534 u32 i;
535
536 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537
538 if (num <= 0) {
539 return;
540 }
541
542 /* Write the first dword (or portion) byte by byte */
543 if (unlikely(dif_len)) {
544 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 /* Start writing at aligned_addr + dif_len */
546 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548 aligned_addr += 4;
549 }
550
551 /* Write all of the middle dwords as dwords, with auto-increment */
552 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555
556 /* Write the last dword (or portion) byte by byte */
557 if (unlikely(num)) {
558 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559 for (i = 0; num > 0; i++, num--, buf++)
560 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561 }
562 }
563
564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
565 /* for 1st 4K of SRAM/regs space */
566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567 int num)
568 {
569 memcpy_toio((priv->hw_base + addr), buf, num);
570 }
571
572 /* Set bit(s) in low 4K of SRAM/regs */
573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574 {
575 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576 }
577
578 /* Clear bit(s) in low 4K of SRAM/regs */
579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580 {
581 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582 }
583
584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585 {
586 if (priv->status & STATUS_INT_ENABLED)
587 return;
588 priv->status |= STATUS_INT_ENABLED;
589 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590 }
591
592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 if (!(priv->status & STATUS_INT_ENABLED))
595 return;
596 priv->status &= ~STATUS_INT_ENABLED;
597 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598 }
599
600 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601 {
602 unsigned long flags;
603
604 spin_lock_irqsave(&priv->irq_lock, flags);
605 __ipw_enable_interrupts(priv);
606 spin_unlock_irqrestore(&priv->irq_lock, flags);
607 }
608
609 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610 {
611 unsigned long flags;
612
613 spin_lock_irqsave(&priv->irq_lock, flags);
614 __ipw_disable_interrupts(priv);
615 spin_unlock_irqrestore(&priv->irq_lock, flags);
616 }
617
618 static char *ipw_error_desc(u32 val)
619 {
620 switch (val) {
621 case IPW_FW_ERROR_OK:
622 return "ERROR_OK";
623 case IPW_FW_ERROR_FAIL:
624 return "ERROR_FAIL";
625 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626 return "MEMORY_UNDERFLOW";
627 case IPW_FW_ERROR_MEMORY_OVERFLOW:
628 return "MEMORY_OVERFLOW";
629 case IPW_FW_ERROR_BAD_PARAM:
630 return "BAD_PARAM";
631 case IPW_FW_ERROR_BAD_CHECKSUM:
632 return "BAD_CHECKSUM";
633 case IPW_FW_ERROR_NMI_INTERRUPT:
634 return "NMI_INTERRUPT";
635 case IPW_FW_ERROR_BAD_DATABASE:
636 return "BAD_DATABASE";
637 case IPW_FW_ERROR_ALLOC_FAIL:
638 return "ALLOC_FAIL";
639 case IPW_FW_ERROR_DMA_UNDERRUN:
640 return "DMA_UNDERRUN";
641 case IPW_FW_ERROR_DMA_STATUS:
642 return "DMA_STATUS";
643 case IPW_FW_ERROR_DINO_ERROR:
644 return "DINO_ERROR";
645 case IPW_FW_ERROR_EEPROM_ERROR:
646 return "EEPROM_ERROR";
647 case IPW_FW_ERROR_SYSASSERT:
648 return "SYSASSERT";
649 case IPW_FW_ERROR_FATAL_ERROR:
650 return "FATAL_ERROR";
651 default:
652 return "UNKNOWN_ERROR";
653 }
654 }
655
656 static void ipw_dump_error_log(struct ipw_priv *priv,
657 struct ipw_fw_error *error)
658 {
659 u32 i;
660
661 if (!error) {
662 IPW_ERROR("Error allocating and capturing error log. "
663 "Nothing to dump.\n");
664 return;
665 }
666
667 IPW_ERROR("Start IPW Error Log Dump:\n");
668 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669 error->status, error->config);
670
671 for (i = 0; i < error->elem_len; i++)
672 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
673 ipw_error_desc(error->elem[i].desc),
674 error->elem[i].time,
675 error->elem[i].blink1,
676 error->elem[i].blink2,
677 error->elem[i].link1,
678 error->elem[i].link2, error->elem[i].data);
679 for (i = 0; i < error->log_len; i++)
680 IPW_ERROR("%i\t0x%08x\t%i\n",
681 error->log[i].time,
682 error->log[i].data, error->log[i].event);
683 }
684
685 static inline int ipw_is_init(struct ipw_priv *priv)
686 {
687 return (priv->status & STATUS_INIT) ? 1 : 0;
688 }
689
690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691 {
692 u32 addr, field_info, field_len, field_count, total_len;
693
694 IPW_DEBUG_ORD("ordinal = %i\n", ord);
695
696 if (!priv || !val || !len) {
697 IPW_DEBUG_ORD("Invalid argument\n");
698 return -EINVAL;
699 }
700
701 /* verify device ordinal tables have been initialized */
702 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703 IPW_DEBUG_ORD("Access ordinals before initialization\n");
704 return -EINVAL;
705 }
706
707 switch (IPW_ORD_TABLE_ID_MASK & ord) {
708 case IPW_ORD_TABLE_0_MASK:
709 /*
710 * TABLE 0: Direct access to a table of 32 bit values
711 *
712 * This is a very simple table with the data directly
713 * read from the table
714 */
715
716 /* remove the table id from the ordinal */
717 ord &= IPW_ORD_TABLE_VALUE_MASK;
718
719 /* boundary check */
720 if (ord > priv->table0_len) {
721 IPW_DEBUG_ORD("ordinal value (%i) longer than "
722 "max (%i)\n", ord, priv->table0_len);
723 return -EINVAL;
724 }
725
726 /* verify we have enough room to store the value */
727 if (*len < sizeof(u32)) {
728 IPW_DEBUG_ORD("ordinal buffer length too small, "
729 "need %zd\n", sizeof(u32));
730 return -EINVAL;
731 }
732
733 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734 ord, priv->table0_addr + (ord << 2));
735
736 *len = sizeof(u32);
737 ord <<= 2;
738 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739 break;
740
741 case IPW_ORD_TABLE_1_MASK:
742 /*
743 * TABLE 1: Indirect access to a table of 32 bit values
744 *
745 * This is a fairly large table of u32 values each
746 * representing starting addr for the data (which is
747 * also a u32)
748 */
749
750 /* remove the table id from the ordinal */
751 ord &= IPW_ORD_TABLE_VALUE_MASK;
752
753 /* boundary check */
754 if (ord > priv->table1_len) {
755 IPW_DEBUG_ORD("ordinal value too long\n");
756 return -EINVAL;
757 }
758
759 /* verify we have enough room to store the value */
760 if (*len < sizeof(u32)) {
761 IPW_DEBUG_ORD("ordinal buffer length too small, "
762 "need %zd\n", sizeof(u32));
763 return -EINVAL;
764 }
765
766 *((u32 *) val) =
767 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768 *len = sizeof(u32);
769 break;
770
771 case IPW_ORD_TABLE_2_MASK:
772 /*
773 * TABLE 2: Indirect access to a table of variable sized values
774 *
775 * This table consists of six values, each containing
776 * - dword containing the starting offset of the data
777 * - dword containing the length in the first 16 bits
778 * and the count in the second 16 bits
779 */
780
781 /* remove the table id from the ordinal */
782 ord &= IPW_ORD_TABLE_VALUE_MASK;
783
784 /* boundary check */
785 if (ord > priv->table2_len) {
786 IPW_DEBUG_ORD("ordinal value too long\n");
787 return -EINVAL;
788 }
789
790 /* get the address of statistic */
791 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792
793 /* get the second DW of statistics ;
794 * two 16-bit words - first is length, second is count */
795 field_info =
796 ipw_read_reg32(priv,
797 priv->table2_addr + (ord << 3) +
798 sizeof(u32));
799
800 /* get each entry length */
801 field_len = *((u16 *) & field_info);
802
803 /* get number of entries */
804 field_count = *(((u16 *) & field_info) + 1);
805
806 /* abort if not enough memory */
807 total_len = field_len * field_count;
808 if (total_len > *len) {
809 *len = total_len;
810 return -EINVAL;
811 }
812
813 *len = total_len;
814 if (!total_len)
815 return 0;
816
817 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818 "field_info = 0x%08x\n",
819 addr, total_len, field_info);
820 ipw_read_indirect(priv, addr, val, total_len);
821 break;
822
823 default:
824 IPW_DEBUG_ORD("Invalid ordinal!\n");
825 return -EINVAL;
826
827 }
828
829 return 0;
830 }
831
832 static void ipw_init_ordinals(struct ipw_priv *priv)
833 {
834 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835 priv->table0_len = ipw_read32(priv, priv->table0_addr);
836
837 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838 priv->table0_addr, priv->table0_len);
839
840 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842
843 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844 priv->table1_addr, priv->table1_len);
845
846 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
848 priv->table2_len &= 0x0000ffff; /* use first two bytes */
849
850 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851 priv->table2_addr, priv->table2_len);
852
853 }
854
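/* Best-effort note: ipw_register_toggle() below strips the standby and DMA
 * clock-gating control bits from a value read from IPW_EVENT_REG, so the LED
 * code can write the LED bits back without re-asserting those control bits
 * (an interpretation of the register layout, not documented here). */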
855 static u32 ipw_register_toggle(u32 reg)
856 {
857 reg &= ~IPW_START_STANDBY;
858 if (reg & IPW_GATE_ODMA)
859 reg &= ~IPW_GATE_ODMA;
860 if (reg & IPW_GATE_IDMA)
861 reg &= ~IPW_GATE_IDMA;
862 if (reg & IPW_GATE_ADMA)
863 reg &= ~IPW_GATE_ADMA;
864 return reg;
865 }
866
867 /*
868 * LED behavior:
869 * - On radio ON, turn on any LEDs that need to be on during start
870 * - On initialization, start unassociated blink
871 * - On association, disable unassociated blink
872 * - On disassociation, start unassociated blink
873 * - On radio OFF, turn off any LEDs started during radio on
874 *
875 */
876 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
879
880 static void ipw_led_link_on(struct ipw_priv *priv)
881 {
882 unsigned long flags;
883 u32 led;
884
885 /* If configured to not use LEDs, or nic_type is 1,
886 * then we don't toggle a LINK led */
887 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888 return;
889
890 spin_lock_irqsave(&priv->lock, flags);
891
892 if (!(priv->status & STATUS_RF_KILL_MASK) &&
893 !(priv->status & STATUS_LED_LINK_ON)) {
894 IPW_DEBUG_LED("Link LED On\n");
895 led = ipw_read_reg32(priv, IPW_EVENT_REG);
896 led |= priv->led_association_on;
897
898 led = ipw_register_toggle(led);
899
900 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901 ipw_write_reg32(priv, IPW_EVENT_REG, led);
902
903 priv->status |= STATUS_LED_LINK_ON;
904
905 /* If we aren't associated, schedule turning the LED off */
906 if (!(priv->status & STATUS_ASSOCIATED))
907 schedule_delayed_work(&priv->led_link_off,
908 LD_TIME_LINK_ON);
909 }
910
911 spin_unlock_irqrestore(&priv->lock, flags);
912 }
913
914 static void ipw_bg_led_link_on(struct work_struct *work)
915 {
916 struct ipw_priv *priv =
917 container_of(work, struct ipw_priv, led_link_on.work);
918 mutex_lock(&priv->mutex);
919 ipw_led_link_on(priv);
920 mutex_unlock(&priv->mutex);
921 }
922
923 static void ipw_led_link_off(struct ipw_priv *priv)
924 {
925 unsigned long flags;
926 u32 led;
927
928 /* If configured not to use LEDs, or nic type is 1,
929 * then we don't toggle the LINK led. */
930 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931 return;
932
933 spin_lock_irqsave(&priv->lock, flags);
934
935 if (priv->status & STATUS_LED_LINK_ON) {
936 led = ipw_read_reg32(priv, IPW_EVENT_REG);
937 led &= priv->led_association_off;
938 led = ipw_register_toggle(led);
939
940 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941 ipw_write_reg32(priv, IPW_EVENT_REG, led);
942
943 IPW_DEBUG_LED("Link LED Off\n");
944
945 priv->status &= ~STATUS_LED_LINK_ON;
946
947 /* If we aren't associated and the radio is on, schedule
948 * turning the LED on (blink while unassociated) */
949 if (!(priv->status & STATUS_RF_KILL_MASK) &&
950 !(priv->status & STATUS_ASSOCIATED))
951 schedule_delayed_work(&priv->led_link_on,
952 LD_TIME_LINK_OFF);
953
954 }
955
956 spin_unlock_irqrestore(&priv->lock, flags);
957 }
958
959 static void ipw_bg_led_link_off(struct work_struct *work)
960 {
961 struct ipw_priv *priv =
962 container_of(work, struct ipw_priv, led_link_off.work);
963 mutex_lock(&priv->mutex);
964 ipw_led_link_off(priv);
965 mutex_unlock(&priv->mutex);
966 }
967
968 static void __ipw_led_activity_on(struct ipw_priv *priv)
969 {
970 u32 led;
971
972 if (priv->config & CFG_NO_LED)
973 return;
974
975 if (priv->status & STATUS_RF_KILL_MASK)
976 return;
977
978 if (!(priv->status & STATUS_LED_ACT_ON)) {
979 led = ipw_read_reg32(priv, IPW_EVENT_REG);
980 led |= priv->led_activity_on;
981
982 led = ipw_register_toggle(led);
983
984 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985 ipw_write_reg32(priv, IPW_EVENT_REG, led);
986
987 IPW_DEBUG_LED("Activity LED On\n");
988
989 priv->status |= STATUS_LED_ACT_ON;
990
991 cancel_delayed_work(&priv->led_act_off);
992 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993 } else {
994 /* Reschedule LED off for full time period */
995 cancel_delayed_work(&priv->led_act_off);
996 schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997 }
998 }
999
1000 #if 0
1001 void ipw_led_activity_on(struct ipw_priv *priv)
1002 {
1003 unsigned long flags;
1004 spin_lock_irqsave(&priv->lock, flags);
1005 __ipw_led_activity_on(priv);
1006 spin_unlock_irqrestore(&priv->lock, flags);
1007 }
1008 #endif /* 0 */
1009
1010 static void ipw_led_activity_off(struct ipw_priv *priv)
1011 {
1012 unsigned long flags;
1013 u32 led;
1014
1015 if (priv->config & CFG_NO_LED)
1016 return;
1017
1018 spin_lock_irqsave(&priv->lock, flags);
1019
1020 if (priv->status & STATUS_LED_ACT_ON) {
1021 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022 led &= priv->led_activity_off;
1023
1024 led = ipw_register_toggle(led);
1025
1026 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028
1029 IPW_DEBUG_LED("Activity LED Off\n");
1030
1031 priv->status &= ~STATUS_LED_ACT_ON;
1032 }
1033
1034 spin_unlock_irqrestore(&priv->lock, flags);
1035 }
1036
1037 static void ipw_bg_led_activity_off(struct work_struct *work)
1038 {
1039 struct ipw_priv *priv =
1040 container_of(work, struct ipw_priv, led_act_off.work);
1041 mutex_lock(&priv->mutex);
1042 ipw_led_activity_off(priv);
1043 mutex_unlock(&priv->mutex);
1044 }
1045
1046 static void ipw_led_band_on(struct ipw_priv *priv)
1047 {
1048 unsigned long flags;
1049 u32 led;
1050
1051 /* Only nic type 1 supports mode LEDs */
1052 if (priv->config & CFG_NO_LED ||
1053 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054 return;
1055
1056 spin_lock_irqsave(&priv->lock, flags);
1057
1058 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059 if (priv->assoc_network->mode == IEEE_A) {
1060 led |= priv->led_ofdm_on;
1061 led &= priv->led_association_off;
1062 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063 } else if (priv->assoc_network->mode == IEEE_G) {
1064 led |= priv->led_ofdm_on;
1065 led |= priv->led_association_on;
1066 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067 } else {
1068 led &= priv->led_ofdm_off;
1069 led |= priv->led_association_on;
1070 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071 }
1072
1073 led = ipw_register_toggle(led);
1074
1075 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077
1078 spin_unlock_irqrestore(&priv->lock, flags);
1079 }
1080
1081 static void ipw_led_band_off(struct ipw_priv *priv)
1082 {
1083 unsigned long flags;
1084 u32 led;
1085
1086 /* Only nic type 1 supports mode LEDs */
1087 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088 return;
1089
1090 spin_lock_irqsave(&priv->lock, flags);
1091
1092 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093 led &= priv->led_ofdm_off;
1094 led &= priv->led_association_off;
1095
1096 led = ipw_register_toggle(led);
1097
1098 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100
1101 spin_unlock_irqrestore(&priv->lock, flags);
1102 }
1103
1104 static void ipw_led_radio_on(struct ipw_priv *priv)
1105 {
1106 ipw_led_link_on(priv);
1107 }
1108
1109 static void ipw_led_radio_off(struct ipw_priv *priv)
1110 {
1111 ipw_led_activity_off(priv);
1112 ipw_led_link_off(priv);
1113 }
1114
1115 static void ipw_led_link_up(struct ipw_priv *priv)
1116 {
1117 /* Set the Link Led on for all nic types */
1118 ipw_led_link_on(priv);
1119 }
1120
1121 static void ipw_led_link_down(struct ipw_priv *priv)
1122 {
1123 ipw_led_activity_off(priv);
1124 ipw_led_link_off(priv);
1125
1126 if (priv->status & STATUS_RF_KILL_MASK)
1127 ipw_led_radio_off(priv);
1128 }
1129
1130 static void ipw_led_init(struct ipw_priv *priv)
1131 {
1132 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133
1134 /* Set the default PINs for the link and activity leds */
1135 priv->led_activity_on = IPW_ACTIVITY_LED;
1136 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137
1138 priv->led_association_on = IPW_ASSOCIATED_LED;
1139 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140
1141 /* Set the default PINs for the OFDM leds */
1142 priv->led_ofdm_on = IPW_OFDM_LED;
1143 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144
1145 switch (priv->nic_type) {
1146 case EEPROM_NIC_TYPE_1:
1147 /* In this NIC type, the LEDs are reversed.... */
1148 priv->led_activity_on = IPW_ASSOCIATED_LED;
1149 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150 priv->led_association_on = IPW_ACTIVITY_LED;
1151 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152
1153 if (!(priv->config & CFG_NO_LED))
1154 ipw_led_band_on(priv);
1155
1156 /* And we don't blink link LEDs for this nic, so
1157 * just return here */
1158 return;
1159
1160 case EEPROM_NIC_TYPE_3:
1161 case EEPROM_NIC_TYPE_2:
1162 case EEPROM_NIC_TYPE_4:
1163 case EEPROM_NIC_TYPE_0:
1164 break;
1165
1166 default:
1167 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168 priv->nic_type);
1169 priv->nic_type = EEPROM_NIC_TYPE_0;
1170 break;
1171 }
1172
1173 if (!(priv->config & CFG_NO_LED)) {
1174 if (priv->status & STATUS_ASSOCIATED)
1175 ipw_led_link_on(priv);
1176 else
1177 ipw_led_link_off(priv);
1178 }
1179 }
1180
1181 static void ipw_led_shutdown(struct ipw_priv *priv)
1182 {
1183 ipw_led_activity_off(priv);
1184 ipw_led_link_off(priv);
1185 ipw_led_band_off(priv);
1186 cancel_delayed_work(&priv->led_link_on);
1187 cancel_delayed_work(&priv->led_link_off);
1188 cancel_delayed_work(&priv->led_act_off);
1189 }
1190
1191 /*
1192 * The following adds a new attribute to the sysfs representation
1193 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1194 * used for controlling the debug level.
1195 *
1196 * See the level definitions in ipw for details.
1197 */
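/* Example usage from user space (directory name assumed from the comment
 * above; the actual path depends on the name the driver registers with the
 * PCI core):
 *   echo 0x00000001 > /sys/bus/pci/drivers/ipw/debug_level
 *   cat /sys/bus/pci/drivers/ipw/debug_level
 */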
1198 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1199 {
1200 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201 }
1202
1203 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1204 size_t count)
1205 {
1206 char *p = (char *)buf;
1207 u32 val;
1208
1209 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210 p++;
1211 if (p[0] == 'x' || p[0] == 'X')
1212 p++;
1213 val = simple_strtoul(p, &p, 16);
1214 } else
1215 val = simple_strtoul(p, &p, 10);
1216 if (p == buf)
1217 printk(KERN_INFO DRV_NAME
1218 ": %s is not in hex or decimal form.\n", buf);
1219 else
1220 ipw_debug_level = val;
1221
1222 return strnlen(buf, count);
1223 }
1224
1225 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1226 show_debug_level, store_debug_level);
1227
1228 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1229 {
1230 /* length = 1st dword in log */
1231 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1232 }
1233
1234 static void ipw_capture_event_log(struct ipw_priv *priv,
1235 u32 log_len, struct ipw_event *log)
1236 {
1237 u32 base;
1238
1239 if (log_len) {
1240 base = ipw_read32(priv, IPW_EVENT_LOG);
1241 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1242 (u8 *) log, sizeof(*log) * log_len);
1243 }
1244 }
1245
1246 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1247 {
1248 struct ipw_fw_error *error;
1249 u32 log_len = ipw_get_event_log_len(priv);
1250 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1251 u32 elem_len = ipw_read_reg32(priv, base);
1252
1253 error = kmalloc(sizeof(*error) +
1254 sizeof(*error->elem) * elem_len +
1255 sizeof(*error->log) * log_len, GFP_ATOMIC);
1256 if (!error) {
1257 IPW_ERROR("Memory allocation for firmware error log "
1258 "failed.\n");
1259 return NULL;
1260 }
1261 error->jiffies = jiffies;
1262 error->status = priv->status;
1263 error->config = priv->config;
1264 error->elem_len = elem_len;
1265 error->log_len = log_len;
1266 error->elem = (struct ipw_error_elem *)error->payload;
1267 error->log = (struct ipw_event *)(error->elem + elem_len);
1268
1269 ipw_capture_event_log(priv, log_len, error->log);
1270
1271 if (elem_len)
1272 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1273 sizeof(*error->elem) * elem_len);
1274
1275 return error;
1276 }
1277
1278 static ssize_t show_event_log(struct device *d,
1279 struct device_attribute *attr, char *buf)
1280 {
1281 struct ipw_priv *priv = dev_get_drvdata(d);
1282 u32 log_len = ipw_get_event_log_len(priv);
1283 u32 log_size;
1284 struct ipw_event *log;
1285 u32 len = 0, i;
1286
1287 /* not using min() because of its strict type checking */
1288 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1289 sizeof(*log) * log_len : PAGE_SIZE;
1290 log = kzalloc(log_size, GFP_KERNEL);
1291 if (!log) {
1292 IPW_ERROR("Unable to allocate memory for log\n");
1293 return 0;
1294 }
1295 log_len = log_size / sizeof(*log);
1296 ipw_capture_event_log(priv, log_len, log);
1297
1298 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1299 for (i = 0; i < log_len; i++)
1300 len += snprintf(buf + len, PAGE_SIZE - len,
1301 "\n%08X%08X%08X",
1302 log[i].time, log[i].event, log[i].data);
1303 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1304 kfree(log);
1305 return len;
1306 }
1307
1308 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1309
1310 static ssize_t show_error(struct device *d,
1311 struct device_attribute *attr, char *buf)
1312 {
1313 struct ipw_priv *priv = dev_get_drvdata(d);
1314 u32 len = 0, i;
1315 if (!priv->error)
1316 return 0;
1317 len += snprintf(buf + len, PAGE_SIZE - len,
1318 "%08lX%08X%08X%08X",
1319 priv->error->jiffies,
1320 priv->error->status,
1321 priv->error->config, priv->error->elem_len);
1322 for (i = 0; i < priv->error->elem_len; i++)
1323 len += snprintf(buf + len, PAGE_SIZE - len,
1324 "\n%08X%08X%08X%08X%08X%08X%08X",
1325 priv->error->elem[i].time,
1326 priv->error->elem[i].desc,
1327 priv->error->elem[i].blink1,
1328 priv->error->elem[i].blink2,
1329 priv->error->elem[i].link1,
1330 priv->error->elem[i].link2,
1331 priv->error->elem[i].data);
1332
1333 len += snprintf(buf + len, PAGE_SIZE - len,
1334 "\n%08X", priv->error->log_len);
1335 for (i = 0; i < priv->error->log_len; i++)
1336 len += snprintf(buf + len, PAGE_SIZE - len,
1337 "\n%08X%08X%08X",
1338 priv->error->log[i].time,
1339 priv->error->log[i].event,
1340 priv->error->log[i].data);
1341 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1342 return len;
1343 }
1344
1345 static ssize_t clear_error(struct device *d,
1346 struct device_attribute *attr,
1347 const char *buf, size_t count)
1348 {
1349 struct ipw_priv *priv = dev_get_drvdata(d);
1350
1351 kfree(priv->error);
1352 priv->error = NULL;
1353 return count;
1354 }
1355
1356 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1357
1358 static ssize_t show_cmd_log(struct device *d,
1359 struct device_attribute *attr, char *buf)
1360 {
1361 struct ipw_priv *priv = dev_get_drvdata(d);
1362 u32 len = 0, i;
1363 if (!priv->cmdlog)
1364 return 0;
1365 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1366 (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
1367 i = (i + 1) % priv->cmdlog_len) {
1368 len +=
1369 snprintf(buf + len, PAGE_SIZE - len,
1370 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1371 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1372 priv->cmdlog[i].cmd.len);
1373 len +=
1374 snprintk_buf(buf + len, PAGE_SIZE - len,
1375 (u8 *) priv->cmdlog[i].cmd.param,
1376 priv->cmdlog[i].cmd.len);
1377 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378 }
1379 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1380 return len;
1381 }
1382
1383 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1384
1385 #ifdef CONFIG_IPW2200_PROMISCUOUS
1386 static void ipw_prom_free(struct ipw_priv *priv);
1387 static int ipw_prom_alloc(struct ipw_priv *priv);
1388 static ssize_t store_rtap_iface(struct device *d,
1389 struct device_attribute *attr,
1390 const char *buf, size_t count)
1391 {
1392 struct ipw_priv *priv = dev_get_drvdata(d);
1393 int rc = 0;
1394
1395 if (count < 1)
1396 return -EINVAL;
1397
1398 switch (buf[0]) {
1399 case '0':
1400 if (!rtap_iface)
1401 return count;
1402
1403 if (netif_running(priv->prom_net_dev)) {
1404 IPW_WARNING("Interface is up. Cannot unregister.\n");
1405 return count;
1406 }
1407
1408 ipw_prom_free(priv);
1409 rtap_iface = 0;
1410 break;
1411
1412 case '1':
1413 if (rtap_iface)
1414 return count;
1415
1416 rc = ipw_prom_alloc(priv);
1417 if (!rc)
1418 rtap_iface = 1;
1419 break;
1420
1421 default:
1422 return -EINVAL;
1423 }
1424
1425 if (rc) {
1426 IPW_ERROR("Failed to register promiscuous network "
1427 "device (error %d).\n", rc);
1428 }
1429
1430 return count;
1431 }
1432
1433 static ssize_t show_rtap_iface(struct device *d,
1434 struct device_attribute *attr,
1435 char *buf)
1436 {
1437 struct ipw_priv *priv = dev_get_drvdata(d);
1438 if (rtap_iface)
1439 return sprintf(buf, "%s", priv->prom_net_dev->name);
1440 else {
1441 buf[0] = '-';
1442 buf[1] = '1';
1443 buf[2] = '\0';
1444 return 3;
1445 }
1446 }
1447
1448 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1449 store_rtap_iface);
1450
1451 static ssize_t store_rtap_filter(struct device *d,
1452 struct device_attribute *attr,
1453 const char *buf, size_t count)
1454 {
1455 struct ipw_priv *priv = dev_get_drvdata(d);
1456
1457 if (!priv->prom_priv) {
1458 IPW_ERROR("Attempting to set filter without "
1459 "rtap_iface enabled.\n");
1460 return -EPERM;
1461 }
1462
1463 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1464
1465 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1466 BIT_ARG16(priv->prom_priv->filter));
1467
1468 return count;
1469 }
1470
1471 static ssize_t show_rtap_filter(struct device *d,
1472 struct device_attribute *attr,
1473 char *buf)
1474 {
1475 struct ipw_priv *priv = dev_get_drvdata(d);
1476 return sprintf(buf, "0x%04X",
1477 priv->prom_priv ? priv->prom_priv->filter : 0);
1478 }
1479
1480 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1481 store_rtap_filter);
1482 #endif
1483
1484 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1485 char *buf)
1486 {
1487 struct ipw_priv *priv = dev_get_drvdata(d);
1488 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1489 }
1490
1491 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1492 const char *buf, size_t count)
1493 {
1494 struct ipw_priv *priv = dev_get_drvdata(d);
1495 struct net_device *dev = priv->net_dev;
1496 char buffer[] = "00000000";
1497 unsigned long len =
1498 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1499 unsigned long val;
1500 char *p = buffer;
1501
1502 IPW_DEBUG_INFO("enter\n");
1503
1504 strncpy(buffer, buf, len);
1505 buffer[len] = 0;
1506
1507 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1508 p++;
1509 if (p[0] == 'x' || p[0] == 'X')
1510 p++;
1511 val = simple_strtoul(p, &p, 16);
1512 } else
1513 val = simple_strtoul(p, &p, 10);
1514 if (p == buffer) {
1515 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1516 } else {
1517 priv->ieee->scan_age = val;
1518 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1519 }
1520
1521 IPW_DEBUG_INFO("exit\n");
1522 return len;
1523 }
1524
1525 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1526
1527 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1528 char *buf)
1529 {
1530 struct ipw_priv *priv = dev_get_drvdata(d);
1531 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1532 }
1533
1534 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1535 const char *buf, size_t count)
1536 {
1537 struct ipw_priv *priv = dev_get_drvdata(d);
1538
1539 IPW_DEBUG_INFO("enter\n");
1540
1541 if (count == 0)
1542 return 0;
1543
1544 if (*buf == 0) {
1545 IPW_DEBUG_LED("Disabling LED control.\n");
1546 priv->config |= CFG_NO_LED;
1547 ipw_led_shutdown(priv);
1548 } else {
1549 IPW_DEBUG_LED("Enabling LED control.\n");
1550 priv->config &= ~CFG_NO_LED;
1551 ipw_led_init(priv);
1552 }
1553
1554 IPW_DEBUG_INFO("exit\n");
1555 return count;
1556 }
1557
1558 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1559
1560 static ssize_t show_status(struct device *d,
1561 struct device_attribute *attr, char *buf)
1562 {
1563 struct ipw_priv *p = dev_get_drvdata(d);
1564 return sprintf(buf, "0x%08x\n", (int)p->status);
1565 }
1566
1567 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1568
1569 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1570 char *buf)
1571 {
1572 struct ipw_priv *p = dev_get_drvdata(d);
1573 return sprintf(buf, "0x%08x\n", (int)p->config);
1574 }
1575
1576 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1577
1578 static ssize_t show_nic_type(struct device *d,
1579 struct device_attribute *attr, char *buf)
1580 {
1581 struct ipw_priv *priv = dev_get_drvdata(d);
1582 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1583 }
1584
1585 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1586
1587 static ssize_t show_ucode_version(struct device *d,
1588 struct device_attribute *attr, char *buf)
1589 {
1590 u32 len = sizeof(u32), tmp = 0;
1591 struct ipw_priv *p = dev_get_drvdata(d);
1592
1593 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1594 return 0;
1595
1596 return sprintf(buf, "0x%08x\n", tmp);
1597 }
1598
1599 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1600
1601 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1602 char *buf)
1603 {
1604 u32 len = sizeof(u32), tmp = 0;
1605 struct ipw_priv *p = dev_get_drvdata(d);
1606
1607 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1608 return 0;
1609
1610 return sprintf(buf, "0x%08x\n", tmp);
1611 }
1612
1613 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1614
1615 /*
1616 * Add a device attribute to view/control the delay between eeprom
1617 * operations.
1618 */
1619 static ssize_t show_eeprom_delay(struct device *d,
1620 struct device_attribute *attr, char *buf)
1621 {
1622 struct ipw_priv *p = dev_get_drvdata(d);
1623 int n = p->eeprom_delay;
1624 return sprintf(buf, "%i\n", n);
1625 }
1626 static ssize_t store_eeprom_delay(struct device *d,
1627 struct device_attribute *attr,
1628 const char *buf, size_t count)
1629 {
1630 struct ipw_priv *p = dev_get_drvdata(d);
1631 sscanf(buf, "%i", &p->eeprom_delay);
1632 return strnlen(buf, count);
1633 }
1634
1635 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1636 show_eeprom_delay, store_eeprom_delay);
1637
1638 static ssize_t show_command_event_reg(struct device *d,
1639 struct device_attribute *attr, char *buf)
1640 {
1641 u32 reg = 0;
1642 struct ipw_priv *p = dev_get_drvdata(d);
1643
1644 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1645 return sprintf(buf, "0x%08x\n", reg);
1646 }
1647 static ssize_t store_command_event_reg(struct device *d,
1648 struct device_attribute *attr,
1649 const char *buf, size_t count)
1650 {
1651 u32 reg;
1652 struct ipw_priv *p = dev_get_drvdata(d);
1653
1654 sscanf(buf, "%x", &reg);
1655 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1656 return strnlen(buf, count);
1657 }
1658
1659 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1660 show_command_event_reg, store_command_event_reg);
1661
1662 static ssize_t show_mem_gpio_reg(struct device *d,
1663 struct device_attribute *attr, char *buf)
1664 {
1665 u32 reg = 0;
1666 struct ipw_priv *p = dev_get_drvdata(d);
1667
1668 reg = ipw_read_reg32(p, 0x301100);
1669 return sprintf(buf, "0x%08x\n", reg);
1670 }
1671 static ssize_t store_mem_gpio_reg(struct device *d,
1672 struct device_attribute *attr,
1673 const char *buf, size_t count)
1674 {
1675 u32 reg;
1676 struct ipw_priv *p = dev_get_drvdata(d);
1677
1678 sscanf(buf, "%x", &reg);
1679 ipw_write_reg32(p, 0x301100, reg);
1680 return strnlen(buf, count);
1681 }
1682
1683 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1684 show_mem_gpio_reg, store_mem_gpio_reg);
1685
1686 static ssize_t show_indirect_dword(struct device *d,
1687 struct device_attribute *attr, char *buf)
1688 {
1689 u32 reg = 0;
1690 struct ipw_priv *priv = dev_get_drvdata(d);
1691
1692 if (priv->status & STATUS_INDIRECT_DWORD)
1693 reg = ipw_read_reg32(priv, priv->indirect_dword);
1694 else
1695 reg = 0;
1696
1697 return sprintf(buf, "0x%08x\n", reg);
1698 }
1699 static ssize_t store_indirect_dword(struct device *d,
1700 struct device_attribute *attr,
1701 const char *buf, size_t count)
1702 {
1703 struct ipw_priv *priv = dev_get_drvdata(d);
1704
1705 sscanf(buf, "%x", &priv->indirect_dword);
1706 priv->status |= STATUS_INDIRECT_DWORD;
1707 return strnlen(buf, count);
1708 }
1709
1710 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1711 show_indirect_dword, store_indirect_dword);
1712
1713 static ssize_t show_indirect_byte(struct device *d,
1714 struct device_attribute *attr, char *buf)
1715 {
1716 u8 reg = 0;
1717 struct ipw_priv *priv = dev_get_drvdata(d);
1718
1719 if (priv->status & STATUS_INDIRECT_BYTE)
1720 reg = ipw_read_reg8(priv, priv->indirect_byte);
1721 else
1722 reg = 0;
1723
1724 return sprintf(buf, "0x%02x\n", reg);
1725 }
1726 static ssize_t store_indirect_byte(struct device *d,
1727 struct device_attribute *attr,
1728 const char *buf, size_t count)
1729 {
1730 struct ipw_priv *priv = dev_get_drvdata(d);
1731
1732 sscanf(buf, "%x", &priv->indirect_byte);
1733 priv->status |= STATUS_INDIRECT_BYTE;
1734 return strnlen(buf, count);
1735 }
1736
1737 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1738 show_indirect_byte, store_indirect_byte);
1739
1740 static ssize_t show_direct_dword(struct device *d,
1741 struct device_attribute *attr, char *buf)
1742 {
1743 u32 reg = 0;
1744 struct ipw_priv *priv = dev_get_drvdata(d);
1745
1746 if (priv->status & STATUS_DIRECT_DWORD)
1747 reg = ipw_read32(priv, priv->direct_dword);
1748 else
1749 reg = 0;
1750
1751 return sprintf(buf, "0x%08x\n", reg);
1752 }
1753 static ssize_t store_direct_dword(struct device *d,
1754 struct device_attribute *attr,
1755 const char *buf, size_t count)
1756 {
1757 struct ipw_priv *priv = dev_get_drvdata(d);
1758
1759 sscanf(buf, "%x", &priv->direct_dword);
1760 priv->status |= STATUS_DIRECT_DWORD;
1761 return strnlen(buf, count);
1762 }
1763
1764 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1765 show_direct_dword, store_direct_dword);
1766
1767 static int rf_kill_active(struct ipw_priv *priv)
1768 {
1769 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1770 priv->status |= STATUS_RF_KILL_HW;
1771 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1772 } else {
1773 priv->status &= ~STATUS_RF_KILL_HW;
1774 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1775 }
1776
1777 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1778 }
1779
1780 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1781 char *buf)
1782 {
1783 /* 0 - RF kill not enabled
1784 1 - SW based RF kill active (sysfs)
1785 2 - HW based RF kill active
1786 3 - Both HW and SW based RF kill active */
1787 struct ipw_priv *priv = dev_get_drvdata(d);
1788 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1789 (rf_kill_active(priv) ? 0x2 : 0x0);
1790 return sprintf(buf, "%i\n", val);
1791 }
1792
1793 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1794 {
1795 if ((disable_radio ? 1 : 0) ==
1796 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1797 return 0;
1798
1799 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1800 disable_radio ? "OFF" : "ON");
1801
1802 if (disable_radio) {
1803 priv->status |= STATUS_RF_KILL_SW;
1804
1805 cancel_delayed_work(&priv->request_scan);
1806 cancel_delayed_work(&priv->request_direct_scan);
1807 cancel_delayed_work(&priv->request_passive_scan);
1808 cancel_delayed_work(&priv->scan_event);
1809 schedule_work(&priv->down);
1810 } else {
1811 priv->status &= ~STATUS_RF_KILL_SW;
1812 if (rf_kill_active(priv)) {
1813 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1814 "disabled by HW switch\n");
1815 /* Make sure the RF_KILL check timer is running */
1816 cancel_delayed_work(&priv->rf_kill);
1817 schedule_delayed_work(&priv->rf_kill,
1818 round_jiffies_relative(2 * HZ));
1819 } else
1820 schedule_work(&priv->up);
1821 }
1822
1823 return 1;
1824 }
1825
1826 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1827 const char *buf, size_t count)
1828 {
1829 struct ipw_priv *priv = dev_get_drvdata(d);
1830
1831 ipw_radio_kill_sw(priv, buf[0] == '1');
1832
1833 return count;
1834 }
1835
1836 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
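/*
 * Illustrative usage (the exact sysfs path depends on the adapter's PCI
 * address, so this is only a sketch):
 *   echo 1 > rf_kill   asserts the software RF kill (radio off)
 *   echo 0 > rf_kill   releases it (radio back on unless the HW switch is set)
 * Reading the attribute returns the 0-3 bitmask described in show_rf_kill().
 */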
1837
1838 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1839 char *buf)
1840 {
1841 struct ipw_priv *priv = dev_get_drvdata(d);
1842 int pos = 0, len = 0;
1843 if (priv->config & CFG_SPEED_SCAN) {
1844 while (priv->speed_scan[pos] != 0)
1845 len += sprintf(&buf[len], "%d ",
1846 priv->speed_scan[pos++]);
1847 return len + sprintf(&buf[len], "\n");
1848 }
1849
1850 return sprintf(buf, "0\n");
1851 }
1852
1853 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1854 const char *buf, size_t count)
1855 {
1856 struct ipw_priv *priv = dev_get_drvdata(d);
1857 int channel, pos = 0;
1858 const char *p = buf;
1859
1860 /* list of space separated channels to scan, optionally ending with 0 */
1861 while ((channel = simple_strtol(p, NULL, 0))) {
1862 if (pos == MAX_SPEED_SCAN - 1) {
1863 priv->speed_scan[pos] = 0;
1864 break;
1865 }
1866
1867 if (libipw_is_valid_channel(priv->ieee, channel))
1868 priv->speed_scan[pos++] = channel;
1869 else
1870 IPW_WARNING("Skipping invalid channel request: %d\n",
1871 channel);
1872 p = strchr(p, ' ');
1873 if (!p)
1874 break;
1875 while (*p == ' ' || *p == '\t')
1876 p++;
1877 }
1878
1879 if (pos == 0)
1880 priv->config &= ~CFG_SPEED_SCAN;
1881 else {
1882 priv->speed_scan_pos = 0;
1883 priv->config |= CFG_SPEED_SCAN;
1884 }
1885
1886 return count;
1887 }
1888
1889 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1890 store_speed_scan);
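/*
 * Illustrative usage (exact sysfs path depends on the PCI device):
 *   echo "1 6 11" > speed_scan   limits background scans to channels 1, 6, 11
 *   echo "0" > speed_scan        clears the list and disables CFG_SPEED_SCAN
 */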
1891
1892 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1893 char *buf)
1894 {
1895 struct ipw_priv *priv = dev_get_drvdata(d);
1896 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1897 }
1898
1899 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1900 const char *buf, size_t count)
1901 {
1902 struct ipw_priv *priv = dev_get_drvdata(d);
1903 if (buf[0] == '1')
1904 priv->config |= CFG_NET_STATS;
1905 else
1906 priv->config &= ~CFG_NET_STATS;
1907
1908 return count;
1909 }
1910
1911 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1912 show_net_stats, store_net_stats);
1913
1914 static ssize_t show_channels(struct device *d,
1915 struct device_attribute *attr,
1916 char *buf)
1917 {
1918 struct ipw_priv *priv = dev_get_drvdata(d);
1919 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1920 int len = 0, i;
1921
1922 len = sprintf(&buf[len],
1923 "Displaying %d channels in 2.4Ghz band "
1924 "(802.11bg):\n", geo->bg_channels);
1925
1926 for (i = 0; i < geo->bg_channels; i++) {
1927 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1928 geo->bg[i].channel,
1929 geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1930 " (radar spectrum)" : "",
1931 ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1932 (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1933 ? "" : ", IBSS",
1934 geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1935 "passive only" : "active/passive",
1936 geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1937 "B" : "B/G");
1938 }
1939
1940 len += sprintf(&buf[len],
1941 "Displaying %d channels in 5.2Ghz band "
1942 "(802.11a):\n", geo->a_channels);
1943 for (i = 0; i < geo->a_channels; i++) {
1944 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1945 geo->a[i].channel,
1946 geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1947 " (radar spectrum)" : "",
1948 ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1949 (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1950 ? "" : ", IBSS",
1951 geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1952 "passive only" : "active/passive");
1953 }
1954
1955 return len;
1956 }
1957
1958 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1959
1960 static void notify_wx_assoc_event(struct ipw_priv *priv)
1961 {
1962 union iwreq_data wrqu;
1963 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1964 if (priv->status & STATUS_ASSOCIATED)
1965 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1966 else
1967 eth_zero_addr(wrqu.ap_addr.sa_data);
1968 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1969 }
1970
1971 static void ipw_irq_tasklet(unsigned long data)
1972 {
1973 struct ipw_priv *priv = (struct ipw_priv *)data;
1974 u32 inta, inta_mask, handled = 0;
1975 unsigned long flags;
1976 int rc = 0;
1977
1978 spin_lock_irqsave(&priv->irq_lock, flags);
1979
1980 inta = ipw_read32(priv, IPW_INTA_RW);
1981 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1982
1983 if (inta == 0xFFFFFFFF) {
1984 /* Hardware disappeared */
1985 IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1986 /* Only handle the cached INTA values */
1987 inta = 0;
1988 }
1989 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1990
1991 /* Add any cached INTA values that need to be handled */
1992 inta |= priv->isr_inta;
1993
1994 spin_unlock_irqrestore(&priv->irq_lock, flags);
1995
1996 spin_lock_irqsave(&priv->lock, flags);
1997
1998 /* handle all the justifications for the interrupt */
1999 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
2000 ipw_rx(priv);
2001 handled |= IPW_INTA_BIT_RX_TRANSFER;
2002 }
2003
2004 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2005 IPW_DEBUG_HC("Command completed.\n");
2006 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2007 priv->status &= ~STATUS_HCMD_ACTIVE;
2008 wake_up_interruptible(&priv->wait_command_queue);
2009 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2010 }
2011
2012 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2013 IPW_DEBUG_TX("TX_QUEUE_1\n");
2014 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2015 handled |= IPW_INTA_BIT_TX_QUEUE_1;
2016 }
2017
2018 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2019 IPW_DEBUG_TX("TX_QUEUE_2\n");
2020 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2021 handled |= IPW_INTA_BIT_TX_QUEUE_2;
2022 }
2023
2024 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2025 IPW_DEBUG_TX("TX_QUEUE_3\n");
2026 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2027 handled |= IPW_INTA_BIT_TX_QUEUE_3;
2028 }
2029
2030 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2031 IPW_DEBUG_TX("TX_QUEUE_4\n");
2032 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2033 handled |= IPW_INTA_BIT_TX_QUEUE_4;
2034 }
2035
2036 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2037 IPW_WARNING("STATUS_CHANGE\n");
2038 handled |= IPW_INTA_BIT_STATUS_CHANGE;
2039 }
2040
2041 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2042 IPW_WARNING("TX_PERIOD_EXPIRED\n");
2043 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2044 }
2045
2046 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2047 IPW_WARNING("HOST_CMD_DONE\n");
2048 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2049 }
2050
2051 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2052 IPW_WARNING("FW_INITIALIZATION_DONE\n");
2053 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2054 }
2055
2056 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2057 IPW_WARNING("PHY_OFF_DONE\n");
2058 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2059 }
2060
2061 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2062 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2063 priv->status |= STATUS_RF_KILL_HW;
2064 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2065 wake_up_interruptible(&priv->wait_command_queue);
2066 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2067 cancel_delayed_work(&priv->request_scan);
2068 cancel_delayed_work(&priv->request_direct_scan);
2069 cancel_delayed_work(&priv->request_passive_scan);
2070 cancel_delayed_work(&priv->scan_event);
2071 schedule_work(&priv->link_down);
2072 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2073 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2074 }
2075
2076 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2077 IPW_WARNING("Firmware error detected. Restarting.\n");
2078 if (priv->error) {
2079 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2080 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2081 struct ipw_fw_error *error =
2082 ipw_alloc_error_log(priv);
2083 ipw_dump_error_log(priv, error);
2084 kfree(error);
2085 }
2086 } else {
2087 priv->error = ipw_alloc_error_log(priv);
2088 if (priv->error)
2089 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2090 else
2091 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2092 "log.\n");
2093 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2094 ipw_dump_error_log(priv, priv->error);
2095 }
2096
2097 /* XXX: If hardware encryption is for WPA/WPA2,
2098 * we have to notify the supplicant. */
2099 if (priv->ieee->sec.encrypt) {
2100 priv->status &= ~STATUS_ASSOCIATED;
2101 notify_wx_assoc_event(priv);
2102 }
2103
2104 /* Keep the restart process from trying to send host
2105 * commands by clearing the INIT status bit */
2106 priv->status &= ~STATUS_INIT;
2107
2108 /* Cancel currently queued command. */
2109 priv->status &= ~STATUS_HCMD_ACTIVE;
2110 wake_up_interruptible(&priv->wait_command_queue);
2111
2112 schedule_work(&priv->adapter_restart);
2113 handled |= IPW_INTA_BIT_FATAL_ERROR;
2114 }
2115
2116 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2117 IPW_ERROR("Parity error\n");
2118 handled |= IPW_INTA_BIT_PARITY_ERROR;
2119 }
2120
2121 if (handled != inta) {
2122 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2123 }
2124
2125 spin_unlock_irqrestore(&priv->lock, flags);
2126
2127 /* enable all interrupts */
2128 ipw_enable_interrupts(priv);
2129 }
2130
2131 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
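/* e.g. IPW_CMD(SSID) expands to:  case IPW_CMD_SSID: return "SSID";  */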
2132 static char *get_cmd_string(u8 cmd)
2133 {
2134 switch (cmd) {
2135 IPW_CMD(HOST_COMPLETE);
2136 IPW_CMD(POWER_DOWN);
2137 IPW_CMD(SYSTEM_CONFIG);
2138 IPW_CMD(MULTICAST_ADDRESS);
2139 IPW_CMD(SSID);
2140 IPW_CMD(ADAPTER_ADDRESS);
2141 IPW_CMD(PORT_TYPE);
2142 IPW_CMD(RTS_THRESHOLD);
2143 IPW_CMD(FRAG_THRESHOLD);
2144 IPW_CMD(POWER_MODE);
2145 IPW_CMD(WEP_KEY);
2146 IPW_CMD(TGI_TX_KEY);
2147 IPW_CMD(SCAN_REQUEST);
2148 IPW_CMD(SCAN_REQUEST_EXT);
2149 IPW_CMD(ASSOCIATE);
2150 IPW_CMD(SUPPORTED_RATES);
2151 IPW_CMD(SCAN_ABORT);
2152 IPW_CMD(TX_FLUSH);
2153 IPW_CMD(QOS_PARAMETERS);
2154 IPW_CMD(DINO_CONFIG);
2155 IPW_CMD(RSN_CAPABILITIES);
2156 IPW_CMD(RX_KEY);
2157 IPW_CMD(CARD_DISABLE);
2158 IPW_CMD(SEED_NUMBER);
2159 IPW_CMD(TX_POWER);
2160 IPW_CMD(COUNTRY_INFO);
2161 IPW_CMD(AIRONET_INFO);
2162 IPW_CMD(AP_TX_POWER);
2163 IPW_CMD(CCKM_INFO);
2164 IPW_CMD(CCX_VER_INFO);
2165 IPW_CMD(SET_CALIBRATION);
2166 IPW_CMD(SENSITIVITY_CALIB);
2167 IPW_CMD(RETRY_LIMIT);
2168 IPW_CMD(IPW_PRE_POWER_DOWN);
2169 IPW_CMD(VAP_BEACON_TEMPLATE);
2170 IPW_CMD(VAP_DTIM_PERIOD);
2171 IPW_CMD(EXT_SUPPORTED_RATES);
2172 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2173 IPW_CMD(VAP_QUIET_INTERVALS);
2174 IPW_CMD(VAP_CHANNEL_SWITCH);
2175 IPW_CMD(VAP_MANDATORY_CHANNELS);
2176 IPW_CMD(VAP_CELL_PWR_LIMIT);
2177 IPW_CMD(VAP_CF_PARAM_SET);
2178 IPW_CMD(VAP_SET_BEACONING_STATE);
2179 IPW_CMD(MEASUREMENT);
2180 IPW_CMD(POWER_CAPABILITY);
2181 IPW_CMD(SUPPORTED_CHANNELS);
2182 IPW_CMD(TPC_REPORT);
2183 IPW_CMD(WME_INFO);
2184 IPW_CMD(PRODUCTION_COMMAND);
2185 default:
2186 return "UNKNOWN";
2187 }
2188 }
2189
2190 #define HOST_COMPLETE_TIMEOUT HZ
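/*
 * Host command flow, as implemented below: __ipw_send_cmd() sets
 * STATUS_HCMD_ACTIVE, queues the command on txq_cmd and sleeps on
 * wait_command_queue until the TX_CMD_QUEUE interrupt handler (see
 * ipw_irq_tasklet above) clears the flag, or until HOST_COMPLETE_TIMEOUT
 * (one second) expires, in which case -EIO is returned.
 */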
2191
2192 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2193 {
2194 int rc = 0;
2195 unsigned long flags;
2196 unsigned long now, end;
2197
2198 spin_lock_irqsave(&priv->lock, flags);
2199 if (priv->status & STATUS_HCMD_ACTIVE) {
2200 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2201 get_cmd_string(cmd->cmd));
2202 spin_unlock_irqrestore(&priv->lock, flags);
2203 return -EAGAIN;
2204 }
2205
2206 priv->status |= STATUS_HCMD_ACTIVE;
2207
2208 if (priv->cmdlog) {
2209 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2210 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2211 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2212 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2213 cmd->len);
2214 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2215 }
2216
2217 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2218 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2219 priv->status);
2220
2221 #ifndef DEBUG_CMD_WEP_KEY
2222 if (cmd->cmd == IPW_CMD_WEP_KEY)
2223 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2224 else
2225 #endif
2226 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2227
2228 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2229 if (rc) {
2230 priv->status &= ~STATUS_HCMD_ACTIVE;
2231 IPW_ERROR("Failed to send %s: Reason %d\n",
2232 get_cmd_string(cmd->cmd), rc);
2233 spin_unlock_irqrestore(&priv->lock, flags);
2234 goto exit;
2235 }
2236 spin_unlock_irqrestore(&priv->lock, flags);
2237
2238 now = jiffies;
2239 end = now + HOST_COMPLETE_TIMEOUT;
2240 again:
2241 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2242 !(priv->
2243 status & STATUS_HCMD_ACTIVE),
2244 end - now);
2245 if (rc < 0) {
2246 now = jiffies;
2247 if (time_before(now, end))
2248 goto again;
2249 rc = 0;
2250 }
2251
2252 if (rc == 0) {
2253 spin_lock_irqsave(&priv->lock, flags);
2254 if (priv->status & STATUS_HCMD_ACTIVE) {
2255 IPW_ERROR("Failed to send %s: Command timed out.\n",
2256 get_cmd_string(cmd->cmd));
2257 priv->status &= ~STATUS_HCMD_ACTIVE;
2258 spin_unlock_irqrestore(&priv->lock, flags);
2259 rc = -EIO;
2260 goto exit;
2261 }
2262 spin_unlock_irqrestore(&priv->lock, flags);
2263 } else
2264 rc = 0;
2265
2266 if (priv->status & STATUS_RF_KILL_HW) {
2267 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2268 get_cmd_string(cmd->cmd));
2269 rc = -EIO;
2270 goto exit;
2271 }
2272
2273 exit:
2274 if (priv->cmdlog) {
2275 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2276 priv->cmdlog_pos %= priv->cmdlog_len;
2277 }
2278 return rc;
2279 }
2280
2281 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2282 {
2283 struct host_cmd cmd = {
2284 .cmd = command,
2285 };
2286
2287 return __ipw_send_cmd(priv, &cmd);
2288 }
2289
2290 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2291 void *data)
2292 {
2293 struct host_cmd cmd = {
2294 .cmd = command,
2295 .len = len,
2296 .param = data,
2297 };
2298
2299 return __ipw_send_cmd(priv, &cmd);
2300 }
2301
2302 static int ipw_send_host_complete(struct ipw_priv *priv)
2303 {
2304 if (!priv) {
2305 IPW_ERROR("Invalid args\n");
2306 return -1;
2307 }
2308
2309 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2310 }
2311
2312 static int ipw_send_system_config(struct ipw_priv *priv)
2313 {
2314 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2315 sizeof(priv->sys_config),
2316 &priv->sys_config);
2317 }
2318
2319 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2320 {
2321 if (!priv || !ssid) {
2322 IPW_ERROR("Invalid args\n");
2323 return -1;
2324 }
2325
2326 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2327 ssid);
2328 }
2329
2330 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2331 {
2332 if (!priv || !mac) {
2333 IPW_ERROR("Invalid args\n");
2334 return -1;
2335 }
2336
2337 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2338 priv->net_dev->name, mac);
2339
2340 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2341 }
2342
2343 static void ipw_adapter_restart(void *adapter)
2344 {
2345 struct ipw_priv *priv = adapter;
2346
2347 if (priv->status & STATUS_RF_KILL_MASK)
2348 return;
2349
2350 ipw_down(priv);
2351
2352 if (priv->assoc_network &&
2353 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2354 ipw_remove_current_network(priv);
2355
2356 if (ipw_up(priv)) {
2357 IPW_ERROR("Failed to up device\n");
2358 return;
2359 }
2360 }
2361
2362 static void ipw_bg_adapter_restart(struct work_struct *work)
2363 {
2364 struct ipw_priv *priv =
2365 container_of(work, struct ipw_priv, adapter_restart);
2366 mutex_lock(&priv->mutex);
2367 ipw_adapter_restart(priv);
2368 mutex_unlock(&priv->mutex);
2369 }
2370
2371 static void ipw_abort_scan(struct ipw_priv *priv);
2372
2373 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2374
2375 static void ipw_scan_check(void *data)
2376 {
2377 struct ipw_priv *priv = data;
2378
2379 if (priv->status & STATUS_SCAN_ABORTING) {
2380 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2381 "adapter after (%dms).\n",
2382 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2383 schedule_work(&priv->adapter_restart);
2384 } else if (priv->status & STATUS_SCANNING) {
2385 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2386 "after (%dms).\n",
2387 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2388 ipw_abort_scan(priv);
2389 schedule_delayed_work(&priv->scan_check, HZ);
2390 }
2391 }
2392
2393 static void ipw_bg_scan_check(struct work_struct *work)
2394 {
2395 struct ipw_priv *priv =
2396 container_of(work, struct ipw_priv, scan_check.work);
2397 mutex_lock(&priv->mutex);
2398 ipw_scan_check(priv);
2399 mutex_unlock(&priv->mutex);
2400 }
2401
2402 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2403 struct ipw_scan_request_ext *request)
2404 {
2405 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2406 sizeof(*request), request);
2407 }
2408
2409 static int ipw_send_scan_abort(struct ipw_priv *priv)
2410 {
2411 if (!priv) {
2412 IPW_ERROR("Invalid args\n");
2413 return -1;
2414 }
2415
2416 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2417 }
2418
2419 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2420 {
2421 struct ipw_sensitivity_calib calib = {
2422 .beacon_rssi_raw = cpu_to_le16(sens),
2423 };
2424
2425 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2426 &calib);
2427 }
2428
2429 static int ipw_send_associate(struct ipw_priv *priv,
2430 struct ipw_associate *associate)
2431 {
2432 if (!priv || !associate) {
2433 IPW_ERROR("Invalid args\n");
2434 return -1;
2435 }
2436
2437 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2438 associate);
2439 }
2440
2441 static int ipw_send_supported_rates(struct ipw_priv *priv,
2442 struct ipw_supported_rates *rates)
2443 {
2444 if (!priv || !rates) {
2445 IPW_ERROR("Invalid args\n");
2446 return -1;
2447 }
2448
2449 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2450 rates);
2451 }
2452
2453 static int ipw_set_random_seed(struct ipw_priv *priv)
2454 {
2455 u32 val;
2456
2457 if (!priv) {
2458 IPW_ERROR("Invalid args\n");
2459 return -1;
2460 }
2461
2462 get_random_bytes(&val, sizeof(val));
2463
2464 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2465 }
2466
2467 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2468 {
2469 __le32 v = cpu_to_le32(phy_off);
2470 if (!priv) {
2471 IPW_ERROR("Invalid args\n");
2472 return -1;
2473 }
2474
2475 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2476 }
2477
2478 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2479 {
2480 if (!priv || !power) {
2481 IPW_ERROR("Invalid args\n");
2482 return -1;
2483 }
2484
2485 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2486 }
2487
2488 static int ipw_set_tx_power(struct ipw_priv *priv)
2489 {
2490 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2491 struct ipw_tx_power tx_power;
2492 s8 max_power;
2493 int i;
2494
2495 memset(&tx_power, 0, sizeof(tx_power));
2496
2497 /* configure device for 'G' band */
2498 tx_power.ieee_mode = IPW_G_MODE;
2499 tx_power.num_channels = geo->bg_channels;
2500 for (i = 0; i < geo->bg_channels; i++) {
2501 max_power = geo->bg[i].max_power;
2502 tx_power.channels_tx_power[i].channel_number =
2503 geo->bg[i].channel;
2504 tx_power.channels_tx_power[i].tx_power = max_power ?
2505 min(max_power, priv->tx_power) : priv->tx_power;
2506 }
2507 if (ipw_send_tx_power(priv, &tx_power))
2508 return -EIO;
2509
2510 /* configure device to also handle 'B' band */
2511 tx_power.ieee_mode = IPW_B_MODE;
2512 if (ipw_send_tx_power(priv, &tx_power))
2513 return -EIO;
2514
2515 /* configure device to also handle 'A' band */
2516 if (priv->ieee->abg_true) {
2517 tx_power.ieee_mode = IPW_A_MODE;
2518 tx_power.num_channels = geo->a_channels;
2519 for (i = 0; i < tx_power.num_channels; i++) {
2520 max_power = geo->a[i].max_power;
2521 tx_power.channels_tx_power[i].channel_number =
2522 geo->a[i].channel;
2523 tx_power.channels_tx_power[i].tx_power = max_power ?
2524 min(max_power, priv->tx_power) : priv->tx_power;
2525 }
2526 if (ipw_send_tx_power(priv, &tx_power))
2527 return -EIO;
2528 }
2529 return 0;
2530 }
2531
2532 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2533 {
2534 struct ipw_rts_threshold rts_threshold = {
2535 .rts_threshold = cpu_to_le16(rts),
2536 };
2537
2538 if (!priv) {
2539 IPW_ERROR("Invalid args\n");
2540 return -1;
2541 }
2542
2543 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2544 sizeof(rts_threshold), &rts_threshold);
2545 }
2546
2547 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2548 {
2549 struct ipw_frag_threshold frag_threshold = {
2550 .frag_threshold = cpu_to_le16(frag),
2551 };
2552
2553 if (!priv) {
2554 IPW_ERROR("Invalid args\n");
2555 return -1;
2556 }
2557
2558 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2559 sizeof(frag_threshold), &frag_threshold);
2560 }
2561
2562 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2563 {
2564 __le32 param;
2565
2566 if (!priv) {
2567 IPW_ERROR("Invalid args\n");
2568 return -1;
2569 }
2570
2571 /* If on battery, set to power index 3; if on AC, set to CAM;
2572 * otherwise pass the requested level through */
2573 switch (mode) {
2574 case IPW_POWER_BATTERY:
2575 param = cpu_to_le32(IPW_POWER_INDEX_3);
2576 break;
2577 case IPW_POWER_AC:
2578 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2579 break;
2580 default:
2581 param = cpu_to_le32(mode);
2582 break;
2583 }
2584
2585 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2586 &param);
2587 }
2588
2589 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2590 {
2591 struct ipw_retry_limit retry_limit = {
2592 .short_retry_limit = slimit,
2593 .long_retry_limit = llimit
2594 };
2595
2596 if (!priv) {
2597 IPW_ERROR("Invalid args\n");
2598 return -1;
2599 }
2600
2601 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2602 &retry_limit);
2603 }
2604
2605 /*
2606 * The IPW device contains a Microwire compatible EEPROM that stores
2607 * various data like the MAC address. Usually the firmware has exclusive
2608 * access to the eeprom, but during device initialization (before the
2609 * device driver has sent the HostComplete command to the firmware) the
2610 * device driver has read access to the EEPROM by way of indirect addressing
2611 * through a couple of memory mapped registers.
2612 *
2613 * The following is a simplified implementation for pulling data out of the
2614 * eeprom, along with some helper functions to find information in
2615 * the per device private data's copy of the eeprom.
2616 *
2617 * NOTE: To better understand how these functions work (i.e what is a chip
2618 * select and why do we have to keep driving the eeprom clock?), read
2619 * just about any data sheet for a Microwire compatible EEPROM.
2620 */
2621
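/*
 * Shape of one READ transaction as the helpers below produce it (a sketch of
 * eeprom_read_u16(); each step is one or more eeprom_write_reg() pulses
 * against FW_MEM_REG_EEPROM_ACCESS):
 *
 *   eeprom_cs()                 raise CS and clock it once
 *   eeprom_write_bit(1)         start bit
 *   eeprom_write_bit(op & 2)    \  2-bit opcode, MSB first
 *   eeprom_write_bit(op & 1)    /  (op == EEPROM_CMD_READ for reads)
 *   eeprom_write_bit(a7..a0)    8 address bits, MSB first
 *   one dummy clock with CS held
 *   16 x { pulse SK, sample EEPROM_BIT_DO }   assembled MSB first into a u16
 *   eeprom_disable_cs()         drop CS
 */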
2622 /* write a 32 bit value into the indirect accessor register */
2623 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2624 {
2625 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2626
2627 /* the eeprom requires some time to complete the operation */
2628 udelay(p->eeprom_delay);
2629 }
2630
2631 /* perform a chip select operation */
2632 static void eeprom_cs(struct ipw_priv *priv)
2633 {
2634 eeprom_write_reg(priv, 0);
2635 eeprom_write_reg(priv, EEPROM_BIT_CS);
2636 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2637 eeprom_write_reg(priv, EEPROM_BIT_CS);
2638 }
2639
2640 /* perform a chip select operation */
2641 static void eeprom_disable_cs(struct ipw_priv *priv)
2642 {
2643 eeprom_write_reg(priv, EEPROM_BIT_CS);
2644 eeprom_write_reg(priv, 0);
2645 eeprom_write_reg(priv, EEPROM_BIT_SK);
2646 }
2647
2648 /* push a single bit down to the eeprom */
2649 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2650 {
2651 int d = (bit ? EEPROM_BIT_DI : 0);
2652 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2653 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2654 }
2655
2656 /* push an opcode followed by an address down to the eeprom */
2657 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2658 {
2659 int i;
2660
2661 eeprom_cs(priv);
2662 eeprom_write_bit(priv, 1);
2663 eeprom_write_bit(priv, op & 2);
2664 eeprom_write_bit(priv, op & 1);
2665 for (i = 7; i >= 0; i--) {
2666 eeprom_write_bit(priv, addr & (1 << i));
2667 }
2668 }
2669
2670 /* pull 16 bits off the eeprom, one bit at a time */
2671 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2672 {
2673 int i;
2674 u16 r = 0;
2675
2676 /* Send READ Opcode */
2677 eeprom_op(priv, EEPROM_CMD_READ, addr);
2678
2679 /* Send dummy bit */
2680 eeprom_write_reg(priv, EEPROM_BIT_CS);
2681
2682 /* Read the byte off the eeprom one bit at a time */
2683 for (i = 0; i < 16; i++) {
2684 u32 data = 0;
2685 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2686 eeprom_write_reg(priv, EEPROM_BIT_CS);
2687 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2688 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2689 }
2690
2691 /* Send another dummy bit */
2692 eeprom_write_reg(priv, 0);
2693 eeprom_disable_cs(priv);
2694
2695 return r;
2696 }
2697
2698 /* helper function for pulling the mac address out of the private */
2699 /* data's copy of the eeprom data */
2700 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2701 {
2702 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2703 }
2704
2705 static void ipw_read_eeprom(struct ipw_priv *priv)
2706 {
2707 int i;
2708 __le16 *eeprom = (__le16 *) priv->eeprom;
2709
2710 IPW_DEBUG_TRACE(">>\n");
2711
2712 /* read entire contents of eeprom into private buffer */
2713 for (i = 0; i < 128; i++)
2714 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2715
2716 IPW_DEBUG_TRACE("<<\n");
2717 }
2718
2719 /*
2720 * Either the device driver (i.e. the host) or the firmware can
2721 * load eeprom data into the designated region in SRAM. If neither
2722 * happens then the FW will shutdown with a fatal error.
2723 *
2724 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2725 * word in the shared SRAM region needs to be non-zero.
2726 */
2727 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2728 {
2729 int i;
2730
2731 IPW_DEBUG_TRACE(">>\n");
2732
2733 /*
2734 If the data looks correct, then copy it to our private
2735 copy. Otherwise let the firmware know to perform the operation
2736 on its own.
2737 */
2738 if (priv->eeprom[EEPROM_VERSION] != 0) {
2739 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2740
2741 /* write the eeprom data to sram */
2742 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2743 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2744
2745 /* Do not load eeprom data on fatal error or suspend */
2746 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2747 } else {
2748 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2749
2750 /* Load eeprom data on fatal error or suspend */
2751 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2752 }
2753
2754 IPW_DEBUG_TRACE("<<\n");
2755 }
2756
2757 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2758 {
2759 count >>= 2;
2760 if (!count)
2761 return;
2762 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2763 while (count--)
2764 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2765 }
2766
2767 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2768 {
2769 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2770 CB_NUMBER_OF_ELEMENTS_SMALL *
2771 sizeof(struct command_block));
2772 }
2773
2774 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2775 { /* start dma engine but no transfers yet */
2776
2777 IPW_DEBUG_FW(">> :\n");
2778
2779 /* Start the dma */
2780 ipw_fw_dma_reset_command_blocks(priv);
2781
2782 /* Write CB base address */
2783 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2784
2785 IPW_DEBUG_FW("<< :\n");
2786 return 0;
2787 }
2788
2789 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2790 {
2791 u32 control = 0;
2792
2793 IPW_DEBUG_FW(">> :\n");
2794
2795 /* set the Stop and Abort bit */
2796 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2797 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2798 priv->sram_desc.last_cb_index = 0;
2799
2800 IPW_DEBUG_FW("<<\n");
2801 }
2802
2803 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2804 struct command_block *cb)
2805 {
2806 u32 address =
2807 IPW_SHARED_SRAM_DMA_CONTROL +
2808 (sizeof(struct command_block) * index);
2809 IPW_DEBUG_FW(">> :\n");
2810
2811 ipw_write_indirect(priv, address, (u8 *) cb,
2812 (int)sizeof(struct command_block));
2813
2814 IPW_DEBUG_FW("<< :\n");
2815 return 0;
2816
2817 }
2818
2819 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2820 {
2821 u32 control = 0;
2822 u32 index = 0;
2823
2824 IPW_DEBUG_FW(">> :\n");
2825
2826 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2827 ipw_fw_dma_write_command_block(priv, index,
2828 &priv->sram_desc.cb_list[index]);
2829
2830 /* Enable the DMA in the CSR register */
2831 ipw_clear_bit(priv, IPW_RESET_REG,
2832 IPW_RESET_REG_MASTER_DISABLED |
2833 IPW_RESET_REG_STOP_MASTER);
2834
2835 /* Set the Start bit. */
2836 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2837 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2838
2839 IPW_DEBUG_FW("<< :\n");
2840 return 0;
2841 }
2842
2843 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2844 {
2845 u32 address;
2846 u32 register_value = 0;
2847 u32 cb_fields_address = 0;
2848
2849 IPW_DEBUG_FW(">> :\n");
2850 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2851 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2852
2853 /* Read the DMA Control register */
2854 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2855 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2856
2857 /* Print the CB values */
2858 cb_fields_address = address;
2859 register_value = ipw_read_reg32(priv, cb_fields_address);
2860 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2861
2862 cb_fields_address += sizeof(u32);
2863 register_value = ipw_read_reg32(priv, cb_fields_address);
2864 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2865
2866 cb_fields_address += sizeof(u32);
2867 register_value = ipw_read_reg32(priv, cb_fields_address);
2868 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2869 register_value);
2870
2871 cb_fields_address += sizeof(u32);
2872 register_value = ipw_read_reg32(priv, cb_fields_address);
2873 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2874
2875 IPW_DEBUG_FW(">> :\n");
2876 }
2877
2878 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2879 {
2880 u32 current_cb_address = 0;
2881 u32 current_cb_index = 0;
2882
2883 IPW_DEBUG_FW("<< :\n");
2884 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2885
2886 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2887 sizeof(struct command_block);
2888
2889 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2890 current_cb_index, current_cb_address);
2891
2892 IPW_DEBUG_FW(">> :\n");
2893 return current_cb_index;
2894
2895 }
2896
2897 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2898 u32 src_address,
2899 u32 dest_address,
2900 u32 length,
2901 int interrupt_enabled, int is_last)
2902 {
2903
2904 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2905 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2906 CB_DEST_SIZE_LONG;
2907 struct command_block *cb;
2908 u32 last_cb_element = 0;
2909
2910 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2911 src_address, dest_address, length);
2912
2913 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2914 return -1;
2915
2916 last_cb_element = priv->sram_desc.last_cb_index;
2917 cb = &priv->sram_desc.cb_list[last_cb_element];
2918 priv->sram_desc.last_cb_index++;
2919
2920 /* Calculate the new CB control word */
2921 if (interrupt_enabled)
2922 control |= CB_INT_ENABLED;
2923
2924 if (is_last)
2925 control |= CB_LAST_VALID;
2926
2927 control |= length;
2928
2929 /* Calculate the CB Element's checksum value */
2930 cb->status = control ^ src_address ^ dest_address;
2931
2932 /* Copy the Source and Destination addresses */
2933 cb->dest_addr = dest_address;
2934 cb->source_addr = src_address;
2935
2936 /* Copy the Control Word last */
2937 cb->control = control;
2938
2939 return 0;
2940 }
2941
2942 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2943 int nr, u32 dest_address, u32 len)
2944 {
2945 int ret, i;
2946 u32 size;
2947
2948 IPW_DEBUG_FW(">>\n");
2949 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2950 nr, dest_address, len);
2951
2952 for (i = 0; i < nr; i++) {
2953 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2954 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2955 dest_address +
2956 i * CB_MAX_LENGTH, size,
2957 0, 0);
2958 if (ret) {
2959 IPW_DEBUG_FW_INFO(": Failed\n");
2960 return -1;
2961 } else
2962 IPW_DEBUG_FW_INFO(": Added new cb\n");
2963 }
2964
2965 IPW_DEBUG_FW("<<\n");
2966 return 0;
2967 }
2968
2969 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2970 {
2971 u32 current_index = 0, previous_index;
2972 u32 watchdog = 0;
2973
2974 IPW_DEBUG_FW(">> :\n");
2975
2976 current_index = ipw_fw_dma_command_block_index(priv);
2977 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2978 (int)priv->sram_desc.last_cb_index);
2979
2980 while (current_index < priv->sram_desc.last_cb_index) {
2981 udelay(50);
2982 previous_index = current_index;
2983 current_index = ipw_fw_dma_command_block_index(priv);
2984
2985 if (previous_index < current_index) {
2986 watchdog = 0;
2987 continue;
2988 }
2989 if (++watchdog > 400) {
2990 IPW_DEBUG_FW_INFO("Timeout\n");
2991 ipw_fw_dma_dump_command_block(priv);
2992 ipw_fw_dma_abort(priv);
2993 return -1;
2994 }
2995 }
2996
2997 ipw_fw_dma_abort(priv);
2998
2999 /*Disable the DMA in the CSR register */
3000 ipw_set_bit(priv, IPW_RESET_REG,
3001 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
3002
3003 IPW_DEBUG_FW("<< dmaWaitSync\n");
3004 return 0;
3005 }
3006
3007 static void ipw_remove_current_network(struct ipw_priv *priv)
3008 {
3009 struct list_head *element, *safe;
3010 struct libipw_network *network = NULL;
3011 unsigned long flags;
3012
3013 spin_lock_irqsave(&priv->ieee->lock, flags);
3014 list_for_each_safe(element, safe, &priv->ieee->network_list) {
3015 network = list_entry(element, struct libipw_network, list);
3016 if (ether_addr_equal(network->bssid, priv->bssid)) {
3017 list_del(element);
3018 list_add_tail(&network->list,
3019 &priv->ieee->network_free_list);
3020 }
3021 }
3022 spin_unlock_irqrestore(&priv->ieee->lock, flags);
3023 }
3024
3025 /**
3026 * Check that card is still alive.
3027 * Reads debug register from domain0.
3028 * If card is present, pre-defined value should
3029 * be found there.
3030 *
3031 * @param priv
3032 * @return 1 if card is present, 0 otherwise
3033 */
3034 static inline int ipw_alive(struct ipw_priv *priv)
3035 {
3036 return ipw_read32(priv, 0x90) == 0xd55555d5;
3037 }
3038
3039 /* timeout in msec, attempted in 10-msec quanta */
3040 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3041 int timeout)
3042 {
3043 int i = 0;
3044
3045 do {
3046 if ((ipw_read32(priv, addr) & mask) == mask)
3047 return i;
3048 mdelay(10);
3049 i += 10;
3050 } while (i < timeout);
3051
3052 return -ETIME;
3053 }
3054
3055 /* These functions load the firmware and micro code for the operation of
3056 * the ipw hardware. It assumes the buffer has all the bits for the
3057 * image and the caller is handling the memory allocation and clean up.
3058 */
3059
3060 static int ipw_stop_master(struct ipw_priv *priv)
3061 {
3062 int rc;
3063
3064 IPW_DEBUG_TRACE(">>\n");
3065 /* stop master. typical delay - 0 */
3066 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3067
3068 /* timeout is in msec, polled in 10-msec quanta */
3069 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3070 IPW_RESET_REG_MASTER_DISABLED, 100);
3071 if (rc < 0) {
3072 IPW_ERROR("wait for stop master failed after 100ms\n");
3073 return -1;
3074 }
3075
3076 IPW_DEBUG_INFO("stop master %dms\n", rc);
3077
3078 return rc;
3079 }
3080
3081 static void ipw_arc_release(struct ipw_priv *priv)
3082 {
3083 IPW_DEBUG_TRACE(">>\n");
3084 mdelay(5);
3085
3086 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3087
3088 /* no one knows timing, for safety add some delay */
3089 mdelay(5);
3090 }
3091
3092 struct fw_chunk {
3093 __le32 address;
3094 __le32 length;
3095 };
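/*
 * The boot and runtime images handed to ipw_load_firmware() are streams of
 * these headers: each fw_chunk is immediately followed by 'length' bytes of
 * payload that the DMA engine copies to 'address' in device memory.
 */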
3096
3097 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3098 {
3099 int rc = 0, i, addr;
3100 u8 cr = 0;
3101 __le16 *image;
3102
3103 image = (__le16 *) data;
3104
3105 IPW_DEBUG_TRACE(">>\n");
3106
3107 rc = ipw_stop_master(priv);
3108
3109 if (rc < 0)
3110 return rc;
3111
3112 for (addr = IPW_SHARED_LOWER_BOUND;
3113 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3114 ipw_write32(priv, addr, 0);
3115 }
3116
3117 /* no ucode (yet) */
3118 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3119 /* destroy DMA queues */
3120 /* reset sequence */
3121
3122 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3123 ipw_arc_release(priv);
3124 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3125 mdelay(1);
3126
3127 /* reset PHY */
3128 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3129 mdelay(1);
3130
3131 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3132 mdelay(1);
3133
3134 /* enable ucode store */
3135 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3136 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3137 mdelay(1);
3138
3139 /* write ucode */
3140 /**
3141 * @bug
3142 * Do NOT set indirect address register once and then
3143 * store data to indirect data register in the loop.
3144 * It seems very reasonable, but in this case DINO do not
3145 * accept ucode. It is essential to set address each time.
3146 */
3147 /* load new ipw uCode */
3148 for (i = 0; i < len / 2; i++)
3149 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3150 le16_to_cpu(image[i]));
3151
3152 /* enable DINO */
3153 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3154 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3155
3156 /* this is where the igx / win driver deviates from the VAP driver. */
3157
3158 /* wait for alive response */
3159 for (i = 0; i < 100; i++) {
3160 /* poll for incoming data */
3161 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3162 if (cr & DINO_RXFIFO_DATA)
3163 break;
3164 mdelay(1);
3165 }
3166
3167 if (cr & DINO_RXFIFO_DATA) {
3168 /* alive_command_response size is NOT a multiple of 4 */
3169 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3170
3171 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3172 response_buffer[i] =
3173 cpu_to_le32(ipw_read_reg32(priv,
3174 IPW_BASEBAND_RX_FIFO_READ));
3175 memcpy(&priv->dino_alive, response_buffer,
3176 sizeof(priv->dino_alive));
3177 if (priv->dino_alive.alive_command == 1
3178 && priv->dino_alive.ucode_valid == 1) {
3179 rc = 0;
3180 IPW_DEBUG_INFO
3181 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3182 "of %02d/%02d/%02d %02d:%02d\n",
3183 priv->dino_alive.software_revision,
3184 priv->dino_alive.software_revision,
3185 priv->dino_alive.device_identifier,
3186 priv->dino_alive.device_identifier,
3187 priv->dino_alive.time_stamp[0],
3188 priv->dino_alive.time_stamp[1],
3189 priv->dino_alive.time_stamp[2],
3190 priv->dino_alive.time_stamp[3],
3191 priv->dino_alive.time_stamp[4]);
3192 } else {
3193 IPW_DEBUG_INFO("Microcode is not alive\n");
3194 rc = -EINVAL;
3195 }
3196 } else {
3197 IPW_DEBUG_INFO("No alive response from DINO\n");
3198 rc = -ETIME;
3199 }
3200
3201 /* disable DINO, otherwise for some reason
3202 firmware have problem getting alive resp. */
3203 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3204
3205 return rc;
3206 }
3207
3208 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3209 {
3210 int ret = -1;
3211 int offset = 0;
3212 struct fw_chunk *chunk;
3213 int total_nr = 0;
3214 int i;
3215 struct pci_pool *pool;
3216 void **virts;
3217 dma_addr_t *phys;
3218
3219 IPW_DEBUG_TRACE("<< :\n");
3220
3221 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3222 GFP_KERNEL);
3223 if (!virts)
3224 return -ENOMEM;
3225
3226 phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3227 GFP_KERNEL);
3228 if (!phys) {
3229 kfree(virts);
3230 return -ENOMEM;
3231 }
3232 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3233 if (!pool) {
3234 IPW_ERROR("pci_pool_create failed\n");
3235 kfree(phys);
3236 kfree(virts);
3237 return -ENOMEM;
3238 }
3239
3240 /* Start the Dma */
3241 ret = ipw_fw_dma_enable(priv);
3242
3243 /* if the DMA were already active here, that would be a bug */
3244 BUG_ON(priv->sram_desc.last_cb_index > 0);
3245
3246 do {
3247 u32 chunk_len;
3248 u8 *start;
3249 int size;
3250 int nr = 0;
3251
3252 chunk = (struct fw_chunk *)(data + offset);
3253 offset += sizeof(struct fw_chunk);
3254 chunk_len = le32_to_cpu(chunk->length);
3255 start = data + offset;
3256
3257 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3258 for (i = 0; i < nr; i++) {
3259 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3260 &phys[total_nr]);
3261 if (!virts[total_nr]) {
3262 ret = -ENOMEM;
3263 goto out;
3264 }
3265 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3266 CB_MAX_LENGTH);
3267 memcpy(virts[total_nr], start, size);
3268 start += size;
3269 total_nr++;
3270 /* We don't support fw chunk larger than 64*8K */
3271 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3272 }
3273
3274 /* build DMA packet and queue up for sending */
3275 /* dma to chunk->address, the chunk->length bytes from data +
3276 * offset */
3277 /* Dma loading */
3278 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3279 nr, le32_to_cpu(chunk->address),
3280 chunk_len);
3281 if (ret) {
3282 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3283 goto out;
3284 }
3285
3286 offset += chunk_len;
3287 } while (offset < len);
3288
3289 /* Run the DMA and wait for the answer */
3290 ret = ipw_fw_dma_kick(priv);
3291 if (ret) {
3292 IPW_ERROR("dmaKick Failed\n");
3293 goto out;
3294 }
3295
3296 ret = ipw_fw_dma_wait(priv);
3297 if (ret) {
3298 IPW_ERROR("dmaWaitSync Failed\n");
3299 goto out;
3300 }
3301 out:
3302 for (i = 0; i < total_nr; i++)
3303 pci_pool_free(pool, virts[i], phys[i]);
3304
3305 pci_pool_destroy(pool);
3306 kfree(phys);
3307 kfree(virts);
3308
3309 return ret;
3310 }
3311
3312 /* stop nic */
3313 static int ipw_stop_nic(struct ipw_priv *priv)
3314 {
3315 int rc = 0;
3316
3317 /* stop */
3318 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3319
3320 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3321 IPW_RESET_REG_MASTER_DISABLED, 500);
3322 if (rc < 0) {
3323 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3324 return rc;
3325 }
3326
3327 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3328
3329 return rc;
3330 }
3331
3332 static void ipw_start_nic(struct ipw_priv *priv)
3333 {
3334 IPW_DEBUG_TRACE(">>\n");
3335
3336 /* prvHwStartNic release ARC */
3337 ipw_clear_bit(priv, IPW_RESET_REG,
3338 IPW_RESET_REG_MASTER_DISABLED |
3339 IPW_RESET_REG_STOP_MASTER |
3340 CBD_RESET_REG_PRINCETON_RESET);
3341
3342 /* enable power management */
3343 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3344 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3345
3346 IPW_DEBUG_TRACE("<<\n");
3347 }
3348
3349 static int ipw_init_nic(struct ipw_priv *priv)
3350 {
3351 int rc;
3352
3353 IPW_DEBUG_TRACE(">>\n");
3354 /* reset */
3355 /*prvHwInitNic */
3356 /* set "initialization complete" bit to move adapter to D0 state */
3357 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3358
3359 /* low-level PLL activation */
3360 ipw_write32(priv, IPW_READ_INT_REGISTER,
3361 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3362
3363 /* wait for clock stabilization */
3364 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3365 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3366 if (rc < 0)
3367 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3368
3369 /* assert SW reset */
3370 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3371
3372 udelay(10);
3373
3374 /* set "initialization complete" bit to move adapter to D0 state */
3375 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3376
3377 IPW_DEBUG_TRACE(">>\n");
3378 return 0;
3379 }
3380
3381 /* Call this function from process context, it will sleep in request_firmware.
3382 * Probe is an ok place to call this from.
3383 */
3384 static int ipw_reset_nic(struct ipw_priv *priv)
3385 {
3386 int rc = 0;
3387 unsigned long flags;
3388
3389 IPW_DEBUG_TRACE(">>\n");
3390
3391 rc = ipw_init_nic(priv);
3392
3393 spin_lock_irqsave(&priv->lock, flags);
3394 /* Clear the 'host command active' bit... */
3395 priv->status &= ~STATUS_HCMD_ACTIVE;
3396 wake_up_interruptible(&priv->wait_command_queue);
3397 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3398 wake_up_interruptible(&priv->wait_state);
3399 spin_unlock_irqrestore(&priv->lock, flags);
3400
3401 IPW_DEBUG_TRACE("<<\n");
3402 return rc;
3403 }
3404
3405
3406 struct ipw_fw {
3407 __le32 ver;
3408 __le32 boot_size;
3409 __le32 ucode_size;
3410 __le32 fw_size;
3411 u8 data[0];
3412 };
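/*
 * On-disk layout, as consumed by ipw_load() below: the three images are
 * simply concatenated after this header, so
 *   boot_img  = data
 *   ucode_img = data + boot_size
 *   fw_img    = data + boot_size + ucode_size
 */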
3413
3414 static int ipw_get_fw(struct ipw_priv *priv,
3415 const struct firmware **raw, const char *name)
3416 {
3417 struct ipw_fw *fw;
3418 int rc;
3419
3420 /* ask firmware_class module to get the boot firmware off disk */
3421 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3422 if (rc < 0) {
3423 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3424 return rc;
3425 }
3426
3427 if ((*raw)->size < sizeof(*fw)) {
3428 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3429 return -EINVAL;
3430 }
3431
3432 fw = (void *)(*raw)->data;
3433
3434 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3435 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3436 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3437 name, (*raw)->size);
3438 return -EINVAL;
3439 }
3440
3441 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3442 name,
3443 le32_to_cpu(fw->ver) >> 16,
3444 le32_to_cpu(fw->ver) & 0xff,
3445 (*raw)->size - sizeof(*fw));
3446 return 0;
3447 }
3448
3449 #define IPW_RX_BUF_SIZE (3000)
3450
3451 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3452 struct ipw_rx_queue *rxq)
3453 {
3454 unsigned long flags;
3455 int i;
3456
3457 spin_lock_irqsave(&rxq->lock, flags);
3458
3459 INIT_LIST_HEAD(&rxq->rx_free);
3460 INIT_LIST_HEAD(&rxq->rx_used);
3461
3462 /* Fill the rx_used queue with _all_ of the Rx buffers */
3463 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3464 /* In the reset function, these buffers may have been allocated
3465 * to an SKB, so we need to unmap and free potential storage */
3466 if (rxq->pool[i].skb != NULL) {
3467 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3468 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3469 dev_kfree_skb(rxq->pool[i].skb);
3470 rxq->pool[i].skb = NULL;
3471 }
3472 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3473 }
3474
3475 /* Set us so that we have processed and used all buffers, but have
3476 * not restocked the Rx queue with fresh buffers */
3477 rxq->read = rxq->write = 0;
3478 rxq->free_count = 0;
3479 spin_unlock_irqrestore(&rxq->lock, flags);
3480 }
3481
3482 #ifdef CONFIG_PM
3483 static int fw_loaded = 0;
3484 static const struct firmware *raw = NULL;
3485
3486 static void free_firmware(void)
3487 {
3488 if (fw_loaded) {
3489 release_firmware(raw);
3490 raw = NULL;
3491 fw_loaded = 0;
3492 }
3493 }
3494 #else
3495 #define free_firmware() do {} while (0)
3496 #endif
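/*
 * With CONFIG_PM the firmware image stays cached in 'raw' above, so later
 * ipw_load() calls (e.g. on resume) skip request_firmware(), which may block
 * on userspace; free_firmware() (called elsewhere in the driver) releases
 * the cached copy.
 */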
3497
3498 static int ipw_load(struct ipw_priv *priv)
3499 {
3500 #ifndef CONFIG_PM
3501 const struct firmware *raw = NULL;
3502 #endif
3503 struct ipw_fw *fw;
3504 u8 *boot_img, *ucode_img, *fw_img;
3505 u8 *name = NULL;
3506 int rc = 0, retries = 3;
3507
3508 switch (priv->ieee->iw_mode) {
3509 case IW_MODE_ADHOC:
3510 name = "ipw2200-ibss.fw";
3511 break;
3512 #ifdef CONFIG_IPW2200_MONITOR
3513 case IW_MODE_MONITOR:
3514 name = "ipw2200-sniffer.fw";
3515 break;
3516 #endif
3517 case IW_MODE_INFRA:
3518 name = "ipw2200-bss.fw";
3519 break;
3520 }
3521
3522 if (!name) {
3523 rc = -EINVAL;
3524 goto error;
3525 }
3526
3527 #ifdef CONFIG_PM
3528 if (!fw_loaded) {
3529 #endif
3530 rc = ipw_get_fw(priv, &raw, name);
3531 if (rc < 0)
3532 goto error;
3533 #ifdef CONFIG_PM
3534 }
3535 #endif
3536
3537 fw = (void *)raw->data;
3538 boot_img = &fw->data[0];
3539 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3540 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3541 le32_to_cpu(fw->ucode_size)];
3542
3543 if (rc < 0)
3544 goto error;
3545
3546 if (!priv->rxq)
3547 priv->rxq = ipw_rx_queue_alloc(priv);
3548 else
3549 ipw_rx_queue_reset(priv, priv->rxq);
3550 if (!priv->rxq) {
3551 IPW_ERROR("Unable to initialize Rx queue\n");
3552 rc = -ENOMEM;
3553 goto error;
3554 }
3555
3556 retry:
3557 /* Ensure interrupts are disabled */
3558 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3559 priv->status &= ~STATUS_INT_ENABLED;
3560
3561 /* ack pending interrupts */
3562 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3563
3564 ipw_stop_nic(priv);
3565
3566 rc = ipw_reset_nic(priv);
3567 if (rc < 0) {
3568 IPW_ERROR("Unable to reset NIC\n");
3569 goto error;
3570 }
3571
3572 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3573 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3574
3575 /* DMA the initial boot firmware into the device */
3576 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3577 if (rc < 0) {
3578 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3579 goto error;
3580 }
3581
3582 /* kick start the device */
3583 ipw_start_nic(priv);
3584
3585 /* wait for the device to finish its initial startup sequence */
3586 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3587 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3588 if (rc < 0) {
3589 IPW_ERROR("device failed to boot initial fw image\n");
3590 goto error;
3591 }
3592 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3593
3594 /* ack fw init done interrupt */
3595 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3596
3597 /* DMA the ucode into the device */
3598 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3599 if (rc < 0) {
3600 IPW_ERROR("Unable to load ucode: %d\n", rc);
3601 goto error;
3602 }
3603
3604 /* stop nic */
3605 ipw_stop_nic(priv);
3606
3607 /* DMA bss firmware into the device */
3608 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3609 if (rc < 0) {
3610 IPW_ERROR("Unable to load firmware: %d\n", rc);
3611 goto error;
3612 }
3613 #ifdef CONFIG_PM
3614 fw_loaded = 1;
3615 #endif
3616
3617 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3618
3619 rc = ipw_queue_reset(priv);
3620 if (rc < 0) {
3621 IPW_ERROR("Unable to initialize queues\n");
3622 goto error;
3623 }
3624
3625 /* Ensure interrupts are disabled */
3626 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3627 /* ack pending interrupts */
3628 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3629
3630 /* kick start the device */
3631 ipw_start_nic(priv);
3632
3633 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3634 if (retries > 0) {
3635 IPW_WARNING("Parity error. Retrying init.\n");
3636 retries--;
3637 goto retry;
3638 }
3639
3640 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3641 rc = -EIO;
3642 goto error;
3643 }
3644
3645 /* wait for the device */
3646 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3647 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3648 if (rc < 0) {
3649 IPW_ERROR("device failed to start within 500ms\n");
3650 goto error;
3651 }
3652 IPW_DEBUG_INFO("device response after %dms\n", rc);
3653
3654 /* ack fw init done interrupt */
3655 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3656
3657 /* read eeprom data */
3658 priv->eeprom_delay = 1;
3659 ipw_read_eeprom(priv);
3660 /* initialize the eeprom region of sram */
3661 ipw_eeprom_init_sram(priv);
3662
3663 /* enable interrupts */
3664 ipw_enable_interrupts(priv);
3665
3666 /* Ensure our queue has valid packets */
3667 ipw_rx_queue_replenish(priv);
3668
3669 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3670
3671 /* ack pending interrupts */
3672 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3673
3674 #ifndef CONFIG_PM
3675 release_firmware(raw);
3676 #endif
3677 return 0;
3678
3679 error:
3680 if (priv->rxq) {
3681 ipw_rx_queue_free(priv, priv->rxq);
3682 priv->rxq = NULL;
3683 }
3684 ipw_tx_queue_free(priv);
3685 release_firmware(raw);
3686 #ifdef CONFIG_PM
3687 fw_loaded = 0;
3688 raw = NULL;
3689 #endif
3690
3691 return rc;
3692 }
3693
3694 /**
3695 * DMA services
3696 *
3697 * Theory of operation
3698 *
 * A queue is a circular buffer with 'Read' and 'Write' pointers.
 * Two empty entries are always kept in the buffer to protect against overflow.
 *
 * For the Tx queue, there are low mark and high mark limits.  If, after
 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
 * is stopped.  When packets are reclaimed (on the 'tx done' IRQ) and the
 * free space becomes > high mark, the Tx queue is resumed.
3706 *
3707 * The IPW operates with six queues, one receive queue in the device's
3708 * sram, one transmit queue for sending commands to the device firmware,
3709 * and four transmit queues for data.
3710 *
3711 * The four transmit queues allow for performing quality of service (qos)
3712 * transmissions as per the 802.11 protocol. Currently Linux does not
3713 * provide a mechanism to the user for utilizing prioritized queues, so
3714 * we only utilize the first data transmit queue (queue1).
3715 */
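/* A minimal illustration of the space computation used by
 * ipw_tx_queue_space()/ipw_rx_queue_space() below (numbers are made up):
 * with n_bd = 64, last_used = 5 and first_empty = 10, s = 5 - 10 = -5,
 * which wraps to 59; subtracting the 2 reserved entries leaves 57 usable
 * slots.  The 2-entry reserve is what keeps a full ring distinguishable
 * from an empty one. */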
3716
3717 /**
 * The driver allocates Rx buffers of IPW_RX_BUF_SIZE bytes (defined above).
3719 */
3720
3721 /**
3722 * ipw_rx_queue_space - Return number of free slots available in queue.
3723 */
static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3725 {
3726 int s = q->read - q->write;
3727 if (s <= 0)
3728 s += RX_QUEUE_SIZE;
3729 /* keep some buffer to not confuse full and empty queue */
3730 s -= 2;
3731 if (s < 0)
3732 s = 0;
3733 return s;
3734 }
3735
static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3737 {
3738 int s = q->last_used - q->first_empty;
3739 if (s <= 0)
3740 s += q->n_bd;
3741 s -= 2; /* keep some reserve to not confuse empty and full situations */
3742 if (s < 0)
3743 s = 0;
3744 return s;
3745 }
3746
static inline int ipw_queue_inc_wrap(int index, int n_bd)
3748 {
3749 return (++index == n_bd) ? 0 : index;
3750 }
3751
3752 /**
3753 * Initialize common DMA queue structure
3754 *
3755 * @param q queue to init
3756 * @param count Number of BD's to allocate. Should be power of 2
3757 * @param read_register Address for 'read' register
3758 * (not offset within BAR, full address)
3759 * @param write_register Address for 'write' register
3760 * (not offset within BAR, full address)
3761 * @param base_register Address for 'base' register
3762 * (not offset within BAR, full address)
3763 * @param size Address for 'size' register
3764 * (not offset within BAR, full address)
3765 */
static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
			   int count, u32 read, u32 write, u32 base, u32 size)
3768 {
3769 q->n_bd = count;
3770
3771 q->low_mark = q->n_bd / 4;
3772 if (q->low_mark < 4)
3773 q->low_mark = 4;
3774
3775 q->high_mark = q->n_bd / 8;
3776 if (q->high_mark < 2)
3777 q->high_mark = 2;
3778
3779 q->first_empty = q->last_used = 0;
3780 q->reg_r = read;
3781 q->reg_w = write;
3782
3783 ipw_write32(priv, base, q->dma_addr);
3784 ipw_write32(priv, size, count);
3785 ipw_write32(priv, read, 0);
3786 ipw_write32(priv, write, 0);
3787
3788 _ipw_read32(priv, 0x90);
3789 }
3790
static int ipw_queue_tx_init(struct ipw_priv *priv,
			     struct clx2_tx_queue *q,
			     int count, u32 read, u32 write, u32 base, u32 size)
3794 {
3795 struct pci_dev *dev = priv->pci_dev;
3796
3797 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3798 if (!q->txb) {
		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3800 return -ENOMEM;
3801 }
3802
3803 q->bd =
3804 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3805 if (!q->bd) {
3806 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3807 sizeof(q->bd[0]) * count);
3808 kfree(q->txb);
3809 q->txb = NULL;
3810 return -ENOMEM;
3811 }
3812
3813 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3814 return 0;
3815 }
3816
3817 /**
 * Free one TFD, the one at index [txq->q.last_used].
 * Do NOT advance any indexes.
3820 *
3821 * @param dev
3822 * @param txq
3823 */
static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
				  struct clx2_tx_queue *txq)
3826 {
3827 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3828 struct pci_dev *dev = priv->pci_dev;
3829 int i;
3830
3831 /* classify bd */
3832 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3833 /* nothing to cleanup after for host commands */
3834 return;
3835
3836 /* sanity check */
3837 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3838 IPW_ERROR("Too many chunks: %i\n",
3839 le32_to_cpu(bd->u.data.num_chunks));
		/** @todo issue fatal error, this is quite a serious situation */
3841 return;
3842 }
3843
3844 /* unmap chunks if any */
3845 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3846 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3847 le16_to_cpu(bd->u.data.chunk_len[i]),
3848 PCI_DMA_TODEVICE);
3849 if (txq->txb[txq->q.last_used]) {
3850 libipw_txb_free(txq->txb[txq->q.last_used]);
3851 txq->txb[txq->q.last_used] = NULL;
3852 }
3853 }
3854 }
3855
3856 /**
3857 * Deallocate DMA queue.
3858 *
3859 * Empty queue by removing and destroying all BD's.
3860 * Free all buffers.
3861 *
3862 * @param dev
3863 * @param q
3864 */
static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3866 {
3867 struct clx2_queue *q = &txq->q;
3868 struct pci_dev *dev = priv->pci_dev;
3869
3870 if (q->n_bd == 0)
3871 return;
3872
3873 /* first, empty all BD's */
3874 for (; q->first_empty != q->last_used;
3875 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3876 ipw_queue_tx_free_tfd(priv, txq);
3877 }
3878
3879 /* free buffers belonging to queue itself */
3880 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3881 q->dma_addr);
3882 kfree(txq->txb);
3883
3884 /* 0 fill whole structure */
3885 memset(txq, 0, sizeof(*txq));
3886 }
3887
3888 /**
3889 * Destroy all DMA queues and structures
3890 *
3891 * @param priv
3892 */
static void ipw_tx_queue_free(struct ipw_priv *priv)
3894 {
3895 /* Tx CMD queue */
3896 ipw_queue_tx_free(priv, &priv->txq_cmd);
3897
3898 /* Tx queues */
3899 ipw_queue_tx_free(priv, &priv->txq[0]);
3900 ipw_queue_tx_free(priv, &priv->txq[1]);
3901 ipw_queue_tx_free(priv, &priv->txq[2]);
3902 ipw_queue_tx_free(priv, &priv->txq[3]);
3903 }
3904
static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3906 {
3907 /* First 3 bytes are manufacturer */
3908 bssid[0] = priv->mac_addr[0];
3909 bssid[1] = priv->mac_addr[1];
3910 bssid[2] = priv->mac_addr[2];
3911
3912 /* Last bytes are random */
3913 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3914
3915 bssid[0] &= 0xfe; /* clear multicast bit */
3916 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3917 }
3918
static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3920 {
3921 struct ipw_station_entry entry;
3922 int i;
3923
3924 for (i = 0; i < priv->num_stations; i++) {
3925 if (ether_addr_equal(priv->stations[i], bssid)) {
3926 /* Another node is active in network */
3927 priv->missed_adhoc_beacons = 0;
3928 if (!(priv->config & CFG_STATIC_CHANNEL))
3929 /* when other nodes drop out, we drop out */
3930 priv->config &= ~CFG_ADHOC_PERSIST;
3931
3932 return i;
3933 }
3934 }
3935
3936 if (i == MAX_STATIONS)
3937 return IPW_INVALID_STATION;
3938
3939 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3940
3941 entry.reserved = 0;
3942 entry.support_mode = 0;
3943 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3944 memcpy(priv->stations[i], bssid, ETH_ALEN);
3945 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3946 &entry, sizeof(entry));
3947 priv->num_stations++;
3948
3949 return i;
3950 }
3951
static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3953 {
3954 int i;
3955
3956 for (i = 0; i < priv->num_stations; i++)
3957 if (ether_addr_equal(priv->stations[i], bssid))
3958 return i;
3959
3960 return IPW_INVALID_STATION;
3961 }
3962
static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3964 {
3965 int err;
3966
3967 if (priv->status & STATUS_ASSOCIATING) {
3968 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3969 schedule_work(&priv->disassociate);
3970 return;
3971 }
3972
3973 if (!(priv->status & STATUS_ASSOCIATED)) {
3974 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3975 return;
3976 }
3977
	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3979 "on channel %d.\n",
3980 priv->assoc_request.bssid,
3981 priv->assoc_request.channel);
3982
3983 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3984 priv->status |= STATUS_DISASSOCIATING;
3985
3986 if (quiet)
3987 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3988 else
3989 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3990
3991 err = ipw_send_associate(priv, &priv->assoc_request);
3992 if (err) {
3993 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3994 "failed.\n");
3995 return;
3996 }
3997
3998 }
3999
static int ipw_disassociate(void *data)
4001 {
4002 struct ipw_priv *priv = data;
4003 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
4004 return 0;
4005 ipw_send_disassociate(data, 0);
4006 netif_carrier_off(priv->net_dev);
4007 return 1;
4008 }
4009
static void ipw_bg_disassociate(struct work_struct *work)
4011 {
4012 struct ipw_priv *priv =
4013 container_of(work, struct ipw_priv, disassociate);
4014 mutex_lock(&priv->mutex);
4015 ipw_disassociate(priv);
4016 mutex_unlock(&priv->mutex);
4017 }
4018
static void ipw_system_config(struct work_struct *work)
4020 {
4021 struct ipw_priv *priv =
4022 container_of(work, struct ipw_priv, system_config);
4023
4024 #ifdef CONFIG_IPW2200_PROMISCUOUS
4025 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4026 priv->sys_config.accept_all_data_frames = 1;
4027 priv->sys_config.accept_non_directed_frames = 1;
4028 priv->sys_config.accept_all_mgmt_bcpr = 1;
4029 priv->sys_config.accept_all_mgmt_frames = 1;
4030 }
4031 #endif
4032
4033 ipw_send_system_config(priv);
4034 }
4035
4036 struct ipw_status_code {
4037 u16 status;
4038 const char *reason;
4039 };
4040
4041 static const struct ipw_status_code ipw_status_codes[] = {
4042 {0x00, "Successful"},
4043 {0x01, "Unspecified failure"},
4044 {0x0A, "Cannot support all requested capabilities in the "
4045 "Capability information field"},
4046 {0x0B, "Reassociation denied due to inability to confirm that "
4047 "association exists"},
4048 {0x0C, "Association denied due to reason outside the scope of this "
4049 "standard"},
4050 {0x0D,
4051 "Responding station does not support the specified authentication "
4052 "algorithm"},
4053 {0x0E,
4054 "Received an Authentication frame with authentication sequence "
4055 "transaction sequence number out of expected sequence"},
4056 {0x0F, "Authentication rejected because of challenge failure"},
4057 {0x10, "Authentication rejected due to timeout waiting for next "
4058 "frame in sequence"},
4059 {0x11, "Association denied because AP is unable to handle additional "
4060 "associated stations"},
4061 {0x12,
4062 "Association denied due to requesting station not supporting all "
4063 "of the datarates in the BSSBasicServiceSet Parameter"},
4064 {0x13,
4065 "Association denied due to requesting station not supporting "
4066 "short preamble operation"},
4067 {0x14,
4068 "Association denied due to requesting station not supporting "
4069 "PBCC encoding"},
4070 {0x15,
4071 "Association denied due to requesting station not supporting "
4072 "channel agility"},
4073 {0x19,
4074 "Association denied due to requesting station not supporting "
4075 "short slot operation"},
4076 {0x1A,
4077 "Association denied due to requesting station not supporting "
4078 "DSSS-OFDM operation"},
4079 {0x28, "Invalid Information Element"},
4080 {0x29, "Group Cipher is not valid"},
4081 {0x2A, "Pairwise Cipher is not valid"},
4082 {0x2B, "AKMP is not valid"},
4083 {0x2C, "Unsupported RSN IE version"},
4084 {0x2D, "Invalid RSN IE Capabilities"},
4085 {0x2E, "Cipher suite is rejected per security policy"},
4086 };
4087
static const char *ipw_get_status_code(u16 status)
4089 {
4090 int i;
4091 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4092 if (ipw_status_codes[i].status == (status & 0xff))
4093 return ipw_status_codes[i].reason;
4094 return "Unknown status value.";
4095 }
4096
static inline void average_init(struct average *avg)
4098 {
4099 memset(avg, 0, sizeof(*avg));
4100 }
4101
4102 #define DEPTH_RSSI 8
4103 #define DEPTH_NOISE 16
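/* exponential_average() below is an exponentially weighted moving average:
 * the newest sample gets a weight of 1/depth and the previous average a
 * weight of (depth - 1)/depth, using integer arithmetic.  For example (made
 * up values): prev_avg = -60, val = -52, depth = 8 gives
 * (7 * -60 + -52) / 8 = -472 / 8 = -59. */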
static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4105 {
4106 return ((depth-1)*prev_avg + val)/depth;
4107 }
4108
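/* average_add()/average_value() below keep a sliding-window average over a
 * ring of AVG_ENTRIES samples.  Until the ring has wrapped once (avg->init
 * still 0) the average covers only the samples seen so far; afterwards it
 * always covers the full window. */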
static void average_add(struct average *avg, s16 val)
4110 {
4111 avg->sum -= avg->entries[avg->pos];
4112 avg->sum += val;
4113 avg->entries[avg->pos++] = val;
4114 if (unlikely(avg->pos == AVG_ENTRIES)) {
4115 avg->init = 1;
4116 avg->pos = 0;
4117 }
4118 }
4119
static s16 average_value(struct average *avg)
4121 {
4122 if (!unlikely(avg->init)) {
4123 if (avg->pos)
4124 return avg->sum / avg->pos;
4125 return 0;
4126 }
4127
4128 return avg->sum / AVG_ENTRIES;
4129 }
4130
static void ipw_reset_stats(struct ipw_priv *priv)
4132 {
4133 u32 len = sizeof(u32);
4134
4135 priv->quality = 0;
4136
4137 average_init(&priv->average_missed_beacons);
4138 priv->exp_avg_rssi = -60;
4139 priv->exp_avg_noise = -85 + 0x100;
4140
4141 priv->last_rate = 0;
4142 priv->last_missed_beacons = 0;
4143 priv->last_rx_packets = 0;
4144 priv->last_tx_packets = 0;
4145 priv->last_tx_failures = 0;
4146
4147 /* Firmware managed, reset only when NIC is restarted, so we have to
4148 * normalize on the current value */
4149 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4150 &priv->last_rx_err, &len);
4151 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4152 &priv->last_tx_failures, &len);
4153
4154 /* Driver managed, reset with each association */
4155 priv->missed_adhoc_beacons = 0;
4156 priv->missed_beacons = 0;
4157 priv->tx_packets = 0;
4158 priv->rx_packets = 0;
4159
4160 }
4161
static u32 ipw_get_max_rate(struct ipw_priv *priv)
4163 {
4164 u32 i = 0x80000000;
4165 u32 mask = priv->rates_mask;
4166 /* If currently associated in B mode, restrict the maximum
4167 * rate match to B rates */
4168 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4169 mask &= LIBIPW_CCK_RATES_MASK;
4170
4171 /* TODO: Verify that the rate is supported by the current rates
4172 * list. */
4173
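	/* Walk down from the most significant bit to find the highest rate
	 * bit still set in the (possibly restricted) mask; the switch below
	 * maps that bit to a bit rate in bits per second. */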
4174 while (i && !(mask & i))
4175 i >>= 1;
4176 switch (i) {
4177 case LIBIPW_CCK_RATE_1MB_MASK:
4178 return 1000000;
4179 case LIBIPW_CCK_RATE_2MB_MASK:
4180 return 2000000;
4181 case LIBIPW_CCK_RATE_5MB_MASK:
4182 return 5500000;
4183 case LIBIPW_OFDM_RATE_6MB_MASK:
4184 return 6000000;
4185 case LIBIPW_OFDM_RATE_9MB_MASK:
4186 return 9000000;
4187 case LIBIPW_CCK_RATE_11MB_MASK:
4188 return 11000000;
4189 case LIBIPW_OFDM_RATE_12MB_MASK:
4190 return 12000000;
4191 case LIBIPW_OFDM_RATE_18MB_MASK:
4192 return 18000000;
4193 case LIBIPW_OFDM_RATE_24MB_MASK:
4194 return 24000000;
4195 case LIBIPW_OFDM_RATE_36MB_MASK:
4196 return 36000000;
4197 case LIBIPW_OFDM_RATE_48MB_MASK:
4198 return 48000000;
4199 case LIBIPW_OFDM_RATE_54MB_MASK:
4200 return 54000000;
4201 }
4202
4203 if (priv->ieee->mode == IEEE_B)
4204 return 11000000;
4205 else
4206 return 54000000;
4207 }
4208
static u32 ipw_get_current_rate(struct ipw_priv *priv)
4210 {
4211 u32 rate, len = sizeof(rate);
4212 int err;
4213
4214 if (!(priv->status & STATUS_ASSOCIATED))
4215 return 0;
4216
4217 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4218 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4219 &len);
4220 if (err) {
4221 IPW_DEBUG_INFO("failed querying ordinals.\n");
4222 return 0;
4223 }
4224 } else
4225 return ipw_get_max_rate(priv);
4226
4227 switch (rate) {
4228 case IPW_TX_RATE_1MB:
4229 return 1000000;
4230 case IPW_TX_RATE_2MB:
4231 return 2000000;
4232 case IPW_TX_RATE_5MB:
4233 return 5500000;
4234 case IPW_TX_RATE_6MB:
4235 return 6000000;
4236 case IPW_TX_RATE_9MB:
4237 return 9000000;
4238 case IPW_TX_RATE_11MB:
4239 return 11000000;
4240 case IPW_TX_RATE_12MB:
4241 return 12000000;
4242 case IPW_TX_RATE_18MB:
4243 return 18000000;
4244 case IPW_TX_RATE_24MB:
4245 return 24000000;
4246 case IPW_TX_RATE_36MB:
4247 return 36000000;
4248 case IPW_TX_RATE_48MB:
4249 return 48000000;
4250 case IPW_TX_RATE_54MB:
4251 return 54000000;
4252 }
4253
4254 return 0;
4255 }
4256
4257 #define IPW_STATS_INTERVAL (2 * HZ)
static void ipw_gather_stats(struct ipw_priv *priv)
4259 {
4260 u32 rx_err, rx_err_delta, rx_packets_delta;
4261 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4262 u32 missed_beacons_percent, missed_beacons_delta;
4263 u32 quality = 0;
4264 u32 len = sizeof(u32);
4265 s16 rssi;
4266 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4267 rate_quality;
4268 u32 max_rate;
4269
4270 if (!(priv->status & STATUS_ASSOCIATED)) {
4271 priv->quality = 0;
4272 return;
4273 }
4274
4275 /* Update the statistics */
4276 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4277 &priv->missed_beacons, &len);
4278 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4279 priv->last_missed_beacons = priv->missed_beacons;
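	/* Convert the missed-beacon delta into an approximate percentage of
	 * the beacons expected during the stats window.  With
	 * IPW_STATS_INTERVAL = 2 * HZ the expression below reduces to
	 * missed_beacons_delta * beacon_interval / 20, treating one TU
	 * (1.024 ms) as roughly 1 ms. */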
4280 if (priv->assoc_request.beacon_interval) {
4281 missed_beacons_percent = missed_beacons_delta *
4282 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4283 (IPW_STATS_INTERVAL * 10);
4284 } else {
4285 missed_beacons_percent = 0;
4286 }
4287 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4288
4289 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4290 rx_err_delta = rx_err - priv->last_rx_err;
4291 priv->last_rx_err = rx_err;
4292
4293 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4294 tx_failures_delta = tx_failures - priv->last_tx_failures;
4295 priv->last_tx_failures = tx_failures;
4296
4297 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4298 priv->last_rx_packets = priv->rx_packets;
4299
4300 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4301 priv->last_tx_packets = priv->tx_packets;
4302
4303 /* Calculate quality based on the following:
4304 *
4305 * Missed beacon: 100% = 0, 0% = 70% missed
4306 * Rate: 60% = 1Mbs, 100% = Max
4307 * Rx and Tx errors represent a straight % of total Rx/Tx
4308 * RSSI: 100% = > -50, 0% = < -80
4309 * Rx errors: 100% = 0, 0% = 50% missed
4310 *
4311 * The lowest computed quality is used.
4312 *
4313 */
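	/* Illustrative example of the rescaling below: 20% missed beacons
	 * gives beacon_quality = 80, which is then stretched over the
	 * remaining range: (80 - 5) * 100 / (100 - 5) = 78%. */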
4314 #define BEACON_THRESHOLD 5
4315 beacon_quality = 100 - missed_beacons_percent;
4316 if (beacon_quality < BEACON_THRESHOLD)
4317 beacon_quality = 0;
4318 else
4319 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4320 (100 - BEACON_THRESHOLD);
4321 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4322 beacon_quality, missed_beacons_percent);
4323
4324 priv->last_rate = ipw_get_current_rate(priv);
4325 max_rate = ipw_get_max_rate(priv);
4326 rate_quality = priv->last_rate * 40 / max_rate + 60;
4327 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4328 rate_quality, priv->last_rate / 1000000);
4329
4330 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4331 rx_quality = 100 - (rx_err_delta * 100) /
4332 (rx_packets_delta + rx_err_delta);
4333 else
4334 rx_quality = 100;
4335 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4336 rx_quality, rx_err_delta, rx_packets_delta);
4337
4338 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4339 tx_quality = 100 - (tx_failures_delta * 100) /
4340 (tx_packets_delta + tx_failures_delta);
4341 else
4342 tx_quality = 100;
4343 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4344 tx_quality, tx_failures_delta, tx_packets_delta);
4345
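	/* The expression below maps the averaged RSSI onto a 0-100% scale
	 * with a quadratic roll-off between worst_rssi and perfect_rssi and
	 * is then clamped.  With illustrative values perfect_rssi = -20,
	 * worst_rssi = -85 and rssi = -60 it comes out at roughly 67%. */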
4346 rssi = priv->exp_avg_rssi;
4347 signal_quality =
4348 (100 *
4349 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4350 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4351 (priv->ieee->perfect_rssi - rssi) *
4352 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4353 62 * (priv->ieee->perfect_rssi - rssi))) /
4354 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4355 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4356 if (signal_quality > 100)
4357 signal_quality = 100;
4358 else if (signal_quality < 1)
4359 signal_quality = 0;
4360
4361 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4362 signal_quality, rssi);
4363
4364 quality = min(rx_quality, signal_quality);
4365 quality = min(tx_quality, quality);
4366 quality = min(rate_quality, quality);
4367 quality = min(beacon_quality, quality);
4368 if (quality == beacon_quality)
4369 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4370 quality);
4371 if (quality == rate_quality)
4372 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4373 quality);
4374 if (quality == tx_quality)
4375 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4376 quality);
4377 if (quality == rx_quality)
4378 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4379 quality);
4380 if (quality == signal_quality)
4381 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4382 quality);
4383
4384 priv->quality = quality;
4385
4386 schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4387 }
4388
static void ipw_bg_gather_stats(struct work_struct *work)
4390 {
4391 struct ipw_priv *priv =
4392 container_of(work, struct ipw_priv, gather_stats.work);
4393 mutex_lock(&priv->mutex);
4394 ipw_gather_stats(priv);
4395 mutex_unlock(&priv->mutex);
4396 }
4397
4398 /* Missed beacon behavior:
4399 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4400 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4401 * Above disassociate threshold, give up and stop scanning.
4402 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
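/* Purely illustrative: if roaming_threshold were 8 and disassociate_threshold
 * were 24, missed counts 1-8 would only be logged, 9-24 would trigger roaming
 * scans, and anything above 24 would disassociate. */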
static void ipw_handle_missed_beacon(struct ipw_priv *priv,
				     int missed_count)
4405 {
4406 priv->notif_missed_beacons = missed_count;
4407
4408 if (missed_count > priv->disassociate_threshold &&
4409 priv->status & STATUS_ASSOCIATED) {
4410 /* If associated and we've hit the missed
4411 * beacon threshold, disassociate, turn
4412 * off roaming, and abort any active scans */
4413 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4414 IPW_DL_STATE | IPW_DL_ASSOC,
4415 "Missed beacon: %d - disassociate\n", missed_count);
4416 priv->status &= ~STATUS_ROAMING;
4417 if (priv->status & STATUS_SCANNING) {
4418 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4419 IPW_DL_STATE,
4420 "Aborting scan with missed beacon.\n");
4421 schedule_work(&priv->abort_scan);
4422 }
4423
4424 schedule_work(&priv->disassociate);
4425 return;
4426 }
4427
4428 if (priv->status & STATUS_ROAMING) {
4429 /* If we are currently roaming, then just
4430 * print a debug statement... */
4431 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4432 "Missed beacon: %d - roam in progress\n",
4433 missed_count);
4434 return;
4435 }
4436
4437 if (roaming &&
4438 (missed_count > priv->roaming_threshold &&
4439 missed_count <= priv->disassociate_threshold)) {
4440 /* If we are not already roaming, set the ROAM
4441 * bit in the status and kick off a scan.
4442 * This can happen several times before we reach
4443 * disassociate_threshold. */
4444 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4445 "Missed beacon: %d - initiate "
4446 "roaming\n", missed_count);
4447 if (!(priv->status & STATUS_ROAMING)) {
4448 priv->status |= STATUS_ROAMING;
4449 if (!(priv->status & STATUS_SCANNING))
4450 schedule_delayed_work(&priv->request_scan, 0);
4451 }
4452 return;
4453 }
4454
4455 if (priv->status & STATUS_SCANNING &&
4456 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4457 /* Stop scan to keep fw from getting
4458 * stuck (only if we aren't roaming --
4459 * otherwise we'll never scan more than 2 or 3
4460 * channels..) */
4461 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4462 "Aborting scan with missed beacon.\n");
4463 schedule_work(&priv->abort_scan);
4464 }
4465
4466 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4467 }
4468
static void ipw_scan_event(struct work_struct *work)
4470 {
4471 union iwreq_data wrqu;
4472
4473 struct ipw_priv *priv =
4474 container_of(work, struct ipw_priv, scan_event.work);
4475
4476 wrqu.data.length = 0;
4477 wrqu.data.flags = 0;
4478 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4479 }
4480
static void handle_scan_event(struct ipw_priv *priv)
4482 {
4483 /* Only userspace-requested scan completion events go out immediately */
4484 if (!priv->user_requested_scan) {
4485 schedule_delayed_work(&priv->scan_event,
4486 round_jiffies_relative(msecs_to_jiffies(4000)));
4487 } else {
4488 priv->user_requested_scan = 0;
4489 mod_delayed_work(system_wq, &priv->scan_event, 0);
4490 }
4491 }
4492
4493 /**
4494 * Handle host notification packet.
4495 * Called from interrupt routine
4496 */
static void ipw_rx_notification(struct ipw_priv *priv,
				struct ipw_rx_notification *notif)
4499 {
4500 u16 size = le16_to_cpu(notif->size);
4501
4502 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4503
4504 switch (notif->subtype) {
4505 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
			struct notif_association *assoc = &notif->u.assoc;
4507
4508 switch (assoc->state) {
4509 case CMAS_ASSOCIATED:{
4510 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4511 IPW_DL_ASSOC,
4512 "associated: '%*pE' %pM\n",
4513 priv->essid_len, priv->essid,
4514 priv->bssid);
4515
4516 switch (priv->ieee->iw_mode) {
4517 case IW_MODE_INFRA:
4518 memcpy(priv->ieee->bssid,
4519 priv->bssid, ETH_ALEN);
4520 break;
4521
4522 case IW_MODE_ADHOC:
4523 memcpy(priv->ieee->bssid,
4524 priv->bssid, ETH_ALEN);
4525
4526 /* clear out the station table */
4527 priv->num_stations = 0;
4528
4529 IPW_DEBUG_ASSOC
4530 ("queueing adhoc check\n");
4531 schedule_delayed_work(
4532 &priv->adhoc_check,
4533 le16_to_cpu(priv->
4534 assoc_request.
4535 beacon_interval));
4536 break;
4537 }
4538
4539 priv->status &= ~STATUS_ASSOCIATING;
4540 priv->status |= STATUS_ASSOCIATED;
4541 schedule_work(&priv->system_config);
4542
4543 #ifdef CONFIG_IPW2200_QOS
4544 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4545 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4546 if ((priv->status & STATUS_AUTH) &&
				    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4548 == IEEE80211_STYPE_ASSOC_RESP)) {
4549 if ((sizeof
4550 (struct
4551 libipw_assoc_response)
4552 <= size)
4553 && (size <= 2314)) {
4554 struct
4555 libipw_rx_stats
4556 stats = {
4557 .len = size - 1,
4558 };
4559
4560 IPW_DEBUG_QOS
4561 ("QoS Associate "
4562 "size %d\n", size);
4563 libipw_rx_mgt(priv->
4564 ieee,
4565 (struct
4566 libipw_hdr_4addr
4567 *)
						       &notif->u.raw, &stats);
4569 }
4570 }
4571 #endif
4572
4573 schedule_work(&priv->link_up);
4574
4575 break;
4576 }
4577
4578 case CMAS_AUTHENTICATED:{
4579 if (priv->
4580 status & (STATUS_ASSOCIATED |
4581 STATUS_AUTH)) {
4582 struct notif_authenticate *auth
					    = &notif->u.auth;
4584 IPW_DEBUG(IPW_DL_NOTIF |
4585 IPW_DL_STATE |
4586 IPW_DL_ASSOC,
4587 "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
4588 priv->essid_len,
4589 priv->essid,
4590 priv->bssid,
4591 le16_to_cpu(auth->status),
4592 ipw_get_status_code
4593 (le16_to_cpu
4594 (auth->status)));
4595
4596 priv->status &=
4597 ~(STATUS_ASSOCIATING |
4598 STATUS_AUTH |
4599 STATUS_ASSOCIATED);
4600
4601 schedule_work(&priv->link_down);
4602 break;
4603 }
4604
4605 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4606 IPW_DL_ASSOC,
4607 "authenticated: '%*pE' %pM\n",
4608 priv->essid_len, priv->essid,
4609 priv->bssid);
4610 break;
4611 }
4612
4613 case CMAS_INIT:{
4614 if (priv->status & STATUS_AUTH) {
4615 struct
4616 libipw_assoc_response
4617 *resp;
4618 resp =
4619 (struct
4620 libipw_assoc_response
					     *)&notif->u.raw;
4622 IPW_DEBUG(IPW_DL_NOTIF |
4623 IPW_DL_STATE |
4624 IPW_DL_ASSOC,
4625 "association failed (0x%04X): %s\n",
4626 le16_to_cpu(resp->status),
4627 ipw_get_status_code
4628 (le16_to_cpu
4629 (resp->status)));
4630 }
4631
4632 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4633 IPW_DL_ASSOC,
4634 "disassociated: '%*pE' %pM\n",
4635 priv->essid_len, priv->essid,
4636 priv->bssid);
4637
4638 priv->status &=
4639 ~(STATUS_DISASSOCIATING |
4640 STATUS_ASSOCIATING |
4641 STATUS_ASSOCIATED | STATUS_AUTH);
4642 if (priv->assoc_network
4643 && (priv->assoc_network->
4644 capability &
4645 WLAN_CAPABILITY_IBSS))
4646 ipw_remove_current_network
4647 (priv);
4648
4649 schedule_work(&priv->link_down);
4650
4651 break;
4652 }
4653
4654 case CMAS_RX_ASSOC_RESP:
4655 break;
4656
4657 default:
4658 IPW_ERROR("assoc: unknown (%d)\n",
4659 assoc->state);
4660 break;
4661 }
4662
4663 break;
4664 }
4665
4666 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
			struct notif_authenticate *auth = &notif->u.auth;
4668 switch (auth->state) {
4669 case CMAS_AUTHENTICATED:
4670 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4671 "authenticated: '%*pE' %pM\n",
4672 priv->essid_len, priv->essid,
4673 priv->bssid);
4674 priv->status |= STATUS_AUTH;
4675 break;
4676
4677 case CMAS_INIT:
4678 if (priv->status & STATUS_AUTH) {
4679 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4680 IPW_DL_ASSOC,
4681 "authentication failed (0x%04X): %s\n",
4682 le16_to_cpu(auth->status),
4683 ipw_get_status_code(le16_to_cpu
4684 (auth->
4685 status)));
4686 }
4687 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4688 IPW_DL_ASSOC,
4689 "deauthenticated: '%*pE' %pM\n",
4690 priv->essid_len, priv->essid,
4691 priv->bssid);
4692
4693 priv->status &= ~(STATUS_ASSOCIATING |
4694 STATUS_AUTH |
4695 STATUS_ASSOCIATED);
4696
4697 schedule_work(&priv->link_down);
4698 break;
4699
4700 case CMAS_TX_AUTH_SEQ_1:
4701 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4702 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4703 break;
4704 case CMAS_RX_AUTH_SEQ_2:
4705 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4706 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4707 break;
4708 case CMAS_AUTH_SEQ_1_PASS:
4709 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4710 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4711 break;
4712 case CMAS_AUTH_SEQ_1_FAIL:
4713 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4714 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4715 break;
4716 case CMAS_TX_AUTH_SEQ_3:
4717 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4718 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4719 break;
4720 case CMAS_RX_AUTH_SEQ_4:
4721 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4722 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4723 break;
4724 case CMAS_AUTH_SEQ_2_PASS:
4725 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4726 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4727 break;
4728 case CMAS_AUTH_SEQ_2_FAIL:
4729 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4731 break;
4732 case CMAS_TX_ASSOC:
4733 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4734 IPW_DL_ASSOC, "TX_ASSOC\n");
4735 break;
4736 case CMAS_RX_ASSOC_RESP:
4737 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4738 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4739
4740 break;
4741 case CMAS_ASSOCIATED:
4742 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4743 IPW_DL_ASSOC, "ASSOCIATED\n");
4744 break;
4745 default:
4746 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4747 auth->state);
4748 break;
4749 }
4750 break;
4751 }
4752
4753 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4754 struct notif_channel_result *x =
			    &notif->u.channel_result;
4756
4757 if (size == sizeof(*x)) {
4758 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4759 x->channel_num);
4760 } else {
4761 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4762 "(should be %zd)\n",
4763 size, sizeof(*x));
4764 }
4765 break;
4766 }
4767
4768 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
			struct notif_scan_complete *x = &notif->u.scan_complete;
4770 if (size == sizeof(*x)) {
4771 IPW_DEBUG_SCAN
4772 ("Scan completed: type %d, %d channels, "
4773 "%d status\n", x->scan_type,
4774 x->num_channels, x->status);
4775 } else {
4776 IPW_ERROR("Scan completed of wrong size %d "
4777 "(should be %zd)\n",
4778 size, sizeof(*x));
4779 }
4780
4781 priv->status &=
4782 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4783
4784 wake_up_interruptible(&priv->wait_state);
4785 cancel_delayed_work(&priv->scan_check);
4786
4787 if (priv->status & STATUS_EXIT_PENDING)
4788 break;
4789
4790 priv->ieee->scans++;
4791
4792 #ifdef CONFIG_IPW2200_MONITOR
4793 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4794 priv->status |= STATUS_SCAN_FORCED;
4795 schedule_delayed_work(&priv->request_scan, 0);
4796 break;
4797 }
4798 priv->status &= ~STATUS_SCAN_FORCED;
4799 #endif /* CONFIG_IPW2200_MONITOR */
4800
4801 /* Do queued direct scans first */
4802 if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4803 schedule_delayed_work(&priv->request_direct_scan, 0);
4804
4805 if (!(priv->status & (STATUS_ASSOCIATED |
4806 STATUS_ASSOCIATING |
4807 STATUS_ROAMING |
4808 STATUS_DISASSOCIATING)))
4809 schedule_work(&priv->associate);
4810 else if (priv->status & STATUS_ROAMING) {
4811 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4812 /* If a scan completed and we are in roam mode, then
4813 * the scan that completed was the one requested as a
4814 * result of entering roam... so, schedule the
4815 * roam work */
4816 schedule_work(&priv->roam);
4817 else
4818 /* Don't schedule if we aborted the scan */
4819 priv->status &= ~STATUS_ROAMING;
4820 } else if (priv->status & STATUS_SCAN_PENDING)
4821 schedule_delayed_work(&priv->request_scan, 0);
4822 else if (priv->config & CFG_BACKGROUND_SCAN
4823 && priv->status & STATUS_ASSOCIATED)
4824 schedule_delayed_work(&priv->request_scan,
4825 round_jiffies_relative(HZ));
4826
4827 /* Send an empty event to user space.
4828 * We don't send the received data on the event because
4829 * it would require us to do complex transcoding, and
4830 * we want to minimise the work done in the irq handler
4831 * Use a request to extract the data.
			 * Also, we generate this event for any scan, regardless
			 * of how the scan was initiated. User space can just
4834 * sync on periodic scan to get fresh data...
4835 * Jean II */
4836 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4837 handle_scan_event(priv);
4838 break;
4839 }
4840
4841 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
			struct notif_frag_length *x = &notif->u.frag_len;
4843
4844 if (size == sizeof(*x))
4845 IPW_ERROR("Frag length: %d\n",
4846 le16_to_cpu(x->frag_length));
4847 else
4848 IPW_ERROR("Frag length of wrong size %d "
4849 "(should be %zd)\n",
4850 size, sizeof(*x));
4851 break;
4852 }
4853
4854 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4855 struct notif_link_deterioration *x =
			    &notif->u.link_deterioration;
4857
4858 if (size == sizeof(*x)) {
4859 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4860 "link deterioration: type %d, cnt %d\n",
4861 x->silence_notification_type,
4862 x->silence_count);
4863 memcpy(&priv->last_link_deterioration, x,
4864 sizeof(*x));
4865 } else {
4866 IPW_ERROR("Link Deterioration of wrong size %d "
4867 "(should be %zd)\n",
4868 size, sizeof(*x));
4869 }
4870 break;
4871 }
4872
4873 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4874 IPW_ERROR("Dino config\n");
4875 if (priv->hcmd
4876 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4877 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4878
4879 break;
4880 }
4881
4882 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
			struct notif_beacon_state *x = &notif->u.beacon_state;
4884 if (size != sizeof(*x)) {
4885 IPW_ERROR
4886 ("Beacon state of wrong size %d (should "
4887 "be %zd)\n", size, sizeof(*x));
4888 break;
4889 }
4890
4891 if (le32_to_cpu(x->state) ==
4892 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4893 ipw_handle_missed_beacon(priv,
4894 le32_to_cpu(x->
4895 number));
4896
4897 break;
4898 }
4899
4900 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4902 if (size == sizeof(*x)) {
4903 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4904 "0x%02x station %d\n",
4905 x->key_state, x->security_type,
4906 x->station_index);
4907 break;
4908 }
4909
4910 IPW_ERROR
4911 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4912 size, sizeof(*x));
4913 break;
4914 }
4915
4916 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
			struct notif_calibration *x = &notif->u.calibration;
4918
4919 if (size == sizeof(*x)) {
4920 memcpy(&priv->calib, x, sizeof(*x));
4921 IPW_DEBUG_INFO("TODO: Calibration\n");
4922 break;
4923 }
4924
4925 IPW_ERROR
4926 ("Calibration of wrong size %d (should be %zd)\n",
4927 size, sizeof(*x));
4928 break;
4929 }
4930
4931 case HOST_NOTIFICATION_NOISE_STATS:{
4932 if (size == sizeof(u32)) {
4933 priv->exp_avg_noise =
4934 exponential_average(priv->exp_avg_noise,
4935 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4936 DEPTH_NOISE);
4937 break;
4938 }
4939
4940 IPW_ERROR
4941 ("Noise stat is wrong size %d (should be %zd)\n",
4942 size, sizeof(u32));
4943 break;
4944 }
4945
4946 default:
4947 IPW_DEBUG_NOTIF("Unknown notification: "
4948 "subtype=%d,flags=0x%2x,size=%d\n",
4949 notif->subtype, notif->flags, size);
4950 }
4951 }
4952
4953 /**
 * Destroys all DMA structures and initialises them again
4955 *
4956 * @param priv
4957 * @return error code
4958 */
static int ipw_queue_reset(struct ipw_priv *priv)
4960 {
4961 int rc = 0;
4962 /** @todo customize queue sizes */
4963 int nTx = 64, nTxCmd = 8;
4964 ipw_tx_queue_free(priv);
4965 /* Tx CMD queue */
4966 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4967 IPW_TX_CMD_QUEUE_READ_INDEX,
4968 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4969 IPW_TX_CMD_QUEUE_BD_BASE,
4970 IPW_TX_CMD_QUEUE_BD_SIZE);
4971 if (rc) {
4972 IPW_ERROR("Tx Cmd queue init failed\n");
4973 goto error;
4974 }
4975 /* Tx queue(s) */
4976 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4977 IPW_TX_QUEUE_0_READ_INDEX,
4978 IPW_TX_QUEUE_0_WRITE_INDEX,
4979 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4980 if (rc) {
4981 IPW_ERROR("Tx 0 queue init failed\n");
4982 goto error;
4983 }
4984 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4985 IPW_TX_QUEUE_1_READ_INDEX,
4986 IPW_TX_QUEUE_1_WRITE_INDEX,
4987 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4988 if (rc) {
4989 IPW_ERROR("Tx 1 queue init failed\n");
4990 goto error;
4991 }
4992 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4993 IPW_TX_QUEUE_2_READ_INDEX,
4994 IPW_TX_QUEUE_2_WRITE_INDEX,
4995 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4996 if (rc) {
4997 IPW_ERROR("Tx 2 queue init failed\n");
4998 goto error;
4999 }
5000 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
5001 IPW_TX_QUEUE_3_READ_INDEX,
5002 IPW_TX_QUEUE_3_WRITE_INDEX,
5003 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5004 if (rc) {
5005 IPW_ERROR("Tx 3 queue init failed\n");
5006 goto error;
5007 }
5008 /* statistics */
5009 priv->rx_bufs_min = 0;
5010 priv->rx_pend_max = 0;
5011 return rc;
5012
5013 error:
5014 ipw_tx_queue_free(priv);
5015 return rc;
5016 }
5017
5018 /**
 * Reclaim Tx queue entries no longer used by the NIC.
 *
 * When the FW advances the 'R' index, all entries between the old and
 * new 'R' index need to be reclaimed. As a result, some free space
 * forms. If there is enough free space (> low mark), wake the Tx queue.
 *
 * @note Need to protect against garbage in 'R' index
 * @param priv
 * @param txq
 * @param qindex
 * @return Number of used entries remaining in the queue
5030 */
static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
				struct clx2_tx_queue *txq, int qindex)
5033 {
5034 u32 hw_tail;
5035 int used;
5036 struct clx2_queue *q = &txq->q;
5037
5038 hw_tail = ipw_read32(priv, q->reg_r);
5039 if (hw_tail >= q->n_bd) {
5040 IPW_ERROR
5041 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5042 hw_tail, q->n_bd);
5043 goto done;
5044 }
5045 for (; q->last_used != hw_tail;
5046 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5047 ipw_queue_tx_free_tfd(priv, txq);
5048 priv->tx_packets++;
5049 }
5050 done:
5051 if ((ipw_tx_queue_space(q) > q->low_mark) &&
5052 (qindex >= 0))
5053 netif_wake_queue(priv->net_dev);
5054 used = q->first_empty - q->last_used;
5055 if (used < 0)
5056 used += q->n_bd;
5057
5058 return used;
5059 }
5060
static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
			     int len, int sync)
5063 {
5064 struct clx2_tx_queue *txq = &priv->txq_cmd;
5065 struct clx2_queue *q = &txq->q;
5066 struct tfd_frame *tfd;
5067
5068 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5069 IPW_ERROR("No space for Tx\n");
5070 return -EBUSY;
5071 }
5072
5073 tfd = &txq->bd[q->first_empty];
5074 txq->txb[q->first_empty] = NULL;
5075
5076 memset(tfd, 0, sizeof(*tfd));
5077 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5078 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5079 priv->hcmd_seq++;
5080 tfd->u.cmd.index = hcmd;
5081 tfd->u.cmd.length = len;
5082 memcpy(tfd->u.cmd.payload, buf, len);
5083 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5084 ipw_write32(priv, q->reg_w, q->first_empty);
5085 _ipw_read32(priv, 0x90);
5086
5087 return 0;
5088 }
5089
5090 /*
5091 * Rx theory of operation
5092 *
5093 * The host allocates 32 DMA target addresses and passes the host address
5094 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5095 * 0 to 31
5096 *
5097 * Rx Queue Indexes
5098 * The host/firmware share two index registers for managing the Rx buffers.
5099 *
5100 * The READ index maps to the first position that the firmware may be writing
5101 * to -- the driver can read up to (but not including) this position and get
5102 * good data.
5103 * The READ index is managed by the firmware once the card is enabled.
5104 *
5105 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot in which the firmware can place a packet.
5107 *
5108 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5109 * WRITE = READ.
5110 *
5111 * During initialization the host sets up the READ queue position to the first
5112 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5113 *
5114 * When the firmware places a packet in a buffer it will advance the READ index
5115 * and fire the RX interrupt. The driver can then query the READ index and
5116 * process as many packets as possible, moving the WRITE index forward as it
5117 * resets the Rx queue buffers with new memory.
5118 *
5119 * The management in the driver is as follows:
5120 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5121 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 * to replenish the ipw->rxq->rx_free.
5123 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5124 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5125 * 'processed' and 'read' driver indexes as well)
5126 * + A received packet is processed and handed to the kernel network stack,
5127 * detached from the ipw->rxq. The driver 'processed' index is updated.
5128 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5129 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5130 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5131 * were enough free buffers and RX_STALLED is set it is cleared.
5132 *
5133 *
5134 * Driver sequence:
5135 *
5136 * ipw_rx_queue_alloc() Allocates rx_free
5137 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5138 * ipw_rx_queue_restock
5139 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5140 * queue, updates firmware pointers, and updates
5141 * the WRITE index. If insufficient rx_free buffers
5142 * are available, schedules ipw_rx_queue_replenish
5143 *
5144 * -- enable interrupts --
5145 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5146 * READ INDEX, detaching the SKB from the pool.
5147 * Moves the packet buffer from queue to rx_used.
5148 * Calls ipw_rx_queue_restock to refill any empty
5149 * slots.
5150 * ...
5151 *
5152 */
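/* In short: with the 32-entry ring described above, the hardware queue is
 * empty when WRITE == READ - 1 (modulo the ring size) and full when
 * WRITE == READ; ipw_rx_queue_space() additionally keeps two slots in
 * reserve so the two states cannot be confused. */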
5153
5154 /*
5155 * If there are slots in the RX queue that need to be restocked,
5156 * and we have free pre-allocated buffers, fill the ranks as much
5157 * as we can pulling from rx_free.
5158 *
5159 * This moves the 'write' index forward to catch up with 'processed', and
5160 * also updates the memory address in the firmware to reference the new
5161 * target buffer.
5162 */
static void ipw_rx_queue_restock(struct ipw_priv *priv)
5164 {
5165 struct ipw_rx_queue *rxq = priv->rxq;
5166 struct list_head *element;
5167 struct ipw_rx_mem_buffer *rxb;
5168 unsigned long flags;
5169 int write;
5170
5171 spin_lock_irqsave(&rxq->lock, flags);
5172 write = rxq->write;
5173 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5174 element = rxq->rx_free.next;
5175 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5176 list_del(element);
5177
5178 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5179 rxb->dma_addr);
5180 rxq->queue[rxq->write] = rxb;
5181 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5182 rxq->free_count--;
5183 }
5184 spin_unlock_irqrestore(&rxq->lock, flags);
5185
5186 /* If the pre-allocated buffer pool is dropping low, schedule to
5187 * refill it */
5188 if (rxq->free_count <= RX_LOW_WATERMARK)
5189 schedule_work(&priv->rx_replenish);
5190
5191 /* If we've added more space for the firmware to place data, tell it */
5192 if (write != rxq->write)
5193 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5194 }
5195
5196 /*
 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5198 * Also restock the Rx queue via ipw_rx_queue_restock.
5199 *
 * This is called as a scheduled work item (except during initialization).
5201 */
static void ipw_rx_queue_replenish(void *data)
5203 {
5204 struct ipw_priv *priv = data;
5205 struct ipw_rx_queue *rxq = priv->rxq;
5206 struct list_head *element;
5207 struct ipw_rx_mem_buffer *rxb;
5208 unsigned long flags;
5209
5210 spin_lock_irqsave(&rxq->lock, flags);
5211 while (!list_empty(&rxq->rx_used)) {
5212 element = rxq->rx_used.next;
5213 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5214 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5215 if (!rxb->skb) {
5216 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5217 priv->net_dev->name);
5218 /* We don't reschedule replenish work here -- we will
5219 * call the restock method and if it still needs
5220 * more buffers it will schedule replenish */
5221 break;
5222 }
5223 list_del(element);
5224
5225 rxb->dma_addr =
5226 pci_map_single(priv->pci_dev, rxb->skb->data,
5227 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5228
5229 list_add_tail(&rxb->list, &rxq->rx_free);
5230 rxq->free_count++;
5231 }
5232 spin_unlock_irqrestore(&rxq->lock, flags);
5233
5234 ipw_rx_queue_restock(priv);
5235 }
5236
static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5238 {
5239 struct ipw_priv *priv =
5240 container_of(work, struct ipw_priv, rx_replenish);
5241 mutex_lock(&priv->mutex);
5242 ipw_rx_queue_replenish(priv);
5243 mutex_unlock(&priv->mutex);
5244 }
5245
5246 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and, if the SKB is
 * non-NULL, it is unmapped and freed.
5250 */
static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5252 {
5253 int i;
5254
5255 if (!rxq)
5256 return;
5257
5258 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5259 if (rxq->pool[i].skb != NULL) {
5260 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5261 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5262 dev_kfree_skb(rxq->pool[i].skb);
5263 }
5264 }
5265
5266 kfree(rxq);
5267 }
5268
static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5270 {
5271 struct ipw_rx_queue *rxq;
5272 int i;
5273
5274 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5275 if (unlikely(!rxq)) {
5276 IPW_ERROR("memory allocation failed\n");
5277 return NULL;
5278 }
5279 spin_lock_init(&rxq->lock);
5280 INIT_LIST_HEAD(&rxq->rx_free);
5281 INIT_LIST_HEAD(&rxq->rx_used);
5282
5283 /* Fill the rx_used queue with _all_ of the Rx buffers */
5284 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5285 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5286
5287 /* Set us so that we have processed and used all buffers, but have
5288 * not restocked the Rx queue with fresh buffers */
5289 rxq->read = rxq->write = 0;
5290 rxq->free_count = 0;
5291
5292 return rxq;
5293 }
5294
static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5296 {
5297 rate &= ~LIBIPW_BASIC_RATE_MASK;
5298 if (ieee_mode == IEEE_A) {
5299 switch (rate) {
5300 case LIBIPW_OFDM_RATE_6MB:
5301 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5302 1 : 0;
5303 case LIBIPW_OFDM_RATE_9MB:
5304 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5305 1 : 0;
5306 case LIBIPW_OFDM_RATE_12MB:
5307 return priv->
5308 rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5309 case LIBIPW_OFDM_RATE_18MB:
5310 return priv->
5311 rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5312 case LIBIPW_OFDM_RATE_24MB:
5313 return priv->
5314 rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5315 case LIBIPW_OFDM_RATE_36MB:
5316 return priv->
5317 rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5318 case LIBIPW_OFDM_RATE_48MB:
5319 return priv->
5320 rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5321 case LIBIPW_OFDM_RATE_54MB:
5322 return priv->
5323 rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5324 default:
5325 return 0;
5326 }
5327 }
5328
5329 /* B and G mixed */
5330 switch (rate) {
5331 case LIBIPW_CCK_RATE_1MB:
5332 return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5333 case LIBIPW_CCK_RATE_2MB:
5334 return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5335 case LIBIPW_CCK_RATE_5MB:
5336 return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5337 case LIBIPW_CCK_RATE_11MB:
5338 return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5339 }
5340
5341 /* If we are limited to B modulations, bail at this point */
5342 if (ieee_mode == IEEE_B)
5343 return 0;
5344
5345 /* G */
5346 switch (rate) {
5347 case LIBIPW_OFDM_RATE_6MB:
5348 return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5349 case LIBIPW_OFDM_RATE_9MB:
5350 return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5351 case LIBIPW_OFDM_RATE_12MB:
5352 return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5353 case LIBIPW_OFDM_RATE_18MB:
5354 return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5355 case LIBIPW_OFDM_RATE_24MB:
5356 return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5357 case LIBIPW_OFDM_RATE_36MB:
5358 return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5359 case LIBIPW_OFDM_RATE_48MB:
5360 return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5361 case LIBIPW_OFDM_RATE_54MB:
5362 return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5363 }
5364
5365 return 0;
5366 }
5367
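/* Build in @rates the intersection of the rates advertised by @network and
 * the driver's configured rate mask.  Basic (mandatory) rates are kept even
 * when masked out, since the AP requires them for association. */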
5368 static int ipw_compatible_rates(struct ipw_priv *priv,
5369 const struct libipw_network *network,
5370 struct ipw_supported_rates *rates)
5371 {
5372 int num_rates, i;
5373
5374 memset(rates, 0, sizeof(*rates));
5375 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5376 rates->num_rates = 0;
5377 for (i = 0; i < num_rates; i++) {
5378 if (!ipw_is_rate_in_mask(priv, network->mode,
5379 network->rates[i])) {
5380
5381 if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5382 IPW_DEBUG_SCAN("Adding masked mandatory "
5383 "rate %02X\n",
5384 network->rates[i]);
5385 rates->supported_rates[rates->num_rates++] =
5386 network->rates[i];
5387 continue;
5388 }
5389
5390 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5391 network->rates[i], priv->rates_mask);
5392 continue;
5393 }
5394
5395 rates->supported_rates[rates->num_rates++] = network->rates[i];
5396 }
5397
5398 num_rates = min(network->rates_ex_len,
5399 (u8) (IPW_MAX_RATES - num_rates));
5400 for (i = 0; i < num_rates; i++) {
5401 if (!ipw_is_rate_in_mask(priv, network->mode,
5402 network->rates_ex[i])) {
5403 if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5404 IPW_DEBUG_SCAN("Adding masked mandatory "
5405 "rate %02X\n",
5406 network->rates_ex[i]);
5407 rates->supported_rates[rates->num_rates++] =
5408 network->rates_ex[i];
5409 continue;
5410 }
5411
5412 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5413 network->rates_ex[i], priv->rates_mask);
5414 continue;
5415 }
5416
5417 rates->supported_rates[rates->num_rates++] =
5418 network->rates_ex[i];
5419 }
5420
5421 return 1;
5422 }
5423
5424 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5425 const struct ipw_supported_rates *src)
5426 {
5427 u8 i;
5428 for (i = 0; i < src->num_rates; i++)
5429 dest->supported_rates[i] = src->supported_rates[i];
5430 dest->num_rates = src->num_rates;
5431 }
5432
5433 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5434 * mask should ever be used -- right now all callers to add the scan rates are
5435 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5436 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5437 u8 modulation, u32 rate_mask)
5438 {
5439 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5440 LIBIPW_BASIC_RATE_MASK : 0;
5441
5442 if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5443 rates->supported_rates[rates->num_rates++] =
5444 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5445
5446 if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5447 rates->supported_rates[rates->num_rates++] =
5448 LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5449
5450 if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5451 rates->supported_rates[rates->num_rates++] = basic_mask |
5452 LIBIPW_CCK_RATE_5MB;
5453
5454 if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5455 rates->supported_rates[rates->num_rates++] = basic_mask |
5456 LIBIPW_CCK_RATE_11MB;
5457 }
5458
5459 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5460 u8 modulation, u32 rate_mask)
5461 {
5462 u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5463 LIBIPW_BASIC_RATE_MASK : 0;
5464
5465 if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5466 rates->supported_rates[rates->num_rates++] = basic_mask |
5467 LIBIPW_OFDM_RATE_6MB;
5468
5469 if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5470 rates->supported_rates[rates->num_rates++] =
5471 LIBIPW_OFDM_RATE_9MB;
5472
5473 if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5474 rates->supported_rates[rates->num_rates++] = basic_mask |
5475 LIBIPW_OFDM_RATE_12MB;
5476
5477 if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5478 rates->supported_rates[rates->num_rates++] =
5479 LIBIPW_OFDM_RATE_18MB;
5480
5481 if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5482 rates->supported_rates[rates->num_rates++] = basic_mask |
5483 LIBIPW_OFDM_RATE_24MB;
5484
5485 if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5486 rates->supported_rates[rates->num_rates++] =
5487 LIBIPW_OFDM_RATE_36MB;
5488
5489 if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5490 rates->supported_rates[rates->num_rates++] =
5491 LIBIPW_OFDM_RATE_48MB;
5492
5493 if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5494 rates->supported_rates[rates->num_rates++] =
5495 LIBIPW_OFDM_RATE_54MB;
5496 }
5497
5498 struct ipw_network_match {
5499 struct libipw_network *network;
5500 struct ipw_supported_rates rates;
5501 };
5502
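/* Check whether @network is a viable ad-hoc (IBSS) merge candidate by
 * verifying capability, ESSID, TSF age, scan age, channel, privacy, mode
 * and rate compatibility.  On success the network and its compatible
 * rates are recorded in @match and 1 is returned; otherwise 0. */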
5503 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5504 struct ipw_network_match *match,
5505 struct libipw_network *network,
5506 int roaming)
5507 {
5508 struct ipw_supported_rates rates;
5509
5510 /* Verify that this network's capability is compatible with the
5511 * current mode (AdHoc or Infrastructure) */
5512 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5513 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5514 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5515 network->ssid_len, network->ssid,
5516 network->bssid);
5517 return 0;
5518 }
5519
5520 if (unlikely(roaming)) {
5521 /* If we are roaming, then make sure this is a valid
5522 * network to try to roam to */
5523 if ((network->ssid_len != match->network->ssid_len) ||
5524 memcmp(network->ssid, match->network->ssid,
5525 network->ssid_len)) {
5526 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5527 network->ssid_len, network->ssid,
5528 network->bssid);
5529 return 0;
5530 }
5531 } else {
5532 /* If an ESSID has been configured then compare the broadcast
5533 * ESSID to ours */
5534 if ((priv->config & CFG_STATIC_ESSID) &&
5535 ((network->ssid_len != priv->essid_len) ||
5536 memcmp(network->ssid, priv->essid,
5537 min(network->ssid_len, priv->essid_len)))) {
5538 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5539 network->ssid_len, network->ssid,
5540 network->bssid, priv->essid_len,
5541 priv->essid);
5542 return 0;
5543 }
5544 }
5545
5546 /* If this network is newer (lower TSF timestamp) than the one we are
5547 * currently in, don't bother testing everything else. */
5548
5549 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5550 IPW_DEBUG_MERGE("Network '%*pE' excluded because newer than current network.\n",
5551 match->network->ssid_len, match->network->ssid);
5552 return 0;
5553 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5554 IPW_DEBUG_MERGE("Network '%*pE' excluded because newer than current network.\n",
5555 match->network->ssid_len, match->network->ssid);
5556 return 0;
5557 }
5558
5559 /* Now go through and see if the requested network is valid... */
5560 if (priv->ieee->scan_age != 0 &&
5561 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5562 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5563 network->ssid_len, network->ssid,
5564 network->bssid,
5565 jiffies_to_msecs(jiffies -
5566 network->last_scanned));
5567 return 0;
5568 }
5569
5570 if ((priv->config & CFG_STATIC_CHANNEL) &&
5571 (network->channel != priv->channel)) {
5572 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5573 network->ssid_len, network->ssid,
5574 network->bssid,
5575 network->channel, priv->channel);
5576 return 0;
5577 }
5578
5579 /* Verify privacy compatibility */
5580 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5581 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5582 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5583 network->ssid_len, network->ssid,
5584 network->bssid,
5585 priv->
5586 capability & CAP_PRIVACY_ON ? "on" : "off",
5587 network->
5588 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5589 "off");
5590 return 0;
5591 }
5592
5593 if (ether_addr_equal(network->bssid, priv->bssid)) {
5594 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
5595 network->ssid_len, network->ssid,
5596 network->bssid, priv->bssid);
5597 return 0;
5598 }
5599
5600 /* Filter out any incompatible freq / mode combinations */
5601 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5602 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5603 network->ssid_len, network->ssid,
5604 network->bssid);
5605 return 0;
5606 }
5607
5608 /* Ensure that the rates supported by the driver are compatible with
5609 * this AP, including verification of basic rates (mandatory) */
5610 if (!ipw_compatible_rates(priv, network, &rates)) {
5611 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5612 network->ssid_len, network->ssid,
5613 network->bssid);
5614 return 0;
5615 }
5616
5617 if (rates.num_rates == 0) {
5618 IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5619 network->ssid_len, network->ssid,
5620 network->bssid);
5621 return 0;
5622 }
5623
5624 /* TODO: Perform any further minimal comparative tests. We do not
5625 * want to put too much policy logic here; intelligent scan selection
5626 * should occur within a generic IEEE 802.11 user space tool. */
5627
5628 /* Set up 'new' AP to this network */
5629 ipw_copy_rates(&match->rates, &rates);
5630 match->network = network;
5631 IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
5632 network->ssid_len, network->ssid, network->bssid);
5633
5634 return 1;
5635 }
5636
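/* Work handler: while associated in ad-hoc mode, look for a better IBSS
 * with the same ESSID.  If one is found, drop the current network,
 * disassociate and make the new network the association target so that
 * the association machinery can merge into it. */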
5637 static void ipw_merge_adhoc_network(struct work_struct *work)
5638 {
5639 struct ipw_priv *priv =
5640 container_of(work, struct ipw_priv, merge_networks);
5641 struct libipw_network *network = NULL;
5642 struct ipw_network_match match = {
5643 .network = priv->assoc_network
5644 };
5645
5646 if ((priv->status & STATUS_ASSOCIATED) &&
5647 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5648 /* First pass through ROAM process -- look for a better
5649 * network */
5650 unsigned long flags;
5651
5652 spin_lock_irqsave(&priv->ieee->lock, flags);
5653 list_for_each_entry(network, &priv->ieee->network_list, list) {
5654 if (network != priv->assoc_network)
5655 ipw_find_adhoc_network(priv, &match, network,
5656 1);
5657 }
5658 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5659
5660 if (match.network == priv->assoc_network) {
5661 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5662 "merge to.\n");
5663 return;
5664 }
5665
5666 mutex_lock(&priv->mutex);
5667 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5668 IPW_DEBUG_MERGE("remove network %*pE\n",
5669 priv->essid_len, priv->essid);
5670 ipw_remove_current_network(priv);
5671 }
5672
5673 ipw_disassociate(priv);
5674 priv->assoc_network = match.network;
5675 mutex_unlock(&priv->mutex);
5676 return;
5677 }
5678 }
5679
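/* Check whether @network is a viable association candidate, filtering on
 * capability, ESSID, signal strength, association-attempt storming, scan
 * age, channel, privacy, BSSID, mode and rate compatibility.  If it beats
 * the current best candidate, record it in @match and return 1. */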
5680 static int ipw_best_network(struct ipw_priv *priv,
5681 struct ipw_network_match *match,
5682 struct libipw_network *network, int roaming)
5683 {
5684 struct ipw_supported_rates rates;
5685
5686 /* Verify that this network's capability is compatible with the
5687 * current mode (AdHoc or Infrastructure) */
5688 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5689 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5690 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5691 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5692 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5693 network->ssid_len, network->ssid,
5694 network->bssid);
5695 return 0;
5696 }
5697
5698 if (unlikely(roaming)) {
5699 /* If we are roaming, then make sure this is a valid
5700 * network to try to roam to */
5701 if ((network->ssid_len != match->network->ssid_len) ||
5702 memcmp(network->ssid, match->network->ssid,
5703 network->ssid_len)) {
5704 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5705 network->ssid_len, network->ssid,
5706 network->bssid);
5707 return 0;
5708 }
5709 } else {
5710 /* If an ESSID has been configured then compare the broadcast
5711 * ESSID to ours */
5712 if ((priv->config & CFG_STATIC_ESSID) &&
5713 ((network->ssid_len != priv->essid_len) ||
5714 memcmp(network->ssid, priv->essid,
5715 min(network->ssid_len, priv->essid_len)))) {
5716 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5717 network->ssid_len, network->ssid,
5718 network->bssid, priv->essid_len,
5719 priv->essid);
5720 return 0;
5721 }
5722 }
5723
5724 /* If the old network rate is better than this one, don't bother
5725 * testing everything else. */
5726 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5727 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
5728 network->ssid_len, network->ssid,
5729 network->bssid, match->network->ssid_len,
5730 match->network->ssid, match->network->bssid);
5731 return 0;
5732 }
5733
5734 /* If this network has already had an association attempt within the
5735 * last 3 seconds, do not try and associate again... */
5736 if (network->last_associate &&
5737 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5738 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
5739 network->ssid_len, network->ssid,
5740 network->bssid,
5741 jiffies_to_msecs(jiffies -
5742 network->last_associate));
5743 return 0;
5744 }
5745
5746 /* Now go through and see if the requested network is valid... */
5747 if (priv->ieee->scan_age != 0 &&
5748 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5749 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5750 network->ssid_len, network->ssid,
5751 network->bssid,
5752 jiffies_to_msecs(jiffies -
5753 network->last_scanned));
5754 return 0;
5755 }
5756
5757 if ((priv->config & CFG_STATIC_CHANNEL) &&
5758 (network->channel != priv->channel)) {
5759 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5760 network->ssid_len, network->ssid,
5761 network->bssid,
5762 network->channel, priv->channel);
5763 return 0;
5764 }
5765
5766 /* Verify privacy compatibility */
5767 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5768 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5769 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5770 network->ssid_len, network->ssid,
5771 network->bssid,
5772 priv->capability & CAP_PRIVACY_ON ? "on" :
5773 "off",
5774 network->capability &
5775 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5776 return 0;
5777 }
5778
5779 if ((priv->config & CFG_STATIC_BSSID) &&
5780 !ether_addr_equal(network->bssid, priv->bssid)) {
5781 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
5782 network->ssid_len, network->ssid,
5783 network->bssid, priv->bssid);
5784 return 0;
5785 }
5786
5787 /* Filter out any incompatible freq / mode combinations */
5788 if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5789 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5790 network->ssid_len, network->ssid,
5791 network->bssid);
5792 return 0;
5793 }
5794
5795 /* Filter out invalid channel in current GEO */
5796 if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5797 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
5798 network->ssid_len, network->ssid,
5799 network->bssid);
5800 return 0;
5801 }
5802
5803 /* Ensure that the rates supported by the driver are compatible with
5804 * this AP, including verification of basic rates (mandatory) */
5805 if (!ipw_compatible_rates(priv, network, &rates)) {
5806 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5807 network->ssid_len, network->ssid,
5808 network->bssid);
5809 return 0;
5810 }
5811
5812 if (rates.num_rates == 0) {
5813 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5814 network->ssid_len, network->ssid,
5815 network->bssid);
5816 return 0;
5817 }
5818
5819 /* TODO: Perform any further minimal comparative tests. We do not
5820 * want to put too much policy logic here; intelligent scan selection
5821 * should occur within a generic IEEE 802.11 user space tool. */
5822
5823 /* Set up 'new' AP to this network */
5824 ipw_copy_rates(&match->rates, &rates);
5825 match->network = network;
5826
5827 IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
5828 network->ssid_len, network->ssid, network->bssid);
5829
5830 return 1;
5831 }
5832
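/* Populate @network with the parameters (band/mode, channel, BSSID, ESSID,
 * rates and capability bits) used to create a new ad-hoc network of our
 * own. */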
5833 static void ipw_adhoc_create(struct ipw_priv *priv,
5834 struct libipw_network *network)
5835 {
5836 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5837 int i;
5838
5839 /*
5840 * For the purposes of scanning, we can set our wireless mode
5841 * to trigger scans across combinations of bands, but when it
5842 * comes to creating a new ad-hoc network, we have to tell the FW
5843 * exactly which band to use.
5844 *
5845 * We also have the possibility of an invalid channel for the
5846 * chosen band. Attempting to create a new ad-hoc network
5847 * with an invalid channel for wireless mode will trigger a
5848 * FW fatal error.
5849 *
5850 */
5851 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5852 case LIBIPW_52GHZ_BAND:
5853 network->mode = IEEE_A;
5854 i = libipw_channel_to_index(priv->ieee, priv->channel);
5855 BUG_ON(i == -1);
5856 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5857 IPW_WARNING("Overriding invalid channel\n");
5858 priv->channel = geo->a[0].channel;
5859 }
5860 break;
5861
5862 case LIBIPW_24GHZ_BAND:
5863 if (priv->ieee->mode & IEEE_G)
5864 network->mode = IEEE_G;
5865 else
5866 network->mode = IEEE_B;
5867 i = libipw_channel_to_index(priv->ieee, priv->channel);
5868 BUG_ON(i == -1);
5869 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5870 IPW_WARNING("Overriding invalid channel\n");
5871 priv->channel = geo->bg[0].channel;
5872 }
5873 break;
5874
5875 default:
5876 IPW_WARNING("Overriding invalid channel\n");
5877 if (priv->ieee->mode & IEEE_A) {
5878 network->mode = IEEE_A;
5879 priv->channel = geo->a[0].channel;
5880 } else if (priv->ieee->mode & IEEE_G) {
5881 network->mode = IEEE_G;
5882 priv->channel = geo->bg[0].channel;
5883 } else {
5884 network->mode = IEEE_B;
5885 priv->channel = geo->bg[0].channel;
5886 }
5887 break;
5888 }
5889
5890 network->channel = priv->channel;
5891 priv->config |= CFG_ADHOC_PERSIST;
5892 ipw_create_bssid(priv, network->bssid);
5893 network->ssid_len = priv->essid_len;
5894 memcpy(network->ssid, priv->essid, priv->essid_len);
5895 memset(&network->stats, 0, sizeof(network->stats));
5896 network->capability = WLAN_CAPABILITY_IBSS;
5897 if (!(priv->config & CFG_PREAMBLE_LONG))
5898 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5899 if (priv->capability & CAP_PRIVACY_ON)
5900 network->capability |= WLAN_CAPABILITY_PRIVACY;
5901 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5902 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5903 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5904 memcpy(network->rates_ex,
5905 &priv->rates.supported_rates[network->rates_len],
5906 network->rates_ex_len);
5907 network->last_scanned = 0;
5908 network->flags = 0;
5909 network->last_associate = 0;
5910 network->time_stamp[0] = 0;
5911 network->time_stamp[1] = 0;
5912 network->beacon_interval = 100; /* Default */
5913 network->listen_interval = 10; /* Default */
5914 network->atim_window = 0; /* Default */
5915 network->wpa_ie_len = 0;
5916 network->rsn_ie_len = 0;
5917 }
5918
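/* Send the transmit key stored in slot @index to the firmware via the
 * IPW_CMD_TGI_TX_KEY host command, but only if that key slot is
 * populated. */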
5919 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5920 {
5921 struct ipw_tgi_tx_key key;
5922
5923 if (!(priv->ieee->sec.flags & (1 << index)))
5924 return;
5925
5926 key.key_id = index;
5927 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5928 key.security_type = type;
5929 key.station_index = 0; /* always 0 for BSS */
5930 key.flags = 0;
5931 /* 0 for new key; previous value of counter (after fatal error) */
5932 key.tx_counter[0] = cpu_to_le32(0);
5933 key.tx_counter[1] = cpu_to_le32(0);
5934
5935 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5936 }
5937
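/* Download the configured static key slots (0-3) to the firmware via the
 * IPW_CMD_WEP_KEY host command; empty slots are skipped. */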
5938 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5939 {
5940 struct ipw_wep_key key;
5941 int i;
5942
5943 key.cmd_id = DINO_CMD_WEP_KEY;
5944 key.seq_num = 0;
5945
5946 /* Note: AES keys cannot be set multiple times.
5947 * Only set them the first time. */
5948 for (i = 0; i < 4; i++) {
5949 key.key_index = i | type;
5950 if (!(priv->ieee->sec.flags & (1 << i))) {
5951 key.key_size = 0;
5952 continue;
5953 }
5954
5955 key.key_size = priv->ieee->sec.key_sizes[i];
5956 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5957
5958 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5959 }
5960 }
5961
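/* Configure whether the firmware or the host decrypts unicast frames,
 * based on the negotiated security level (TKIP still requires host
 * decryption).  No-op when host encryption is in use. */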
5962 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5963 {
5964 if (priv->ieee->host_encrypt)
5965 return;
5966
5967 switch (level) {
5968 case SEC_LEVEL_3:
5969 priv->sys_config.disable_unicast_decryption = 0;
5970 priv->ieee->host_decrypt = 0;
5971 break;
5972 case SEC_LEVEL_2:
5973 priv->sys_config.disable_unicast_decryption = 1;
5974 priv->ieee->host_decrypt = 1;
5975 break;
5976 case SEC_LEVEL_1:
5977 priv->sys_config.disable_unicast_decryption = 0;
5978 priv->ieee->host_decrypt = 0;
5979 break;
5980 case SEC_LEVEL_0:
5981 priv->sys_config.disable_unicast_decryption = 1;
5982 break;
5983 default:
5984 break;
5985 }
5986 }
5987
5988 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5989 {
5990 if (priv->ieee->host_encrypt)
5991 return;
5992
5993 switch (level) {
5994 case SEC_LEVEL_3:
5995 priv->sys_config.disable_multicast_decryption = 0;
5996 break;
5997 case SEC_LEVEL_2:
5998 priv->sys_config.disable_multicast_decryption = 1;
5999 break;
6000 case SEC_LEVEL_1:
6001 priv->sys_config.disable_multicast_decryption = 0;
6002 break;
6003 case SEC_LEVEL_0:
6004 priv->sys_config.disable_multicast_decryption = 1;
6005 break;
6006 default:
6007 break;
6008 }
6009 }
6010
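/* Push the currently configured keys to the firmware according to the
 * active security level (CCMP, TKIP or WEP). */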
6011 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6012 {
6013 switch (priv->ieee->sec.level) {
6014 case SEC_LEVEL_3:
6015 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6016 ipw_send_tgi_tx_key(priv,
6017 DCT_FLAG_EXT_SECURITY_CCM,
6018 priv->ieee->sec.active_key);
6019
6020 if (!priv->ieee->host_mc_decrypt)
6021 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6022 break;
6023 case SEC_LEVEL_2:
6024 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6025 ipw_send_tgi_tx_key(priv,
6026 DCT_FLAG_EXT_SECURITY_TKIP,
6027 priv->ieee->sec.active_key);
6028 break;
6029 case SEC_LEVEL_1:
6030 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6031 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6032 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6033 break;
6034 case SEC_LEVEL_0:
6035 default:
6036 break;
6037 }
6038 }
6039
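/* Periodic ad-hoc link check: disassociate once too many consecutive
 * beacons have been missed (unless the ad-hoc network is configured to
 * persist), otherwise re-arm the check for the next beacon interval. */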
6040 static void ipw_adhoc_check(void *data)
6041 {
6042 struct ipw_priv *priv = data;
6043
6044 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6045 !(priv->config & CFG_ADHOC_PERSIST)) {
6046 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6047 IPW_DL_STATE | IPW_DL_ASSOC,
6048 "Missed beacon: %d - disassociate\n",
6049 priv->missed_adhoc_beacons);
6050 ipw_remove_current_network(priv);
6051 ipw_disassociate(priv);
6052 return;
6053 }
6054
6055 schedule_delayed_work(&priv->adhoc_check,
6056 le16_to_cpu(priv->assoc_request.beacon_interval));
6057 }
6058
6059 static void ipw_bg_adhoc_check(struct work_struct *work)
6060 {
6061 struct ipw_priv *priv =
6062 container_of(work, struct ipw_priv, adhoc_check.work);
6063 mutex_lock(&priv->mutex);
6064 ipw_adhoc_check(priv);
6065 mutex_unlock(&priv->mutex);
6066 }
6067
6068 static void ipw_debug_config(struct ipw_priv *priv)
6069 {
6070 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6071 "[CFG 0x%08X]\n", priv->config);
6072 if (priv->config & CFG_STATIC_CHANNEL)
6073 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6074 else
6075 IPW_DEBUG_INFO("Channel unlocked.\n");
6076 if (priv->config & CFG_STATIC_ESSID)
6077 IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
6078 priv->essid_len, priv->essid);
6079 else
6080 IPW_DEBUG_INFO("ESSID unlocked.\n");
6081 if (priv->config & CFG_STATIC_BSSID)
6082 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6083 else
6084 IPW_DEBUG_INFO("BSSID unlocked.\n");
6085 if (priv->capability & CAP_PRIVACY_ON)
6086 IPW_DEBUG_INFO("PRIVACY on\n");
6087 else
6088 IPW_DEBUG_INFO("PRIVACY off\n");
6089 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6090 }
6091
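/* Translate the user's rate mask into the firmware's fixed TX rate format
 * for the current band and write it into the fixed-rate override location
 * in device memory. */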
6092 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6093 {
6094 /* TODO: Verify that this works... */
6095 struct ipw_fixed_rate fr;
6096 u32 reg;
6097 u16 mask = 0;
6098 u16 new_tx_rates = priv->rates_mask;
6099
6100 /* Identify 'current FW band' and match it with the fixed
6101 * Tx rates */
6102
6103 switch (priv->ieee->freq_band) {
6104 case LIBIPW_52GHZ_BAND: /* A only */
6105 /* IEEE_A */
6106 if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6107 /* Invalid fixed rate mask */
6108 IPW_DEBUG_WX
6109 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6110 new_tx_rates = 0;
6111 break;
6112 }
6113
6114 new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6115 break;
6116
6117 default: /* 2.4Ghz or Mixed */
6118 /* IEEE_B */
6119 if (mode == IEEE_B) {
6120 if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6121 /* Invalid fixed rate mask */
6122 IPW_DEBUG_WX
6123 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6124 new_tx_rates = 0;
6125 }
6126 break;
6127 }
6128
6129 /* IEEE_G */
6130 if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6131 LIBIPW_OFDM_RATES_MASK)) {
6132 /* Invalid fixed rate mask */
6133 IPW_DEBUG_WX
6134 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6135 new_tx_rates = 0;
6136 break;
6137 }
6138
6139 if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6140 mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6141 new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6142 }
6143
6144 if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6145 mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6146 new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6147 }
6148
6149 if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6150 mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6151 new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6152 }
6153
6154 new_tx_rates |= mask;
6155 break;
6156 }
6157
6158 fr.tx_rates = cpu_to_le16(new_tx_rates);
6159
6160 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6161 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6162 }
6163
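/* Ask the firmware to abort any scan in progress, ignoring the request if
 * an abort is already pending. */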
6164 static void ipw_abort_scan(struct ipw_priv *priv)
6165 {
6166 int err;
6167
6168 if (priv->status & STATUS_SCAN_ABORTING) {
6169 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6170 return;
6171 }
6172 priv->status |= STATUS_SCAN_ABORTING;
6173
6174 err = ipw_send_scan_abort(priv);
6175 if (err)
6176 IPW_DEBUG_HC("Request to abort scan failed.\n");
6177 }
6178
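/* Fill in the channel list of @scan for the bands supported by the current
 * geography, marking passive-only channels for passive dwell, skipping the
 * channel we are currently associated on, and honouring the speed-scan
 * channel list when CFG_SPEED_SCAN is set. */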
6179 static void ipw_add_scan_channels(struct ipw_priv *priv,
6180 struct ipw_scan_request_ext *scan,
6181 int scan_type)
6182 {
6183 int channel_index = 0;
6184 const struct libipw_geo *geo;
6185 int i;
6186
6187 geo = libipw_get_geo(priv->ieee);
6188
6189 if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6190 int start = channel_index;
6191 for (i = 0; i < geo->a_channels; i++) {
6192 if ((priv->status & STATUS_ASSOCIATED) &&
6193 geo->a[i].channel == priv->channel)
6194 continue;
6195 channel_index++;
6196 scan->channels_list[channel_index] = geo->a[i].channel;
6197 ipw_set_scan_type(scan, channel_index,
6198 geo->a[i].
6199 flags & LIBIPW_CH_PASSIVE_ONLY ?
6200 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6201 scan_type);
6202 }
6203
6204 if (start != channel_index) {
6205 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6206 (channel_index - start);
6207 channel_index++;
6208 }
6209 }
6210
6211 if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6212 int start = channel_index;
6213 if (priv->config & CFG_SPEED_SCAN) {
6214 int index;
6215 u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6216 /* nop out the list */
6217 [0] = 0
6218 };
6219
6220 u8 channel;
6221 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6222 channel =
6223 priv->speed_scan[priv->speed_scan_pos];
6224 if (channel == 0) {
6225 priv->speed_scan_pos = 0;
6226 channel = priv->speed_scan[0];
6227 }
6228 if ((priv->status & STATUS_ASSOCIATED) &&
6229 channel == priv->channel) {
6230 priv->speed_scan_pos++;
6231 continue;
6232 }
6233
6234 /* If this channel has already been
6235 * added in scan, break from loop
6236 * and this will be the first channel
6237 * in the next scan.
6238 */
6239 if (channels[channel - 1] != 0)
6240 break;
6241
6242 channels[channel - 1] = 1;
6243 priv->speed_scan_pos++;
6244 channel_index++;
6245 scan->channels_list[channel_index] = channel;
6246 index =
6247 libipw_channel_to_index(priv->ieee, channel);
6248 ipw_set_scan_type(scan, channel_index,
6249 geo->bg[index].
6250 flags &
6251 LIBIPW_CH_PASSIVE_ONLY ?
6252 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6253 : scan_type);
6254 }
6255 } else {
6256 for (i = 0; i < geo->bg_channels; i++) {
6257 if ((priv->status & STATUS_ASSOCIATED) &&
6258 geo->bg[i].channel == priv->channel)
6259 continue;
6260 channel_index++;
6261 scan->channels_list[channel_index] =
6262 geo->bg[i].channel;
6263 ipw_set_scan_type(scan, channel_index,
6264 geo->bg[i].
6265 flags &
6266 LIBIPW_CH_PASSIVE_ONLY ?
6267 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6268 : scan_type);
6269 }
6270 }
6271
6272 if (start != channel_index) {
6273 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6274 (channel_index - start);
6275 }
6276 }
6277 }
6278
6279 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6280 {
6281 /* staying on passive channels longer than the DTIM interval during a
6282 * scan, while associated, causes the firmware to cancel the scan
6283 * without notification. Hence, don't stay on passive channels longer
6284 * than the beacon interval.
6285 */
6286 if (priv->status & STATUS_ASSOCIATED
6287 && priv->assoc_network->beacon_interval > 10)
6288 return priv->assoc_network->beacon_interval - 10;
6289 else
6290 return 120;
6291 }
6292
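/* Build and send a scan request of the given @type.  Direct scans use the
 * stored direct-scan SSID; the request is queued instead if a scan, a
 * pending scan abort or RF-kill is currently in effect. */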
6293 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6294 {
6295 struct ipw_scan_request_ext scan;
6296 int err = 0, scan_type;
6297
6298 if (!(priv->status & STATUS_INIT) ||
6299 (priv->status & STATUS_EXIT_PENDING))
6300 return 0;
6301
6302 mutex_lock(&priv->mutex);
6303
6304 if (direct && (priv->direct_scan_ssid_len == 0)) {
6305 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6306 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6307 goto done;
6308 }
6309
6310 if (priv->status & STATUS_SCANNING) {
6311 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6312 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6313 STATUS_SCAN_PENDING;
6314 goto done;
6315 }
6316
6317 if (!(priv->status & STATUS_SCAN_FORCED) &&
6318 priv->status & STATUS_SCAN_ABORTING) {
6319 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6320 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6321 STATUS_SCAN_PENDING;
6322 goto done;
6323 }
6324
6325 if (priv->status & STATUS_RF_KILL_MASK) {
6326 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6327 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6328 STATUS_SCAN_PENDING;
6329 goto done;
6330 }
6331
6332 memset(&scan, 0, sizeof(scan));
6333 scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6334
6335 if (type == IW_SCAN_TYPE_PASSIVE) {
6336 IPW_DEBUG_WX("use passive scanning\n");
6337 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6338 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6339 cpu_to_le16(ipw_passive_dwell_time(priv));
6340 ipw_add_scan_channels(priv, &scan, scan_type);
6341 goto send_request;
6342 }
6343
6344 /* Use active scan by default. */
6345 if (priv->config & CFG_SPEED_SCAN)
6346 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6347 cpu_to_le16(30);
6348 else
6349 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6350 cpu_to_le16(20);
6351
6352 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6353 cpu_to_le16(20);
6354
6355 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6356 cpu_to_le16(ipw_passive_dwell_time(priv));
6357 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6358
6359 #ifdef CONFIG_IPW2200_MONITOR
6360 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6361 u8 channel;
6362 u8 band = 0;
6363
6364 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6365 case LIBIPW_52GHZ_BAND:
6366 band = (u8) (IPW_A_MODE << 6) | 1;
6367 channel = priv->channel;
6368 break;
6369
6370 case LIBIPW_24GHZ_BAND:
6371 band = (u8) (IPW_B_MODE << 6) | 1;
6372 channel = priv->channel;
6373 break;
6374
6375 default:
6376 band = (u8) (IPW_B_MODE << 6) | 1;
6377 channel = 9;
6378 break;
6379 }
6380
6381 scan.channels_list[0] = band;
6382 scan.channels_list[1] = channel;
6383 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6384
6385 /* NOTE: The card will sit on this channel for this time
6386 * period. Scan aborts are timing sensitive and frequently
6387 * result in firmware restarts. As such, it is best to
6388 * set a small dwell_time here and just keep re-issuing
6389 * scans. Otherwise fast channel hopping will not actually
6390 * hop channels.
6391 *
6392 * TODO: Move SPEED SCAN support to all modes and bands */
6393 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6394 cpu_to_le16(2000);
6395 } else {
6396 #endif /* CONFIG_IPW2200_MONITOR */
6397 /* Honor direct scans first, otherwise if we are roaming make
6398 * this a direct scan for the current network. Finally,
6399 * ensure that every other scan is a fast channel hop scan */
6400 if (direct) {
6401 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6402 priv->direct_scan_ssid_len);
6403 if (err) {
6404 IPW_DEBUG_HC("Attempt to send SSID command "
6405 "failed\n");
6406 goto done;
6407 }
6408
6409 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6410 } else if ((priv->status & STATUS_ROAMING)
6411 || (!(priv->status & STATUS_ASSOCIATED)
6412 && (priv->config & CFG_STATIC_ESSID)
6413 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6414 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6415 if (err) {
6416 IPW_DEBUG_HC("Attempt to send SSID command "
6417 "failed.\n");
6418 goto done;
6419 }
6420
6421 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6422 } else
6423 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6424
6425 ipw_add_scan_channels(priv, &scan, scan_type);
6426 #ifdef CONFIG_IPW2200_MONITOR
6427 }
6428 #endif
6429
6430 send_request:
6431 err = ipw_send_scan_request_ext(priv, &scan);
6432 if (err) {
6433 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6434 goto done;
6435 }
6436
6437 priv->status |= STATUS_SCANNING;
6438 if (direct) {
6439 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6440 priv->direct_scan_ssid_len = 0;
6441 } else
6442 priv->status &= ~STATUS_SCAN_PENDING;
6443
6444 schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6445 done:
6446 mutex_unlock(&priv->mutex);
6447 return err;
6448 }
6449
6450 static void ipw_request_passive_scan(struct work_struct *work)
6451 {
6452 struct ipw_priv *priv =
6453 container_of(work, struct ipw_priv, request_passive_scan.work);
6454 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6455 }
6456
6457 static void ipw_request_scan(struct work_struct *work)
6458 {
6459 struct ipw_priv *priv =
6460 container_of(work, struct ipw_priv, request_scan.work);
6461 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6462 }
6463
6464 static void ipw_request_direct_scan(struct work_struct *work)
6465 {
6466 struct ipw_priv *priv =
6467 container_of(work, struct ipw_priv, request_direct_scan.work);
6468 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6469 }
6470
6471 static void ipw_bg_abort_scan(struct work_struct *work)
6472 {
6473 struct ipw_priv *priv =
6474 container_of(work, struct ipw_priv, abort_scan);
6475 mutex_lock(&priv->mutex);
6476 ipw_abort_scan(priv);
6477 mutex_unlock(&priv->mutex);
6478 }
6479
6480 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6481 {
6482 /* This is called when wpa_supplicant loads and closes the driver
6483 * interface. */
6484 priv->ieee->wpa_enabled = value;
6485 return 0;
6486 }
6487
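/* Map the WEXT IW_AUTH_ALG_* selection onto the libipw authentication mode
 * (open system, shared key or LEAP), update open_wep accordingly, and push
 * the result through the set_security hook. */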
6488 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6489 {
6490 struct libipw_device *ieee = priv->ieee;
6491 struct libipw_security sec = {
6492 .flags = SEC_AUTH_MODE,
6493 };
6494 int ret = 0;
6495
6496 if (value & IW_AUTH_ALG_SHARED_KEY) {
6497 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6498 ieee->open_wep = 0;
6499 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6500 sec.auth_mode = WLAN_AUTH_OPEN;
6501 ieee->open_wep = 1;
6502 } else if (value & IW_AUTH_ALG_LEAP) {
6503 sec.auth_mode = WLAN_AUTH_LEAP;
6504 ieee->open_wep = 1;
6505 } else
6506 return -EINVAL;
6507
6508 if (ieee->set_security)
6509 ieee->set_security(ieee->dev, &sec);
6510 else
6511 ret = -EOPNOTSUPP;
6512
6513 return ret;
6514 }
6515
6516 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6517 int wpa_ie_len)
6518 {
6519 /* make sure WPA is enabled */
6520 ipw_wpa_enable(priv, 1);
6521 }
6522
6523 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6524 char *capabilities, int length)
6525 {
6526 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6527
6528 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6529 capabilities);
6530 }
6531
6532 /*
6533 * WE-18 support
6534 */
6535
6536 /* SIOCSIWGENIE */
6537 static int ipw_wx_set_genie(struct net_device *dev,
6538 struct iw_request_info *info,
6539 union iwreq_data *wrqu, char *extra)
6540 {
6541 struct ipw_priv *priv = libipw_priv(dev);
6542 struct libipw_device *ieee = priv->ieee;
6543 u8 *buf;
6544 int err = 0;
6545
6546 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6547 (wrqu->data.length && extra == NULL))
6548 return -EINVAL;
6549
6550 if (wrqu->data.length) {
6551 buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6552 if (buf == NULL) {
6553 err = -ENOMEM;
6554 goto out;
6555 }
6556
6557 kfree(ieee->wpa_ie);
6558 ieee->wpa_ie = buf;
6559 ieee->wpa_ie_len = wrqu->data.length;
6560 } else {
6561 kfree(ieee->wpa_ie);
6562 ieee->wpa_ie = NULL;
6563 ieee->wpa_ie_len = 0;
6564 }
6565
6566 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6567 out:
6568 return err;
6569 }
6570
6571 /* SIOCGIWGENIE */
6572 static int ipw_wx_get_genie(struct net_device *dev,
6573 struct iw_request_info *info,
6574 union iwreq_data *wrqu, char *extra)
6575 {
6576 struct ipw_priv *priv = libipw_priv(dev);
6577 struct libipw_device *ieee = priv->ieee;
6578 int err = 0;
6579
6580 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6581 wrqu->data.length = 0;
6582 goto out;
6583 }
6584
6585 if (wrqu->data.length < ieee->wpa_ie_len) {
6586 err = -E2BIG;
6587 goto out;
6588 }
6589
6590 wrqu->data.length = ieee->wpa_ie_len;
6591 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6592
6593 out:
6594 return err;
6595 }
6596
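/* Map a WEXT cipher selection onto the driver's SEC_LEVEL_* value
 * (0 = none, 1 = WEP, 2 = TKIP, 3 = CCMP), or -1 if unknown. */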
6597 static int wext_cipher2level(int cipher)
6598 {
6599 switch (cipher) {
6600 case IW_AUTH_CIPHER_NONE:
6601 return SEC_LEVEL_0;
6602 case IW_AUTH_CIPHER_WEP40:
6603 case IW_AUTH_CIPHER_WEP104:
6604 return SEC_LEVEL_1;
6605 case IW_AUTH_CIPHER_TKIP:
6606 return SEC_LEVEL_2;
6607 case IW_AUTH_CIPHER_CCMP:
6608 return SEC_LEVEL_3;
6609 default:
6610 return -1;
6611 }
6612 }
6613
6614 /* SIOCSIWAUTH */
6615 static int ipw_wx_set_auth(struct net_device *dev,
6616 struct iw_request_info *info,
6617 union iwreq_data *wrqu, char *extra)
6618 {
6619 struct ipw_priv *priv = libipw_priv(dev);
6620 struct libipw_device *ieee = priv->ieee;
6621 struct iw_param *param = &wrqu->param;
6622 struct lib80211_crypt_data *crypt;
6623 unsigned long flags;
6624 int ret = 0;
6625
6626 switch (param->flags & IW_AUTH_INDEX) {
6627 case IW_AUTH_WPA_VERSION:
6628 break;
6629 case IW_AUTH_CIPHER_PAIRWISE:
6630 ipw_set_hw_decrypt_unicast(priv,
6631 wext_cipher2level(param->value));
6632 break;
6633 case IW_AUTH_CIPHER_GROUP:
6634 ipw_set_hw_decrypt_multicast(priv,
6635 wext_cipher2level(param->value));
6636 break;
6637 case IW_AUTH_KEY_MGMT:
6638 /*
6639 * ipw2200 does not use these parameters
6640 */
6641 break;
6642
6643 case IW_AUTH_TKIP_COUNTERMEASURES:
6644 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6645 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6646 break;
6647
6648 flags = crypt->ops->get_flags(crypt->priv);
6649
6650 if (param->value)
6651 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6652 else
6653 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6654
6655 crypt->ops->set_flags(flags, crypt->priv);
6656
6657 break;
6658
6659 case IW_AUTH_DROP_UNENCRYPTED:{
6660 /* HACK:
6661 *
6662 * wpa_supplicant calls set_wpa_enabled when the driver
6663 * is loaded and unloaded, regardless of whether WPA is being
6664 * used. No other calls are made which can be used to
6665 * determine whether encryption will be used prior to
6666 * association being expected. If encryption is not being
6667 * used, drop_unencrypted is set to false, else true -- we
6668 * can use this to determine if the CAP_PRIVACY_ON bit should
6669 * be set.
6670 */
6671 struct libipw_security sec = {
6672 .flags = SEC_ENABLED,
6673 .enabled = param->value,
6674 };
6675 priv->ieee->drop_unencrypted = param->value;
6676 /* We only change SEC_LEVEL for open mode. Others
6677 * are set by ipw_wpa_set_encryption.
6678 */
6679 if (!param->value) {
6680 sec.flags |= SEC_LEVEL;
6681 sec.level = SEC_LEVEL_0;
6682 } else {
6683 sec.flags |= SEC_LEVEL;
6684 sec.level = SEC_LEVEL_1;
6685 }
6686 if (priv->ieee->set_security)
6687 priv->ieee->set_security(priv->ieee->dev, &sec);
6688 break;
6689 }
6690
6691 case IW_AUTH_80211_AUTH_ALG:
6692 ret = ipw_wpa_set_auth_algs(priv, param->value);
6693 break;
6694
6695 case IW_AUTH_WPA_ENABLED:
6696 ret = ipw_wpa_enable(priv, param->value);
6697 ipw_disassociate(priv);
6698 break;
6699
6700 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6701 ieee->ieee802_1x = param->value;
6702 break;
6703
6704 case IW_AUTH_PRIVACY_INVOKED:
6705 ieee->privacy_invoked = param->value;
6706 break;
6707
6708 default:
6709 return -EOPNOTSUPP;
6710 }
6711 return ret;
6712 }
6713
6714 /* SIOCGIWAUTH */
6715 static int ipw_wx_get_auth(struct net_device *dev,
6716 struct iw_request_info *info,
6717 union iwreq_data *wrqu, char *extra)
6718 {
6719 struct ipw_priv *priv = libipw_priv(dev);
6720 struct libipw_device *ieee = priv->ieee;
6721 struct lib80211_crypt_data *crypt;
6722 struct iw_param *param = &wrqu->param;
6723
6724 switch (param->flags & IW_AUTH_INDEX) {
6725 case IW_AUTH_WPA_VERSION:
6726 case IW_AUTH_CIPHER_PAIRWISE:
6727 case IW_AUTH_CIPHER_GROUP:
6728 case IW_AUTH_KEY_MGMT:
6729 /*
6730 * wpa_supplicant will control these internally
6731 */
6732 return -EOPNOTSUPP;
6733
6734 case IW_AUTH_TKIP_COUNTERMEASURES:
6735 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6736 if (!crypt || !crypt->ops->get_flags)
6737 break;
6738
6739 param->value = (crypt->ops->get_flags(crypt->priv) &
6740 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6741
6742 break;
6743
6744 case IW_AUTH_DROP_UNENCRYPTED:
6745 param->value = ieee->drop_unencrypted;
6746 break;
6747
6748 case IW_AUTH_80211_AUTH_ALG:
6749 param->value = ieee->sec.auth_mode;
6750 break;
6751
6752 case IW_AUTH_WPA_ENABLED:
6753 param->value = ieee->wpa_enabled;
6754 break;
6755
6756 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6757 param->value = ieee->ieee802_1x;
6758 break;
6759
6760 case IW_AUTH_ROAMING_CONTROL:
6761 case IW_AUTH_PRIVACY_INVOKED:
6762 param->value = ieee->privacy_invoked;
6763 break;
6764
6765 default:
6766 return -EOPNOTSUPP;
6767 }
6768 return 0;
6769 }
6770
6771 /* SIOCSIWENCODEEXT */
6772 static int ipw_wx_set_encodeext(struct net_device *dev,
6773 struct iw_request_info *info,
6774 union iwreq_data *wrqu, char *extra)
6775 {
6776 struct ipw_priv *priv = libipw_priv(dev);
6777 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6778
6779 if (hwcrypto) {
6780 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6781 /* IPW HW can't build TKIP MIC,
6782 host decryption still needed */
6783 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6784 priv->ieee->host_mc_decrypt = 1;
6785 else {
6786 priv->ieee->host_encrypt = 0;
6787 priv->ieee->host_encrypt_msdu = 1;
6788 priv->ieee->host_decrypt = 1;
6789 }
6790 } else {
6791 priv->ieee->host_encrypt = 0;
6792 priv->ieee->host_encrypt_msdu = 0;
6793 priv->ieee->host_decrypt = 0;
6794 priv->ieee->host_mc_decrypt = 0;
6795 }
6796 }
6797
6798 return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6799 }
6800
6801 /* SIOCGIWENCODEEXT */
6802 static int ipw_wx_get_encodeext(struct net_device *dev,
6803 struct iw_request_info *info,
6804 union iwreq_data *wrqu, char *extra)
6805 {
6806 struct ipw_priv *priv = libipw_priv(dev);
6807 return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6808 }
6809
6810 /* SIOCSIWMLME */
6811 static int ipw_wx_set_mlme(struct net_device *dev,
6812 struct iw_request_info *info,
6813 union iwreq_data *wrqu, char *extra)
6814 {
6815 struct ipw_priv *priv = libipw_priv(dev);
6816 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6817 __le16 reason;
6818
6819 reason = cpu_to_le16(mlme->reason_code);
6820
6821 switch (mlme->cmd) {
6822 case IW_MLME_DEAUTH:
6823 /* silently ignore */
6824 break;
6825
6826 case IW_MLME_DISASSOC:
6827 ipw_disassociate(priv);
6828 break;
6829
6830 default:
6831 return -EOPNOTSUPP;
6832 }
6833 return 0;
6834 }
6835
6836 #ifdef CONFIG_IPW2200_QOS
6837
6838 /* QoS */
6839 /*
6840 * get the modulation type of the current network or
6841 * the card current mode
6842 */
6843 static u8 ipw_qos_current_mode(struct ipw_priv *priv)
6844 {
6845 u8 mode = 0;
6846
6847 if (priv->status & STATUS_ASSOCIATED) {
6848 unsigned long flags;
6849
6850 spin_lock_irqsave(&priv->ieee->lock, flags);
6851 mode = priv->assoc_network->mode;
6852 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6853 } else {
6854 mode = priv->ieee->mode;
6855 }
6856 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6857 return mode;
6858 }
6859
6860 /*
6861 * Handle management frame beacon and probe response
6862 */
6863 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6864 int active_network,
6865 struct libipw_network *network)
6866 {
6867 u32 size = sizeof(struct libipw_qos_parameters);
6868
6869 if (network->capability & WLAN_CAPABILITY_IBSS)
6870 network->qos_data.active = network->qos_data.supported;
6871
6872 if (network->flags & NETWORK_HAS_QOS_MASK) {
6873 if (active_network &&
6874 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6875 network->qos_data.active = network->qos_data.supported;
6876
6877 if ((network->qos_data.active == 1) && (active_network == 1) &&
6878 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6879 (network->qos_data.old_param_count !=
6880 network->qos_data.param_count)) {
6881 network->qos_data.old_param_count =
6882 network->qos_data.param_count;
6883 schedule_work(&priv->qos_activate);
6884 IPW_DEBUG_QOS("QoS parameters change call "
6885 "qos_activate\n");
6886 }
6887 } else {
6888 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6889 memcpy(&network->qos_data.parameters,
6890 &def_parameters_CCK, size);
6891 else
6892 memcpy(&network->qos_data.parameters,
6893 &def_parameters_OFDM, size);
6894
6895 if ((network->qos_data.active == 1) && (active_network == 1)) {
6896 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6897 schedule_work(&priv->qos_activate);
6898 }
6899
6900 network->qos_data.active = 0;
6901 network->qos_data.supported = 0;
6902 }
6903 if ((priv->status & STATUS_ASSOCIATED) &&
6904 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6905 if (!ether_addr_equal(network->bssid, priv->bssid))
6906 if (network->capability & WLAN_CAPABILITY_IBSS)
6907 if ((network->ssid_len ==
6908 priv->assoc_network->ssid_len) &&
6909 !memcmp(network->ssid,
6910 priv->assoc_network->ssid,
6911 network->ssid_len)) {
6912 schedule_work(&priv->merge_networks);
6913 }
6914 }
6915
6916 return 0;
6917 }
6918
6919 /*
6920 * This function sets up the firmware to support QoS. It sends
6921 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6922 */
6923 static int ipw_qos_activate(struct ipw_priv *priv,
6924 struct libipw_qos_data *qos_network_data)
6925 {
6926 int err;
6927 struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6928 struct libipw_qos_parameters *active_one = NULL;
6929 u32 size = sizeof(struct libipw_qos_parameters);
6930 u32 burst_duration;
6931 int i;
6932 u8 type;
6933
6934 type = ipw_qos_current_mode(priv);
6935
6936 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6937 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6938 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6939 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6940
6941 if (qos_network_data == NULL) {
6942 if (type == IEEE_B) {
6943 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6944 active_one = &def_parameters_CCK;
6945 } else
6946 active_one = &def_parameters_OFDM;
6947
6948 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6949 burst_duration = ipw_qos_get_burst_duration(priv);
6950 for (i = 0; i < QOS_QUEUE_NUM; i++)
6951 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6952 cpu_to_le16(burst_duration);
6953 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6954 if (type == IEEE_B) {
6955 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6956 type);
6957 if (priv->qos_data.qos_enable == 0)
6958 active_one = &def_parameters_CCK;
6959 else
6960 active_one = priv->qos_data.def_qos_parm_CCK;
6961 } else {
6962 if (priv->qos_data.qos_enable == 0)
6963 active_one = &def_parameters_OFDM;
6964 else
6965 active_one = priv->qos_data.def_qos_parm_OFDM;
6966 }
6967 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6968 } else {
6969 unsigned long flags;
6970 int active;
6971
6972 spin_lock_irqsave(&priv->ieee->lock, flags);
6973 active_one = &(qos_network_data->parameters);
6974 qos_network_data->old_param_count =
6975 qos_network_data->param_count;
6976 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6977 active = qos_network_data->supported;
6978 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6979
6980 if (active == 0) {
6981 burst_duration = ipw_qos_get_burst_duration(priv);
6982 for (i = 0; i < QOS_QUEUE_NUM; i++)
6983 qos_parameters[QOS_PARAM_SET_ACTIVE].
6984 tx_op_limit[i] = cpu_to_le16(burst_duration);
6985 }
6986 }
6987
6988 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6989 err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
6990 if (err)
6991 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6992
6993 return err;
6994 }
6995
6996 /*
6997 * send IPW_CMD_WME_INFO to the firmware
6998 */
6999 static int ipw_qos_set_info_element(struct ipw_priv *priv)
7000 {
7001 int ret = 0;
7002 struct libipw_qos_information_element qos_info;
7003
7004 if (priv == NULL)
7005 return -1;
7006
7007 qos_info.elementID = QOS_ELEMENT_ID;
7008 qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7009
7010 qos_info.version = QOS_VERSION_1;
7011 qos_info.ac_info = 0;
7012
7013 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7014 qos_info.qui_type = QOS_OUI_TYPE;
7015 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7016
7017 ret = ipw_send_qos_info_command(priv, &qos_info);
7018 if (ret != 0) {
7019 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7020 }
7021 return ret;
7022 }
7023
7024 /*
7025 * Set the QoS parameter with the association request structure
7026 */
7027 static int ipw_qos_association(struct ipw_priv *priv,
7028 struct libipw_network *network)
7029 {
7030 int err = 0;
7031 struct libipw_qos_data *qos_data = NULL;
7032 struct libipw_qos_data ibss_data = {
7033 .supported = 1,
7034 .active = 1,
7035 };
7036
7037 switch (priv->ieee->iw_mode) {
7038 case IW_MODE_ADHOC:
7039 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7040
7041 qos_data = &ibss_data;
7042 break;
7043
7044 case IW_MODE_INFRA:
7045 qos_data = &network->qos_data;
7046 break;
7047
7048 default:
7049 BUG();
7050 break;
7051 }
7052
7053 err = ipw_qos_activate(priv, qos_data);
7054 if (err) {
7055 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7056 return err;
7057 }
7058
7059 if (priv->qos_data.qos_enable && qos_data->supported) {
7060 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7061 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7062 return ipw_qos_set_info_element(priv);
7063 }
7064
7065 return 0;
7066 }
7067
7068 /*
7069 * Handle beacon responses: if the QoS settings advertised by the
7070 * network differ from the settings used at association time, adjust
7071 * the QoS settings accordingly.
7072 */
7073 static int ipw_qos_association_resp(struct ipw_priv *priv,
7074 struct libipw_network *network)
7075 {
7076 int ret = 0;
7077 unsigned long flags;
7078 u32 size = sizeof(struct libipw_qos_parameters);
7079 int set_qos_param = 0;
7080
7081 if ((priv == NULL) || (network == NULL) ||
7082 (priv->assoc_network == NULL))
7083 return ret;
7084
7085 if (!(priv->status & STATUS_ASSOCIATED))
7086 return ret;
7087
7088 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7089 return ret;
7090
7091 spin_lock_irqsave(&priv->ieee->lock, flags);
7092 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7093 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7094 sizeof(struct libipw_qos_data));
7095 priv->assoc_network->qos_data.active = 1;
7096 if ((network->qos_data.old_param_count !=
7097 network->qos_data.param_count)) {
7098 set_qos_param = 1;
7099 network->qos_data.old_param_count =
7100 network->qos_data.param_count;
7101 }
7102
7103 } else {
7104 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7105 memcpy(&priv->assoc_network->qos_data.parameters,
7106 &def_parameters_CCK, size);
7107 else
7108 memcpy(&priv->assoc_network->qos_data.parameters,
7109 &def_parameters_OFDM, size);
7110 priv->assoc_network->qos_data.active = 0;
7111 priv->assoc_network->qos_data.supported = 0;
7112 set_qos_param = 1;
7113 }
7114
7115 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7116
7117 if (set_qos_param == 1)
7118 schedule_work(&priv->qos_activate);
7119
7120 return ret;
7121 }
7122
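/*
 * Return the configured TX burst duration for the modulation currently
 * in use: CCK when OFDM modulation is not enabled, OFDM otherwise.
 */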
7123 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7124 {
7125 u32 ret = 0;
7126
7127 if (priv == NULL)
7128 return 0;
7129
7130 if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7131 ret = priv->qos_data.burst_duration_CCK;
7132 else
7133 ret = priv->qos_data.burst_duration_OFDM;
7134
7135 return ret;
7136 }
7137
7138 /*
7139 * Initialize the global QoS settings
7140 */
7141 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7142 int burst_enable, u32 burst_duration_CCK,
7143 u32 burst_duration_OFDM)
7144 {
7145 priv->qos_data.qos_enable = enable;
7146
7147 if (priv->qos_data.qos_enable) {
7148 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7149 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7150 IPW_DEBUG_QOS("QoS is enabled\n");
7151 } else {
7152 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7153 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7154 IPW_DEBUG_QOS("QoS is not enabled\n");
7155 }
7156
7157 priv->qos_data.burst_enable = burst_enable;
7158
7159 if (burst_enable) {
7160 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7161 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7162 } else {
7163 priv->qos_data.burst_duration_CCK = 0;
7164 priv->qos_data.burst_duration_OFDM = 0;
7165 }
7166 }
7167
7168 /*
7169 * map the packet priority to the right TX Queue
7170 */
7171 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7172 {
7173 if (priority > 7 || !priv->qos_data.qos_enable)
7174 priority = 0;
7175
7176 return from_priority_to_tx_queue[priority] - 1;
7177 }
7178
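/*
 * Decide whether QoS should be applied to an outgoing frame: requires an
 * active association, QoS enabled in the driver, and a network (or IBSS
 * peer) that supports QoS.
 */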
7179 static int ipw_is_qos_active(struct net_device *dev,
7180 struct sk_buff *skb)
7181 {
7182 struct ipw_priv *priv = libipw_priv(dev);
7183 struct libipw_qos_data *qos_data = NULL;
7184 int active, supported;
7185 u8 *daddr = skb->data + ETH_ALEN;
7186 int unicast = !is_multicast_ether_addr(daddr);
7187
7188 if (!(priv->status & STATUS_ASSOCIATED))
7189 return 0;
7190
7191 qos_data = &priv->assoc_network->qos_data;
7192
7193 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7194 if (unicast == 0)
7195 qos_data->active = 0;
7196 else
7197 qos_data->active = qos_data->supported;
7198 }
7199 active = qos_data->active;
7200 supported = qos_data->supported;
7201 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7202 "unicast %d\n",
7203 priv->qos_data.qos_enable, active, supported, unicast);
7204 if (active && priv->qos_data.qos_enable)
7205 return 1;
7206
7207 return 0;
7208
7209 }
7210 /*
7211 * Add the QoS parameters to the TX command
7212 */
7213 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7214 u16 priority,
7215 struct tfd_data *tfd)
7216 {
7217 int tx_queue_id = 0;
7218
7219
7220 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7221 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7222
7223 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7224 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7225 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7226 }
7227 return 0;
7228 }
7229
7230 /*
7231 * Background work handler to run the QoS activation functionality
7232 */
7233 static void ipw_bg_qos_activate(struct work_struct *work)
7234 {
7235 struct ipw_priv *priv =
7236 container_of(work, struct ipw_priv, qos_activate);
7237
7238 mutex_lock(&priv->mutex);
7239
7240 if (priv->status & STATUS_ASSOCIATED)
7241 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7242
7243 mutex_unlock(&priv->mutex);
7244 }
7245
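/*
 * Called when a probe response is received for @network; update its QoS
 * state, treating it as active if it is the currently associated network.
 */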
7246 static int ipw_handle_probe_response(struct net_device *dev,
7247 struct libipw_probe_response *resp,
7248 struct libipw_network *network)
7249 {
7250 struct ipw_priv *priv = libipw_priv(dev);
7251 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7252 (network == priv->assoc_network));
7253
7254 ipw_qos_handle_probe_response(priv, active_network, network);
7255
7256 return 0;
7257 }
7258
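/*
 * Called when a beacon is received; beacons carry the same QoS
 * information as probe responses, so reuse the same handling.
 */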
7259 static int ipw_handle_beacon(struct net_device *dev,
7260 struct libipw_beacon *resp,
7261 struct libipw_network *network)
7262 {
7263 struct ipw_priv *priv = libipw_priv(dev);
7264 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7265 (network == priv->assoc_network));
7266
7267 ipw_qos_handle_probe_response(priv, active_network, network);
7268
7269 return 0;
7270 }
7271
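/*
 * Called when an association response is received; pick up the QoS
 * parameters negotiated with the AP.
 */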
7272 static int ipw_handle_assoc_response(struct net_device *dev,
7273 struct libipw_assoc_response *resp,
7274 struct libipw_network *network)
7275 {
7276 struct ipw_priv *priv = libipw_priv(dev);
7277 ipw_qos_association_resp(priv, network);
7278 return 0;
7279 }
7280
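/* Send the three per-AC QoS parameter sets to the firmware. */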
7281 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7282 *qos_param)
7283 {
7284 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7285 sizeof(*qos_param) * 3, qos_param);
7286 }
7287
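/* Send the WME information element to the firmware. */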
7288 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7289 *qos_param)
7290 {
7291 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7292 qos_param);
7293 }
7294
7295 #endif /* CONFIG_IPW2200_QOS */
7296
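/*
 * Build the association request for @network and send the sequence of
 * host commands (SSID, supported rates, system config, sensitivity and
 * finally the associate command) needed to join or create it.
 */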
7297 static int ipw_associate_network(struct ipw_priv *priv,
7298 struct libipw_network *network,
7299 struct ipw_supported_rates *rates, int roaming)
7300 {
7301 int err;
7302
7303 if (priv->config & CFG_FIXED_RATE)
7304 ipw_set_fixed_rate(priv, network->mode);
7305
7306 if (!(priv->config & CFG_STATIC_ESSID)) {
7307 priv->essid_len = min(network->ssid_len,
7308 (u8) IW_ESSID_MAX_SIZE);
7309 memcpy(priv->essid, network->ssid, priv->essid_len);
7310 }
7311
7312 network->last_associate = jiffies;
7313
7314 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7315 priv->assoc_request.channel = network->channel;
7316 priv->assoc_request.auth_key = 0;
7317
7318 if ((priv->capability & CAP_PRIVACY_ON) &&
7319 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7320 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7321 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7322
7323 if (priv->ieee->sec.level == SEC_LEVEL_1)
7324 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7325
7326 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7327 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7328 priv->assoc_request.auth_type = AUTH_LEAP;
7329 else
7330 priv->assoc_request.auth_type = AUTH_OPEN;
7331
7332 if (priv->ieee->wpa_ie_len) {
7333 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7334 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7335 priv->ieee->wpa_ie_len);
7336 }
7337
7338 /*
7339 * It is valid for our ieee device to support multiple modes, but
7340 * when it comes to associating to a given network we have to choose
7341 * just one mode.
7342 */
7343 if (network->mode & priv->ieee->mode & IEEE_A)
7344 priv->assoc_request.ieee_mode = IPW_A_MODE;
7345 else if (network->mode & priv->ieee->mode & IEEE_G)
7346 priv->assoc_request.ieee_mode = IPW_G_MODE;
7347 else if (network->mode & priv->ieee->mode & IEEE_B)
7348 priv->assoc_request.ieee_mode = IPW_B_MODE;
7349
7350 priv->assoc_request.capability = cpu_to_le16(network->capability);
7351 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7352 && !(priv->config & CFG_PREAMBLE_LONG)) {
7353 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7354 } else {
7355 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7356
7357 /* Clear the short preamble if we won't be supporting it */
7358 priv->assoc_request.capability &=
7359 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7360 }
7361
7362 /* Clear capability bits that aren't used in Ad Hoc */
7363 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7364 priv->assoc_request.capability &=
7365 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7366
7367 IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7368 roaming ? "Rea" : "A",
7369 priv->essid_len, priv->essid,
7370 network->channel,
7371 ipw_modes[priv->assoc_request.ieee_mode],
7372 rates->num_rates,
7373 (priv->assoc_request.preamble_length ==
7374 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7375 network->capability &
7376 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7377 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7378 priv->capability & CAP_PRIVACY_ON ?
7379 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7380 "(open)") : "",
7381 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7382 priv->capability & CAP_PRIVACY_ON ?
7383 '1' + priv->ieee->sec.active_key : '.',
7384 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7385
7386 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7387 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7388 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7389 priv->assoc_request.assoc_type = HC_IBSS_START;
7390 priv->assoc_request.assoc_tsf_msw = 0;
7391 priv->assoc_request.assoc_tsf_lsw = 0;
7392 } else {
7393 if (unlikely(roaming))
7394 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7395 else
7396 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7397 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7398 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7399 }
7400
7401 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7402
7403 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7404 eth_broadcast_addr(priv->assoc_request.dest);
7405 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7406 } else {
7407 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7408 priv->assoc_request.atim_window = 0;
7409 }
7410
7411 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7412
7413 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7414 if (err) {
7415 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7416 return err;
7417 }
7418
7419 rates->ieee_mode = priv->assoc_request.ieee_mode;
7420 rates->purpose = IPW_RATE_CONNECT;
7421 ipw_send_supported_rates(priv, rates);
7422
7423 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7424 priv->sys_config.dot11g_auto_detection = 1;
7425 else
7426 priv->sys_config.dot11g_auto_detection = 0;
7427
7428 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7429 priv->sys_config.answer_broadcast_ssid_probe = 1;
7430 else
7431 priv->sys_config.answer_broadcast_ssid_probe = 0;
7432
7433 err = ipw_send_system_config(priv);
7434 if (err) {
7435 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7436 return err;
7437 }
7438
7439 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7440 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7441 if (err) {
7442 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7443 return err;
7444 }
7445
7446 /*
7447 * If preemption is enabled, it is possible for the association
7448 * to complete before we return from ipw_send_associate. Therefore
7449 * we have to be sure to update our private data first.
7450 */
7451 priv->channel = network->channel;
7452 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7453 priv->status |= STATUS_ASSOCIATING;
7454 priv->status &= ~STATUS_SECURITY_UPDATED;
7455
7456 priv->assoc_network = network;
7457
7458 #ifdef CONFIG_IPW2200_QOS
7459 ipw_qos_association(priv, network);
7460 #endif
7461
7462 err = ipw_send_associate(priv, &priv->assoc_request);
7463 if (err) {
7464 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7465 return err;
7466 }
7467
7468 IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
7469 priv->essid_len, priv->essid, priv->bssid);
7470
7471 return 0;
7472 }
7473
7474 static void ipw_roam(void *data)
7475 {
7476 struct ipw_priv *priv = data;
7477 struct libipw_network *network = NULL;
7478 struct ipw_network_match match = {
7479 .network = priv->assoc_network
7480 };
7481
7482 /* The roaming process is as follows:
7483 *
7484 * 1. Missed beacon threshold triggers the roaming process by
7485 * setting the status ROAM bit and requesting a scan.
7486 * 2. When the scan completes, it schedules the ROAM work
7487 * 3. The ROAM work looks at all of the known networks for one that
7488 * is a better network than the currently associated. If none
7489 * found, the ROAM process is over (ROAM bit cleared)
7490 * 4. If a better network is found, a disassociation request is
7491 * sent.
7492 * 5. When the disassociation completes, the roam work is again
7493 * scheduled. The second time through, the driver is no longer
7494 * associated, and the newly selected network is sent an
7495 * association request.
7496 * 6. At this point, the roaming process is complete and the ROAM
7497 * status bit is cleared.
7498 */
7499
7500 /* If we are no longer associated, and the roaming bit is no longer
7501 * set, then we are not actively roaming, so just return */
7502 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7503 return;
7504
7505 if (priv->status & STATUS_ASSOCIATED) {
7506 /* First pass through ROAM process -- look for a better
7507 * network */
7508 unsigned long flags;
7509 u8 rssi = priv->assoc_network->stats.rssi;
7510 priv->assoc_network->stats.rssi = -128;
7511 spin_lock_irqsave(&priv->ieee->lock, flags);
7512 list_for_each_entry(network, &priv->ieee->network_list, list) {
7513 if (network != priv->assoc_network)
7514 ipw_best_network(priv, &match, network, 1);
7515 }
7516 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7517 priv->assoc_network->stats.rssi = rssi;
7518
7519 if (match.network == priv->assoc_network) {
7520 IPW_DEBUG_ASSOC("No better APs in this network to "
7521 "roam to.\n");
7522 priv->status &= ~STATUS_ROAMING;
7523 ipw_debug_config(priv);
7524 return;
7525 }
7526
7527 ipw_send_disassociate(priv, 1);
7528 priv->assoc_network = match.network;
7529
7530 return;
7531 }
7532
7533 /* Second pass through ROAM process -- request association */
7534 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7535 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7536 priv->status &= ~STATUS_ROAMING;
7537 }
7538
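/* Work handler: run the roam state machine under the driver mutex. */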
7539 static void ipw_bg_roam(struct work_struct *work)
7540 {
7541 struct ipw_priv *priv =
7542 container_of(work, struct ipw_priv, roam);
7543 mutex_lock(&priv->mutex);
7544 ipw_roam(priv);
7545 mutex_unlock(&priv->mutex);
7546 }
7547
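/*
 * Scan the list of known networks for the best match against the current
 * configuration and attempt to associate with it; in ad-hoc mode a new
 * IBSS may be created if no suitable network exists. Returns 1 if an
 * association attempt was started, 0 otherwise.
 */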
7548 static int ipw_associate(void *data)
7549 {
7550 struct ipw_priv *priv = data;
7551
7552 struct libipw_network *network = NULL;
7553 struct ipw_network_match match = {
7554 .network = NULL
7555 };
7556 struct ipw_supported_rates *rates;
7557 struct list_head *element;
7558 unsigned long flags;
7559
7560 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7561 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7562 return 0;
7563 }
7564
7565 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7566 IPW_DEBUG_ASSOC("Not attempting association (already in "
7567 "progress)\n");
7568 return 0;
7569 }
7570
7571 if (priv->status & STATUS_DISASSOCIATING) {
7572 IPW_DEBUG_ASSOC("Not attempting association (in "
7573 "disassociating)\n ");
7574 schedule_work(&priv->associate);
7575 return 0;
7576 }
7577
7578 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7579 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7580 "initialized)\n");
7581 return 0;
7582 }
7583
7584 if (!(priv->config & CFG_ASSOCIATE) &&
7585 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7586 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7587 return 0;
7588 }
7589
7590 /* Protect our use of the network_list */
7591 spin_lock_irqsave(&priv->ieee->lock, flags);
7592 list_for_each_entry(network, &priv->ieee->network_list, list)
7593 ipw_best_network(priv, &match, network, 0);
7594
7595 network = match.network;
7596 rates = &match.rates;
7597
7598 if (network == NULL &&
7599 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7600 priv->config & CFG_ADHOC_CREATE &&
7601 priv->config & CFG_STATIC_ESSID &&
7602 priv->config & CFG_STATIC_CHANNEL) {
7603 /* Use oldest network if the free list is empty */
7604 if (list_empty(&priv->ieee->network_free_list)) {
7605 struct libipw_network *oldest = NULL;
7606 struct libipw_network *target;
7607
7608 list_for_each_entry(target, &priv->ieee->network_list, list) {
7609 if ((oldest == NULL) ||
7610 (target->last_scanned < oldest->last_scanned))
7611 oldest = target;
7612 }
7613
7614 /* If there are no more slots, expire the oldest */
7615 list_del(&oldest->list);
7616 target = oldest;
7617 IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
7618 target->ssid_len, target->ssid,
7619 target->bssid);
7620 list_add_tail(&target->list,
7621 &priv->ieee->network_free_list);
7622 }
7623
7624 element = priv->ieee->network_free_list.next;
7625 network = list_entry(element, struct libipw_network, list);
7626 ipw_adhoc_create(priv, network);
7627 rates = &priv->rates;
7628 list_del(element);
7629 list_add_tail(&network->list, &priv->ieee->network_list);
7630 }
7631 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7632
7633 /* If we reached the end of the list, then we don't have any valid
7634 * matching APs */
7635 if (!network) {
7636 ipw_debug_config(priv);
7637
7638 if (!(priv->status & STATUS_SCANNING)) {
7639 if (!(priv->config & CFG_SPEED_SCAN))
7640 schedule_delayed_work(&priv->request_scan,
7641 SCAN_INTERVAL);
7642 else
7643 schedule_delayed_work(&priv->request_scan, 0);
7644 }
7645
7646 return 0;
7647 }
7648
7649 ipw_associate_network(priv, network, rates, 0);
7650
7651 return 1;
7652 }
7653
7654 static void ipw_bg_associate(struct work_struct *work)
7655 {
7656 struct ipw_priv *priv =
7657 container_of(work, struct ipw_priv, associate);
7658 mutex_lock(&priv->mutex);
7659 ipw_associate(priv);
7660 mutex_unlock(&priv->mutex);
7661 }
7662
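/*
 * For frames decrypted in hardware, strip the security header and trailer
 * (CCMP header/MIC or WEP IV/ICV) and clear the protected bit so the rest
 * of the stack sees a plaintext 802.11 frame.
 */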
7663 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7664 struct sk_buff *skb)
7665 {
7666 struct ieee80211_hdr *hdr;
7667 u16 fc;
7668
7669 hdr = (struct ieee80211_hdr *)skb->data;
7670 fc = le16_to_cpu(hdr->frame_control);
7671 if (!(fc & IEEE80211_FCTL_PROTECTED))
7672 return;
7673
7674 fc &= ~IEEE80211_FCTL_PROTECTED;
7675 hdr->frame_control = cpu_to_le16(fc);
7676 switch (priv->ieee->sec.level) {
7677 case SEC_LEVEL_3:
7678 /* Remove CCMP HDR */
7679 memmove(skb->data + LIBIPW_3ADDR_LEN,
7680 skb->data + LIBIPW_3ADDR_LEN + 8,
7681 skb->len - LIBIPW_3ADDR_LEN - 8);
7682 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7683 break;
7684 case SEC_LEVEL_2:
7685 break;
7686 case SEC_LEVEL_1:
7687 /* Remove IV */
7688 memmove(skb->data + LIBIPW_3ADDR_LEN,
7689 skb->data + LIBIPW_3ADDR_LEN + 4,
7690 skb->len - LIBIPW_3ADDR_LEN - 4);
7691 skb_trim(skb, skb->len - 8); /* IV + ICV */
7692 break;
7693 case SEC_LEVEL_0:
7694 break;
7695 default:
7696 printk(KERN_ERR "Unknown security level %d\n",
7697 priv->ieee->sec.level);
7698 break;
7699 }
7700 }
7701
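/*
 * Hand a received 802.11 data frame to libipw after sanity checking its
 * length and undoing any hardware-decryption artifacts.
 */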
7702 static void ipw_handle_data_packet(struct ipw_priv *priv,
7703 struct ipw_rx_mem_buffer *rxb,
7704 struct libipw_rx_stats *stats)
7705 {
7706 struct net_device *dev = priv->net_dev;
7707 struct libipw_hdr_4addr *hdr;
7708 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7709
7710 /* We received data from the HW, so stop the watchdog */
7711 dev->trans_start = jiffies;
7712
7713 /* We only process data packets if the
7714 * interface is open */
7715 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7716 skb_tailroom(rxb->skb))) {
7717 dev->stats.rx_errors++;
7718 priv->wstats.discard.misc++;
7719 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7720 return;
7721 } else if (unlikely(!netif_running(priv->net_dev))) {
7722 dev->stats.rx_dropped++;
7723 priv->wstats.discard.misc++;
7724 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7725 return;
7726 }
7727
7728 /* Advance skb->data to the start of the actual payload */
7729 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7730
7731 /* Set the size of the skb to the size of the frame */
7732 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7733
7734 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7735
7736 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7737 hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7738 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7739 (is_multicast_ether_addr(hdr->addr1) ?
7740 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7741 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7742
7743 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7744 dev->stats.rx_errors++;
7745 else { /* libipw_rx succeeded, so it now owns the SKB */
7746 rxb->skb = NULL;
7747 __ipw_led_activity_on(priv);
7748 }
7749 }
7750
7751 #ifdef CONFIG_IPW2200_RADIOTAP
7752 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7753 struct ipw_rx_mem_buffer *rxb,
7754 struct libipw_rx_stats *stats)
7755 {
7756 struct net_device *dev = priv->net_dev;
7757 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7758 struct ipw_rx_frame *frame = &pkt->u.frame;
7759
7760 /* initial pull of some data */
7761 u16 received_channel = frame->received_channel;
7762 u8 antennaAndPhy = frame->antennaAndPhy;
7763 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7764 u16 pktrate = frame->rate;
7765
7766 /* Magic struct that slots into the radiotap header -- no reason
7767 * to build this manually element by element, we can write it much
7768 * more efficiently than we can parse it. ORDER MATTERS HERE */
7769 struct ipw_rt_hdr *ipw_rt;
7770
7771 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7772
7773 /* We received data from the HW, so stop the watchdog */
7774 dev->trans_start = jiffies;
7775
7776 /* We only process data packets if the
7777 * interface is open */
7778 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7779 skb_tailroom(rxb->skb))) {
7780 dev->stats.rx_errors++;
7781 priv->wstats.discard.misc++;
7782 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7783 return;
7784 } else if (unlikely(!netif_running(priv->net_dev))) {
7785 dev->stats.rx_dropped++;
7786 priv->wstats.discard.misc++;
7787 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7788 return;
7789 }
7790
7791 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7792 * that now */
7793 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7794 /* FIXME: Should alloc bigger skb instead */
7795 dev->stats.rx_dropped++;
7796 priv->wstats.discard.misc++;
7797 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7798 return;
7799 }
7800
7801 /* copy the frame itself */
7802 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7803 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7804
7805 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7806
7807 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7808 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7809 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7810
7811 /* Big bitfield of all the fields we provide in radiotap */
7812 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7813 (1 << IEEE80211_RADIOTAP_TSFT) |
7814 (1 << IEEE80211_RADIOTAP_FLAGS) |
7815 (1 << IEEE80211_RADIOTAP_RATE) |
7816 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7817 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7818 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7819 (1 << IEEE80211_RADIOTAP_ANTENNA));
7820
7821 /* Zero the flags, we'll add to them as we go */
7822 ipw_rt->rt_flags = 0;
7823 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7824 frame->parent_tsf[2] << 16 |
7825 frame->parent_tsf[1] << 8 |
7826 frame->parent_tsf[0]);
7827
7828 /* Convert signal to DBM */
7829 ipw_rt->rt_dbmsignal = antsignal;
7830 ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7831
7832 /* Convert the channel data and set the flags */
7833 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7834 if (received_channel > 14) { /* 802.11a */
7835 ipw_rt->rt_chbitmask =
7836 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7837 } else if (antennaAndPhy & 32) { /* 802.11b */
7838 ipw_rt->rt_chbitmask =
7839 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7840 } else { /* 802.11g */
7841 ipw_rt->rt_chbitmask =
7842 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7843 }
7844
7845 /* set the rate in multiples of 500k/s */
7846 switch (pktrate) {
7847 case IPW_TX_RATE_1MB:
7848 ipw_rt->rt_rate = 2;
7849 break;
7850 case IPW_TX_RATE_2MB:
7851 ipw_rt->rt_rate = 4;
7852 break;
7853 case IPW_TX_RATE_5MB:
7854 ipw_rt->rt_rate = 10;
7855 break;
7856 case IPW_TX_RATE_6MB:
7857 ipw_rt->rt_rate = 12;
7858 break;
7859 case IPW_TX_RATE_9MB:
7860 ipw_rt->rt_rate = 18;
7861 break;
7862 case IPW_TX_RATE_11MB:
7863 ipw_rt->rt_rate = 22;
7864 break;
7865 case IPW_TX_RATE_12MB:
7866 ipw_rt->rt_rate = 24;
7867 break;
7868 case IPW_TX_RATE_18MB:
7869 ipw_rt->rt_rate = 36;
7870 break;
7871 case IPW_TX_RATE_24MB:
7872 ipw_rt->rt_rate = 48;
7873 break;
7874 case IPW_TX_RATE_36MB:
7875 ipw_rt->rt_rate = 72;
7876 break;
7877 case IPW_TX_RATE_48MB:
7878 ipw_rt->rt_rate = 96;
7879 break;
7880 case IPW_TX_RATE_54MB:
7881 ipw_rt->rt_rate = 108;
7882 break;
7883 default:
7884 ipw_rt->rt_rate = 0;
7885 break;
7886 }
7887
7888 /* antenna number */
7889 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7890
7891 /* set the preamble flag if we have it */
7892 if ((antennaAndPhy & 64))
7893 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7894
7895 /* Set the size of the skb to the size of the frame */
7896 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7897
7898 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7899
7900 if (!libipw_rx(priv->ieee, rxb->skb, stats))
7901 dev->stats.rx_errors++;
7902 else { /* libipw_rx succeeded, so it now owns the SKB */
7903 rxb->skb = NULL;
7904 /* no LED during capture */
7905 }
7906 }
7907 #endif
7908
7909 #ifdef CONFIG_IPW2200_PROMISCUOUS
7910 #define libipw_is_probe_response(fc) \
7911 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7912 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7913
7914 #define libipw_is_management(fc) \
7915 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7916
7917 #define libipw_is_control(fc) \
7918 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7919
7920 #define libipw_is_data(fc) \
7921 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7922
7923 #define libipw_is_assoc_request(fc) \
7924 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7925
7926 #define libipw_is_reassoc_request(fc) \
7927 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7928
7929 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7930 struct ipw_rx_mem_buffer *rxb,
7931 struct libipw_rx_stats *stats)
7932 {
7933 struct net_device *dev = priv->prom_net_dev;
7934 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7935 struct ipw_rx_frame *frame = &pkt->u.frame;
7936 struct ipw_rt_hdr *ipw_rt;
7937
7938 /* First cache any information we need before we overwrite
7939 * the information provided in the skb from the hardware */
7940 struct ieee80211_hdr *hdr;
7941 u16 channel = frame->received_channel;
7942 u8 phy_flags = frame->antennaAndPhy;
7943 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7944 s8 noise = (s8) le16_to_cpu(frame->noise);
7945 u8 rate = frame->rate;
7946 unsigned short len = le16_to_cpu(pkt->u.frame.length);
7947 struct sk_buff *skb;
7948 int hdr_only = 0;
7949 u16 filter = priv->prom_priv->filter;
7950
7951 /* If the filter is set to not include Rx frames then return */
7952 if (filter & IPW_PROM_NO_RX)
7953 return;
7954
7955 /* We received data from the HW, so stop the watchdog */
7956 dev->trans_start = jiffies;
7957
7958 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7959 dev->stats.rx_errors++;
7960 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7961 return;
7962 }
7963
7964 /* We only process data packets if the interface is open */
7965 if (unlikely(!netif_running(dev))) {
7966 dev->stats.rx_dropped++;
7967 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7968 return;
7969 }
7970
7971 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7972 * that now */
7973 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7974 /* FIXME: Should alloc bigger skb instead */
7975 dev->stats.rx_dropped++;
7976 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7977 return;
7978 }
7979
7980 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7981 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
7982 if (filter & IPW_PROM_NO_MGMT)
7983 return;
7984 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7985 hdr_only = 1;
7986 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
7987 if (filter & IPW_PROM_NO_CTL)
7988 return;
7989 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7990 hdr_only = 1;
7991 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
7992 if (filter & IPW_PROM_NO_DATA)
7993 return;
7994 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7995 hdr_only = 1;
7996 }
7997
7998 /* Copy the SKB since this is for the promiscuous side */
7999 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8000 if (skb == NULL) {
8001 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8002 return;
8003 }
8004
8005 /* copy the frame data to write after where the radiotap header goes */
8006 ipw_rt = (void *)skb->data;
8007
8008 if (hdr_only)
8009 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8010
8011 memcpy(ipw_rt->payload, hdr, len);
8012
8013 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8014 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8015 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8016
8017 /* Set the size of the skb to the size of the frame */
8018 skb_put(skb, sizeof(*ipw_rt) + len);
8019
8020 /* Big bitfield of all the fields we provide in radiotap */
8021 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8022 (1 << IEEE80211_RADIOTAP_TSFT) |
8023 (1 << IEEE80211_RADIOTAP_FLAGS) |
8024 (1 << IEEE80211_RADIOTAP_RATE) |
8025 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8026 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8027 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8028 (1 << IEEE80211_RADIOTAP_ANTENNA));
8029
8030 /* Zero the flags, we'll add to them as we go */
8031 ipw_rt->rt_flags = 0;
8032 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8033 frame->parent_tsf[2] << 16 |
8034 frame->parent_tsf[1] << 8 |
8035 frame->parent_tsf[0]);
8036
8037 /* Convert to DBM */
8038 ipw_rt->rt_dbmsignal = signal;
8039 ipw_rt->rt_dbmnoise = noise;
8040
8041 /* Convert the channel data and set the flags */
8042 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8043 if (channel > 14) { /* 802.11a */
8044 ipw_rt->rt_chbitmask =
8045 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8046 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8047 ipw_rt->rt_chbitmask =
8048 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8049 } else { /* 802.11g */
8050 ipw_rt->rt_chbitmask =
8051 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8052 }
8053
8054 /* set the rate in multiples of 500k/s */
8055 switch (rate) {
8056 case IPW_TX_RATE_1MB:
8057 ipw_rt->rt_rate = 2;
8058 break;
8059 case IPW_TX_RATE_2MB:
8060 ipw_rt->rt_rate = 4;
8061 break;
8062 case IPW_TX_RATE_5MB:
8063 ipw_rt->rt_rate = 10;
8064 break;
8065 case IPW_TX_RATE_6MB:
8066 ipw_rt->rt_rate = 12;
8067 break;
8068 case IPW_TX_RATE_9MB:
8069 ipw_rt->rt_rate = 18;
8070 break;
8071 case IPW_TX_RATE_11MB:
8072 ipw_rt->rt_rate = 22;
8073 break;
8074 case IPW_TX_RATE_12MB:
8075 ipw_rt->rt_rate = 24;
8076 break;
8077 case IPW_TX_RATE_18MB:
8078 ipw_rt->rt_rate = 36;
8079 break;
8080 case IPW_TX_RATE_24MB:
8081 ipw_rt->rt_rate = 48;
8082 break;
8083 case IPW_TX_RATE_36MB:
8084 ipw_rt->rt_rate = 72;
8085 break;
8086 case IPW_TX_RATE_48MB:
8087 ipw_rt->rt_rate = 96;
8088 break;
8089 case IPW_TX_RATE_54MB:
8090 ipw_rt->rt_rate = 108;
8091 break;
8092 default:
8093 ipw_rt->rt_rate = 0;
8094 break;
8095 }
8096
8097 /* antenna number */
8098 ipw_rt->rt_antenna = (phy_flags & 3);
8099
8100 /* set the preamble flag if we have it */
8101 if (phy_flags & (1 << 6))
8102 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8103
8104 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8105
8106 if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8107 dev->stats.rx_errors++;
8108 dev_kfree_skb_any(skb);
8109 }
8110 }
8111 #endif
8112
8113 static int is_network_packet(struct ipw_priv *priv,
8114 struct libipw_hdr_4addr *header)
8115 {
8116 /* Filter incoming packets to determine if they are targeted toward
8117 * this network, discarding packets coming from ourselves */
8118 switch (priv->ieee->iw_mode) {
8119 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8120 /* packets from our adapter are dropped (echo) */
8121 if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
8122 return 0;
8123
8124 /* {broad,multi}cast packets to our BSSID go through */
8125 if (is_multicast_ether_addr(header->addr1))
8126 return ether_addr_equal(header->addr3, priv->bssid);
8127
8128 /* packets to our adapter go through */
8129 return ether_addr_equal(header->addr1,
8130 priv->net_dev->dev_addr);
8131
8132 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8133 /* packets from our adapter are dropped (echo) */
8134 if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
8135 return 0;
8136
8137 /* {broad,multi}cast packets to our BSS go through */
8138 if (is_multicast_ether_addr(header->addr1))
8139 return ether_addr_equal(header->addr2, priv->bssid);
8140
8141 /* packets to our adapter go through */
8142 return ether_addr_equal(header->addr1,
8143 priv->net_dev->dev_addr);
8144 }
8145
8146 return 1;
8147 }
8148
8149 #define IPW_PACKET_RETRY_TIME HZ
8150
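/*
 * Detect retransmitted frames by comparing the sequence and fragment
 * numbers against the last frame seen from the same peer: a per-MAC hash
 * table is used in IBSS mode, a single set of counters in infrastructure
 * mode.
 */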
8151 static int is_duplicate_packet(struct ipw_priv *priv,
8152 struct libipw_hdr_4addr *header)
8153 {
8154 u16 sc = le16_to_cpu(header->seq_ctl);
8155 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8156 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8157 u16 *last_seq, *last_frag;
8158 unsigned long *last_time;
8159
8160 switch (priv->ieee->iw_mode) {
8161 case IW_MODE_ADHOC:
8162 {
8163 struct list_head *p;
8164 struct ipw_ibss_seq *entry = NULL;
8165 u8 *mac = header->addr2;
8166 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8167
8168 list_for_each(p, &priv->ibss_mac_hash[index]) {
8169 entry =
8170 list_entry(p, struct ipw_ibss_seq, list);
8171 if (ether_addr_equal(entry->mac, mac))
8172 break;
8173 }
8174 if (p == &priv->ibss_mac_hash[index]) {
8175 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8176 if (!entry) {
8177 IPW_ERROR
8178 ("Cannot malloc new mac entry\n");
8179 return 0;
8180 }
8181 memcpy(entry->mac, mac, ETH_ALEN);
8182 entry->seq_num = seq;
8183 entry->frag_num = frag;
8184 entry->packet_time = jiffies;
8185 list_add(&entry->list,
8186 &priv->ibss_mac_hash[index]);
8187 return 0;
8188 }
8189 last_seq = &entry->seq_num;
8190 last_frag = &entry->frag_num;
8191 last_time = &entry->packet_time;
8192 break;
8193 }
8194 case IW_MODE_INFRA:
8195 last_seq = &priv->last_seq_num;
8196 last_frag = &priv->last_frag_num;
8197 last_time = &priv->last_packet_time;
8198 break;
8199 default:
8200 return 0;
8201 }
8202 if ((*last_seq == seq) &&
8203 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8204 if (*last_frag == frag)
8205 goto drop;
8206 if (*last_frag + 1 != frag)
8207 /* out-of-order fragment */
8208 goto drop;
8209 } else
8210 *last_seq = seq;
8211
8212 *last_frag = frag;
8213 *last_time = jiffies;
8214 return 0;
8215
8216 drop:
8217 /* This check is commented out because the card has been observed to
8218 * receive duplicate packets without the FCTL_RETRY bit set in IBSS
8219 * mode with fragmentation enabled.
8220 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8221 return 1;
8222 }
8223
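/*
 * Process a received management frame: pass it to libipw, learn IBSS
 * stations from beacons and probe responses, and optionally forward the
 * raw frame (with the rx stats prepended) when CFG_NET_STATS is enabled.
 */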
8224 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8225 struct ipw_rx_mem_buffer *rxb,
8226 struct libipw_rx_stats *stats)
8227 {
8228 struct sk_buff *skb = rxb->skb;
8229 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8230 struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8231 (skb->data + IPW_RX_FRAME_SIZE);
8232
8233 libipw_rx_mgt(priv->ieee, header, stats);
8234
8235 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8236 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8237 IEEE80211_STYPE_PROBE_RESP) ||
8238 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8239 IEEE80211_STYPE_BEACON))) {
8240 if (ether_addr_equal(header->addr3, priv->bssid))
8241 ipw_add_station(priv, header->addr2);
8242 }
8243
8244 if (priv->config & CFG_NET_STATS) {
8245 IPW_DEBUG_HC("sending stat packet\n");
8246
8247 /* Set the size of the skb to the size of the full
8248 * ipw header and 802.11 frame */
8249 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8250 IPW_RX_FRAME_SIZE);
8251
8252 /* Advance past the ipw packet header to the 802.11 frame */
8253 skb_pull(skb, IPW_RX_FRAME_SIZE);
8254
8255 /* Push the libipw_rx_stats before the 802.11 frame */
8256 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8257
8258 skb->dev = priv->ieee->dev;
8259
8260 /* Point raw at the libipw_stats */
8261 skb_reset_mac_header(skb);
8262
8263 skb->pkt_type = PACKET_OTHERHOST;
8264 skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8265 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8266 netif_rx(skb);
8267 rxb->skb = NULL;
8268 }
8269 }
8270
8271 /*
8272 * Main entry function for receiving a packet with 802.11 headers. This
8273 * should be called whenever the FW has notified us that there is a new
8274 * skb in the receive queue.
8275 */
8276 static void ipw_rx(struct ipw_priv *priv)
8277 {
8278 struct ipw_rx_mem_buffer *rxb;
8279 struct ipw_rx_packet *pkt;
8280 struct libipw_hdr_4addr *header;
8281 u32 r, w, i;
8282 u8 network_packet;
8283 u8 fill_rx = 0;
8284
8285 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8286 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8287 i = priv->rxq->read;
8288
8289 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8290 fill_rx = 1;
8291
8292 while (i != r) {
8293 rxb = priv->rxq->queue[i];
8294 if (unlikely(rxb == NULL)) {
8295 printk(KERN_CRIT "Queue not allocated!\n");
8296 break;
8297 }
8298 priv->rxq->queue[i] = NULL;
8299
8300 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8301 IPW_RX_BUF_SIZE,
8302 PCI_DMA_FROMDEVICE);
8303
8304 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8305 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8306 pkt->header.message_type,
8307 pkt->header.rx_seq_num, pkt->header.control_bits);
8308
8309 switch (pkt->header.message_type) {
8310 case RX_FRAME_TYPE: /* 802.11 frame */ {
8311 struct libipw_rx_stats stats = {
8312 .rssi = pkt->u.frame.rssi_dbm -
8313 IPW_RSSI_TO_DBM,
8314 .signal =
8315 pkt->u.frame.rssi_dbm -
8316 IPW_RSSI_TO_DBM + 0x100,
8317 .noise =
8318 le16_to_cpu(pkt->u.frame.noise),
8319 .rate = pkt->u.frame.rate,
8320 .mac_time = jiffies,
8321 .received_channel =
8322 pkt->u.frame.received_channel,
8323 .freq =
8324 (pkt->u.frame.
8325 control & (1 << 0)) ?
8326 LIBIPW_24GHZ_BAND :
8327 LIBIPW_52GHZ_BAND,
8328 .len = le16_to_cpu(pkt->u.frame.length),
8329 };
8330
8331 if (stats.rssi != 0)
8332 stats.mask |= LIBIPW_STATMASK_RSSI;
8333 if (stats.signal != 0)
8334 stats.mask |= LIBIPW_STATMASK_SIGNAL;
8335 if (stats.noise != 0)
8336 stats.mask |= LIBIPW_STATMASK_NOISE;
8337 if (stats.rate != 0)
8338 stats.mask |= LIBIPW_STATMASK_RATE;
8339
8340 priv->rx_packets++;
8341
8342 #ifdef CONFIG_IPW2200_PROMISCUOUS
8343 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8344 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8345 #endif
8346
8347 #ifdef CONFIG_IPW2200_MONITOR
8348 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8349 #ifdef CONFIG_IPW2200_RADIOTAP
8350
8351 ipw_handle_data_packet_monitor(priv,
8352 rxb,
8353 &stats);
8354 #else
8355 ipw_handle_data_packet(priv, rxb,
8356 &stats);
8357 #endif
8358 break;
8359 }
8360 #endif
8361
8362 header =
8363 (struct libipw_hdr_4addr *)(rxb->skb->
8364 data +
8365 IPW_RX_FRAME_SIZE);
8366 /* TODO: Check Ad-Hoc dest/source and make sure
8367 * that we are actually parsing these packets
8368 * correctly -- we should probably use the
8369 * frame control of the packet and disregard
8370 * the current iw_mode */
8371
8372 network_packet =
8373 is_network_packet(priv, header);
8374 if (network_packet && priv->assoc_network) {
8375 priv->assoc_network->stats.rssi =
8376 stats.rssi;
8377 priv->exp_avg_rssi =
8378 exponential_average(priv->exp_avg_rssi,
8379 stats.rssi, DEPTH_RSSI);
8380 }
8381
8382 IPW_DEBUG_RX("Frame: len=%u\n",
8383 le16_to_cpu(pkt->u.frame.length));
8384
8385 if (le16_to_cpu(pkt->u.frame.length) <
8386 libipw_get_hdrlen(le16_to_cpu(
8387 header->frame_ctl))) {
8388 IPW_DEBUG_DROP
8389 ("Received packet is too small. "
8390 "Dropping.\n");
8391 priv->net_dev->stats.rx_errors++;
8392 priv->wstats.discard.misc++;
8393 break;
8394 }
8395
8396 switch (WLAN_FC_GET_TYPE
8397 (le16_to_cpu(header->frame_ctl))) {
8398
8399 case IEEE80211_FTYPE_MGMT:
8400 ipw_handle_mgmt_packet(priv, rxb,
8401 &stats);
8402 break;
8403
8404 case IEEE80211_FTYPE_CTL:
8405 break;
8406
8407 case IEEE80211_FTYPE_DATA:
8408 if (unlikely(!network_packet ||
8409 is_duplicate_packet(priv,
8410 header)))
8411 {
8412 IPW_DEBUG_DROP("Dropping: "
8413 "%pM, "
8414 "%pM, "
8415 "%pM\n",
8416 header->addr1,
8417 header->addr2,
8418 header->addr3);
8419 break;
8420 }
8421
8422 ipw_handle_data_packet(priv, rxb,
8423 &stats);
8424
8425 break;
8426 }
8427 break;
8428 }
8429
8430 case RX_HOST_NOTIFICATION_TYPE:{
8431 IPW_DEBUG_RX
8432 ("Notification: subtype=%02X flags=%02X size=%d\n",
8433 pkt->u.notification.subtype,
8434 pkt->u.notification.flags,
8435 le16_to_cpu(pkt->u.notification.size));
8436 ipw_rx_notification(priv, &pkt->u.notification);
8437 break;
8438 }
8439
8440 default:
8441 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8442 pkt->header.message_type);
8443 break;
8444 }
8445
8446 /* For now we just don't re-use anything. We can tweak this
8447 * later to try and re-use notification packets and SKBs that
8448 * fail to Rx correctly */
8449 if (rxb->skb != NULL) {
8450 dev_kfree_skb_any(rxb->skb);
8451 rxb->skb = NULL;
8452 }
8453
8454 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8455 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8456 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8457
8458 i = (i + 1) % RX_QUEUE_SIZE;
8459
8460 /* If there are a lot of unused frames, restock the Rx queue
8461 * so the ucode won't assert */
8462 if (fill_rx) {
8463 priv->rxq->read = i;
8464 ipw_rx_queue_replenish(priv);
8465 }
8466 }
8467
8468 /* Backtrack one entry */
8469 priv->rxq->read = i;
8470 ipw_rx_queue_restock(priv);
8471 }
8472
8473 #define DEFAULT_RTS_THRESHOLD 2304U
8474 #define MIN_RTS_THRESHOLD 1U
8475 #define MAX_RTS_THRESHOLD 2304U
8476 #define DEFAULT_BEACON_INTERVAL 100U
8477 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8478 #define DEFAULT_LONG_RETRY_LIMIT 4U
8479
8480 /**
8481 * ipw_sw_reset
8482 * @option: options to control different reset behaviour
8483 * 0 = reset everything except the 'disable' module_param
8484 * 1 = reset everything and print out driver info (for probe only)
8485 * 2 = reset everything
8486 */
8487 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8488 {
8489 int band, modulation;
8490 int old_mode = priv->ieee->iw_mode;
8491
8492 /* Initialize module parameter values here */
8493 priv->config = 0;
8494
8495 /* We default to disabling the LED code as right now it causes
8496 * too many systems to lock up... */
8497 if (!led_support)
8498 priv->config |= CFG_NO_LED;
8499
8500 if (associate)
8501 priv->config |= CFG_ASSOCIATE;
8502 else
8503 IPW_DEBUG_INFO("Auto associate disabled.\n");
8504
8505 if (auto_create)
8506 priv->config |= CFG_ADHOC_CREATE;
8507 else
8508 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8509
8510 priv->config &= ~CFG_STATIC_ESSID;
8511 priv->essid_len = 0;
8512 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8513
8514 if (disable && option) {
8515 priv->status |= STATUS_RF_KILL_SW;
8516 IPW_DEBUG_INFO("Radio disabled.\n");
8517 }
8518
8519 if (default_channel != 0) {
8520 priv->config |= CFG_STATIC_CHANNEL;
8521 priv->channel = default_channel;
8522 IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8523 /* TODO: Validate that provided channel is in range */
8524 }
8525 #ifdef CONFIG_IPW2200_QOS
8526 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8527 burst_duration_CCK, burst_duration_OFDM);
8528 #endif /* CONFIG_IPW2200_QOS */
8529
8530 switch (network_mode) {
8531 case 1:
8532 priv->ieee->iw_mode = IW_MODE_ADHOC;
8533 priv->net_dev->type = ARPHRD_ETHER;
8534
8535 break;
8536 #ifdef CONFIG_IPW2200_MONITOR
8537 case 2:
8538 priv->ieee->iw_mode = IW_MODE_MONITOR;
8539 #ifdef CONFIG_IPW2200_RADIOTAP
8540 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8541 #else
8542 priv->net_dev->type = ARPHRD_IEEE80211;
8543 #endif
8544 break;
8545 #endif
8546 default:
8547 case 0:
8548 priv->net_dev->type = ARPHRD_ETHER;
8549 priv->ieee->iw_mode = IW_MODE_INFRA;
8550 break;
8551 }
8552
8553 if (hwcrypto) {
8554 priv->ieee->host_encrypt = 0;
8555 priv->ieee->host_encrypt_msdu = 0;
8556 priv->ieee->host_decrypt = 0;
8557 priv->ieee->host_mc_decrypt = 0;
8558 }
8559 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8560
8561 /* The IPW2200/2915 is able to do hardware fragmentation. */
8562 priv->ieee->host_open_frag = 0;
8563
8564 if ((priv->pci_dev->device == 0x4223) ||
8565 (priv->pci_dev->device == 0x4224)) {
8566 if (option == 1)
8567 printk(KERN_INFO DRV_NAME
8568 ": Detected Intel PRO/Wireless 2915ABG Network "
8569 "Connection\n");
8570 priv->ieee->abg_true = 1;
8571 band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8572 modulation = LIBIPW_OFDM_MODULATION |
8573 LIBIPW_CCK_MODULATION;
8574 priv->adapter = IPW_2915ABG;
8575 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8576 } else {
8577 if (option == 1)
8578 printk(KERN_INFO DRV_NAME
8579 ": Detected Intel PRO/Wireless 2200BG Network "
8580 "Connection\n");
8581
8582 priv->ieee->abg_true = 0;
8583 band = LIBIPW_24GHZ_BAND;
8584 modulation = LIBIPW_OFDM_MODULATION |
8585 LIBIPW_CCK_MODULATION;
8586 priv->adapter = IPW_2200BG;
8587 priv->ieee->mode = IEEE_G | IEEE_B;
8588 }
8589
8590 priv->ieee->freq_band = band;
8591 priv->ieee->modulation = modulation;
8592
8593 priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8594
8595 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8596 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8597
8598 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8599 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8600 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8601
8602 /* If power management is turned on, default to AC mode */
8603 priv->power_mode = IPW_POWER_AC;
8604 priv->tx_power = IPW_TX_POWER_DEFAULT;
8605
8606 return old_mode == priv->ieee->iw_mode;
8607 }
8608
8609 /*
8610 * This file defines the Wireless Extension handlers. It does not
8611 * define any methods of hardware manipulation and relies on the
8612 * functions defined in ipw_main to provide the HW interaction.
8613 *
8614 * The exception to this is the use of the ipw_get_ordinal()
8615 * function used to poll the hardware vs. making unnecessary calls.
8616 *
8617 */
8618
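/*
 * Set a static channel, or clear the static-channel configuration when
 * @channel is 0. In monitor mode any scan in progress is aborted;
 * otherwise a [re]association is forced so the new channel takes effect.
 */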
8619 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8620 {
8621 if (channel == 0) {
8622 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8623 priv->config &= ~CFG_STATIC_CHANNEL;
8624 IPW_DEBUG_ASSOC("Attempting to associate with new "
8625 "parameters.\n");
8626 ipw_associate(priv);
8627 return 0;
8628 }
8629
8630 priv->config |= CFG_STATIC_CHANNEL;
8631
8632 if (priv->channel == channel) {
8633 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8634 channel);
8635 return 0;
8636 }
8637
8638 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8639 priv->channel = channel;
8640
8641 #ifdef CONFIG_IPW2200_MONITOR
8642 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8643 int i;
8644 if (priv->status & STATUS_SCANNING) {
8645 IPW_DEBUG_SCAN("Scan abort triggered due to "
8646 "channel change.\n");
8647 ipw_abort_scan(priv);
8648 }
8649
8650 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8651 udelay(10);
8652
8653 if (priv->status & STATUS_SCANNING)
8654 IPW_DEBUG_SCAN("Still scanning...\n");
8655 else
8656 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8657 1000 - i);
8658
8659 return 0;
8660 }
8661 #endif /* CONFIG_IPW2200_MONITOR */
8662
8663 /* Network configuration changed -- force [re]association */
8664 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8665 if (!ipw_disassociate(priv))
8666 ipw_associate(priv);
8667
8668 return 0;
8669 }
8670
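/*
 * WX handler for SIOCSIWFREQ: accept either a channel number or a
 * frequency, validate it against the geography/regulatory table, and pass
 * the result to ipw_set_channel().
 */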
8671 static int ipw_wx_set_freq(struct net_device *dev,
8672 struct iw_request_info *info,
8673 union iwreq_data *wrqu, char *extra)
8674 {
8675 struct ipw_priv *priv = libipw_priv(dev);
8676 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8677 struct iw_freq *fwrq = &wrqu->freq;
8678 int ret = 0, i;
8679 u8 channel, flags;
8680 int band;
8681
8682 if (fwrq->m == 0) {
8683 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8684 mutex_lock(&priv->mutex);
8685 ret = ipw_set_channel(priv, 0);
8686 mutex_unlock(&priv->mutex);
8687 return ret;
8688 }
8689 /* if setting by freq convert to channel */
8690 if (fwrq->e == 1) {
8691 channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8692 if (channel == 0)
8693 return -EINVAL;
8694 } else
8695 channel = fwrq->m;
8696
8697 if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8698 return -EINVAL;
8699
8700 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8701 i = libipw_channel_to_index(priv->ieee, channel);
8702 if (i == -1)
8703 return -EINVAL;
8704
8705 flags = (band == LIBIPW_24GHZ_BAND) ?
8706 geo->bg[i].flags : geo->a[i].flags;
8707 if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8708 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8709 return -EINVAL;
8710 }
8711 }
8712
8713 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8714 mutex_lock(&priv->mutex);
8715 ret = ipw_set_channel(priv, channel);
8716 mutex_unlock(&priv->mutex);
8717 return ret;
8718 }
8719
8720 static int ipw_wx_get_freq(struct net_device *dev,
8721 struct iw_request_info *info,
8722 union iwreq_data *wrqu, char *extra)
8723 {
8724 struct ipw_priv *priv = libipw_priv(dev);
8725
8726 wrqu->freq.e = 0;
8727
8728 /* If we are associated, trying to associate, or have a statically
8729 * configured CHANNEL then return that; otherwise return ANY */
8730 mutex_lock(&priv->mutex);
8731 if (priv->config & CFG_STATIC_CHANNEL ||
8732 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8733 int i;
8734
8735 i = libipw_channel_to_index(priv->ieee, priv->channel);
8736 BUG_ON(i == -1);
8737 wrqu->freq.e = 1;
8738
8739 switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8740 case LIBIPW_52GHZ_BAND:
8741 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8742 break;
8743
8744 case LIBIPW_24GHZ_BAND:
8745 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8746 break;
8747
8748 default:
8749 BUG();
8750 }
8751 } else
8752 wrqu->freq.m = 0;
8753
8754 mutex_unlock(&priv->mutex);
8755 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8756 return 0;
8757 }
8758
8759 static int ipw_wx_set_mode(struct net_device *dev,
8760 struct iw_request_info *info,
8761 union iwreq_data *wrqu, char *extra)
8762 {
8763 struct ipw_priv *priv = libipw_priv(dev);
8764 int err = 0;
8765
8766 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8767
8768 switch (wrqu->mode) {
8769 #ifdef CONFIG_IPW2200_MONITOR
8770 case IW_MODE_MONITOR:
8771 #endif
8772 case IW_MODE_ADHOC:
8773 case IW_MODE_INFRA:
8774 break;
8775 case IW_MODE_AUTO:
8776 wrqu->mode = IW_MODE_INFRA;
8777 break;
8778 default:
8779 return -EINVAL;
8780 }
8781 if (wrqu->mode == priv->ieee->iw_mode)
8782 return 0;
8783
8784 mutex_lock(&priv->mutex);
8785
8786 ipw_sw_reset(priv, 0);
8787
8788 #ifdef CONFIG_IPW2200_MONITOR
8789 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8790 priv->net_dev->type = ARPHRD_ETHER;
8791
8792 if (wrqu->mode == IW_MODE_MONITOR)
8793 #ifdef CONFIG_IPW2200_RADIOTAP
8794 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8795 #else
8796 priv->net_dev->type = ARPHRD_IEEE80211;
8797 #endif
8798 #endif /* CONFIG_IPW2200_MONITOR */
8799
8800 /* Free the existing firmware and reset the fw_loaded
8801 * flag so ipw_load() will bring in the new firmware */
8802 free_firmware();
8803
8804 priv->ieee->iw_mode = wrqu->mode;
8805
8806 schedule_work(&priv->adapter_restart);
8807 mutex_unlock(&priv->mutex);
8808 return err;
8809 }
8810
8811 static int ipw_wx_get_mode(struct net_device *dev,
8812 struct iw_request_info *info,
8813 union iwreq_data *wrqu, char *extra)
8814 {
8815 struct ipw_priv *priv = libipw_priv(dev);
8816 mutex_lock(&priv->mutex);
8817 wrqu->mode = priv->ieee->iw_mode;
8818 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8819 mutex_unlock(&priv->mutex);
8820 return 0;
8821 }
8822
8823 /* Values are in microsecond */
8824 static const s32 timeout_duration[] = {
8825 350000,
8826 250000,
8827 75000,
8828 37000,
8829 25000,
8830 };
8831
8832 static const s32 period_duration[] = {
8833 400000,
8834 700000,
8835 1000000,
8836 1000000,
8837 1000000
8838 };
8839
8840 static int ipw_wx_get_range(struct net_device *dev,
8841 struct iw_request_info *info,
8842 union iwreq_data *wrqu, char *extra)
8843 {
8844 struct ipw_priv *priv = libipw_priv(dev);
8845 struct iw_range *range = (struct iw_range *)extra;
8846 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8847 int i = 0, j;
8848
8849 wrqu->data.length = sizeof(*range);
8850 memset(range, 0, sizeof(*range));
8851
8852 /* 54Mbs == ~27 Mb/s real (802.11g) */
8853 range->throughput = 27 * 1000 * 1000;
8854
8855 range->max_qual.qual = 100;
8856 /* TODO: Find real max RSSI and stick here */
8857 range->max_qual.level = 0;
8858 range->max_qual.noise = 0;
8859 range->max_qual.updated = 7; /* Updated all three */
8860
8861 range->avg_qual.qual = 70;
8862 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8863 range->avg_qual.level = 0; /* FIXME to real average level */
8864 range->avg_qual.noise = 0;
8865 range->avg_qual.updated = 7; /* Updated all three */
8866 mutex_lock(&priv->mutex);
8867 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8868
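/* supported_rates[] entries are in 802.11 units of 500 kb/s with the top
 * bit marking a basic rate, hence the & 0x7F and * 500000 conversion below. */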
8869 for (i = 0; i < range->num_bitrates; i++)
8870 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8871 500000;
8872
8873 range->max_rts = DEFAULT_RTS_THRESHOLD;
8874 range->min_frag = MIN_FRAG_THRESHOLD;
8875 range->max_frag = MAX_FRAG_THRESHOLD;
8876
8877 range->encoding_size[0] = 5;
8878 range->encoding_size[1] = 13;
8879 range->num_encoding_sizes = 2;
8880 range->max_encoding_tokens = WEP_KEYS;
8881
8882 /* Set the Wireless Extension versions */
8883 range->we_version_compiled = WIRELESS_EXT;
8884 range->we_version_source = 18;
8885
8886 i = 0;
8887 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8888 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8889 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8890 (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8891 continue;
8892
8893 range->freq[i].i = geo->bg[j].channel;
8894 range->freq[i].m = geo->bg[j].freq * 100000;
8895 range->freq[i].e = 1;
8896 i++;
8897 }
8898 }
8899
8900 if (priv->ieee->mode & IEEE_A) {
8901 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8902 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8903 (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8904 continue;
8905
8906 range->freq[i].i = geo->a[j].channel;
8907 range->freq[i].m = geo->a[j].freq * 100000;
8908 range->freq[i].e = 1;
8909 i++;
8910 }
8911 }
8912
8913 range->num_channels = i;
8914 range->num_frequency = i;
8915
8916 mutex_unlock(&priv->mutex);
8917
8918 /* Event capability (kernel + driver) */
8919 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8920 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8921 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8922 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8923 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8924
8925 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8926 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8927
8928 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8929
8930 IPW_DEBUG_WX("GET Range\n");
8931 return 0;
8932 }
8933
8934 static int ipw_wx_set_wap(struct net_device *dev,
8935 struct iw_request_info *info,
8936 union iwreq_data *wrqu, char *extra)
8937 {
8938 struct ipw_priv *priv = libipw_priv(dev);
8939
8940 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8941 return -EINVAL;
8942 mutex_lock(&priv->mutex);
8943 if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
8944 is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
8945 /* we disable mandatory BSSID association */
8946 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8947 priv->config &= ~CFG_STATIC_BSSID;
8948 IPW_DEBUG_ASSOC("Attempting to associate with new "
8949 "parameters.\n");
8950 ipw_associate(priv);
8951 mutex_unlock(&priv->mutex);
8952 return 0;
8953 }
8954
8955 priv->config |= CFG_STATIC_BSSID;
8956 if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
8957 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8958 mutex_unlock(&priv->mutex);
8959 return 0;
8960 }
8961
8962 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8963 wrqu->ap_addr.sa_data);
8964
8965 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8966
8967 /* Network configuration changed -- force [re]association */
8968 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8969 if (!ipw_disassociate(priv))
8970 ipw_associate(priv);
8971
8972 mutex_unlock(&priv->mutex);
8973 return 0;
8974 }
8975
8976 static int ipw_wx_get_wap(struct net_device *dev,
8977 struct iw_request_info *info,
8978 union iwreq_data *wrqu, char *extra)
8979 {
8980 struct ipw_priv *priv = libipw_priv(dev);
8981
8982 /* If we are associated, trying to associate, or have a statically
8983 * configured BSSID then return that; otherwise return ANY */
8984 mutex_lock(&priv->mutex);
8985 if (priv->config & CFG_STATIC_BSSID ||
8986 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8987 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8988 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8989 } else
8990 eth_zero_addr(wrqu->ap_addr.sa_data);
8991
8992 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
8993 wrqu->ap_addr.sa_data);
8994 mutex_unlock(&priv->mutex);
8995 return 0;
8996 }
8997
8998 static int ipw_wx_set_essid(struct net_device *dev,
8999 struct iw_request_info *info,
9000 union iwreq_data *wrqu, char *extra)
9001 {
9002 struct ipw_priv *priv = libipw_priv(dev);
9003 int length;
9004
9005 mutex_lock(&priv->mutex);
9006
9007 if (!wrqu->essid.flags)
9008 {
9009 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9010 ipw_disassociate(priv);
9011 priv->config &= ~CFG_STATIC_ESSID;
9012 ipw_associate(priv);
9013 mutex_unlock(&priv->mutex);
9014 return 0;
9015 }
9016
9017 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9018
9019 priv->config |= CFG_STATIC_ESSID;
9020
9021 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9022 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9023 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9024 mutex_unlock(&priv->mutex);
9025 return 0;
9026 }
9027
9028 IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
9029
9030 priv->essid_len = length;
9031 memcpy(priv->essid, extra, priv->essid_len);
9032
9033 /* Network configuration changed -- force [re]association */
9034 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9035 if (!ipw_disassociate(priv))
9036 ipw_associate(priv);
9037
9038 mutex_unlock(&priv->mutex);
9039 return 0;
9040 }
9041
9042 static int ipw_wx_get_essid(struct net_device *dev,
9043 struct iw_request_info *info,
9044 union iwreq_data *wrqu, char *extra)
9045 {
9046 struct ipw_priv *priv = libipw_priv(dev);
9047
9048 /* If we are associated, trying to associate, or have a statically
9049 * configured ESSID then return that; otherwise return ANY */
9050 mutex_lock(&priv->mutex);
9051 if (priv->config & CFG_STATIC_ESSID ||
9052 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9053 IPW_DEBUG_WX("Getting essid: '%*pE'\n",
9054 priv->essid_len, priv->essid);
9055 memcpy(extra, priv->essid, priv->essid_len);
9056 wrqu->essid.length = priv->essid_len;
9057 wrqu->essid.flags = 1; /* active */
9058 } else {
9059 IPW_DEBUG_WX("Getting essid: ANY\n");
9060 wrqu->essid.length = 0;
9061 wrqu->essid.flags = 0; /* any */
9062 }
9063 mutex_unlock(&priv->mutex);
9064 return 0;
9065 }
9066
9067 static int ipw_wx_set_nick(struct net_device *dev,
9068 struct iw_request_info *info,
9069 union iwreq_data *wrqu, char *extra)
9070 {
9071 struct ipw_priv *priv = libipw_priv(dev);
9072
9073 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9074 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9075 return -E2BIG;
9076 mutex_lock(&priv->mutex);
9077 wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9078 memset(priv->nick, 0, sizeof(priv->nick));
9079 memcpy(priv->nick, extra, wrqu->data.length);
9080 IPW_DEBUG_TRACE("<<\n");
9081 mutex_unlock(&priv->mutex);
9082 return 0;
9083
9084 }
9085
9086 static int ipw_wx_get_nick(struct net_device *dev,
9087 struct iw_request_info *info,
9088 union iwreq_data *wrqu, char *extra)
9089 {
9090 struct ipw_priv *priv = libipw_priv(dev);
9091 IPW_DEBUG_WX("Getting nick\n");
9092 mutex_lock(&priv->mutex);
9093 wrqu->data.length = strlen(priv->nick);
9094 memcpy(extra, priv->nick, wrqu->data.length);
9095 wrqu->data.flags = 1; /* active */
9096 mutex_unlock(&priv->mutex);
9097 return 0;
9098 }
9099
9100 static int ipw_wx_set_sens(struct net_device *dev,
9101 struct iw_request_info *info,
9102 union iwreq_data *wrqu, char *extra)
9103 {
9104 struct ipw_priv *priv = libipw_priv(dev);
9105 int err = 0;
9106
9107 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9108 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9109 mutex_lock(&priv->mutex);
9110
9111 if (wrqu->sens.fixed == 0)
9112 {
9113 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9114 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9115 goto out;
9116 }
9117 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9118 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9119 err = -EINVAL;
9120 goto out;
9121 }
9122
9123 priv->roaming_threshold = wrqu->sens.value;
9124 priv->disassociate_threshold = 3*wrqu->sens.value;
9125 out:
9126 mutex_unlock(&priv->mutex);
9127 return err;
9128 }
9129
9130 static int ipw_wx_get_sens(struct net_device *dev,
9131 struct iw_request_info *info,
9132 union iwreq_data *wrqu, char *extra)
9133 {
9134 struct ipw_priv *priv = libipw_priv(dev);
9135 mutex_lock(&priv->mutex);
9136 wrqu->sens.fixed = 1;
9137 wrqu->sens.value = priv->roaming_threshold;
9138 mutex_unlock(&priv->mutex);
9139
9140 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9141 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9142
9143 return 0;
9144 }
9145
9146 static int ipw_wx_set_rate(struct net_device *dev,
9147 struct iw_request_info *info,
9148 union iwreq_data *wrqu, char *extra)
9149 {
9150 /* TODO: We should use semaphores or locks for access to priv */
9151 struct ipw_priv *priv = libipw_priv(dev);
9152 u32 target_rate = wrqu->bitrate.value;
9153 u32 fixed, mask;
9154
9155 /* value = -1, fixed = 0 means auto only, so use all rates offered by the AP */
9156 /* value = X, fixed = 1 means only rate X */
9157 /* value = X, fixed = 0 means all rates up to and including X */
9158
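/* Each block below ORs rate X into the mask; when 'fixed' is clear we fall
 * through the lower rates first, so everything up to the requested rate is
 * accumulated before the goto apply. */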
9159 if (target_rate == -1) {
9160 fixed = 0;
9161 mask = LIBIPW_DEFAULT_RATES_MASK;
9162 /* Now we should reassociate */
9163 goto apply;
9164 }
9165
9166 mask = 0;
9167 fixed = wrqu->bitrate.fixed;
9168
9169 if (target_rate == 1000000 || !fixed)
9170 mask |= LIBIPW_CCK_RATE_1MB_MASK;
9171 if (target_rate == 1000000)
9172 goto apply;
9173
9174 if (target_rate == 2000000 || !fixed)
9175 mask |= LIBIPW_CCK_RATE_2MB_MASK;
9176 if (target_rate == 2000000)
9177 goto apply;
9178
9179 if (target_rate == 5500000 || !fixed)
9180 mask |= LIBIPW_CCK_RATE_5MB_MASK;
9181 if (target_rate == 5500000)
9182 goto apply;
9183
9184 if (target_rate == 6000000 || !fixed)
9185 mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9186 if (target_rate == 6000000)
9187 goto apply;
9188
9189 if (target_rate == 9000000 || !fixed)
9190 mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9191 if (target_rate == 9000000)
9192 goto apply;
9193
9194 if (target_rate == 11000000 || !fixed)
9195 mask |= LIBIPW_CCK_RATE_11MB_MASK;
9196 if (target_rate == 11000000)
9197 goto apply;
9198
9199 if (target_rate == 12000000 || !fixed)
9200 mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9201 if (target_rate == 12000000)
9202 goto apply;
9203
9204 if (target_rate == 18000000 || !fixed)
9205 mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9206 if (target_rate == 18000000)
9207 goto apply;
9208
9209 if (target_rate == 24000000 || !fixed)
9210 mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9211 if (target_rate == 24000000)
9212 goto apply;
9213
9214 if (target_rate == 36000000 || !fixed)
9215 mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9216 if (target_rate == 36000000)
9217 goto apply;
9218
9219 if (target_rate == 48000000 || !fixed)
9220 mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9221 if (target_rate == 48000000)
9222 goto apply;
9223
9224 if (target_rate == 54000000 || !fixed)
9225 mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9226 if (target_rate == 54000000)
9227 goto apply;
9228
9229 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9230 return -EINVAL;
9231
9232 apply:
9233 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9234 mask, fixed ? "fixed" : "sub-rates");
9235 mutex_lock(&priv->mutex);
9236 if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9237 priv->config &= ~CFG_FIXED_RATE;
9238 ipw_set_fixed_rate(priv, priv->ieee->mode);
9239 } else
9240 priv->config |= CFG_FIXED_RATE;
9241
9242 if (priv->rates_mask == mask) {
9243 IPW_DEBUG_WX("Mask set to current mask.\n");
9244 mutex_unlock(&priv->mutex);
9245 return 0;
9246 }
9247
9248 priv->rates_mask = mask;
9249
9250 /* Network configuration changed -- force [re]association */
9251 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9252 if (!ipw_disassociate(priv))
9253 ipw_associate(priv);
9254
9255 mutex_unlock(&priv->mutex);
9256 return 0;
9257 }
9258
9259 static int ipw_wx_get_rate(struct net_device *dev,
9260 struct iw_request_info *info,
9261 union iwreq_data *wrqu, char *extra)
9262 {
9263 struct ipw_priv *priv = libipw_priv(dev);
9264 mutex_lock(&priv->mutex);
9265 wrqu->bitrate.value = priv->last_rate;
9266 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9267 mutex_unlock(&priv->mutex);
9268 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9269 return 0;
9270 }
9271
9272 static int ipw_wx_set_rts(struct net_device *dev,
9273 struct iw_request_info *info,
9274 union iwreq_data *wrqu, char *extra)
9275 {
9276 struct ipw_priv *priv = libipw_priv(dev);
9277 mutex_lock(&priv->mutex);
9278 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9279 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9280 else {
9281 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9282 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9283 mutex_unlock(&priv->mutex);
9284 return -EINVAL;
9285 }
9286 priv->rts_threshold = wrqu->rts.value;
9287 }
9288
9289 ipw_send_rts_threshold(priv, priv->rts_threshold);
9290 mutex_unlock(&priv->mutex);
9291 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9292 return 0;
9293 }
9294
9295 static int ipw_wx_get_rts(struct net_device *dev,
9296 struct iw_request_info *info,
9297 union iwreq_data *wrqu, char *extra)
9298 {
9299 struct ipw_priv *priv = libipw_priv(dev);
9300 mutex_lock(&priv->mutex);
9301 wrqu->rts.value = priv->rts_threshold;
9302 wrqu->rts.fixed = 0; /* no auto select */
9303 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9304 mutex_unlock(&priv->mutex);
9305 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9306 return 0;
9307 }
9308
9309 static int ipw_wx_set_txpow(struct net_device *dev,
9310 struct iw_request_info *info,
9311 union iwreq_data *wrqu, char *extra)
9312 {
9313 struct ipw_priv *priv = libipw_priv(dev);
9314 int err = 0;
9315
9316 mutex_lock(&priv->mutex);
9317 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9318 err = -EINPROGRESS;
9319 goto out;
9320 }
9321
9322 if (!wrqu->power.fixed)
9323 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9324
9325 if (wrqu->power.flags != IW_TXPOW_DBM) {
9326 err = -EINVAL;
9327 goto out;
9328 }
9329
9330 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9331 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9332 err = -EINVAL;
9333 goto out;
9334 }
9335
9336 priv->tx_power = wrqu->power.value;
9337 err = ipw_set_tx_power(priv);
9338 out:
9339 mutex_unlock(&priv->mutex);
9340 return err;
9341 }
9342
9343 static int ipw_wx_get_txpow(struct net_device *dev,
9344 struct iw_request_info *info,
9345 union iwreq_data *wrqu, char *extra)
9346 {
9347 struct ipw_priv *priv = libipw_priv(dev);
9348 mutex_lock(&priv->mutex);
9349 wrqu->power.value = priv->tx_power;
9350 wrqu->power.fixed = 1;
9351 wrqu->power.flags = IW_TXPOW_DBM;
9352 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9353 mutex_unlock(&priv->mutex);
9354
9355 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9356 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9357
9358 return 0;
9359 }
9360
9361 static int ipw_wx_set_frag(struct net_device *dev,
9362 struct iw_request_info *info,
9363 union iwreq_data *wrqu, char *extra)
9364 {
9365 struct ipw_priv *priv = libipw_priv(dev);
9366 mutex_lock(&priv->mutex);
9367 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9368 priv->ieee->fts = DEFAULT_FTS;
9369 else {
9370 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9371 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9372 mutex_unlock(&priv->mutex);
9373 return -EINVAL;
9374 }
9375
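/* Store an even fragmentation threshold (clear bit 0). */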
9376 priv->ieee->fts = wrqu->frag.value & ~0x1;
9377 }
9378
9379 ipw_send_frag_threshold(priv, wrqu->frag.value);
9380 mutex_unlock(&priv->mutex);
9381 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9382 return 0;
9383 }
9384
9385 static int ipw_wx_get_frag(struct net_device *dev,
9386 struct iw_request_info *info,
9387 union iwreq_data *wrqu, char *extra)
9388 {
9389 struct ipw_priv *priv = libipw_priv(dev);
9390 mutex_lock(&priv->mutex);
9391 wrqu->frag.value = priv->ieee->fts;
9392 wrqu->frag.fixed = 0; /* no auto select */
9393 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9394 mutex_unlock(&priv->mutex);
9395 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9396
9397 return 0;
9398 }
9399
9400 static int ipw_wx_set_retry(struct net_device *dev,
9401 struct iw_request_info *info,
9402 union iwreq_data *wrqu, char *extra)
9403 {
9404 struct ipw_priv *priv = libipw_priv(dev);
9405
9406 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9407 return -EINVAL;
9408
9409 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9410 return 0;
9411
9412 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9413 return -EINVAL;
9414
9415 mutex_lock(&priv->mutex);
9416 if (wrqu->retry.flags & IW_RETRY_SHORT)
9417 priv->short_retry_limit = (u8) wrqu->retry.value;
9418 else if (wrqu->retry.flags & IW_RETRY_LONG)
9419 priv->long_retry_limit = (u8) wrqu->retry.value;
9420 else {
9421 priv->short_retry_limit = (u8) wrqu->retry.value;
9422 priv->long_retry_limit = (u8) wrqu->retry.value;
9423 }
9424
9425 ipw_send_retry_limit(priv, priv->short_retry_limit,
9426 priv->long_retry_limit);
9427 mutex_unlock(&priv->mutex);
9428 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9429 priv->short_retry_limit, priv->long_retry_limit);
9430 return 0;
9431 }
9432
9433 static int ipw_wx_get_retry(struct net_device *dev,
9434 struct iw_request_info *info,
9435 union iwreq_data *wrqu, char *extra)
9436 {
9437 struct ipw_priv *priv = libipw_priv(dev);
9438
9439 mutex_lock(&priv->mutex);
9440 wrqu->retry.disabled = 0;
9441
9442 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9443 mutex_unlock(&priv->mutex);
9444 return -EINVAL;
9445 }
9446
9447 if (wrqu->retry.flags & IW_RETRY_LONG) {
9448 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9449 wrqu->retry.value = priv->long_retry_limit;
9450 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9451 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9452 wrqu->retry.value = priv->short_retry_limit;
9453 } else {
9454 wrqu->retry.flags = IW_RETRY_LIMIT;
9455 wrqu->retry.value = priv->short_retry_limit;
9456 }
9457 mutex_unlock(&priv->mutex);
9458
9459 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9460
9461 return 0;
9462 }
9463
9464 static int ipw_wx_set_scan(struct net_device *dev,
9465 struct iw_request_info *info,
9466 union iwreq_data *wrqu, char *extra)
9467 {
9468 struct ipw_priv *priv = libipw_priv(dev);
9469 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9470 struct delayed_work *work = NULL;
9471
9472 mutex_lock(&priv->mutex);
9473
9474 priv->user_requested_scan = 1;
9475
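/* A full struct iw_scan_req from user space selects a directed (ESSID)
 * or passive scan; any other length falls back to a normal active
 * broadcast scan. */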
9476 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9477 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9478 int len = min((int)req->essid_len,
9479 (int)sizeof(priv->direct_scan_ssid));
9480 memcpy(priv->direct_scan_ssid, req->essid, len);
9481 priv->direct_scan_ssid_len = len;
9482 work = &priv->request_direct_scan;
9483 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9484 work = &priv->request_passive_scan;
9485 }
9486 } else {
9487 /* Normal active broadcast scan */
9488 work = &priv->request_scan;
9489 }
9490
9491 mutex_unlock(&priv->mutex);
9492
9493 IPW_DEBUG_WX("Start scan\n");
9494
9495 schedule_delayed_work(work, 0);
9496
9497 return 0;
9498 }
9499
9500 static int ipw_wx_get_scan(struct net_device *dev,
9501 struct iw_request_info *info,
9502 union iwreq_data *wrqu, char *extra)
9503 {
9504 struct ipw_priv *priv = libipw_priv(dev);
9505 return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9506 }
9507
9508 static int ipw_wx_set_encode(struct net_device *dev,
9509 struct iw_request_info *info,
9510 union iwreq_data *wrqu, char *key)
9511 {
9512 struct ipw_priv *priv = libipw_priv(dev);
9513 int ret;
9514 u32 cap = priv->capability;
9515
9516 mutex_lock(&priv->mutex);
9517 ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9518
9519 /* In IBSS mode, we need to notify the firmware to update
9520 * the beacon info after we changed the capability. */
9521 if (cap != priv->capability &&
9522 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9523 priv->status & STATUS_ASSOCIATED)
9524 ipw_disassociate(priv);
9525
9526 mutex_unlock(&priv->mutex);
9527 return ret;
9528 }
9529
9530 static int ipw_wx_get_encode(struct net_device *dev,
9531 struct iw_request_info *info,
9532 union iwreq_data *wrqu, char *key)
9533 {
9534 struct ipw_priv *priv = libipw_priv(dev);
9535 return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9536 }
9537
9538 static int ipw_wx_set_power(struct net_device *dev,
9539 struct iw_request_info *info,
9540 union iwreq_data *wrqu, char *extra)
9541 {
9542 struct ipw_priv *priv = libipw_priv(dev);
9543 int err;
9544 mutex_lock(&priv->mutex);
9545 if (wrqu->power.disabled) {
9546 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9547 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9548 if (err) {
9549 IPW_DEBUG_WX("failed setting power mode.\n");
9550 mutex_unlock(&priv->mutex);
9551 return err;
9552 }
9553 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9554 mutex_unlock(&priv->mutex);
9555 return 0;
9556 }
9557
9558 switch (wrqu->power.flags & IW_POWER_MODE) {
9559 case IW_POWER_ON: /* If not specified */
9560 case IW_POWER_MODE: /* If set all mask */
9561 case IW_POWER_ALL_R: /* If explicitly state all */
9562 break;
9563 default: /* Otherwise we don't support it */
9564 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9565 wrqu->power.flags);
9566 mutex_unlock(&priv->mutex);
9567 return -EOPNOTSUPP;
9568 }
9569
9570 /* If the user hasn't specified a power management mode yet, default
9571 * to BATTERY */
9572 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9573 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9574 else
9575 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9576
9577 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9578 if (err) {
9579 IPW_DEBUG_WX("failed setting power mode.\n");
9580 mutex_unlock(&priv->mutex);
9581 return err;
9582 }
9583
9584 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9585 mutex_unlock(&priv->mutex);
9586 return 0;
9587 }
9588
9589 static int ipw_wx_get_power(struct net_device *dev,
9590 struct iw_request_info *info,
9591 union iwreq_data *wrqu, char *extra)
9592 {
9593 struct ipw_priv *priv = libipw_priv(dev);
9594 mutex_lock(&priv->mutex);
9595 if (!(priv->power_mode & IPW_POWER_ENABLED))
9596 wrqu->power.disabled = 1;
9597 else
9598 wrqu->power.disabled = 0;
9599
9600 mutex_unlock(&priv->mutex);
9601 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9602
9603 return 0;
9604 }
9605
9606 static int ipw_wx_set_powermode(struct net_device *dev,
9607 struct iw_request_info *info,
9608 union iwreq_data *wrqu, char *extra)
9609 {
9610 struct ipw_priv *priv = libipw_priv(dev);
9611 int mode = *(int *)extra;
9612 int err;
9613
9614 mutex_lock(&priv->mutex);
9615 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9616 mode = IPW_POWER_AC;
9617
9618 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9619 err = ipw_send_power_mode(priv, mode);
9620 if (err) {
9621 IPW_DEBUG_WX("failed setting power mode.\n");
9622 mutex_unlock(&priv->mutex);
9623 return err;
9624 }
9625 priv->power_mode = IPW_POWER_ENABLED | mode;
9626 }
9627 mutex_unlock(&priv->mutex);
9628 return 0;
9629 }
9630
9631 #define MAX_WX_STRING 80
9632 static int ipw_wx_get_powermode(struct net_device *dev,
9633 struct iw_request_info *info,
9634 union iwreq_data *wrqu, char *extra)
9635 {
9636 struct ipw_priv *priv = libipw_priv(dev);
9637 int level = IPW_POWER_LEVEL(priv->power_mode);
9638 char *p = extra;
9639
9640 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9641
9642 switch (level) {
9643 case IPW_POWER_AC:
9644 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9645 break;
9646 case IPW_POWER_BATTERY:
9647 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9648 break;
9649 default:
9650 p += snprintf(p, MAX_WX_STRING - (p - extra),
9651 "(Timeout %dms, Period %dms)",
9652 timeout_duration[level - 1] / 1000,
9653 period_duration[level - 1] / 1000);
9654 }
9655
9656 if (!(priv->power_mode & IPW_POWER_ENABLED))
9657 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9658
9659 wrqu->data.length = p - extra + 1;
9660
9661 return 0;
9662 }
9663
9664 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9665 struct iw_request_info *info,
9666 union iwreq_data *wrqu, char *extra)
9667 {
9668 struct ipw_priv *priv = libipw_priv(dev);
9669 int mode = *(int *)extra;
9670 u8 band = 0, modulation = 0;
9671
9672 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9673 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9674 return -EINVAL;
9675 }
9676 mutex_lock(&priv->mutex);
9677 if (priv->adapter == IPW_2915ABG) {
9678 priv->ieee->abg_true = 1;
9679 if (mode & IEEE_A) {
9680 band |= LIBIPW_52GHZ_BAND;
9681 modulation |= LIBIPW_OFDM_MODULATION;
9682 } else
9683 priv->ieee->abg_true = 0;
9684 } else {
9685 if (mode & IEEE_A) {
9686 IPW_WARNING("Attempt to set 2200BG into "
9687 "802.11a mode\n");
9688 mutex_unlock(&priv->mutex);
9689 return -EINVAL;
9690 }
9691
9692 priv->ieee->abg_true = 0;
9693 }
9694
9695 if (mode & IEEE_B) {
9696 band |= LIBIPW_24GHZ_BAND;
9697 modulation |= LIBIPW_CCK_MODULATION;
9698 } else
9699 priv->ieee->abg_true = 0;
9700
9701 if (mode & IEEE_G) {
9702 band |= LIBIPW_24GHZ_BAND;
9703 modulation |= LIBIPW_OFDM_MODULATION;
9704 } else
9705 priv->ieee->abg_true = 0;
9706
9707 priv->ieee->mode = mode;
9708 priv->ieee->freq_band = band;
9709 priv->ieee->modulation = modulation;
9710 init_supported_rates(priv, &priv->rates);
9711
9712 /* Network configuration changed -- force [re]association */
9713 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9714 if (!ipw_disassociate(priv)) {
9715 ipw_send_supported_rates(priv, &priv->rates);
9716 ipw_associate(priv);
9717 }
9718
9719 /* Update the band LEDs */
9720 ipw_led_band_on(priv);
9721
9722 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9723 mode & IEEE_A ? 'a' : '.',
9724 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9725 mutex_unlock(&priv->mutex);
9726 return 0;
9727 }
9728
9729 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9730 struct iw_request_info *info,
9731 union iwreq_data *wrqu, char *extra)
9732 {
9733 struct ipw_priv *priv = libipw_priv(dev);
9734 mutex_lock(&priv->mutex);
9735 switch (priv->ieee->mode) {
9736 case IEEE_A:
9737 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9738 break;
9739 case IEEE_B:
9740 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9741 break;
9742 case IEEE_A | IEEE_B:
9743 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9744 break;
9745 case IEEE_G:
9746 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9747 break;
9748 case IEEE_A | IEEE_G:
9749 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9750 break;
9751 case IEEE_B | IEEE_G:
9752 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9753 break;
9754 case IEEE_A | IEEE_B | IEEE_G:
9755 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9756 break;
9757 default:
9758 strncpy(extra, "unknown", MAX_WX_STRING);
9759 break;
9760 }
9761 extra[MAX_WX_STRING - 1] = '\0';
9762
9763 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9764
9765 wrqu->data.length = strlen(extra) + 1;
9766 mutex_unlock(&priv->mutex);
9767
9768 return 0;
9769 }
9770
9771 static int ipw_wx_set_preamble(struct net_device *dev,
9772 struct iw_request_info *info,
9773 union iwreq_data *wrqu, char *extra)
9774 {
9775 struct ipw_priv *priv = libipw_priv(dev);
9776 int mode = *(int *)extra;
9777 mutex_lock(&priv->mutex);
9778 /* Switching from SHORT -> LONG requires a disassociation */
9779 if (mode == 1) {
9780 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9781 priv->config |= CFG_PREAMBLE_LONG;
9782
9783 /* Network configuration changed -- force [re]association */
9784 IPW_DEBUG_ASSOC
9785 ("[re]association triggered due to preamble change.\n");
9786 if (!ipw_disassociate(priv))
9787 ipw_associate(priv);
9788 }
9789 goto done;
9790 }
9791
9792 if (mode == 0) {
9793 priv->config &= ~CFG_PREAMBLE_LONG;
9794 goto done;
9795 }
9796 mutex_unlock(&priv->mutex);
9797 return -EINVAL;
9798
9799 done:
9800 mutex_unlock(&priv->mutex);
9801 return 0;
9802 }
9803
9804 static int ipw_wx_get_preamble(struct net_device *dev,
9805 struct iw_request_info *info,
9806 union iwreq_data *wrqu, char *extra)
9807 {
9808 struct ipw_priv *priv = libipw_priv(dev);
9809 mutex_lock(&priv->mutex);
9810 if (priv->config & CFG_PREAMBLE_LONG)
9811 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9812 else
9813 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9814 mutex_unlock(&priv->mutex);
9815 return 0;
9816 }
9817
9818 #ifdef CONFIG_IPW2200_MONITOR
9819 static int ipw_wx_set_monitor(struct net_device *dev,
9820 struct iw_request_info *info,
9821 union iwreq_data *wrqu, char *extra)
9822 {
9823 struct ipw_priv *priv = libipw_priv(dev);
9824 int *parms = (int *)extra;
9825 int enable = (parms[0] > 0);
9826 mutex_lock(&priv->mutex);
9827 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9828 if (enable) {
9829 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9830 #ifdef CONFIG_IPW2200_RADIOTAP
9831 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9832 #else
9833 priv->net_dev->type = ARPHRD_IEEE80211;
9834 #endif
9835 schedule_work(&priv->adapter_restart);
9836 }
9837
9838 ipw_set_channel(priv, parms[1]);
9839 } else {
9840 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9841 mutex_unlock(&priv->mutex);
9842 return 0;
9843 }
9844 priv->net_dev->type = ARPHRD_ETHER;
9845 schedule_work(&priv->adapter_restart);
9846 }
9847 mutex_unlock(&priv->mutex);
9848 return 0;
9849 }
9850
9851 #endif /* CONFIG_IPW2200_MONITOR */
9852
9853 static int ipw_wx_reset(struct net_device *dev,
9854 struct iw_request_info *info,
9855 union iwreq_data *wrqu, char *extra)
9856 {
9857 struct ipw_priv *priv = libipw_priv(dev);
9858 IPW_DEBUG_WX("RESET\n");
9859 schedule_work(&priv->adapter_restart);
9860 return 0;
9861 }
9862
9863 static int ipw_wx_sw_reset(struct net_device *dev,
9864 struct iw_request_info *info,
9865 union iwreq_data *wrqu, char *extra)
9866 {
9867 struct ipw_priv *priv = libipw_priv(dev);
9868 union iwreq_data wrqu_sec = {
9869 .encoding = {
9870 .flags = IW_ENCODE_DISABLED,
9871 },
9872 };
9873 int ret;
9874
9875 IPW_DEBUG_WX("SW_RESET\n");
9876
9877 mutex_lock(&priv->mutex);
9878
9879 ret = ipw_sw_reset(priv, 2);
9880 if (!ret) {
9881 free_firmware();
9882 ipw_adapter_restart(priv);
9883 }
9884
9885 /* The SW reset bit might have been toggled on by the 'disable'
9886 * module parameter, so take appropriate action */
9887 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9888
9889 mutex_unlock(&priv->mutex);
9890 libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9891 mutex_lock(&priv->mutex);
9892
9893 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9894 /* Configuration likely changed -- force [re]association */
9895 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9896 "reset.\n");
9897 if (!ipw_disassociate(priv))
9898 ipw_associate(priv);
9899 }
9900
9901 mutex_unlock(&priv->mutex);
9902
9903 return 0;
9904 }
9905
9906 /* Rebase the WE IOCTLs to zero for the handler array */
9907 static iw_handler ipw_wx_handlers[] = {
9908 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9909 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9910 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
9911 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
9912 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
9913 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
9914 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
9915 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
9916 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
9917 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
9918 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
9919 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
9920 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
9921 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
9922 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
9923 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
9924 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
9925 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
9926 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
9927 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
9928 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
9929 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
9930 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
9931 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
9932 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
9933 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
9934 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
9935 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
9936 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
9937 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
9938 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
9939 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
9940 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
9941 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
9942 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
9943 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
9944 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
9945 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
9946 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
9947 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
9948 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
9949 };
9950
9951 enum {
9952 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9953 IPW_PRIV_GET_POWER,
9954 IPW_PRIV_SET_MODE,
9955 IPW_PRIV_GET_MODE,
9956 IPW_PRIV_SET_PREAMBLE,
9957 IPW_PRIV_GET_PREAMBLE,
9958 IPW_PRIV_RESET,
9959 IPW_PRIV_SW_RESET,
9960 #ifdef CONFIG_IPW2200_MONITOR
9961 IPW_PRIV_SET_MONITOR,
9962 #endif
9963 };
9964
9965 static struct iw_priv_args ipw_priv_args[] = {
9966 {
9967 .cmd = IPW_PRIV_SET_POWER,
9968 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9969 .name = "set_power"},
9970 {
9971 .cmd = IPW_PRIV_GET_POWER,
9972 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9973 .name = "get_power"},
9974 {
9975 .cmd = IPW_PRIV_SET_MODE,
9976 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9977 .name = "set_mode"},
9978 {
9979 .cmd = IPW_PRIV_GET_MODE,
9980 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9981 .name = "get_mode"},
9982 {
9983 .cmd = IPW_PRIV_SET_PREAMBLE,
9984 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9985 .name = "set_preamble"},
9986 {
9987 .cmd = IPW_PRIV_GET_PREAMBLE,
9988 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9989 .name = "get_preamble"},
9990 {
9991 IPW_PRIV_RESET,
9992 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9993 {
9994 IPW_PRIV_SW_RESET,
9995 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9996 #ifdef CONFIG_IPW2200_MONITOR
9997 {
9998 IPW_PRIV_SET_MONITOR,
9999 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10000 #endif /* CONFIG_IPW2200_MONITOR */
10001 };
10002
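/* Note: the private handlers below must stay in the same order as the
 * IPW_PRIV_* enum above, since Wireless Extensions dispatch private
 * handlers by (cmd - SIOCIWFIRSTPRIV). */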
10003 static iw_handler ipw_priv_handler[] = {
10004 ipw_wx_set_powermode,
10005 ipw_wx_get_powermode,
10006 ipw_wx_set_wireless_mode,
10007 ipw_wx_get_wireless_mode,
10008 ipw_wx_set_preamble,
10009 ipw_wx_get_preamble,
10010 ipw_wx_reset,
10011 ipw_wx_sw_reset,
10012 #ifdef CONFIG_IPW2200_MONITOR
10013 ipw_wx_set_monitor,
10014 #endif
10015 };
10016
10017 static struct iw_handler_def ipw_wx_handler_def = {
10018 .standard = ipw_wx_handlers,
10019 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10020 .num_private = ARRAY_SIZE(ipw_priv_handler),
10021 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10022 .private = ipw_priv_handler,
10023 .private_args = ipw_priv_args,
10024 .get_wireless_stats = ipw_get_wireless_stats,
10025 };
10026
10027 /*
10028 * Get wireless statistics.
10029 * Called by /proc/net/wireless
10030 * Also called by SIOCGIWSTATS
10031 */
10032 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10033 {
10034 struct ipw_priv *priv = libipw_priv(dev);
10035 struct iw_statistics *wstats;
10036
10037 wstats = &priv->wstats;
10038
10039 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10040 * netdev->get_wireless_stats seems to be called before fw is
10041 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10042 * and associated; if not associated, the values are all meaningless
10043 * anyway, so zero them all and mark them INVALID */
10044 if (!(priv->status & STATUS_ASSOCIATED)) {
10045 wstats->miss.beacon = 0;
10046 wstats->discard.retries = 0;
10047 wstats->qual.qual = 0;
10048 wstats->qual.level = 0;
10049 wstats->qual.noise = 0;
10050 wstats->qual.updated = 7;
10051 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10052 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10053 return wstats;
10054 }
10055
10056 wstats->qual.qual = priv->quality;
10057 wstats->qual.level = priv->exp_avg_rssi;
10058 wstats->qual.noise = priv->exp_avg_noise;
10059 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10060 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10061
10062 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10063 wstats->discard.retries = priv->last_tx_failures;
10064 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10065
10066 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10067 goto fail_get_ordinal;
10068 wstats->discard.retries += tx_retry; */
10069
10070 return wstats;
10071 }
10072
10073 /* net device stuff */
10074
10075 static void init_sys_config(struct ipw_sys_config *sys_config)
10076 {
10077 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10078 sys_config->bt_coexistence = 0;
10079 sys_config->answer_broadcast_ssid_probe = 0;
10080 sys_config->accept_all_data_frames = 0;
10081 sys_config->accept_non_directed_frames = 1;
10082 sys_config->exclude_unicast_unencrypted = 0;
10083 sys_config->disable_unicast_decryption = 1;
10084 sys_config->exclude_multicast_unencrypted = 0;
10085 sys_config->disable_multicast_decryption = 1;
10086 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10087 antenna = CFG_SYS_ANTENNA_BOTH;
10088 sys_config->antenna_diversity = antenna;
10089 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10090 sys_config->dot11g_auto_detection = 0;
10091 sys_config->enable_cts_to_self = 0;
10092 sys_config->bt_coexist_collision_thr = 0;
10093 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10094 sys_config->silence_threshold = 0x1e;
10095 }
10096
10097 static int ipw_net_open(struct net_device *dev)
10098 {
10099 IPW_DEBUG_INFO("dev->open\n");
10100 netif_start_queue(dev);
10101 return 0;
10102 }
10103
10104 static int ipw_net_stop(struct net_device *dev)
10105 {
10106 IPW_DEBUG_INFO("dev->close\n");
10107 netif_stop_queue(dev);
10108 return 0;
10109 }
10110
10111 /*
10112 todo:
10113
10114 modify to send one TFD per fragment instead of using chunking; otherwise
10115 we need to heavily modify libipw_skb_to_txb().
10116 */
10117
10118 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10119 int pri)
10120 {
10121 struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10122 txb->fragments[0]->data;
10123 int i = 0;
10124 struct tfd_frame *tfd;
10125 #ifdef CONFIG_IPW2200_QOS
10126 int tx_id = ipw_get_tx_queue_number(priv, pri);
10127 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10128 #else
10129 struct clx2_tx_queue *txq = &priv->txq[0];
10130 #endif
10131 struct clx2_queue *q = &txq->q;
10132 u8 id, hdr_len, unicast;
10133 int fc;
10134
10135 if (!(priv->status & STATUS_ASSOCIATED))
10136 goto drop;
10137
10138 hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10139 switch (priv->ieee->iw_mode) {
10140 case IW_MODE_ADHOC:
10141 unicast = !is_multicast_ether_addr(hdr->addr1);
10142 id = ipw_find_station(priv, hdr->addr1);
10143 if (id == IPW_INVALID_STATION) {
10144 id = ipw_add_station(priv, hdr->addr1);
10145 if (id == IPW_INVALID_STATION) {
10146 IPW_WARNING("Attempt to send data to "
10147 "invalid cell: %pM\n",
10148 hdr->addr1);
10149 goto drop;
10150 }
10151 }
10152 break;
10153
10154 case IW_MODE_INFRA:
10155 default:
10156 unicast = !is_multicast_ether_addr(hdr->addr3);
10157 id = 0;
10158 break;
10159 }
10160
10161 tfd = &txq->bd[q->first_empty];
10162 txq->txb[q->first_empty] = txb;
10163 memset(tfd, 0, sizeof(*tfd));
10164 tfd->u.data.station_number = id;
10165
10166 tfd->control_flags.message_type = TX_FRAME_TYPE;
10167 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10168
10169 tfd->u.data.cmd_id = DINO_CMD_TX;
10170 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10171
10172 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10173 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10174 else
10175 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10176
10177 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10178 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10179
10180 fc = le16_to_cpu(hdr->frame_ctl);
10181 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10182
10183 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10184
10185 if (likely(unicast))
10186 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10187
10188 if (txb->encrypted && !priv->ieee->host_encrypt) {
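/* Hardware encryption: SEC_LEVEL_3 corresponds to CCMP, SEC_LEVEL_2 to
 * TKIP and SEC_LEVEL_1 to WEP, as reflected by the tx_flags_ext and
 * key_index bits set in each case below. */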
10189 switch (priv->ieee->sec.level) {
10190 case SEC_LEVEL_3:
10191 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10192 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10193 /* XXX: ACK flag must be set for CCMP even if it
10194 * is a multicast/broadcast packet, because CCMP
10195 * group communication encrypted by GTK is
10196 * actually done by the AP. */
10197 if (!unicast)
10198 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10199
10200 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10201 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10202 tfd->u.data.key_index = 0;
10203 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10204 break;
10205 case SEC_LEVEL_2:
10206 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10207 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10208 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10209 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10210 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10211 break;
10212 case SEC_LEVEL_1:
10213 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10214 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10215 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10216 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10217 40)
10218 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10219 else
10220 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10221 break;
10222 case SEC_LEVEL_0:
10223 break;
10224 default:
10225 printk(KERN_ERR "Unknown security level %d\n",
10226 priv->ieee->sec.level);
10227 break;
10228 }
10229 } else
10230 /* No hardware encryption */
10231 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10232
10233 #ifdef CONFIG_IPW2200_QOS
10234 if (fc & IEEE80211_STYPE_QOS_DATA)
10235 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10236 #endif /* CONFIG_IPW2200_QOS */
10237
10238 /* payload */
10239 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10240 txb->nr_frags));
10241 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10242 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10243 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10244 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10245 i, le32_to_cpu(tfd->u.data.num_chunks),
10246 txb->fragments[i]->len - hdr_len);
10247 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10248 i, tfd->u.data.num_chunks,
10249 txb->fragments[i]->len - hdr_len);
10250 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10251 txb->fragments[i]->len - hdr_len);
10252
10253 tfd->u.data.chunk_ptr[i] =
10254 cpu_to_le32(pci_map_single
10255 (priv->pci_dev,
10256 txb->fragments[i]->data + hdr_len,
10257 txb->fragments[i]->len - hdr_len,
10258 PCI_DMA_TODEVICE));
10259 tfd->u.data.chunk_len[i] =
10260 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10261 }
10262
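/* If there were more fragments than TFD chunk slots, coalesce the
 * remainder into a single freshly allocated skb and attach it as the
 * final chunk. */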
10263 if (i != txb->nr_frags) {
10264 struct sk_buff *skb;
10265 u16 remaining_bytes = 0;
10266 int j;
10267
10268 for (j = i; j < txb->nr_frags; j++)
10269 remaining_bytes += txb->fragments[j]->len - hdr_len;
10270
10271 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10272 remaining_bytes);
10273 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10274 if (skb != NULL) {
10275 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10276 for (j = i; j < txb->nr_frags; j++) {
10277 int size = txb->fragments[j]->len - hdr_len;
10278
10279 printk(KERN_INFO "Adding frag %d %d...\n",
10280 j, size);
10281 memcpy(skb_put(skb, size),
10282 txb->fragments[j]->data + hdr_len, size);
10283 }
10284 dev_kfree_skb_any(txb->fragments[i]);
10285 txb->fragments[i] = skb;
10286 tfd->u.data.chunk_ptr[i] =
10287 cpu_to_le32(pci_map_single
10288 (priv->pci_dev, skb->data,
10289 remaining_bytes,
10290 PCI_DMA_TODEVICE));
10291
10292 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10293 }
10294 }
10295
10296 /* kick DMA */
10297 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10298 ipw_write32(priv, q->reg_w, q->first_empty);
10299
10300 if (ipw_tx_queue_space(q) < q->high_mark)
10301 netif_stop_queue(priv->net_dev);
10302
10303 return NETDEV_TX_OK;
10304
10305 drop:
10306 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10307 libipw_txb_free(txb);
10308 return NETDEV_TX_OK;
10309 }
10310
10311 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10312 {
10313 struct ipw_priv *priv = libipw_priv(dev);
10314 #ifdef CONFIG_IPW2200_QOS
10315 int tx_id = ipw_get_tx_queue_number(priv, pri);
10316 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10317 #else
10318 struct clx2_tx_queue *txq = &priv->txq[0];
10319 #endif /* CONFIG_IPW2200_QOS */
10320
10321 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10322 return 1;
10323
10324 return 0;
10325 }
10326
10327 #ifdef CONFIG_IPW2200_PROMISCUOUS
10328 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10329 struct libipw_txb *txb)
10330 {
10331 struct libipw_rx_stats dummystats;
10332 struct ieee80211_hdr *hdr;
10333 u8 n;
10334 u16 filter = priv->prom_priv->filter;
10335 int hdr_only = 0;
10336
10337 if (filter & IPW_PROM_NO_TX)
10338 return;
10339
10340 memset(&dummystats, 0, sizeof(dummystats));
10341
10342 /* Filtering of fragment chains is done against the first fragment */
10343 hdr = (void *)txb->fragments[0]->data;
10344 if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10345 if (filter & IPW_PROM_NO_MGMT)
10346 return;
10347 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10348 hdr_only = 1;
10349 } else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10350 if (filter & IPW_PROM_NO_CTL)
10351 return;
10352 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10353 hdr_only = 1;
10354 } else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10355 if (filter & IPW_PROM_NO_DATA)
10356 return;
10357 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10358 hdr_only = 1;
10359 }
10360
10361 for(n=0; n<txb->nr_frags; ++n) {
10362 struct sk_buff *src = txb->fragments[n];
10363 struct sk_buff *dst;
10364 struct ieee80211_radiotap_header *rt_hdr;
10365 int len;
10366
10367 if (hdr_only) {
10368 hdr = (void *)src->data;
10369 len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10370 } else
10371 len = src->len;
10372
10373 dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10374 if (!dst)
10375 continue;
10376
10377 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10378
10379 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10380 rt_hdr->it_pad = 0;
10381 rt_hdr->it_present = 0; /* after all, it's just an idea */
10382 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10383
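/* The radiotap CHANNEL field is a u16 frequency in MHz followed by a
 * u16 channel flags word; both are appended below. */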
10384 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10385 ieee80211chan2mhz(priv->channel));
10386 if (priv->channel > 14) /* 802.11a */
10387 *(__le16*)skb_put(dst, sizeof(u16)) =
10388 cpu_to_le16(IEEE80211_CHAN_OFDM |
10389 IEEE80211_CHAN_5GHZ);
10390 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10391 *(__le16*)skb_put(dst, sizeof(u16)) =
10392 cpu_to_le16(IEEE80211_CHAN_CCK |
10393 IEEE80211_CHAN_2GHZ);
10394 else /* 802.11g */
10395 *(__le16*)skb_put(dst, sizeof(u16)) =
10396 cpu_to_le16(IEEE80211_CHAN_OFDM |
10397 IEEE80211_CHAN_2GHZ);
10398
10399 rt_hdr->it_len = cpu_to_le16(dst->len);
10400
10401 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10402
10403 if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10404 dev_kfree_skb_any(dst);
10405 }
10406 }
10407 #endif
10408
10409 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10410 struct net_device *dev, int pri)
10411 {
10412 struct ipw_priv *priv = libipw_priv(dev);
10413 unsigned long flags;
10414 netdev_tx_t ret;
10415
10416 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10417 spin_lock_irqsave(&priv->lock, flags);
10418
10419 #ifdef CONFIG_IPW2200_PROMISCUOUS
10420 if (rtap_iface && netif_running(priv->prom_net_dev))
10421 ipw_handle_promiscuous_tx(priv, txb);
10422 #endif
10423
10424 ret = ipw_tx_skb(priv, txb, pri);
10425 if (ret == NETDEV_TX_OK)
10426 __ipw_led_activity_on(priv);
10427 spin_unlock_irqrestore(&priv->lock, flags);
10428
10429 return ret;
10430 }
10431
10432 static void ipw_net_set_multicast_list(struct net_device *dev)
10433 {
10434
10435 }
10436
10437 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10438 {
10439 struct ipw_priv *priv = libipw_priv(dev);
10440 struct sockaddr *addr = p;
10441
10442 if (!is_valid_ether_addr(addr->sa_data))
10443 return -EADDRNOTAVAIL;
10444 mutex_lock(&priv->mutex);
10445 priv->config |= CFG_CUSTOM_MAC;
10446 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10447 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10448 priv->net_dev->name, priv->mac_addr);
10449 schedule_work(&priv->adapter_restart);
10450 mutex_unlock(&priv->mutex);
10451 return 0;
10452 }
10453
10454 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10455 struct ethtool_drvinfo *info)
10456 {
10457 struct ipw_priv *p = libipw_priv(dev);
10458 char vers[64];
10459 char date[32];
10460 u32 len;
10461
10462 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10463 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10464
10465 len = sizeof(vers);
10466 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10467 len = sizeof(date);
10468 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10469
10470 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10471 vers, date);
10472 strlcpy(info->bus_info, pci_name(p->pci_dev),
10473 sizeof(info->bus_info));
10474 }
10475
10476 static u32 ipw_ethtool_get_link(struct net_device *dev)
10477 {
10478 struct ipw_priv *priv = libipw_priv(dev);
10479 return (priv->status & STATUS_ASSOCIATED) != 0;
10480 }
10481
10482 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10483 {
10484 return IPW_EEPROM_IMAGE_SIZE;
10485 }
10486
10487 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10488 struct ethtool_eeprom *eeprom, u8 * bytes)
10489 {
10490 struct ipw_priv *p = libipw_priv(dev);
10491
10492 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10493 return -EINVAL;
10494 mutex_lock(&p->mutex);
10495 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10496 mutex_unlock(&p->mutex);
10497 return 0;
10498 }
10499
10500 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10501 struct ethtool_eeprom *eeprom, u8 * bytes)
10502 {
10503 struct ipw_priv *p = libipw_priv(dev);
10504 int i;
10505
10506 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10507 return -EINVAL;
10508 mutex_lock(&p->mutex);
10509 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10510 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10511 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10512 mutex_unlock(&p->mutex);
10513 return 0;
10514 }
10515
10516 static const struct ethtool_ops ipw_ethtool_ops = {
10517 .get_link = ipw_ethtool_get_link,
10518 .get_drvinfo = ipw_ethtool_get_drvinfo,
10519 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10520 .get_eeprom = ipw_ethtool_get_eeprom,
10521 .set_eeprom = ipw_ethtool_set_eeprom,
10522 };
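/* These ethtool handlers are hooked up in ipw_pci_probe() further below via
 * net_dev->ethtool_ops = &ipw_ethtool_ops, so standard ethtool requests from
 * userspace (driver info, link state, EEPROM dump/write) end up in the
 * functions above. */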
10523
10524 static irqreturn_t ipw_isr(int irq, void *data)
10525 {
10526 struct ipw_priv *priv = data;
10527 u32 inta, inta_mask;
10528
10529 if (!priv)
10530 return IRQ_NONE;
10531
10532 spin_lock(&priv->irq_lock);
10533
10534 if (!(priv->status & STATUS_INT_ENABLED)) {
10535 /* IRQ is disabled */
10536 goto none;
10537 }
10538
10539 inta = ipw_read32(priv, IPW_INTA_RW);
10540 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10541
10542 if (inta == 0xFFFFFFFF) {
10543 /* Hardware disappeared */
10544 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10545 goto none;
10546 }
10547
10548 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10549 /* Shared interrupt */
10550 goto none;
10551 }
10552
10553 /* tell the device to stop sending interrupts */
10554 __ipw_disable_interrupts(priv);
10555
10556 /* ack current interrupts */
10557 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10558 ipw_write32(priv, IPW_INTA_RW, inta);
10559
10560 /* Cache INTA value for our tasklet */
10561 priv->isr_inta = inta;
10562
10563 tasklet_schedule(&priv->irq_tasklet);
10564
10565 spin_unlock(&priv->irq_lock);
10566
10567 return IRQ_HANDLED;
10568 none:
10569 spin_unlock(&priv->irq_lock);
10570 return IRQ_NONE;
10571 }
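/*
 * Illustrative sketch (not compiled in): how a tasklet bottom half would
 * typically consume the INTA bits cached by ipw_isr() above.  The handler
 * name below is hypothetical; the real bottom half in this driver is
 * ipw_irq_tasklet(), registered in ipw_setup_deferred_work().
 */
#if 0
static void example_irq_bottom_half(unsigned long data)
{
	struct ipw_priv *priv = (struct ipw_priv *)data;
	unsigned long flags;
	u32 inta;

	/* Pick up the interrupt bits the hard IRQ handler cached for us. */
	spin_lock_irqsave(&priv->irq_lock, flags);
	inta = priv->isr_inta;
	spin_unlock_irqrestore(&priv->irq_lock, flags);

	/* ... decode and service each bit set in 'inta' ... */

	/* Once handled, re-enable device interrupts (the driver's
	 * enable-interrupts helper would be called here). */
}
#endif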
10572
10573 static void ipw_rf_kill(void *adapter)
10574 {
10575 struct ipw_priv *priv = adapter;
10576 unsigned long flags;
10577
10578 spin_lock_irqsave(&priv->lock, flags);
10579
10580 if (rf_kill_active(priv)) {
10581 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10582 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10583 goto exit_unlock;
10584 }
10585
10586 /* RF Kill is now disabled, so bring the device back up */
10587
10588 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10589 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10590 "device\n");
10591
10592 /* we can not do an adapter restart while inside an irq lock */
10593 schedule_work(&priv->adapter_restart);
10594 } else
10595 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10596 "enabled\n");
10597
10598 exit_unlock:
10599 spin_unlock_irqrestore(&priv->lock, flags);
10600 }
10601
10602 static void ipw_bg_rf_kill(struct work_struct *work)
10603 {
10604 struct ipw_priv *priv =
10605 container_of(work, struct ipw_priv, rf_kill.work);
10606 mutex_lock(&priv->mutex);
10607 ipw_rf_kill(priv);
10608 mutex_unlock(&priv->mutex);
10609 }
10610
10611 static void ipw_link_up(struct ipw_priv *priv)
10612 {
10613 priv->last_seq_num = -1;
10614 priv->last_frag_num = -1;
10615 priv->last_packet_time = 0;
10616
10617 netif_carrier_on(priv->net_dev);
10618
10619 cancel_delayed_work(&priv->request_scan);
10620 cancel_delayed_work(&priv->request_direct_scan);
10621 cancel_delayed_work(&priv->request_passive_scan);
10622 cancel_delayed_work(&priv->scan_event);
10623 ipw_reset_stats(priv);
10624 /* Ensure the rate is updated immediately */
10625 priv->last_rate = ipw_get_current_rate(priv);
10626 ipw_gather_stats(priv);
10627 ipw_led_link_up(priv);
10628 notify_wx_assoc_event(priv);
10629
10630 if (priv->config & CFG_BACKGROUND_SCAN)
10631 schedule_delayed_work(&priv->request_scan, HZ);
10632 }
10633
10634 static void ipw_bg_link_up(struct work_struct *work)
10635 {
10636 struct ipw_priv *priv =
10637 container_of(work, struct ipw_priv, link_up);
10638 mutex_lock(&priv->mutex);
10639 ipw_link_up(priv);
10640 mutex_unlock(&priv->mutex);
10641 }
10642
10643 static void ipw_link_down(struct ipw_priv *priv)
10644 {
10645 ipw_led_link_down(priv);
10646 netif_carrier_off(priv->net_dev);
10647 notify_wx_assoc_event(priv);
10648
10649 /* Cancel any queued work ... */
10650 cancel_delayed_work(&priv->request_scan);
10651 cancel_delayed_work(&priv->request_direct_scan);
10652 cancel_delayed_work(&priv->request_passive_scan);
10653 cancel_delayed_work(&priv->adhoc_check);
10654 cancel_delayed_work(&priv->gather_stats);
10655
10656 ipw_reset_stats(priv);
10657
10658 if (!(priv->status & STATUS_EXIT_PENDING)) {
10659 /* Queue up another scan... */
10660 schedule_delayed_work(&priv->request_scan, 0);
10661 } else
10662 cancel_delayed_work(&priv->scan_event);
10663 }
10664
10665 static void ipw_bg_link_down(struct work_struct *work)
10666 {
10667 struct ipw_priv *priv =
10668 container_of(work, struct ipw_priv, link_down);
10669 mutex_lock(&priv->mutex);
10670 ipw_link_down(priv);
10671 mutex_unlock(&priv->mutex);
10672 }
10673
10674 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10675 {
10676 int ret = 0;
10677
10678 init_waitqueue_head(&priv->wait_command_queue);
10679 init_waitqueue_head(&priv->wait_state);
10680
10681 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10682 INIT_WORK(&priv->associate, ipw_bg_associate);
10683 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10684 INIT_WORK(&priv->system_config, ipw_system_config);
10685 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10686 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10687 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10688 INIT_WORK(&priv->up, ipw_bg_up);
10689 INIT_WORK(&priv->down, ipw_bg_down);
10690 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10691 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10692 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10693 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10694 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10695 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10696 INIT_WORK(&priv->roam, ipw_bg_roam);
10697 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10698 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10699 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10700 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10701 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10702 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10703 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10704
10705 #ifdef CONFIG_IPW2200_QOS
10706 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10707 #endif /* CONFIG_IPW2200_QOS */
10708
10709 tasklet_init(&priv->irq_tasklet,
10710 ipw_irq_tasklet, (unsigned long)priv);
10711
10712 return ret;
10713 }
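/*
 * Illustrative sketch (not compiled in) of the deferred-work idiom used by
 * the ipw_bg_*() handlers in this file: the handler recovers its ipw_priv
 * with container_of() and serializes against other driver paths by taking
 * priv->mutex.  'example_task' is a hypothetical work_struct field, shown
 * only to make the pattern explicit.
 */
#if 0
static void example_bg_task(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, example_task);

	mutex_lock(&priv->mutex);
	/* ... perform the actual operation with the driver mutex held ... */
	mutex_unlock(&priv->mutex);
}

/* Initialized once (as in ipw_setup_deferred_work() above):
 *	INIT_WORK(&priv->example_task, example_bg_task);
 * and kicked from other contexts with:
 *	schedule_work(&priv->example_task);
 */
#endif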
10714
10715 static void shim__set_security(struct net_device *dev,
10716 struct libipw_security *sec)
10717 {
10718 struct ipw_priv *priv = libipw_priv(dev);
10719 int i;
10720 for (i = 0; i < 4; i++) {
10721 if (sec->flags & (1 << i)) {
10722 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10723 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10724 if (sec->key_sizes[i] == 0)
10725 priv->ieee->sec.flags &= ~(1 << i);
10726 else {
10727 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10728 sec->key_sizes[i]);
10729 priv->ieee->sec.flags |= (1 << i);
10730 }
10731 priv->status |= STATUS_SECURITY_UPDATED;
10732 } else if (sec->level != SEC_LEVEL_1)
10733 priv->ieee->sec.flags &= ~(1 << i);
10734 }
10735
10736 if (sec->flags & SEC_ACTIVE_KEY) {
10737 if (sec->active_key <= 3) {
10738 priv->ieee->sec.active_key = sec->active_key;
10739 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10740 } else
10741 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10742 priv->status |= STATUS_SECURITY_UPDATED;
10743 } else
10744 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10745
10746 if ((sec->flags & SEC_AUTH_MODE) &&
10747 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10748 priv->ieee->sec.auth_mode = sec->auth_mode;
10749 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10750 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10751 priv->capability |= CAP_SHARED_KEY;
10752 else
10753 priv->capability &= ~CAP_SHARED_KEY;
10754 priv->status |= STATUS_SECURITY_UPDATED;
10755 }
10756
10757 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10758 priv->ieee->sec.flags |= SEC_ENABLED;
10759 priv->ieee->sec.enabled = sec->enabled;
10760 priv->status |= STATUS_SECURITY_UPDATED;
10761 if (sec->enabled)
10762 priv->capability |= CAP_PRIVACY_ON;
10763 else
10764 priv->capability &= ~CAP_PRIVACY_ON;
10765 }
10766
10767 if (sec->flags & SEC_ENCRYPT)
10768 priv->ieee->sec.encrypt = sec->encrypt;
10769
10770 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10771 priv->ieee->sec.level = sec->level;
10772 priv->ieee->sec.flags |= SEC_LEVEL;
10773 priv->status |= STATUS_SECURITY_UPDATED;
10774 }
10775
10776 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10777 ipw_set_hwcrypto_keys(priv);
10778
10779 /* To match the current functionality of ipw2100 (which works well with
10780 * various supplicants), we don't force a disassociate if the
10781 * privacy capability changes ... */
10782 #if 0
10783 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10784 (((priv->assoc_request.capability &
10785 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10786 (!(priv->assoc_request.capability &
10787 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10788 IPW_DEBUG_ASSOC("Disassociating due to capability "
10789 "change.\n");
10790 ipw_disassociate(priv);
10791 }
10792 #endif
10793 }
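/* shim__set_security() is installed as priv->ieee->set_security in
 * ipw_pci_probe() below; security updates arriving from the libipw/WEXT side
 * (presumably the wireless-extensions key and auth handlers) land here and
 * set STATUS_SECURITY_UPDATED for the rest of the driver to act on. */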
10794
10795 static int init_supported_rates(struct ipw_priv *priv,
10796 struct ipw_supported_rates *rates)
10797 {
10798 /* TODO: Mask out rates based on priv->rates_mask */
10799
10800 memset(rates, 0, sizeof(*rates));
10801 /* configure supported rates */
10802 switch (priv->ieee->freq_band) {
10803 case LIBIPW_52GHZ_BAND:
10804 rates->ieee_mode = IPW_A_MODE;
10805 rates->purpose = IPW_RATE_CAPABILITIES;
10806 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10807 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10808 break;
10809
10810 default: /* Mixed or 2.4 GHz */
10811 rates->ieee_mode = IPW_G_MODE;
10812 rates->purpose = IPW_RATE_CAPABILITIES;
10813 ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10814 LIBIPW_CCK_DEFAULT_RATES_MASK);
10815 if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10816 ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10817 LIBIPW_OFDM_DEFAULT_RATES_MASK);
10818 }
10819 break;
10820 }
10821
10822 return 0;
10823 }
10824
10825 static int ipw_config(struct ipw_priv *priv)
10826 {
10827 /* This is only called from ipw_up, which resets/reloads the firmware,
10828 so we don't need to first disable the card before we configure
10829 it */
10830 if (ipw_set_tx_power(priv))
10831 goto error;
10832
10833 /* initialize adapter address */
10834 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10835 goto error;
10836
10837 /* set basic system config settings */
10838 init_sys_config(&priv->sys_config);
10839
10840 /* Support Bluetooth if we have BT h/w on board, and the user wants to.
10841 * Does not support BT priority yet (don't abort or defer our Tx) */
10842 if (bt_coexist) {
10843 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10844
10845 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10846 priv->sys_config.bt_coexistence
10847 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10848 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10849 priv->sys_config.bt_coexistence
10850 |= CFG_BT_COEXISTENCE_OOB;
10851 }
10852
10853 #ifdef CONFIG_IPW2200_PROMISCUOUS
10854 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10855 priv->sys_config.accept_all_data_frames = 1;
10856 priv->sys_config.accept_non_directed_frames = 1;
10857 priv->sys_config.accept_all_mgmt_bcpr = 1;
10858 priv->sys_config.accept_all_mgmt_frames = 1;
10859 }
10860 #endif
10861
10862 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10863 priv->sys_config.answer_broadcast_ssid_probe = 1;
10864 else
10865 priv->sys_config.answer_broadcast_ssid_probe = 0;
10866
10867 if (ipw_send_system_config(priv))
10868 goto error;
10869
10870 init_supported_rates(priv, &priv->rates);
10871 if (ipw_send_supported_rates(priv, &priv->rates))
10872 goto error;
10873
10874 /* Set request-to-send threshold */
10875 if (priv->rts_threshold) {
10876 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10877 goto error;
10878 }
10879 #ifdef CONFIG_IPW2200_QOS
10880 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10881 ipw_qos_activate(priv, NULL);
10882 #endif /* CONFIG_IPW2200_QOS */
10883
10884 if (ipw_set_random_seed(priv))
10885 goto error;
10886
10887 /* final state transition to the RUN state */
10888 if (ipw_send_host_complete(priv))
10889 goto error;
10890
10891 priv->status |= STATUS_INIT;
10892
10893 ipw_led_init(priv);
10894 ipw_led_radio_on(priv);
10895 priv->notif_missed_beacons = 0;
10896
10897 /* Set hardware WEP key if it is configured. */
10898 if ((priv->capability & CAP_PRIVACY_ON) &&
10899 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10900 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10901 ipw_set_hwcrypto_keys(priv);
10902
10903 return 0;
10904
10905 error:
10906 return -EIO;
10907 }
10908
10909 /*
10910 * NOTE:
10911 *
10912 * These tables have been tested in conjunction with the
10913 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10914 *
10915 * Altering these values, using them on other hardware, or using them in
10916 * geographies not intended for resale of the above-mentioned Intel
10917 * adapters has not been tested.
10918 *
10919 * Remember to update the table in README.ipw2200 when changing this
10920 * table.
10921 *
10922 */
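/*
 * Each entry below is selected by the 3-character SKU/country code read
 * from EEPROM_COUNTRY_CODE (see ipw_set_geo() after the table).  The .bg
 * and .a arrays list {center frequency in MHz, channel number, optional
 * LIBIPW_CH_* flags} tuples for the 2.4 GHz and 5 GHz bands respectively.
 */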
10923 static const struct libipw_geo ipw_geos[] = {
10924 { /* Restricted */
10925 "---",
10926 .bg_channels = 11,
10927 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10928 {2427, 4}, {2432, 5}, {2437, 6},
10929 {2442, 7}, {2447, 8}, {2452, 9},
10930 {2457, 10}, {2462, 11}},
10931 },
10932
10933 { /* Custom US/Canada */
10934 "ZZF",
10935 .bg_channels = 11,
10936 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10937 {2427, 4}, {2432, 5}, {2437, 6},
10938 {2442, 7}, {2447, 8}, {2452, 9},
10939 {2457, 10}, {2462, 11}},
10940 .a_channels = 8,
10941 .a = {{5180, 36},
10942 {5200, 40},
10943 {5220, 44},
10944 {5240, 48},
10945 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10946 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10947 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10948 {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
10949 },
10950
10951 { /* Rest of World */
10952 "ZZD",
10953 .bg_channels = 13,
10954 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10955 {2427, 4}, {2432, 5}, {2437, 6},
10956 {2442, 7}, {2447, 8}, {2452, 9},
10957 {2457, 10}, {2462, 11}, {2467, 12},
10958 {2472, 13}},
10959 },
10960
10961 { /* Custom USA & Europe & High */
10962 "ZZA",
10963 .bg_channels = 11,
10964 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10965 {2427, 4}, {2432, 5}, {2437, 6},
10966 {2442, 7}, {2447, 8}, {2452, 9},
10967 {2457, 10}, {2462, 11}},
10968 .a_channels = 13,
10969 .a = {{5180, 36},
10970 {5200, 40},
10971 {5220, 44},
10972 {5240, 48},
10973 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10974 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10975 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10976 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10977 {5745, 149},
10978 {5765, 153},
10979 {5785, 157},
10980 {5805, 161},
10981 {5825, 165}},
10982 },
10983
10984 { /* Custom NA & Europe */
10985 "ZZB",
10986 .bg_channels = 11,
10987 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10988 {2427, 4}, {2432, 5}, {2437, 6},
10989 {2442, 7}, {2447, 8}, {2452, 9},
10990 {2457, 10}, {2462, 11}},
10991 .a_channels = 13,
10992 .a = {{5180, 36},
10993 {5200, 40},
10994 {5220, 44},
10995 {5240, 48},
10996 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10997 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10998 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10999 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11000 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11001 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11002 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11003 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11004 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11005 },
11006
11007 { /* Custom Japan */
11008 "ZZC",
11009 .bg_channels = 11,
11010 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11011 {2427, 4}, {2432, 5}, {2437, 6},
11012 {2442, 7}, {2447, 8}, {2452, 9},
11013 {2457, 10}, {2462, 11}},
11014 .a_channels = 4,
11015 .a = {{5170, 34}, {5190, 38},
11016 {5210, 42}, {5230, 46}},
11017 },
11018
11019 { /* Custom */
11020 "ZZM",
11021 .bg_channels = 11,
11022 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11023 {2427, 4}, {2432, 5}, {2437, 6},
11024 {2442, 7}, {2447, 8}, {2452, 9},
11025 {2457, 10}, {2462, 11}},
11026 },
11027
11028 { /* Europe */
11029 "ZZE",
11030 .bg_channels = 13,
11031 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11032 {2427, 4}, {2432, 5}, {2437, 6},
11033 {2442, 7}, {2447, 8}, {2452, 9},
11034 {2457, 10}, {2462, 11}, {2467, 12},
11035 {2472, 13}},
11036 .a_channels = 19,
11037 .a = {{5180, 36},
11038 {5200, 40},
11039 {5220, 44},
11040 {5240, 48},
11041 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11042 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11043 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11044 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11045 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11046 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11047 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11048 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11049 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11050 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11051 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11052 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11053 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11054 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11055 {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11056 },
11057
11058 { /* Custom Japan */
11059 "ZZJ",
11060 .bg_channels = 14,
11061 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11062 {2427, 4}, {2432, 5}, {2437, 6},
11063 {2442, 7}, {2447, 8}, {2452, 9},
11064 {2457, 10}, {2462, 11}, {2467, 12},
11065 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11066 .a_channels = 4,
11067 .a = {{5170, 34}, {5190, 38},
11068 {5210, 42}, {5230, 46}},
11069 },
11070
11071 { /* Rest of World */
11072 "ZZR",
11073 .bg_channels = 14,
11074 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11075 {2427, 4}, {2432, 5}, {2437, 6},
11076 {2442, 7}, {2447, 8}, {2452, 9},
11077 {2457, 10}, {2462, 11}, {2467, 12},
11078 {2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11079 LIBIPW_CH_PASSIVE_ONLY}},
11080 },
11081
11082 { /* High Band */
11083 "ZZH",
11084 .bg_channels = 13,
11085 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11086 {2427, 4}, {2432, 5}, {2437, 6},
11087 {2442, 7}, {2447, 8}, {2452, 9},
11088 {2457, 10}, {2462, 11},
11089 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11090 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11091 .a_channels = 4,
11092 .a = {{5745, 149}, {5765, 153},
11093 {5785, 157}, {5805, 161}},
11094 },
11095
11096 { /* Custom Europe */
11097 "ZZG",
11098 .bg_channels = 13,
11099 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11100 {2427, 4}, {2432, 5}, {2437, 6},
11101 {2442, 7}, {2447, 8}, {2452, 9},
11102 {2457, 10}, {2462, 11},
11103 {2467, 12}, {2472, 13}},
11104 .a_channels = 4,
11105 .a = {{5180, 36}, {5200, 40},
11106 {5220, 44}, {5240, 48}},
11107 },
11108
11109 { /* Europe */
11110 "ZZK",
11111 .bg_channels = 13,
11112 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11113 {2427, 4}, {2432, 5}, {2437, 6},
11114 {2442, 7}, {2447, 8}, {2452, 9},
11115 {2457, 10}, {2462, 11},
11116 {2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11117 {2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11118 .a_channels = 24,
11119 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11120 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11121 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11122 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11123 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11124 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11125 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11126 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11127 {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11128 {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11129 {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11130 {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11131 {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11132 {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11133 {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11134 {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11135 {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11136 {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11137 {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11138 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11139 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11140 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11141 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11142 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11143 },
11144
11145 { /* Europe */
11146 "ZZL",
11147 .bg_channels = 11,
11148 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11149 {2427, 4}, {2432, 5}, {2437, 6},
11150 {2442, 7}, {2447, 8}, {2452, 9},
11151 {2457, 10}, {2462, 11}},
11152 .a_channels = 13,
11153 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11154 {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11155 {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11156 {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11157 {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11158 {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11159 {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11160 {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11161 {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11162 {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11163 {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11164 {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11165 {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11166 }
11167 };
11168
11169 static void ipw_set_geo(struct ipw_priv *priv)
11170 {
11171 int j;
11172
11173 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11174 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11175 ipw_geos[j].name, 3))
11176 break;
11177 }
11178
11179 if (j == ARRAY_SIZE(ipw_geos)) {
11180 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11181 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11182 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11183 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11184 j = 0;
11185 }
11186
11187 libipw_set_geo(priv->ieee, &ipw_geos[j]);
11188 }
11189
11190 #define MAX_HW_RESTARTS 5
11191 static int ipw_up(struct ipw_priv *priv)
11192 {
11193 int rc, i;
11194
11195 /* Age scan list entries found before suspend */
11196 if (priv->suspend_time) {
11197 libipw_networks_age(priv->ieee, priv->suspend_time);
11198 priv->suspend_time = 0;
11199 }
11200
11201 if (priv->status & STATUS_EXIT_PENDING)
11202 return -EIO;
11203
11204 if (cmdlog && !priv->cmdlog) {
11205 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11206 GFP_KERNEL);
11207 if (priv->cmdlog == NULL) {
11208 IPW_ERROR("Error allocating %d command log entries.\n",
11209 cmdlog);
11210 return -ENOMEM;
11211 } else {
11212 priv->cmdlog_len = cmdlog;
11213 }
11214 }
11215
11216 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11217 /* Load the microcode, firmware, and eeprom.
11218 * Also start the clocks. */
11219 rc = ipw_load(priv);
11220 if (rc) {
11221 IPW_ERROR("Unable to load firmware: %d\n", rc);
11222 return rc;
11223 }
11224
11225 ipw_init_ordinals(priv);
11226 if (!(priv->config & CFG_CUSTOM_MAC))
11227 eeprom_parse_mac(priv, priv->mac_addr);
11228 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11229
11230 ipw_set_geo(priv);
11231
11232 if (priv->status & STATUS_RF_KILL_SW) {
11233 IPW_WARNING("Radio disabled by module parameter.\n");
11234 return 0;
11235 } else if (rf_kill_active(priv)) {
11236 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11237 "Kill switch must be turned off for "
11238 "wireless networking to work.\n");
11239 schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11240 return 0;
11241 }
11242
11243 rc = ipw_config(priv);
11244 if (!rc) {
11245 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11246
11247 /* If configured to try to auto-associate, kick
11248 * off a scan. */
11249 schedule_delayed_work(&priv->request_scan, 0);
11250
11251 return 0;
11252 }
11253
11254 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11255 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11256 i, MAX_HW_RESTARTS);
11257
11258 /* We had an error bringing up the hardware, so take it
11259 * all the way back down so we can try again */
11260 ipw_down(priv);
11261 }
11262
11263 /* We tried to restart and configure the device for as long as our
11264 * patience could withstand */
11265 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11266
11267 return -EIO;
11268 }
11269
11270 static void ipw_bg_up(struct work_struct *work)
11271 {
11272 struct ipw_priv *priv =
11273 container_of(work, struct ipw_priv, up);
11274 mutex_lock(&priv->mutex);
11275 ipw_up(priv);
11276 mutex_unlock(&priv->mutex);
11277 }
11278
11279 static void ipw_deinit(struct ipw_priv *priv)
11280 {
11281 int i;
11282
11283 if (priv->status & STATUS_SCANNING) {
11284 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11285 ipw_abort_scan(priv);
11286 }
11287
11288 if (priv->status & STATUS_ASSOCIATED) {
11289 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11290 ipw_disassociate(priv);
11291 }
11292
11293 ipw_led_shutdown(priv);
11294
11295 /* Wait up to 1s for status to change to not scanning and not
11296 * associated (disassociation can take a while for a full 802.11
11297 * exchange) */
11298 for (i = 1000; i && (priv->status &
11299 (STATUS_DISASSOCIATING |
11300 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11301 udelay(10);
11302
11303 if (priv->status & (STATUS_DISASSOCIATING |
11304 STATUS_ASSOCIATED | STATUS_SCANNING))
11305 IPW_DEBUG_INFO("Still associated or scanning...\n");
11306 else
11307 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11308
11309 /* Attempt to disable the card */
11310 ipw_send_card_disable(priv, 0);
11311
11312 priv->status &= ~STATUS_INIT;
11313 }
11314
11315 static void ipw_down(struct ipw_priv *priv)
11316 {
11317 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11318
11319 priv->status |= STATUS_EXIT_PENDING;
11320
11321 if (ipw_is_init(priv))
11322 ipw_deinit(priv);
11323
11324 /* Wipe out the EXIT_PENDING status bit if we are not actually
11325 * exiting the module */
11326 if (!exit_pending)
11327 priv->status &= ~STATUS_EXIT_PENDING;
11328
11329 /* tell the device to stop sending interrupts */
11330 ipw_disable_interrupts(priv);
11331
11332 /* Clear all bits but the RF Kill */
11333 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11334 netif_carrier_off(priv->net_dev);
11335
11336 ipw_stop_nic(priv);
11337
11338 ipw_led_radio_off(priv);
11339 }
11340
11341 static void ipw_bg_down(struct work_struct *work)
11342 {
11343 struct ipw_priv *priv =
11344 container_of(work, struct ipw_priv, down);
11345 mutex_lock(&priv->mutex);
11346 ipw_down(priv);
11347 mutex_unlock(&priv->mutex);
11348 }
11349
11350 static int ipw_wdev_init(struct net_device *dev)
11351 {
11352 int i, rc = 0;
11353 struct ipw_priv *priv = libipw_priv(dev);
11354 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11355 struct wireless_dev *wdev = &priv->ieee->wdev;
11356
11357 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11358
11359 /* fill-out priv->ieee->bg_band */
11360 if (geo->bg_channels) {
11361 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11362
11363 bg_band->band = IEEE80211_BAND_2GHZ;
11364 bg_band->n_channels = geo->bg_channels;
11365 bg_band->channels = kcalloc(geo->bg_channels,
11366 sizeof(struct ieee80211_channel),
11367 GFP_KERNEL);
11368 if (!bg_band->channels) {
11369 rc = -ENOMEM;
11370 goto out;
11371 }
11372 /* translate geo->bg to bg_band.channels */
11373 for (i = 0; i < geo->bg_channels; i++) {
11374 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11375 bg_band->channels[i].center_freq = geo->bg[i].freq;
11376 bg_band->channels[i].hw_value = geo->bg[i].channel;
11377 bg_band->channels[i].max_power = geo->bg[i].max_power;
11378 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11379 bg_band->channels[i].flags |=
11380 IEEE80211_CHAN_NO_IR;
11381 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11382 bg_band->channels[i].flags |=
11383 IEEE80211_CHAN_NO_IR;
11384 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11385 bg_band->channels[i].flags |=
11386 IEEE80211_CHAN_RADAR;
11387 /* No equivalent for LIBIPW_CH_80211H_RULES,
11388 LIBIPW_CH_UNIFORM_SPREADING, or
11389 LIBIPW_CH_B_ONLY... */
11390 }
11391 /* point at bitrate info */
11392 bg_band->bitrates = ipw2200_bg_rates;
11393 bg_band->n_bitrates = ipw2200_num_bg_rates;
11394
11395 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11396 }
11397
11398 /* fill-out priv->ieee->a_band */
11399 if (geo->a_channels) {
11400 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11401
11402 a_band->band = IEEE80211_BAND_5GHZ;
11403 a_band->n_channels = geo->a_channels;
11404 a_band->channels = kcalloc(geo->a_channels,
11405 sizeof(struct ieee80211_channel),
11406 GFP_KERNEL);
11407 if (!a_band->channels) {
11408 rc = -ENOMEM;
11409 goto out;
11410 }
11411 /* translate geo->a to a_band.channels */
11412 for (i = 0; i < geo->a_channels; i++) {
11413 a_band->channels[i].band = IEEE80211_BAND_5GHZ;
11414 a_band->channels[i].center_freq = geo->a[i].freq;
11415 a_band->channels[i].hw_value = geo->a[i].channel;
11416 a_band->channels[i].max_power = geo->a[i].max_power;
11417 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11418 a_band->channels[i].flags |=
11419 IEEE80211_CHAN_NO_IR;
11420 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11421 a_band->channels[i].flags |=
11422 IEEE80211_CHAN_NO_IR;
11423 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11424 a_band->channels[i].flags |=
11425 IEEE80211_CHAN_RADAR;
11426 /* No equivalent for LIBIPW_CH_80211H_RULES,
11427 LIBIPW_CH_UNIFORM_SPREADING, or
11428 LIBIPW_CH_B_ONLY... */
11429 }
11430 /* point at bitrate info */
11431 a_band->bitrates = ipw2200_a_rates;
11432 a_band->n_bitrates = ipw2200_num_a_rates;
11433
11434 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11435 }
11436
11437 wdev->wiphy->cipher_suites = ipw_cipher_suites;
11438 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11439
11440 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11441
11442 /* With that information in place, we can now register the wiphy... */
11443 if (wiphy_register(wdev->wiphy))
11444 rc = -EIO;
11445 out:
11446 return rc;
11447 }
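/* Note: the per-band channel arrays allocated in ipw_wdev_init() are freed
 * in ipw_pci_remove() and in the probe error path (kfree of
 * a_band.channels and bg_band.channels), after wiphy_unregister(). */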
11448
11449 /* PCI driver stuff */
11450 static const struct pci_device_id card_ids[] = {
11451 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11452 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11453 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11454 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11455 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11456 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11457 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11458 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11459 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11460 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11461 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11462 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11463 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11464 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11465 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11466 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11467 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11468 {PCI_VDEVICE(INTEL, 0x104f), 0},
11469 {PCI_VDEVICE(INTEL, 0x4220), 0}, /* BG */
11470 {PCI_VDEVICE(INTEL, 0x4221), 0}, /* BG */
11471 {PCI_VDEVICE(INTEL, 0x4223), 0}, /* ABG */
11472 {PCI_VDEVICE(INTEL, 0x4224), 0}, /* ABG */
11473
11474 /* required last entry */
11475 {0,}
11476 };
11477
11478 MODULE_DEVICE_TABLE(pci, card_ids);
11479
11480 static struct attribute *ipw_sysfs_entries[] = {
11481 &dev_attr_rf_kill.attr,
11482 &dev_attr_direct_dword.attr,
11483 &dev_attr_indirect_byte.attr,
11484 &dev_attr_indirect_dword.attr,
11485 &dev_attr_mem_gpio_reg.attr,
11486 &dev_attr_command_event_reg.attr,
11487 &dev_attr_nic_type.attr,
11488 &dev_attr_status.attr,
11489 &dev_attr_cfg.attr,
11490 &dev_attr_error.attr,
11491 &dev_attr_event_log.attr,
11492 &dev_attr_cmd_log.attr,
11493 &dev_attr_eeprom_delay.attr,
11494 &dev_attr_ucode_version.attr,
11495 &dev_attr_rtc.attr,
11496 &dev_attr_scan_age.attr,
11497 &dev_attr_led.attr,
11498 &dev_attr_speed_scan.attr,
11499 &dev_attr_net_stats.attr,
11500 &dev_attr_channels.attr,
11501 #ifdef CONFIG_IPW2200_PROMISCUOUS
11502 &dev_attr_rtap_iface.attr,
11503 &dev_attr_rtap_filter.attr,
11504 #endif
11505 NULL
11506 };
11507
11508 static struct attribute_group ipw_attribute_group = {
11509 .name = NULL, /* put in device directory */
11510 .attrs = ipw_sysfs_entries,
11511 };
11512
11513 #ifdef CONFIG_IPW2200_PROMISCUOUS
11514 static int ipw_prom_open(struct net_device *dev)
11515 {
11516 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11517 struct ipw_priv *priv = prom_priv->priv;
11518
11519 IPW_DEBUG_INFO("prom dev->open\n");
11520 netif_carrier_off(dev);
11521
11522 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11523 priv->sys_config.accept_all_data_frames = 1;
11524 priv->sys_config.accept_non_directed_frames = 1;
11525 priv->sys_config.accept_all_mgmt_bcpr = 1;
11526 priv->sys_config.accept_all_mgmt_frames = 1;
11527
11528 ipw_send_system_config(priv);
11529 }
11530
11531 return 0;
11532 }
11533
11534 static int ipw_prom_stop(struct net_device *dev)
11535 {
11536 struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11537 struct ipw_priv *priv = prom_priv->priv;
11538
11539 IPW_DEBUG_INFO("prom dev->stop\n");
11540
11541 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11542 priv->sys_config.accept_all_data_frames = 0;
11543 priv->sys_config.accept_non_directed_frames = 0;
11544 priv->sys_config.accept_all_mgmt_bcpr = 0;
11545 priv->sys_config.accept_all_mgmt_frames = 0;
11546
11547 ipw_send_system_config(priv);
11548 }
11549
11550 return 0;
11551 }
11552
11553 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11554 struct net_device *dev)
11555 {
11556 IPW_DEBUG_INFO("prom dev->xmit\n");
11557 dev_kfree_skb(skb);
11558 return NETDEV_TX_OK;
11559 }
11560
11561 static const struct net_device_ops ipw_prom_netdev_ops = {
11562 .ndo_open = ipw_prom_open,
11563 .ndo_stop = ipw_prom_stop,
11564 .ndo_start_xmit = ipw_prom_hard_start_xmit,
11565 .ndo_change_mtu = libipw_change_mtu,
11566 .ndo_set_mac_address = eth_mac_addr,
11567 .ndo_validate_addr = eth_validate_addr,
11568 };
11569
11570 static int ipw_prom_alloc(struct ipw_priv *priv)
11571 {
11572 int rc = 0;
11573
11574 if (priv->prom_net_dev)
11575 return -EPERM;
11576
11577 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11578 if (priv->prom_net_dev == NULL)
11579 return -ENOMEM;
11580
11581 priv->prom_priv = libipw_priv(priv->prom_net_dev);
11582 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11583 priv->prom_priv->priv = priv;
11584
11585 strcpy(priv->prom_net_dev->name, "rtap%d");
11586 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11587
11588 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11589 priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11590
11591 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11592 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11593
11594 rc = register_netdev(priv->prom_net_dev);
11595 if (rc) {
11596 free_libipw(priv->prom_net_dev, 1);
11597 priv->prom_net_dev = NULL;
11598 return rc;
11599 }
11600
11601 return 0;
11602 }
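/* ipw_prom_alloc() above is called from ipw_pci_probe() when the module is
 * loaded with rtap_iface=1 (module parameter near the end of this file).
 * It creates a second net_device named "rtap%d" in radiotap monitor mode so
 * a sniffer can be attached to it, e.g. "tcpdump -i rtap0" (assuming the
 * first instance is named rtap0). */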
11603
11604 static void ipw_prom_free(struct ipw_priv *priv)
11605 {
11606 if (!priv->prom_net_dev)
11607 return;
11608
11609 unregister_netdev(priv->prom_net_dev);
11610 free_libipw(priv->prom_net_dev, 1);
11611
11612 priv->prom_net_dev = NULL;
11613 }
11614
11615 #endif
11616
11617 static const struct net_device_ops ipw_netdev_ops = {
11618 .ndo_open = ipw_net_open,
11619 .ndo_stop = ipw_net_stop,
11620 .ndo_set_rx_mode = ipw_net_set_multicast_list,
11621 .ndo_set_mac_address = ipw_net_set_mac_address,
11622 .ndo_start_xmit = libipw_xmit,
11623 .ndo_change_mtu = libipw_change_mtu,
11624 .ndo_validate_addr = eth_validate_addr,
11625 };
11626
11627 static int ipw_pci_probe(struct pci_dev *pdev,
11628 const struct pci_device_id *ent)
11629 {
11630 int err = 0;
11631 struct net_device *net_dev;
11632 void __iomem *base;
11633 u32 length, val;
11634 struct ipw_priv *priv;
11635 int i;
11636
11637 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11638 if (net_dev == NULL) {
11639 err = -ENOMEM;
11640 goto out;
11641 }
11642
11643 priv = libipw_priv(net_dev);
11644 priv->ieee = netdev_priv(net_dev);
11645
11646 priv->net_dev = net_dev;
11647 priv->pci_dev = pdev;
11648 ipw_debug_level = debug;
11649 spin_lock_init(&priv->irq_lock);
11650 spin_lock_init(&priv->lock);
11651 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11652 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11653
11654 mutex_init(&priv->mutex);
11655 if (pci_enable_device(pdev)) {
11656 err = -ENODEV;
11657 goto out_free_libipw;
11658 }
11659
11660 pci_set_master(pdev);
11661
11662 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11663 if (!err)
11664 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11665 if (err) {
11666 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11667 goto out_pci_disable_device;
11668 }
11669
11670 pci_set_drvdata(pdev, priv);
11671
11672 err = pci_request_regions(pdev, DRV_NAME);
11673 if (err)
11674 goto out_pci_disable_device;
11675
11676 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11677 * PCI Tx retries from interfering with C3 CPU state */
11678 pci_read_config_dword(pdev, 0x40, &val);
11679 if ((val & 0x0000ff00) != 0)
11680 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11681
11682 length = pci_resource_len(pdev, 0);
11683 priv->hw_len = length;
11684
11685 base = pci_ioremap_bar(pdev, 0);
11686 if (!base) {
11687 err = -ENODEV;
11688 goto out_pci_release_regions;
11689 }
11690
11691 priv->hw_base = base;
11692 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11693 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11694
11695 err = ipw_setup_deferred_work(priv);
11696 if (err) {
11697 IPW_ERROR("Unable to setup deferred work\n");
11698 goto out_iounmap;
11699 }
11700
11701 ipw_sw_reset(priv, 1);
11702
11703 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11704 if (err) {
11705 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11706 goto out_iounmap;
11707 }
11708
11709 SET_NETDEV_DEV(net_dev, &pdev->dev);
11710
11711 mutex_lock(&priv->mutex);
11712
11713 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11714 priv->ieee->set_security = shim__set_security;
11715 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11716
11717 #ifdef CONFIG_IPW2200_QOS
11718 priv->ieee->is_qos_active = ipw_is_qos_active;
11719 priv->ieee->handle_probe_response = ipw_handle_beacon;
11720 priv->ieee->handle_beacon = ipw_handle_probe_response;
11721 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11722 #endif /* CONFIG_IPW2200_QOS */
11723
11724 priv->ieee->perfect_rssi = -20;
11725 priv->ieee->worst_rssi = -85;
11726
11727 net_dev->netdev_ops = &ipw_netdev_ops;
11728 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11729 net_dev->wireless_data = &priv->wireless_data;
11730 net_dev->wireless_handlers = &ipw_wx_handler_def;
11731 net_dev->ethtool_ops = &ipw_ethtool_ops;
11732
11733 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11734 if (err) {
11735 IPW_ERROR("failed to create sysfs device attributes\n");
11736 mutex_unlock(&priv->mutex);
11737 goto out_release_irq;
11738 }
11739
11740 if (ipw_up(priv)) {
11741 mutex_unlock(&priv->mutex);
11742 err = -EIO;
11743 goto out_remove_sysfs;
11744 }
11745
11746 mutex_unlock(&priv->mutex);
11747
11748 err = ipw_wdev_init(net_dev);
11749 if (err) {
11750 IPW_ERROR("failed to register wireless device\n");
11751 goto out_remove_sysfs;
11752 }
11753
11754 err = register_netdev(net_dev);
11755 if (err) {
11756 IPW_ERROR("failed to register network device\n");
11757 goto out_unregister_wiphy;
11758 }
11759
11760 #ifdef CONFIG_IPW2200_PROMISCUOUS
11761 if (rtap_iface) {
11762 err = ipw_prom_alloc(priv);
11763 if (err) {
11764 IPW_ERROR("Failed to register promiscuous network "
11765 "device (error %d).\n", err);
11766 unregister_netdev(priv->net_dev);
11767 goto out_unregister_wiphy;
11768 }
11769 }
11770 #endif
11771
11772 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11773 "channels, %d 802.11a channels)\n",
11774 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11775 priv->ieee->geo.a_channels);
11776
11777 return 0;
11778
11779 out_unregister_wiphy:
11780 wiphy_unregister(priv->ieee->wdev.wiphy);
11781 kfree(priv->ieee->a_band.channels);
11782 kfree(priv->ieee->bg_band.channels);
11783 out_remove_sysfs:
11784 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11785 out_release_irq:
11786 free_irq(pdev->irq, priv);
11787 out_iounmap:
11788 iounmap(priv->hw_base);
11789 out_pci_release_regions:
11790 pci_release_regions(pdev);
11791 out_pci_disable_device:
11792 pci_disable_device(pdev);
11793 out_free_libipw:
11794 free_libipw(priv->net_dev, 0);
11795 out:
11796 return err;
11797 }
11798
11799 static void ipw_pci_remove(struct pci_dev *pdev)
11800 {
11801 struct ipw_priv *priv = pci_get_drvdata(pdev);
11802 struct list_head *p, *q;
11803 int i;
11804
11805 if (!priv)
11806 return;
11807
11808 mutex_lock(&priv->mutex);
11809
11810 priv->status |= STATUS_EXIT_PENDING;
11811 ipw_down(priv);
11812 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11813
11814 mutex_unlock(&priv->mutex);
11815
11816 unregister_netdev(priv->net_dev);
11817
11818 if (priv->rxq) {
11819 ipw_rx_queue_free(priv, priv->rxq);
11820 priv->rxq = NULL;
11821 }
11822 ipw_tx_queue_free(priv);
11823
11824 if (priv->cmdlog) {
11825 kfree(priv->cmdlog);
11826 priv->cmdlog = NULL;
11827 }
11828
11829 /* make sure all work items are inactive */
11830 cancel_delayed_work_sync(&priv->adhoc_check);
11831 cancel_work_sync(&priv->associate);
11832 cancel_work_sync(&priv->disassociate);
11833 cancel_work_sync(&priv->system_config);
11834 cancel_work_sync(&priv->rx_replenish);
11835 cancel_work_sync(&priv->adapter_restart);
11836 cancel_delayed_work_sync(&priv->rf_kill);
11837 cancel_work_sync(&priv->up);
11838 cancel_work_sync(&priv->down);
11839 cancel_delayed_work_sync(&priv->request_scan);
11840 cancel_delayed_work_sync(&priv->request_direct_scan);
11841 cancel_delayed_work_sync(&priv->request_passive_scan);
11842 cancel_delayed_work_sync(&priv->scan_event);
11843 cancel_delayed_work_sync(&priv->gather_stats);
11844 cancel_work_sync(&priv->abort_scan);
11845 cancel_work_sync(&priv->roam);
11846 cancel_delayed_work_sync(&priv->scan_check);
11847 cancel_work_sync(&priv->link_up);
11848 cancel_work_sync(&priv->link_down);
11849 cancel_delayed_work_sync(&priv->led_link_on);
11850 cancel_delayed_work_sync(&priv->led_link_off);
11851 cancel_delayed_work_sync(&priv->led_act_off);
11852 cancel_work_sync(&priv->merge_networks);
11853
11854 /* Free MAC hash list for ADHOC */
11855 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11856 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11857 list_del(p);
11858 kfree(list_entry(p, struct ipw_ibss_seq, list));
11859 }
11860 }
11861
11862 kfree(priv->error);
11863 priv->error = NULL;
11864
11865 #ifdef CONFIG_IPW2200_PROMISCUOUS
11866 ipw_prom_free(priv);
11867 #endif
11868
11869 free_irq(pdev->irq, priv);
11870 iounmap(priv->hw_base);
11871 pci_release_regions(pdev);
11872 pci_disable_device(pdev);
11873 /* wiphy_unregister needs to be here, before free_libipw */
11874 wiphy_unregister(priv->ieee->wdev.wiphy);
11875 kfree(priv->ieee->a_band.channels);
11876 kfree(priv->ieee->bg_band.channels);
11877 free_libipw(priv->net_dev, 0);
11878 free_firmware();
11879 }
11880
11881 #ifdef CONFIG_PM
11882 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11883 {
11884 struct ipw_priv *priv = pci_get_drvdata(pdev);
11885 struct net_device *dev = priv->net_dev;
11886
11887 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11888
11889 /* Take down the device; powers it off, etc. */
11890 ipw_down(priv);
11891
11892 /* Remove the PRESENT state of the device */
11893 netif_device_detach(dev);
11894
11895 pci_save_state(pdev);
11896 pci_disable_device(pdev);
11897 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11898
11899 priv->suspend_at = get_seconds();
11900
11901 return 0;
11902 }
11903
11904 static int ipw_pci_resume(struct pci_dev *pdev)
11905 {
11906 struct ipw_priv *priv = pci_get_drvdata(pdev);
11907 struct net_device *dev = priv->net_dev;
11908 int err;
11909 u32 val;
11910
11911 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11912
11913 pci_set_power_state(pdev, PCI_D0);
11914 err = pci_enable_device(pdev);
11915 if (err) {
11916 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11917 dev->name);
11918 return err;
11919 }
11920 pci_restore_state(pdev);
11921
11922 /*
11923 * Suspend/Resume resets the PCI configuration space, so we have to
11924 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11925 * from interfering with C3 CPU state. pci_restore_state won't help
11926 * here since it only restores the first 64 bytes of the PCI config header.
11927 */
11928 pci_read_config_dword(pdev, 0x40, &val);
11929 if ((val & 0x0000ff00) != 0)
11930 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11931
11932 /* Set the device back into the PRESENT state; this will also wake
11933 * the queue if needed */
11934 netif_device_attach(dev);
11935
11936 priv->suspend_time = get_seconds() - priv->suspend_at;
11937
11938 /* Bring the device back up */
11939 schedule_work(&priv->up);
11940
11941 return 0;
11942 }
11943 #endif
11944
11945 static void ipw_pci_shutdown(struct pci_dev *pdev)
11946 {
11947 struct ipw_priv *priv = pci_get_drvdata(pdev);
11948
11949 /* Take down the device; powers it off, etc. */
11950 ipw_down(priv);
11951
11952 pci_disable_device(pdev);
11953 }
11954
11955 /* driver initialization stuff */
11956 static struct pci_driver ipw_driver = {
11957 .name = DRV_NAME,
11958 .id_table = card_ids,
11959 .probe = ipw_pci_probe,
11960 .remove = ipw_pci_remove,
11961 #ifdef CONFIG_PM
11962 .suspend = ipw_pci_suspend,
11963 .resume = ipw_pci_resume,
11964 #endif
11965 .shutdown = ipw_pci_shutdown,
11966 };
11967
11968 static int __init ipw_init(void)
11969 {
11970 int ret;
11971
11972 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11973 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11974
11975 ret = pci_register_driver(&ipw_driver);
11976 if (ret) {
11977 IPW_ERROR("Unable to initialize PCI module\n");
11978 return ret;
11979 }
11980
11981 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11982 if (ret) {
11983 IPW_ERROR("Unable to create driver sysfs file\n");
11984 pci_unregister_driver(&ipw_driver);
11985 return ret;
11986 }
11987
11988 return ret;
11989 }
11990
11991 static void __exit ipw_exit(void)
11992 {
11993 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11994 pci_unregister_driver(&ipw_driver);
11995 }
11996
11997 module_param(disable, int, 0444);
11998 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11999
12000 module_param(associate, int, 0444);
12001 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12002
12003 module_param(auto_create, int, 0444);
12004 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12005
12006 module_param_named(led, led_support, int, 0444);
12007 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12008
12009 module_param(debug, int, 0444);
12010 MODULE_PARM_DESC(debug, "debug output mask");
12011
12012 module_param_named(channel, default_channel, int, 0444);
12013 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
12014
12015 #ifdef CONFIG_IPW2200_PROMISCUOUS
12016 module_param(rtap_iface, int, 0444);
12017 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12018 #endif
12019
12020 #ifdef CONFIG_IPW2200_QOS
12021 module_param(qos_enable, int, 0444);
12022 MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
12023
12024 module_param(qos_burst_enable, int, 0444);
12025 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12026
12027 module_param(qos_no_ack_mask, int, 0444);
12028 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12029
12030 module_param(burst_duration_CCK, int, 0444);
12031 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12032
12033 module_param(burst_duration_OFDM, int, 0444);
12034 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12035 #endif /* CONFIG_IPW2200_QOS */
12036
12037 #ifdef CONFIG_IPW2200_MONITOR
12038 module_param_named(mode, network_mode, int, 0444);
12039 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12040 #else
12041 module_param_named(mode, network_mode, int, 0444);
12042 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12043 #endif
12044
12045 module_param(bt_coexist, int, 0444);
12046 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12047
12048 module_param(hwcrypto, int, 0444);
12049 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12050
12051 module_param(cmdlog, int, 0444);
12052 MODULE_PARM_DESC(cmdlog,
12053 "allocate a ring buffer for logging firmware commands");
12054
12055 module_param(roaming, int, 0444);
12056 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12057
12058 module_param(antenna, int, 0444);
12059 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
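/* Example invocation (hypothetical values, for illustration only):
 *
 *	modprobe ipw2200 associate=0 auto_create=1 led=1 channel=0 debug=0
 *
 * All of the parameters above are registered with 0444 permissions, so
 * their current values can be read back from
 * /sys/module/ipw2200/parameters/. */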
12060
12061 module_exit(ipw_exit);
12062 module_init(ipw_init);
12063