1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /****************************************************************************** |
3 | * |
4 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. |
5 | * |
6 | * Contact Information: |
7 | * Intel Linux Wireless <ilw@linux.intel.com> |
8 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
9 | *****************************************************************************/ |
10 | |
11 | #include <linux/kernel.h> |
12 | #include <linux/module.h> |
13 | #include <linux/etherdevice.h> |
14 | #include <linux/sched.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/types.h> |
17 | #include <linux/lockdep.h> |
18 | #include <linux/pci.h> |
19 | #include <linux/dma-mapping.h> |
20 | #include <linux/delay.h> |
21 | #include <linux/skbuff.h> |
22 | #include <net/mac80211.h> |
23 | |
24 | #include "common.h" |
25 | |
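/*
 * _il_poll_bit - poll a CSR register until (reg & mask) == (bits & mask)
 *
 * Returns the elapsed time in microseconds on success, or -ETIMEDOUT if
 * the condition was not met within @timeout microseconds.
 */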
26 | int |
27 | _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout) |
28 | { |
29 | const int interval = 10; /* microseconds */ |
30 | int t = 0; |
31 | |
32 | do { |
		if ((_il_rd(il, addr) & mask) == (bits & mask))
34 | return t; |
35 | udelay(interval); |
36 | t += interval; |
37 | } while (t < timeout); |
38 | |
39 | return -ETIMEDOUT; |
40 | } |
41 | EXPORT_SYMBOL(_il_poll_bit); |
42 | |
43 | void |
44 | il_set_bit(struct il_priv *p, u32 r, u32 m) |
45 | { |
46 | unsigned long reg_flags; |
47 | |
48 | spin_lock_irqsave(&p->reg_lock, reg_flags); |
	_il_set_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
51 | } |
52 | EXPORT_SYMBOL(il_set_bit); |
53 | |
54 | void |
55 | il_clear_bit(struct il_priv *p, u32 r, u32 m) |
56 | { |
57 | unsigned long reg_flags; |
58 | |
59 | spin_lock_irqsave(&p->reg_lock, reg_flags); |
	_il_clear_bit(p, r, m);
	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
62 | } |
63 | EXPORT_SYMBOL(il_clear_bit); |
64 | |
65 | bool |
66 | _il_grab_nic_access(struct il_priv *il) |
67 | { |
68 | int ret; |
69 | u32 val; |
70 | |
71 | /* this bit wakes up the NIC */ |
72 | _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
73 | |
74 | /* |
75 | * These bits say the device is running, and should keep running for |
76 | * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), |
77 | * but they do not indicate that embedded SRAM is restored yet; |
78 | * 3945 and 4965 have volatile SRAM, and must save/restore contents |
79 | * to/from host DRAM when sleeping/waking for power-saving. |
80 | * Each direction takes approximately 1/4 millisecond; with this |
81 | * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a |
82 | * series of register accesses are expected (e.g. reading Event Log), |
83 | * to keep device from sleeping. |
84 | * |
85 | * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that |
86 | * SRAM is okay/restored. We don't check that here because this call |
87 | * is just for hardware register access; but GP1 MAC_SLEEP check is a |
88 | * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). |
89 | * |
90 | */ |
91 | ret = |
92 | _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, |
93 | (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | |
94 | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); |
95 | if (unlikely(ret < 0)) { |
96 | val = _il_rd(il, CSR_GP_CNTRL); |
97 | WARN_ONCE(1, "Timeout waiting for ucode processor access " |
98 | "(CSR_GP_CNTRL 0x%08x)\n" , val); |
99 | _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); |
100 | return false; |
101 | } |
102 | |
103 | return true; |
104 | } |
105 | EXPORT_SYMBOL_GPL(_il_grab_nic_access); |
106 | |
107 | int |
108 | il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout) |
109 | { |
110 | const int interval = 10; /* microseconds */ |
111 | int t = 0; |
112 | |
113 | do { |
		if ((il_rd(il, addr) & mask) == mask)
115 | return t; |
116 | udelay(interval); |
117 | t += interval; |
118 | } while (t < timeout); |
119 | |
120 | return -ETIMEDOUT; |
121 | } |
122 | EXPORT_SYMBOL(il_poll_bit); |
123 | |
124 | u32 |
125 | il_rd_prph(struct il_priv *il, u32 reg) |
126 | { |
127 | unsigned long reg_flags; |
128 | u32 val; |
129 | |
130 | spin_lock_irqsave(&il->reg_lock, reg_flags); |
131 | _il_grab_nic_access(il); |
132 | val = _il_rd_prph(il, reg); |
133 | _il_release_nic_access(il); |
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
135 | return val; |
136 | } |
137 | EXPORT_SYMBOL(il_rd_prph); |
138 | |
139 | void |
140 | il_wr_prph(struct il_priv *il, u32 addr, u32 val) |
141 | { |
142 | unsigned long reg_flags; |
143 | |
144 | spin_lock_irqsave(&il->reg_lock, reg_flags); |
145 | if (likely(_il_grab_nic_access(il))) { |
146 | _il_wr_prph(il, addr, val); |
147 | _il_release_nic_access(il); |
148 | } |
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
150 | } |
151 | EXPORT_SYMBOL(il_wr_prph); |
152 | |
153 | u32 |
154 | il_read_targ_mem(struct il_priv *il, u32 addr) |
155 | { |
156 | unsigned long reg_flags; |
157 | u32 value; |
158 | |
159 | spin_lock_irqsave(&il->reg_lock, reg_flags); |
160 | _il_grab_nic_access(il); |
161 | |
	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
163 | value = _il_rd(il, HBUS_TARG_MEM_RDAT); |
164 | |
165 | _il_release_nic_access(il); |
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
167 | return value; |
168 | } |
169 | EXPORT_SYMBOL(il_read_targ_mem); |
170 | |
171 | void |
172 | il_write_targ_mem(struct il_priv *il, u32 addr, u32 val) |
173 | { |
174 | unsigned long reg_flags; |
175 | |
176 | spin_lock_irqsave(&il->reg_lock, reg_flags); |
177 | if (likely(_il_grab_nic_access(il))) { |
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
179 | _il_wr(il, HBUS_TARG_MEM_WDAT, val); |
180 | _il_release_nic_access(il); |
181 | } |
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
183 | } |
184 | EXPORT_SYMBOL(il_write_targ_mem); |
185 | |
186 | const char * |
187 | il_get_cmd_string(u8 cmd) |
188 | { |
189 | switch (cmd) { |
190 | IL_CMD(N_ALIVE); |
191 | IL_CMD(N_ERROR); |
192 | IL_CMD(C_RXON); |
193 | IL_CMD(C_RXON_ASSOC); |
194 | IL_CMD(C_QOS_PARAM); |
195 | IL_CMD(C_RXON_TIMING); |
196 | IL_CMD(C_ADD_STA); |
197 | IL_CMD(C_REM_STA); |
198 | IL_CMD(C_WEPKEY); |
199 | IL_CMD(N_3945_RX); |
200 | IL_CMD(C_TX); |
201 | IL_CMD(C_RATE_SCALE); |
202 | IL_CMD(C_LEDS); |
203 | IL_CMD(C_TX_LINK_QUALITY_CMD); |
204 | IL_CMD(C_CHANNEL_SWITCH); |
205 | IL_CMD(N_CHANNEL_SWITCH); |
206 | IL_CMD(C_SPECTRUM_MEASUREMENT); |
207 | IL_CMD(N_SPECTRUM_MEASUREMENT); |
208 | IL_CMD(C_POWER_TBL); |
209 | IL_CMD(N_PM_SLEEP); |
210 | IL_CMD(N_PM_DEBUG_STATS); |
211 | IL_CMD(C_SCAN); |
212 | IL_CMD(C_SCAN_ABORT); |
213 | IL_CMD(N_SCAN_START); |
214 | IL_CMD(N_SCAN_RESULTS); |
215 | IL_CMD(N_SCAN_COMPLETE); |
216 | IL_CMD(N_BEACON); |
217 | IL_CMD(C_TX_BEACON); |
218 | IL_CMD(C_TX_PWR_TBL); |
219 | IL_CMD(C_BT_CONFIG); |
220 | IL_CMD(C_STATS); |
221 | IL_CMD(N_STATS); |
222 | IL_CMD(N_CARD_STATE); |
223 | IL_CMD(N_MISSED_BEACONS); |
224 | IL_CMD(C_CT_KILL_CONFIG); |
225 | IL_CMD(C_SENSITIVITY); |
226 | IL_CMD(C_PHY_CALIBRATION); |
227 | IL_CMD(N_RX_PHY); |
228 | IL_CMD(N_RX_MPDU); |
229 | IL_CMD(N_RX); |
230 | IL_CMD(N_COMPRESSED_BA); |
231 | default: |
		return "UNKNOWN";
	}
235 | } |
236 | EXPORT_SYMBOL(il_get_cmd_string); |
237 | |
238 | #define HOST_COMPLETE_TIMEOUT (HZ / 2) |
239 | |
240 | static void |
241 | il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd, |
242 | struct il_rx_pkt *pkt) |
243 | { |
244 | if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { |
245 | IL_ERR("Bad return from %s (0x%08X)\n" , |
246 | il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); |
247 | return; |
248 | } |
249 | #ifdef CONFIG_IWLEGACY_DEBUG |
250 | switch (cmd->hdr.cmd) { |
251 | case C_TX_LINK_QUALITY_CMD: |
252 | case C_SENSITIVITY: |
253 | D_HC_DUMP("back from %s (0x%08X)\n" , |
254 | il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); |
255 | break; |
256 | default: |
257 | D_HC("back from %s (0x%08X)\n" , il_get_cmd_string(cmd->hdr.cmd), |
258 | pkt->hdr.flags); |
259 | } |
260 | #endif |
261 | } |
262 | |
263 | static int |
264 | il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd) |
265 | { |
266 | int ret; |
267 | |
268 | BUG_ON(!(cmd->flags & CMD_ASYNC)); |
269 | |
270 | /* An asynchronous command can not expect an SKB to be set. */ |
271 | BUG_ON(cmd->flags & CMD_WANT_SKB); |
272 | |
273 | /* Assign a generic callback if one is not provided */ |
274 | if (!cmd->callback) |
275 | cmd->callback = il_generic_cmd_callback; |
276 | |
277 | if (test_bit(S_EXIT_PENDING, &il->status)) |
278 | return -EBUSY; |
279 | |
280 | ret = il_enqueue_hcmd(il, cmd); |
281 | if (ret < 0) { |
282 | IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n" , |
283 | il_get_cmd_string(cmd->id), ret); |
284 | return ret; |
285 | } |
286 | return 0; |
287 | } |
288 | |
289 | int |
290 | il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd) |
291 | { |
292 | int cmd_idx; |
293 | int ret; |
294 | |
295 | lockdep_assert_held(&il->mutex); |
296 | |
297 | BUG_ON(cmd->flags & CMD_ASYNC); |
298 | |
299 | /* A synchronous command can not have a callback set. */ |
300 | BUG_ON(cmd->callback); |
301 | |
	D_INFO("Attempting to send sync command %s\n",
	       il_get_cmd_string(cmd->id));

	set_bit(S_HCMD_ACTIVE, &il->status);
	D_INFO("Setting HCMD_ACTIVE for command %s\n",
	       il_get_cmd_string(cmd->id));
308 | |
309 | cmd_idx = il_enqueue_hcmd(il, cmd); |
310 | if (cmd_idx < 0) { |
311 | ret = cmd_idx; |
312 | IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n" , |
313 | il_get_cmd_string(cmd->id), ret); |
314 | goto out; |
315 | } |
316 | |
317 | ret = wait_event_timeout(il->wait_command_queue, |
318 | !test_bit(S_HCMD_ACTIVE, &il->status), |
319 | HOST_COMPLETE_TIMEOUT); |
320 | if (!ret) { |
321 | if (test_bit(S_HCMD_ACTIVE, &il->status)) { |
322 | IL_ERR("Error sending %s: time out after %dms.\n" , |
323 | il_get_cmd_string(cmd->id), |
324 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); |
325 | |
			clear_bit(S_HCMD_ACTIVE, &il->status);
			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
328 | il_get_cmd_string(cmd->id)); |
329 | ret = -ETIMEDOUT; |
330 | goto cancel; |
331 | } |
332 | } |
333 | |
334 | if (test_bit(S_RFKILL, &il->status)) { |
335 | IL_ERR("Command %s aborted: RF KILL Switch\n" , |
336 | il_get_cmd_string(cmd->id)); |
337 | ret = -ECANCELED; |
338 | goto fail; |
339 | } |
340 | if (test_bit(S_FW_ERROR, &il->status)) { |
341 | IL_ERR("Command %s failed: FW Error\n" , |
342 | il_get_cmd_string(cmd->id)); |
343 | ret = -EIO; |
344 | goto fail; |
345 | } |
346 | if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { |
347 | IL_ERR("Error: Response NULL in '%s'\n" , |
348 | il_get_cmd_string(cmd->id)); |
349 | ret = -EIO; |
350 | goto cancel; |
351 | } |
352 | |
353 | ret = 0; |
354 | goto out; |
355 | |
356 | cancel: |
357 | if (cmd->flags & CMD_WANT_SKB) { |
358 | /* |
359 | * Cancel the CMD_WANT_SKB flag for the cmd in the |
360 | * TX cmd queue. Otherwise in case the cmd comes |
361 | * in later, it will possibly set an invalid |
362 | * address (cmd->meta.source). |
363 | */ |
364 | il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB; |
365 | } |
366 | fail: |
367 | if (cmd->reply_page) { |
		il_free_pages(il, cmd->reply_page);
369 | cmd->reply_page = 0; |
370 | } |
371 | out: |
372 | return ret; |
373 | } |
374 | EXPORT_SYMBOL(il_send_cmd_sync); |
375 | |
376 | int |
377 | il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd) |
378 | { |
379 | if (cmd->flags & CMD_ASYNC) |
380 | return il_send_cmd_async(il, cmd); |
381 | |
382 | return il_send_cmd_sync(il, cmd); |
383 | } |
384 | EXPORT_SYMBOL(il_send_cmd); |
385 | |
386 | int |
387 | il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data) |
388 | { |
389 | struct il_host_cmd cmd = { |
390 | .id = id, |
391 | .len = len, |
392 | .data = data, |
393 | }; |
394 | |
395 | return il_send_cmd_sync(il, &cmd); |
396 | } |
397 | EXPORT_SYMBOL(il_send_cmd_pdu); |
398 | |
399 | int |
400 | il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data, |
401 | void (*callback) (struct il_priv *il, |
402 | struct il_device_cmd *cmd, |
403 | struct il_rx_pkt *pkt)) |
404 | { |
405 | struct il_host_cmd cmd = { |
406 | .id = id, |
407 | .len = len, |
408 | .data = data, |
409 | }; |
410 | |
411 | cmd.flags |= CMD_ASYNC; |
412 | cmd.callback = callback; |
413 | |
	return il_send_cmd_async(il, &cmd);
415 | } |
416 | EXPORT_SYMBOL(il_send_cmd_pdu_async); |
417 | |
418 | /* default: IL_LED_BLINK(0) using blinking idx table */ |
419 | static int led_mode; |
420 | module_param(led_mode, int, 0444); |
421 | MODULE_PARM_DESC(led_mode, |
422 | "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking" ); |
423 | |
424 | /* Throughput OFF time(ms) ON time (ms) |
425 | * >300 25 25 |
426 | * >200 to 300 40 40 |
427 | * >100 to 200 55 55 |
428 | * >70 to 100 65 65 |
429 | * >50 to 70 75 75 |
430 | * >20 to 50 85 85 |
431 | * >10 to 20 95 95 |
432 | * >5 to 10 110 110 |
433 | * >1 to 5 130 130 |
434 | * >0 to 1 167 167 |
435 | * <=0 SOLID ON |
436 | */ |
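/*
 * Each blink_time below is the ON + OFF period (in ms) from the table
 * above; the throughput thresholds correspond to the table's Mbit/s
 * limits (hence the "* 1024 - 1" scaling).
 */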
437 | static const struct ieee80211_tpt_blink il_blink[] = { |
438 | {.throughput = 0, .blink_time = 334}, |
439 | {.throughput = 1 * 1024 - 1, .blink_time = 260}, |
440 | {.throughput = 5 * 1024 - 1, .blink_time = 220}, |
441 | {.throughput = 10 * 1024 - 1, .blink_time = 190}, |
442 | {.throughput = 20 * 1024 - 1, .blink_time = 170}, |
443 | {.throughput = 50 * 1024 - 1, .blink_time = 150}, |
444 | {.throughput = 70 * 1024 - 1, .blink_time = 130}, |
445 | {.throughput = 100 * 1024 - 1, .blink_time = 110}, |
446 | {.throughput = 200 * 1024 - 1, .blink_time = 80}, |
447 | {.throughput = 300 * 1024 - 1, .blink_time = 50}, |
448 | }; |
449 | |
450 | /* |
 * Adjust the LED blink rate to compensate for the MAC clock deviation of
 * each HW type. Blink rate analysis showed an average deviation of 0% on
 * 3945 and 5% on 4965 HW.
 * The LED on/off time must be scaled per HW according to that deviation to
 * achieve the desired LED frequency.
 * The calculation is: (100 - averageDeviation) / 100 * blinkTime
 * For code efficiency the calculation is done as:
 *     compensation = (100 - averageDeviation) * 64 / 100
 *     NewBlinkTime = (compensation * BlinkTime) / 64
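 *
 * For example, assuming the ~5% deviation seen on 4965:
 *     compensation = (100 - 5) * 64 / 100 = 60, so a 110 ms blink entry
 *     becomes (60 * 110) / 64, i.e. about 103 ms.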
460 | */ |
461 | static inline u8 |
462 | il_blink_compensation(struct il_priv *il, u8 time, u16 compensation) |
463 | { |
464 | if (!compensation) { |
465 | IL_ERR("undefined blink compensation: " |
466 | "use pre-defined blinking time\n" ); |
467 | return time; |
468 | } |
469 | |
470 | return (u8) ((time * compensation) >> 6); |
471 | } |
472 | |
473 | /* Set led pattern command */ |
474 | static int |
475 | il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off) |
476 | { |
477 | struct il_led_cmd led_cmd = { |
478 | .id = IL_LED_LINK, |
479 | .interval = IL_DEF_LED_INTRVL |
480 | }; |
481 | int ret; |
482 | |
483 | if (!test_bit(S_READY, &il->status)) |
484 | return -EBUSY; |
485 | |
486 | if (il->blink_on == on && il->blink_off == off) |
487 | return 0; |
488 | |
489 | if (off == 0) { |
490 | /* led is SOLID_ON */ |
491 | on = IL_LED_SOLID; |
492 | } |
493 | |
	D_LED("Led blink time compensation=%u\n",
	      il->cfg->led_compensation);
	led_cmd.on =
	    il_blink_compensation(il, on, il->cfg->led_compensation);
	led_cmd.off =
	    il_blink_compensation(il, off, il->cfg->led_compensation);
502 | |
503 | ret = il->ops->send_led_cmd(il, &led_cmd); |
504 | if (!ret) { |
505 | il->blink_on = on; |
506 | il->blink_off = off; |
507 | } |
508 | return ret; |
509 | } |
510 | |
511 | static void |
512 | il_led_brightness_set(struct led_classdev *led_cdev, |
513 | enum led_brightness brightness) |
514 | { |
515 | struct il_priv *il = container_of(led_cdev, struct il_priv, led); |
516 | unsigned long on = 0; |
517 | |
518 | if (brightness > 0) |
519 | on = IL_LED_SOLID; |
520 | |
	il_led_cmd(il, on, 0);
522 | } |
523 | |
524 | static int |
525 | il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, |
526 | unsigned long *delay_off) |
527 | { |
528 | struct il_priv *il = container_of(led_cdev, struct il_priv, led); |
529 | |
	return il_led_cmd(il, *delay_on, *delay_off);
531 | } |
532 | |
533 | void |
534 | il_leds_init(struct il_priv *il) |
535 | { |
536 | int mode = led_mode; |
537 | int ret; |
538 | |
539 | if (mode == IL_LED_DEFAULT) |
540 | mode = il->cfg->led_mode; |
541 | |
	il->led.name =
	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
544 | if (!il->led.name) |
545 | return; |
546 | |
547 | il->led.brightness_set = il_led_brightness_set; |
548 | il->led.blink_set = il_led_blink_set; |
549 | il->led.max_brightness = 1; |
550 | |
551 | switch (mode) { |
552 | case IL_LED_DEFAULT: |
553 | WARN_ON(1); |
554 | break; |
555 | case IL_LED_BLINK: |
		il->led.default_trigger =
		    ieee80211_create_tpt_led_trigger(il->hw,
						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
						     il_blink,
						     ARRAY_SIZE(il_blink));
561 | break; |
562 | case IL_LED_RF_STATE: |
		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
564 | break; |
565 | } |
566 | |
	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
568 | if (ret) { |
		kfree(il->led.name);
570 | return; |
571 | } |
572 | |
573 | il->led_registered = true; |
574 | } |
575 | EXPORT_SYMBOL(il_leds_init); |
576 | |
577 | void |
578 | il_leds_exit(struct il_priv *il) |
579 | { |
580 | if (!il->led_registered) |
581 | return; |
582 | |
	led_classdev_unregister(&il->led);
	kfree(il->led.name);
585 | } |
586 | EXPORT_SYMBOL(il_leds_exit); |
587 | |
588 | /************************** EEPROM BANDS **************************** |
589 | * |
590 | * The il_eeprom_band definitions below provide the mapping from the |
591 | * EEPROM contents to the specific channel number supported for each |
592 | * band. |
593 | * |
594 | * For example, il_priv->eeprom.band_3_channels[4] from the band_3 |
595 | * definition below maps to physical channel 42 in the 5.2GHz spectrum. |
596 | * The specific geography and calibration information for that channel |
597 | * is contained in the eeprom map itself. |
598 | * |
599 | * During init, we copy the eeprom information and channel map |
600 | * information into il->channel_info_24/52 and il->channel_map_24/52 |
601 | * |
602 | * channel_map_24/52 provides the idx in the channel_info array for a |
603 | * given channel. We have to have two separate maps as there is channel |
604 | * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and |
605 | * band_2 |
606 | * |
607 | * A value of 0xff stored in the channel_map indicates that the channel |
608 | * is not supported by the hardware at all. |
609 | * |
610 | * A value of 0xfe in the channel_map indicates that the channel is not |
611 | * valid for Tx with the current hardware. This means that |
612 | * while the system can tune and receive on a given channel, it may not |
613 | * be able to associate or transmit any frames on that |
614 | * channel. There is no corresponding channel information for that |
615 | * entry. |
616 | * |
617 | *********************************************************************/ |
618 | |
619 | /* 2.4 GHz */ |
620 | const u8 il_eeprom_band_1[14] = { |
621 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 |
622 | }; |
623 | |
624 | /* 5.2 GHz bands */ |
625 | static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */ |
626 | 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 |
627 | }; |
628 | |
629 | static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */ |
630 | 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 |
631 | }; |
632 | |
633 | static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */ |
634 | 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 |
635 | }; |
636 | |
637 | static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */ |
638 | 145, 149, 153, 157, 161, 165 |
639 | }; |
640 | |
641 | static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */ |
642 | 1, 2, 3, 4, 5, 6, 7 |
643 | }; |
644 | |
645 | static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */ |
646 | 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 |
647 | }; |
648 | |
649 | /****************************************************************************** |
650 | * |
651 | * EEPROM related functions |
652 | * |
653 | ******************************************************************************/ |
654 | |
655 | static int |
656 | il_eeprom_verify_signature(struct il_priv *il) |
657 | { |
658 | u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; |
659 | int ret = 0; |
660 | |
661 | D_EEPROM("EEPROM signature=0x%08x\n" , gp); |
662 | switch (gp) { |
663 | case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: |
664 | case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: |
665 | break; |
666 | default: |
		IL_ERR("bad EEPROM signature, EEPROM_GP=0x%08x\n", gp);
668 | ret = -ENOENT; |
669 | break; |
670 | } |
671 | return ret; |
672 | } |
673 | |
674 | const u8 * |
675 | il_eeprom_query_addr(const struct il_priv *il, size_t offset) |
676 | { |
677 | BUG_ON(offset >= il->cfg->eeprom_size); |
678 | return &il->eeprom[offset]; |
679 | } |
680 | EXPORT_SYMBOL(il_eeprom_query_addr); |
681 | |
682 | u16 |
683 | il_eeprom_query16(const struct il_priv *il, size_t offset) |
684 | { |
685 | if (!il->eeprom) |
686 | return 0; |
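	/* The EEPROM image is byte-addressed little-endian; assemble a
	 * host-order u16 from two consecutive bytes. */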
687 | return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8); |
688 | } |
689 | EXPORT_SYMBOL(il_eeprom_query16); |
690 | |
691 | /* |
692 | * il_eeprom_init - read EEPROM contents |
693 | * |
694 | * Load the EEPROM contents from adapter into il->eeprom |
695 | * |
696 | * NOTE: This routine uses the non-debug IO access functions. |
697 | */ |
698 | int |
699 | il_eeprom_init(struct il_priv *il) |
700 | { |
701 | __le16 *e; |
702 | u32 gp = _il_rd(il, CSR_EEPROM_GP); |
703 | int sz; |
704 | int ret; |
705 | int addr; |
706 | |
707 | /* allocate eeprom */ |
708 | sz = il->cfg->eeprom_size; |
709 | D_EEPROM("NVM size = %d\n" , sz); |
	il->eeprom = kzalloc(sz, GFP_KERNEL);
711 | if (!il->eeprom) |
712 | return -ENOMEM; |
713 | |
714 | e = (__le16 *) il->eeprom; |
715 | |
716 | il->ops->apm_init(il); |
717 | |
718 | ret = il_eeprom_verify_signature(il); |
719 | if (ret < 0) { |
720 | IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n" , gp); |
721 | ret = -ENOENT; |
722 | goto err; |
723 | } |
724 | |
725 | /* Make sure driver (instead of uCode) is allowed to read EEPROM */ |
726 | ret = il->ops->eeprom_acquire_semaphore(il); |
727 | if (ret < 0) { |
728 | IL_ERR("Failed to acquire EEPROM semaphore.\n" ); |
729 | ret = -ENOENT; |
730 | goto err; |
731 | } |
732 | |
733 | /* eeprom is an array of 16bit values */ |
734 | for (addr = 0; addr < sz; addr += sizeof(u16)) { |
735 | u32 r; |
736 | |
737 | _il_wr(il, CSR_EEPROM_REG, |
738 | CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); |
739 | |
740 | ret = |
741 | _il_poll_bit(il, CSR_EEPROM_REG, |
742 | CSR_EEPROM_REG_READ_VALID_MSK, |
743 | CSR_EEPROM_REG_READ_VALID_MSK, |
744 | IL_EEPROM_ACCESS_TIMEOUT); |
745 | if (ret < 0) { |
746 | IL_ERR("Time out reading EEPROM[%d]\n" , addr); |
747 | goto done; |
748 | } |
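		/* The requested 16-bit word is returned in the upper half of
		 * CSR_EEPROM_REG, hence the ">> 16" below. */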
749 | r = _il_rd(il, CSR_EEPROM_REG); |
750 | e[addr / 2] = cpu_to_le16(r >> 16); |
751 | } |
752 | |
753 | D_EEPROM("NVM Type: %s, version: 0x%x\n" , "EEPROM" , |
754 | il_eeprom_query16(il, EEPROM_VERSION)); |
755 | |
756 | ret = 0; |
757 | done: |
758 | il->ops->eeprom_release_semaphore(il); |
759 | |
760 | err: |
761 | if (ret) |
762 | il_eeprom_free(il); |
763 | /* Reset chip to save power until we load uCode during "up". */ |
764 | il_apm_stop(il); |
765 | return ret; |
766 | } |
767 | EXPORT_SYMBOL(il_eeprom_init); |
768 | |
769 | void |
770 | il_eeprom_free(struct il_priv *il) |
771 | { |
	kfree(il->eeprom);
773 | il->eeprom = NULL; |
774 | } |
775 | EXPORT_SYMBOL(il_eeprom_free); |
776 | |
777 | static void |
778 | il_init_band_reference(const struct il_priv *il, int eep_band, |
779 | int *eeprom_ch_count, |
780 | const struct il_eeprom_channel **eeprom_ch_info, |
781 | const u8 **eeprom_ch_idx) |
782 | { |
783 | u32 offset = il->cfg->regulatory_bands[eep_band - 1]; |
784 | |
785 | switch (eep_band) { |
786 | case 1: /* 2.4GHz band */ |
787 | *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1); |
788 | *eeprom_ch_info = |
789 | (struct il_eeprom_channel *)il_eeprom_query_addr(il, |
790 | offset); |
791 | *eeprom_ch_idx = il_eeprom_band_1; |
792 | break; |
793 | case 2: /* 4.9GHz band */ |
794 | *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2); |
795 | *eeprom_ch_info = |
796 | (struct il_eeprom_channel *)il_eeprom_query_addr(il, |
797 | offset); |
798 | *eeprom_ch_idx = il_eeprom_band_2; |
799 | break; |
800 | case 3: /* 5.2GHz band */ |
801 | *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3); |
802 | *eeprom_ch_info = |
803 | (struct il_eeprom_channel *)il_eeprom_query_addr(il, |
804 | offset); |
805 | *eeprom_ch_idx = il_eeprom_band_3; |
806 | break; |
807 | case 4: /* 5.5GHz band */ |
808 | *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4); |
809 | *eeprom_ch_info = |
810 | (struct il_eeprom_channel *)il_eeprom_query_addr(il, |
811 | offset); |
812 | *eeprom_ch_idx = il_eeprom_band_4; |
813 | break; |
814 | case 5: /* 5.7GHz band */ |
815 | *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5); |
816 | *eeprom_ch_info = |
817 | (struct il_eeprom_channel *)il_eeprom_query_addr(il, |
818 | offset); |
819 | *eeprom_ch_idx = il_eeprom_band_5; |
820 | break; |
821 | case 6: /* 2.4GHz ht40 channels */ |
822 | *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6); |
823 | *eeprom_ch_info = |
824 | (struct il_eeprom_channel *)il_eeprom_query_addr(il, |
825 | offset); |
826 | *eeprom_ch_idx = il_eeprom_band_6; |
827 | break; |
828 | case 7: /* 5 GHz ht40 channels */ |
829 | *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7); |
830 | *eeprom_ch_info = |
831 | (struct il_eeprom_channel *)il_eeprom_query_addr(il, |
832 | offset); |
833 | *eeprom_ch_idx = il_eeprom_band_7; |
834 | break; |
835 | default: |
836 | BUG(); |
837 | } |
838 | } |
839 | |
840 | #define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ |
841 | ? # x " " : "") |
842 | /* |
 * il_mod_ht40_chan_info - Copy HT40 channel info into the driver's il_priv.
844 | * |
845 | * Does not set up a command, or touch hardware. |
846 | */ |
847 | static int |
848 | il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel, |
849 | const struct il_eeprom_channel *eeprom_ch, |
850 | u8 clear_ht40_extension_channel) |
851 | { |
852 | struct il_channel_info *ch_info; |
853 | |
854 | ch_info = |
855 | (struct il_channel_info *)il_get_channel_info(il, band, channel); |
856 | |
857 | if (!il_is_channel_valid(ch_info)) |
858 | return -1; |
859 | |
860 | D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" |
861 | " Ad-Hoc %ssupported\n" , ch_info->channel, |
862 | il_is_channel_a_band(ch_info) ? "5.2" : "2.4" , |
863 | CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE), |
864 | CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE), |
865 | CHECK_AND_PRINT(DFS), eeprom_ch->flags, |
866 | eeprom_ch->max_power_avg, |
867 | ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) && |
868 | !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not " ); |
869 | |
870 | ch_info->ht40_eeprom = *eeprom_ch; |
871 | ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; |
872 | ch_info->ht40_flags = eeprom_ch->flags; |
873 | if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) |
874 | ch_info->ht40_extension_channel &= |
875 | ~clear_ht40_extension_channel; |
876 | |
877 | return 0; |
878 | } |
879 | |
880 | #define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ |
881 | ? # x " " : "") |
882 | |
883 | /* |
884 | * il_init_channel_map - Set up driver's info for all possible channels |
885 | */ |
886 | int |
887 | il_init_channel_map(struct il_priv *il) |
888 | { |
889 | int eeprom_ch_count = 0; |
890 | const u8 *eeprom_ch_idx = NULL; |
891 | const struct il_eeprom_channel *eeprom_ch_info = NULL; |
892 | int band, ch; |
893 | struct il_channel_info *ch_info; |
894 | |
895 | if (il->channel_count) { |
896 | D_EEPROM("Channel map already initialized.\n" ); |
897 | return 0; |
898 | } |
899 | |
900 | D_EEPROM("Initializing regulatory info from EEPROM\n" ); |
901 | |
902 | il->channel_count = |
903 | ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) + |
904 | ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) + |
905 | ARRAY_SIZE(il_eeprom_band_5); |
906 | |
907 | D_EEPROM("Parsing data for %d channels.\n" , il->channel_count); |
908 | |
909 | il->channel_info = |
	    kcalloc(il->channel_count, sizeof(struct il_channel_info),
911 | GFP_KERNEL); |
912 | if (!il->channel_info) { |
913 | IL_ERR("Could not allocate channel_info\n" ); |
914 | il->channel_count = 0; |
915 | return -ENOMEM; |
916 | } |
917 | |
918 | ch_info = il->channel_info; |
919 | |
	/* Loop through the 5 EEPROM bands, adding them in order to the
	 * channel map we maintain (which contains additional information
	 * beyond what is in the EEPROM itself) */
923 | for (band = 1; band <= 5; band++) { |
924 | |
		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);
927 | |
928 | /* Loop through each band adding each of the channels */ |
929 | for (ch = 0; ch < eeprom_ch_count; ch++) { |
930 | ch_info->channel = eeprom_ch_idx[ch]; |
931 | ch_info->band = |
932 | (band == |
933 | 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; |
934 | |
935 | /* permanently store EEPROM's channel regulatory flags |
936 | * and max power in channel info database. */ |
937 | ch_info->eeprom = eeprom_ch_info[ch]; |
938 | |
939 | /* Copy the run-time flags so they are there even on |
940 | * invalid channels */ |
941 | ch_info->flags = eeprom_ch_info[ch].flags; |
942 | /* First write that ht40 is not enabled, and then enable |
943 | * one by one */ |
944 | ch_info->ht40_extension_channel = |
945 | IEEE80211_CHAN_NO_HT40; |
946 | |
947 | if (!(il_is_channel_valid(ch_info))) { |
948 | D_EEPROM("Ch. %d Flags %x [%sGHz] - " |
949 | "No traffic\n" , ch_info->channel, |
950 | ch_info->flags, |
951 | il_is_channel_a_band(ch_info) ? "5.2" : |
952 | "2.4" ); |
953 | ch_info++; |
954 | continue; |
955 | } |
956 | |
957 | /* Initialize regulatory-based run-time data */ |
958 | ch_info->max_power_avg = ch_info->curr_txpow = |
959 | eeprom_ch_info[ch].max_power_avg; |
960 | ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; |
961 | ch_info->min_power = 0; |
962 | |
963 | D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):" |
964 | " Ad-Hoc %ssupported\n" , ch_info->channel, |
965 | il_is_channel_a_band(ch_info) ? "5.2" : "2.4" , |
966 | CHECK_AND_PRINT_I(VALID), |
967 | CHECK_AND_PRINT_I(IBSS), |
968 | CHECK_AND_PRINT_I(ACTIVE), |
969 | CHECK_AND_PRINT_I(RADAR), |
970 | CHECK_AND_PRINT_I(WIDE), |
971 | CHECK_AND_PRINT_I(DFS), |
972 | eeprom_ch_info[ch].flags, |
973 | eeprom_ch_info[ch].max_power_avg, |
974 | ((eeprom_ch_info[ch]. |
975 | flags & EEPROM_CHANNEL_IBSS) && |
976 | !(eeprom_ch_info[ch]. |
977 | flags & EEPROM_CHANNEL_RADAR)) ? "" : |
978 | "not " ); |
979 | |
980 | ch_info++; |
981 | } |
982 | } |
983 | |
984 | /* Check if we do have HT40 channels */ |
985 | if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 && |
986 | il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40) |
987 | return 0; |
988 | |
989 | /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ |
990 | for (band = 6; band <= 7; band++) { |
991 | enum nl80211_band ieeeband; |
992 | |
		il_init_band_reference(il, band, &eeprom_ch_count,
				       &eeprom_ch_info, &eeprom_ch_idx);
995 | |
996 | /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ |
997 | ieeeband = |
998 | (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; |
999 | |
1000 | /* Loop through each band adding each of the channels */ |
1001 | for (ch = 0; ch < eeprom_ch_count; ch++) { |
1002 | /* Set up driver's info for lower half */ |
			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40PLUS);
1006 | |
1007 | /* Set up driver's info for upper half */ |
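			/* (the HT40 extension channel sits 4 channel numbers,
			 * i.e. 20 MHz, above the control channel) */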
			il_mod_ht40_chan_info(il, ieeeband,
					      eeprom_ch_idx[ch] + 4,
					      &eeprom_ch_info[ch],
					      IEEE80211_CHAN_NO_HT40MINUS);
1012 | } |
1013 | } |
1014 | |
1015 | return 0; |
1016 | } |
1017 | EXPORT_SYMBOL(il_init_channel_map); |
1018 | |
1019 | /* |
1020 | * il_free_channel_map - undo allocations in il_init_channel_map |
1021 | */ |
1022 | void |
1023 | il_free_channel_map(struct il_priv *il) |
1024 | { |
	kfree(il->channel_info);
1026 | il->channel_count = 0; |
1027 | } |
1028 | EXPORT_SYMBOL(il_free_channel_map); |
1029 | |
1030 | /* |
 * il_get_channel_info - Find driver's private channel info
1032 | * |
1033 | * Based on band and channel number. |
1034 | */ |
1035 | const struct il_channel_info * |
1036 | il_get_channel_info(const struct il_priv *il, enum nl80211_band band, |
1037 | u16 channel) |
1038 | { |
1039 | int i; |
1040 | |
1041 | switch (band) { |
1042 | case NL80211_BAND_5GHZ: |
1043 | for (i = 14; i < il->channel_count; i++) { |
1044 | if (il->channel_info[i].channel == channel) |
1045 | return &il->channel_info[i]; |
1046 | } |
1047 | break; |
1048 | case NL80211_BAND_2GHZ: |
1049 | if (channel >= 1 && channel <= 14) |
1050 | return &il->channel_info[channel - 1]; |
1051 | break; |
1052 | default: |
1053 | BUG(); |
1054 | } |
1055 | |
1056 | return NULL; |
1057 | } |
1058 | EXPORT_SYMBOL(il_get_channel_info); |
1059 | |
1060 | /* |
1061 | * Setting power level allows the card to go to sleep when not busy. |
1062 | * |
1063 | * We calculate a sleep command based on the required latency, which |
1064 | * we get from mac80211. |
1065 | */ |
1066 | |
1067 | #define SLP_VEC(X0, X1, X2, X3, X4) { \ |
1068 | cpu_to_le32(X0), \ |
1069 | cpu_to_le32(X1), \ |
1070 | cpu_to_le32(X2), \ |
1071 | cpu_to_le32(X3), \ |
1072 | cpu_to_le32(X4) \ |
1073 | } |
1074 | |
1075 | static void |
1076 | il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd) |
1077 | { |
1078 | static const __le32 interval[3][IL_POWER_VEC_SIZE] = { |
1079 | SLP_VEC(2, 2, 4, 6, 0xFF), |
1080 | SLP_VEC(2, 4, 7, 10, 10), |
1081 | SLP_VEC(4, 7, 10, 10, 0xFF) |
1082 | }; |
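	/* Row selected below by DTIM period: <= 2, <= 10, or larger. */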
1083 | int i, dtim_period, no_dtim; |
1084 | u32 max_sleep; |
1085 | bool skip; |
1086 | |
1087 | memset(cmd, 0, sizeof(*cmd)); |
1088 | |
1089 | if (il->power_data.pci_pm) |
1090 | cmd->flags |= IL_POWER_PCI_PM_MSK; |
1091 | |
1092 | /* if no Power Save, we are done */ |
1093 | if (il->power_data.ps_disabled) |
1094 | return; |
1095 | |
1096 | cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK; |
1097 | cmd->keep_alive_seconds = 0; |
1098 | cmd->debug_flags = 0; |
1099 | cmd->rx_data_timeout = cpu_to_le32(25 * 1024); |
1100 | cmd->tx_data_timeout = cpu_to_le32(25 * 1024); |
1101 | cmd->keep_alive_beacons = 0; |
1102 | |
1103 | dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0; |
1104 | |
1105 | if (dtim_period <= 2) { |
1106 | memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0])); |
1107 | no_dtim = 2; |
1108 | } else if (dtim_period <= 10) { |
1109 | memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1])); |
1110 | no_dtim = 2; |
1111 | } else { |
1112 | memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2])); |
1113 | no_dtim = 0; |
1114 | } |
1115 | |
1116 | if (dtim_period == 0) { |
1117 | dtim_period = 1; |
1118 | skip = false; |
1119 | } else { |
1120 | skip = !!no_dtim; |
1121 | } |
1122 | |
1123 | if (skip) { |
1124 | __le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1]; |
1125 | |
1126 | max_sleep = le32_to_cpu(tmp); |
1127 | if (max_sleep == 0xFF) |
1128 | max_sleep = dtim_period * (skip + 1); |
1129 | else if (max_sleep > dtim_period) |
1130 | max_sleep = (max_sleep / dtim_period) * dtim_period; |
1131 | cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK; |
1132 | } else { |
1133 | max_sleep = dtim_period; |
1134 | cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK; |
1135 | } |
1136 | |
1137 | for (i = 0; i < IL_POWER_VEC_SIZE; i++) |
1138 | if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) |
1139 | cmd->sleep_interval[i] = cpu_to_le32(max_sleep); |
1140 | } |
1141 | |
1142 | static int |
1143 | il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd) |
1144 | { |
1145 | D_POWER("Sending power/sleep command\n" ); |
1146 | D_POWER("Flags value = 0x%08X\n" , cmd->flags); |
1147 | D_POWER("Tx timeout = %u\n" , le32_to_cpu(cmd->tx_data_timeout)); |
1148 | D_POWER("Rx timeout = %u\n" , le32_to_cpu(cmd->rx_data_timeout)); |
1149 | D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n" , |
1150 | le32_to_cpu(cmd->sleep_interval[0]), |
1151 | le32_to_cpu(cmd->sleep_interval[1]), |
1152 | le32_to_cpu(cmd->sleep_interval[2]), |
1153 | le32_to_cpu(cmd->sleep_interval[3]), |
1154 | le32_to_cpu(cmd->sleep_interval[4])); |
1155 | |
1156 | return il_send_cmd_pdu(il, C_POWER_TBL, |
1157 | sizeof(struct il_powertable_cmd), cmd); |
1158 | } |
1159 | |
1160 | static int |
1161 | il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force) |
1162 | { |
1163 | int ret; |
1164 | bool update_chains; |
1165 | |
1166 | lockdep_assert_held(&il->mutex); |
1167 | |
1168 | /* Don't update the RX chain when chain noise calibration is running */ |
1169 | update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE || |
1170 | il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE; |
1171 | |
	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
1173 | return 0; |
1174 | |
1175 | if (!il_is_ready_rf(il)) |
1176 | return -EIO; |
1177 | |
1178 | /* scan complete use sleep_power_next, need to be updated */ |
1179 | memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); |
1180 | if (test_bit(S_SCANNING, &il->status) && !force) { |
1181 | D_INFO("Defer power set mode while scanning\n" ); |
1182 | return 0; |
1183 | } |
1184 | |
1185 | if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK) |
		set_bit(S_POWER_PMI, &il->status);
1187 | |
1188 | ret = il_set_power(il, cmd); |
1189 | if (!ret) { |
1190 | if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)) |
			clear_bit(S_POWER_PMI, &il->status);
1192 | |
1193 | if (il->ops->update_chain_flags && update_chains) |
1194 | il->ops->update_chain_flags(il); |
1195 | else if (il->ops->update_chain_flags) |
1196 | D_POWER("Cannot update the power, chain noise " |
1197 | "calibration running: %d\n" , |
1198 | il->chain_noise_data.state); |
1199 | |
1200 | memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)); |
1201 | } else |
1202 | IL_ERR("set power fail, ret = %d" , ret); |
1203 | |
1204 | return ret; |
1205 | } |
1206 | |
1207 | int |
1208 | il_power_update_mode(struct il_priv *il, bool force) |
1209 | { |
1210 | struct il_powertable_cmd cmd; |
1211 | |
	il_build_powertable_cmd(il, &cmd);

	return il_power_set_mode(il, &cmd, force);
1215 | } |
1216 | EXPORT_SYMBOL(il_power_update_mode); |
1217 | |
1218 | /* initialize to default */ |
1219 | void |
1220 | il_power_initialize(struct il_priv *il) |
1221 | { |
1222 | u16 lctl; |
1223 | |
	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
1225 | il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); |
1226 | |
1227 | il->power_data.debug_sleep_level_override = -1; |
1228 | |
1229 | memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd)); |
1230 | } |
1231 | EXPORT_SYMBOL(il_power_initialize); |
1232 | |
1233 | /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after |
1234 | * sending probe req. This should be set long enough to hear probe responses |
1235 | * from more than one AP. */ |
1236 | #define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ |
1237 | #define IL_ACTIVE_DWELL_TIME_52 (20) |
1238 | |
1239 | #define IL_ACTIVE_DWELL_FACTOR_24GHZ (3) |
1240 | #define IL_ACTIVE_DWELL_FACTOR_52GHZ (2) |
1241 | |
1242 | /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. |
1243 | * Must be set longer than active dwell time. |
1244 | * For the most reliable scan, set > AP beacon interval (typically 100msec). */ |
1245 | #define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ |
1246 | #define IL_PASSIVE_DWELL_TIME_52 (10) |
1247 | #define IL_PASSIVE_DWELL_BASE (100) |
1248 | #define IL_CHANNEL_TUNE_TIME 5 |
1249 | |
1250 | static int |
1251 | il_send_scan_abort(struct il_priv *il) |
1252 | { |
1253 | int ret; |
1254 | struct il_rx_pkt *pkt; |
1255 | struct il_host_cmd cmd = { |
1256 | .id = C_SCAN_ABORT, |
1257 | .flags = CMD_WANT_SKB, |
1258 | }; |
1259 | |
1260 | /* Exit instantly with error when device is not ready |
1261 | * to receive scan abort command or it does not perform |
1262 | * hardware scan currently */ |
1263 | if (!test_bit(S_READY, &il->status) || |
1264 | !test_bit(S_GEO_CONFIGURED, &il->status) || |
1265 | !test_bit(S_SCAN_HW, &il->status) || |
1266 | test_bit(S_FW_ERROR, &il->status) || |
1267 | test_bit(S_EXIT_PENDING, &il->status)) |
1268 | return -EIO; |
1269 | |
1270 | ret = il_send_cmd_sync(il, &cmd); |
1271 | if (ret) |
1272 | return ret; |
1273 | |
1274 | pkt = (struct il_rx_pkt *)cmd.reply_page; |
1275 | if (pkt->u.status != CAN_ABORT_STATUS) { |
1276 | /* The scan abort will return 1 for success or |
1277 | * 2 for "failure". A failure condition can be |
1278 | * due to simply not being in an active scan which |
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
1282 | D_SCAN("SCAN_ABORT ret %d.\n" , pkt->u.status); |
1283 | ret = -EIO; |
1284 | } |
1285 | |
	il_free_pages(il, cmd.reply_page);
1287 | return ret; |
1288 | } |
1289 | |
1290 | static void |
1291 | il_complete_scan(struct il_priv *il, bool aborted) |
1292 | { |
1293 | struct cfg80211_scan_info info = { |
1294 | .aborted = aborted, |
1295 | }; |
1296 | |
1297 | /* check if scan was requested from mac80211 */ |
1298 | if (il->scan_request) { |
		D_SCAN("Complete scan in mac80211\n");
		ieee80211_scan_completed(il->hw, &info);
1301 | } |
1302 | |
1303 | il->scan_vif = NULL; |
1304 | il->scan_request = NULL; |
1305 | } |
1306 | |
1307 | void |
1308 | il_force_scan_end(struct il_priv *il) |
1309 | { |
1310 | lockdep_assert_held(&il->mutex); |
1311 | |
1312 | if (!test_bit(S_SCANNING, &il->status)) { |
1313 | D_SCAN("Forcing scan end while not scanning\n" ); |
1314 | return; |
1315 | } |
1316 | |
	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	il_complete_scan(il, true);
1322 | } |
1323 | |
1324 | static void |
1325 | il_do_scan_abort(struct il_priv *il) |
1326 | { |
1327 | int ret; |
1328 | |
1329 | lockdep_assert_held(&il->mutex); |
1330 | |
1331 | if (!test_bit(S_SCANNING, &il->status)) { |
1332 | D_SCAN("Not performing scan to abort\n" ); |
1333 | return; |
1334 | } |
1335 | |
	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
1338 | return; |
1339 | } |
1340 | |
1341 | ret = il_send_scan_abort(il); |
1342 | if (ret) { |
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully sent scan abort\n");
1347 | } |
1348 | |
1349 | /* |
1350 | * il_scan_cancel - Cancel any currently executing HW scan |
1351 | */ |
1352 | int |
1353 | il_scan_cancel(struct il_priv *il) |
1354 | { |
	D_SCAN("Queuing abort scan\n");
	queue_work(il->workqueue, &il->abort_scan);
1357 | return 0; |
1358 | } |
1359 | EXPORT_SYMBOL(il_scan_cancel); |
1360 | |
1361 | /* |
1362 | * il_scan_cancel_timeout - Cancel any currently executing HW scan |
1363 | * @ms: amount of time to wait (in milliseconds) for scan to abort |
1364 | * |
1365 | */ |
1366 | int |
1367 | il_scan_cancel_timeout(struct il_priv *il, unsigned long ms) |
1368 | { |
	unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1370 | |
1371 | lockdep_assert_held(&il->mutex); |
1372 | |
1373 | D_SCAN("Scan cancel timeout\n" ); |
1374 | |
1375 | il_do_scan_abort(il); |
1376 | |
1377 | while (time_before_eq(jiffies, timeout)) { |
1378 | if (!test_bit(S_SCAN_HW, &il->status)) |
1379 | break; |
		msleep(20);
1381 | } |
1382 | |
1383 | return test_bit(S_SCAN_HW, &il->status); |
1384 | } |
1385 | EXPORT_SYMBOL(il_scan_cancel_timeout); |
1386 | |
1387 | /* Service response to C_SCAN (0x80) */ |
1388 | static void |
1389 | il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb) |
1390 | { |
1391 | #ifdef CONFIG_IWLEGACY_DEBUG |
1392 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
1393 | struct il_scanreq_notification *notif = |
1394 | (struct il_scanreq_notification *)pkt->u.raw; |
1395 | |
1396 | D_SCAN("Scan request status = 0x%x\n" , notif->status); |
1397 | #endif |
1398 | } |
1399 | |
1400 | /* Service N_SCAN_START (0x82) */ |
1401 | static void |
1402 | il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb) |
1403 | { |
1404 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
1405 | struct il_scanstart_notification *notif = |
1406 | (struct il_scanstart_notification *)pkt->u.raw; |
1407 | il->scan_start_tsf = le32_to_cpu(notif->tsf_low); |
1408 | D_SCAN("Scan start: " "%d [802.11%s] " |
1409 | "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n" , notif->channel, |
1410 | notif->band ? "bg" : "a" , le32_to_cpu(notif->tsf_high), |
1411 | le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); |
1412 | } |
1413 | |
1414 | /* Service N_SCAN_RESULTS (0x83) */ |
1415 | static void |
1416 | il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb) |
1417 | { |
1418 | #ifdef CONFIG_IWLEGACY_DEBUG |
1419 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
1420 | struct il_scanresults_notification *notif = |
1421 | (struct il_scanresults_notification *)pkt->u.raw; |
1422 | |
1423 | D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d " |
1424 | "elapsed=%lu usec\n" , notif->channel, notif->band ? "bg" : "a" , |
1425 | le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), |
1426 | le32_to_cpu(notif->stats[0]), |
1427 | le32_to_cpu(notif->tsf_low) - il->scan_start_tsf); |
1428 | #endif |
1429 | } |
1430 | |
1431 | /* Service N_SCAN_COMPLETE (0x84) */ |
1432 | static void |
1433 | il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb) |
1434 | { |
1435 | |
1436 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
1437 | struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw; |
1438 | |
1439 | D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n" , |
1440 | scan_notif->scanned_channels, scan_notif->tsf_low, |
1441 | scan_notif->tsf_high, scan_notif->status); |
1442 | |
1443 | /* The HW is no longer scanning */ |
	clear_bit(S_SCAN_HW, &il->status);
1445 | |
1446 | D_SCAN("Scan on %sGHz took %dms\n" , |
1447 | (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2" , |
1448 | jiffies_to_msecs(jiffies - il->scan_start)); |
1449 | |
	queue_work(il->workqueue, &il->scan_completed);
1451 | } |
1452 | |
1453 | void |
1454 | il_setup_rx_scan_handlers(struct il_priv *il) |
1455 | { |
1456 | /* scan handlers */ |
1457 | il->handlers[C_SCAN] = il_hdl_scan; |
1458 | il->handlers[N_SCAN_START] = il_hdl_scan_start; |
1459 | il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results; |
1460 | il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete; |
1461 | } |
1462 | EXPORT_SYMBOL(il_setup_rx_scan_handlers); |
1463 | |
1464 | u16 |
1465 | il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band, |
1466 | u8 n_probes) |
1467 | { |
1468 | if (band == NL80211_BAND_5GHZ) |
1469 | return IL_ACTIVE_DWELL_TIME_52 + |
1470 | IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); |
1471 | else |
1472 | return IL_ACTIVE_DWELL_TIME_24 + |
1473 | IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); |
1474 | } |
1475 | EXPORT_SYMBOL(il_get_active_dwell_time); |
1476 | |
1477 | u16 |
1478 | il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band, |
1479 | struct ieee80211_vif *vif) |
1480 | { |
1481 | u16 value; |
1482 | |
1483 | u16 passive = |
1484 | (band == |
1485 | NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE + |
1486 | IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE + |
1487 | IL_PASSIVE_DWELL_TIME_52; |
1488 | |
1489 | if (il_is_any_associated(il)) { |
1490 | /* |
1491 | * If we're associated, we clamp the maximum passive |
1492 | * dwell time to be 98% of the smallest beacon interval |
1493 | * (minus 2 * channel tune time) |
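		 * For example, with a typical 100 ms beacon interval this
		 * gives 100 * 98 / 100 - 2 * IL_CHANNEL_TUNE_TIME = 88 ms.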
1494 | */ |
1495 | value = il->vif ? il->vif->bss_conf.beacon_int : 0; |
1496 | if (value > IL_PASSIVE_DWELL_BASE || !value) |
1497 | value = IL_PASSIVE_DWELL_BASE; |
1498 | value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2; |
1499 | passive = min(value, passive); |
1500 | } |
1501 | |
1502 | return passive; |
1503 | } |
1504 | EXPORT_SYMBOL(il_get_passive_dwell_time); |
1505 | |
1506 | void |
1507 | il_init_scan_params(struct il_priv *il) |
1508 | { |
	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;

1510 | if (!il->scan_tx_ant[NL80211_BAND_5GHZ]) |
1511 | il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx; |
1512 | if (!il->scan_tx_ant[NL80211_BAND_2GHZ]) |
1513 | il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx; |
1514 | } |
1515 | EXPORT_SYMBOL(il_init_scan_params); |
1516 | |
1517 | static int |
1518 | il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif) |
1519 | { |
1520 | int ret; |
1521 | |
1522 | lockdep_assert_held(&il->mutex); |
1523 | |
	cancel_delayed_work(&il->scan_check);
1525 | |
1526 | if (!il_is_ready_rf(il)) { |
1527 | IL_WARN("Request scan called when driver not ready.\n" ); |
1528 | return -EIO; |
1529 | } |
1530 | |
1531 | if (test_bit(S_SCAN_HW, &il->status)) { |
1532 | D_SCAN("Multiple concurrent scan requests in parallel.\n" ); |
1533 | return -EBUSY; |
1534 | } |
1535 | |
1536 | if (test_bit(S_SCAN_ABORTING, &il->status)) { |
1537 | D_SCAN("Scan request while abort pending.\n" ); |
1538 | return -EBUSY; |
1539 | } |
1540 | |
1541 | D_SCAN("Starting scan...\n" ); |
1542 | |
	set_bit(S_SCANNING, &il->status);
1544 | il->scan_start = jiffies; |
1545 | |
1546 | ret = il->ops->request_scan(il, vif); |
1547 | if (ret) { |
		clear_bit(S_SCANNING, &il->status);
1549 | return ret; |
1550 | } |
1551 | |
	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);
1554 | |
1555 | return 0; |
1556 | } |
1557 | |
1558 | int |
1559 | il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
1560 | struct ieee80211_scan_request *hw_req) |
1561 | { |
1562 | struct cfg80211_scan_request *req = &hw_req->req; |
1563 | struct il_priv *il = hw->priv; |
1564 | int ret; |
1565 | |
1566 | if (req->n_channels == 0) { |
1567 | IL_ERR("Can not scan on no channels.\n" ); |
1568 | return -EINVAL; |
1569 | } |
1570 | |
1571 | mutex_lock(&il->mutex); |
1572 | D_MAC80211("enter\n" ); |
1573 | |
1574 | if (test_bit(S_SCANNING, &il->status)) { |
1575 | D_SCAN("Scan already in progress.\n" ); |
1576 | ret = -EAGAIN; |
1577 | goto out_unlock; |
1578 | } |
1579 | |
1580 | /* mac80211 will only ask for one band at a time */ |
1581 | il->scan_request = req; |
1582 | il->scan_vif = vif; |
1583 | il->scan_band = req->channels[0]->band; |
1584 | |
1585 | ret = il_scan_initiate(il, vif); |
1586 | |
1587 | out_unlock: |
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);
1590 | |
1591 | return ret; |
1592 | } |
1593 | EXPORT_SYMBOL(il_mac_hw_scan); |
1594 | |
1595 | static void |
1596 | il_bg_scan_check(struct work_struct *data) |
1597 | { |
1598 | struct il_priv *il = |
1599 | container_of(data, struct il_priv, scan_check.work); |
1600 | |
1601 | D_SCAN("Scan check work\n" ); |
1602 | |
	/* Since we got here, the firmware did not finish the scan and
	 * most likely is in bad shape, so we don't bother to send an
	 * abort command; just force scan complete to mac80211 */
	mutex_lock(&il->mutex);
	il_force_scan_end(il);
	mutex_unlock(&il->mutex);
1609 | } |
1610 | |
1611 | /* |
1612 | * il_fill_probe_req - fill in all required fields and IE for probe request |
1613 | */ |
1614 | u16 |
1615 | il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame, |
1616 | const u8 *ta, const u8 *ies, int ie_len, int left) |
1617 | { |
1618 | int len = 0; |
1619 | u8 *pos = NULL; |
1620 | |
1621 | /* Make sure there is enough space for the probe request, |
1622 | * two mandatory IEs and the data */ |
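	/* 24 bytes = length of the 802.11 management frame header */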
1623 | left -= 24; |
1624 | if (left < 0) |
1625 | return 0; |
1626 | |
1627 | frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); |
	eth_broadcast_addr(frame->da);
	memcpy(frame->sa, ta, ETH_ALEN);
	eth_broadcast_addr(frame->bssid);
1631 | frame->seq_ctrl = 0; |
1632 | |
1633 | len += 24; |
1634 | |
1635 | /* ...next IE... */ |
1636 | pos = &frame->u.probe_req.variable[0]; |
1637 | |
1638 | /* fill in our indirect SSID IE */ |
1639 | left -= 2; |
1640 | if (left < 0) |
1641 | return 0; |
1642 | *pos++ = WLAN_EID_SSID; |
1643 | *pos++ = 0; |
1644 | |
1645 | len += 2; |
1646 | |
1647 | if (WARN_ON(left < ie_len)) |
1648 | return len; |
1649 | |
1650 | if (ies && ie_len) { |
1651 | memcpy(pos, ies, ie_len); |
1652 | len += ie_len; |
1653 | } |
1654 | |
1655 | return (u16) len; |
1656 | } |
1657 | EXPORT_SYMBOL(il_fill_probe_req); |
1658 | |
1659 | static void |
1660 | il_bg_abort_scan(struct work_struct *work) |
1661 | { |
1662 | struct il_priv *il = container_of(work, struct il_priv, abort_scan); |
1663 | |
1664 | D_SCAN("Abort scan work\n" ); |
1665 | |
	/* We keep the scan_check work queued in case the firmware does not
	 * report back a scan completed notification */
	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 200);
	mutex_unlock(&il->mutex);
1671 | } |
1672 | |
1673 | static void |
1674 | il_bg_scan_completed(struct work_struct *work) |
1675 | { |
1676 | struct il_priv *il = container_of(work, struct il_priv, scan_completed); |
1677 | bool aborted; |
1678 | |
1679 | D_SCAN("Completed scan.\n" ); |
1680 | |
	cancel_delayed_work(&il->scan_check);
1682 | |
1683 | mutex_lock(&il->mutex); |
1684 | |
	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
1686 | if (aborted) |
1687 | D_SCAN("Aborted scan completed.\n" ); |
1688 | |
	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
1690 | D_SCAN("Scan already completed.\n" ); |
1691 | goto out_settings; |
1692 | } |
1693 | |
1694 | il_complete_scan(il, aborted); |
1695 | |
1696 | out_settings: |
1697 | /* Can we still talk to firmware ? */ |
1698 | if (!il_is_ready_rf(il)) |
1699 | goto out; |
1700 | |
1701 | /* |
1702 | * We do not commit power settings while scan is pending, |
1703 | * do it now if the settings changed. |
1704 | */ |
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);
1707 | |
1708 | il->ops->post_scan(il); |
1709 | |
1710 | out: |
	mutex_unlock(&il->mutex);
1712 | } |
1713 | |
1714 | void |
1715 | il_setup_scan_deferred_work(struct il_priv *il) |
1716 | { |
1717 | INIT_WORK(&il->scan_completed, il_bg_scan_completed); |
1718 | INIT_WORK(&il->abort_scan, il_bg_abort_scan); |
1719 | INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check); |
1720 | } |
1721 | EXPORT_SYMBOL(il_setup_scan_deferred_work); |
1722 | |
1723 | void |
1724 | il_cancel_scan_deferred_work(struct il_priv *il) |
1725 | { |
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);
1728 | |
	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
1733 | } |
1734 | } |
1735 | EXPORT_SYMBOL(il_cancel_scan_deferred_work); |
1736 | |
1737 | /* il->sta_lock must be held */ |
1738 | static void |
1739 | il_sta_ucode_activate(struct il_priv *il, u8 sta_id) |
1740 | { |
1741 | |
1742 | if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) |
		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
1744 | sta_id, il->stations[sta_id].sta.sta.addr); |
1745 | |
1746 | if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) { |
1747 | D_ASSOC("STA id %u addr %pM already present" |
			" in uCode (according to driver)\n", sta_id,
1749 | il->stations[sta_id].sta.sta.addr); |
1750 | } else { |
1751 | il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE; |
		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
1753 | il->stations[sta_id].sta.sta.addr); |
1754 | } |
1755 | } |
1756 | |
1757 | static int |
1758 | il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta, |
1759 | struct il_rx_pkt *pkt, bool sync) |
1760 | { |
1761 | u8 sta_id = addsta->sta.sta_id; |
1762 | unsigned long flags; |
1763 | int ret = -EIO; |
1764 | |
1765 | if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { |
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
1767 | return ret; |
1768 | } |
1769 | |
	D_INFO("Processing response for adding station %u\n", sta_id);
1771 | |
1772 | spin_lock_irqsave(&il->sta_lock, flags); |
1773 | |
1774 | switch (pkt->u.add_sta.status) { |
1775 | case ADD_STA_SUCCESS_MSK: |
		D_INFO("C_ADD_STA PASSED\n");
1777 | il_sta_ucode_activate(il, sta_id); |
1778 | ret = 0; |
1779 | break; |
1780 | case ADD_STA_NO_ROOM_IN_TBL: |
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
1782 | break; |
1783 | case ADD_STA_NO_BLOCK_ACK_RESOURCE: |
		IL_ERR("Adding station %d failed, no block ack resource.\n",
1785 | sta_id); |
1786 | break; |
1787 | case ADD_STA_MODIFY_NON_EXIST_STA: |
		IL_ERR("Attempting to modify non-existing station %d\n",
1789 | sta_id); |
1790 | break; |
1791 | default: |
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
1793 | break; |
1794 | } |
1795 | |
	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
1799 | il->stations[sta_id].sta.sta.addr); |
1800 | |
1801 | /* |
1802 | * XXX: The MAC address in the command buffer is often changed from |
1803 | * the original sent to the device. That is, the MAC address |
1804 | * written to the command buffer often is not the same MAC address |
1805 | * read from the command buffer when the command returns. This |
1806 | * issue has not yet been resolved and this debugging is left to |
1807 | * observe the problem. |
1808 | */ |
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);
1813 | |
1814 | return ret; |
1815 | } |
1816 | |
1817 | static void |
1818 | il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd, |
1819 | struct il_rx_pkt *pkt) |
1820 | { |
1821 | struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload; |
1822 | |
	il_process_add_sta_resp(il, addsta, pkt, false);
1824 | |
1825 | } |
1826 | |
1827 | int |
1828 | il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags) |
1829 | { |
1830 | struct il_rx_pkt *pkt = NULL; |
1831 | int ret = 0; |
1832 | u8 data[sizeof(*sta)]; |
1833 | struct il_host_cmd cmd = { |
1834 | .id = C_ADD_STA, |
1835 | .flags = flags, |
1836 | .data = data, |
1837 | }; |
1838 | u8 sta_id __maybe_unused = sta->sta.sta_id; |
1839 | |
	D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
	       flags & CMD_ASYNC ? "a" : "");
1842 | |
1843 | if (flags & CMD_ASYNC) |
1844 | cmd.callback = il_add_sta_callback; |
1845 | else { |
1846 | cmd.flags |= CMD_WANT_SKB; |
1847 | might_sleep(); |
1848 | } |
1849 | |
1850 | cmd.len = il->ops->build_addsta_hcmd(sta, data); |
1851 | ret = il_send_cmd(il, &cmd); |
1852 | if (ret) |
1853 | return ret; |
1854 | if (flags & CMD_ASYNC) |
1855 | return 0; |
1856 | |
1857 | pkt = (struct il_rx_pkt *)cmd.reply_page; |
	ret = il_process_add_sta_resp(il, sta, pkt, true);

	il_free_pages(il, cmd.reply_page);
1861 | |
1862 | return ret; |
1863 | } |
1864 | EXPORT_SYMBOL(il_send_add_sta); |
1865 | |
1866 | static void |
1867 | il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta) |
1868 | { |
1869 | struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->deflink.ht_cap; |
1870 | __le32 sta_flags; |
1871 | |
1872 | if (!sta || !sta_ht_inf->ht_supported) |
1873 | goto done; |
1874 | |
	D_ASSOC("spatial multiplexing power save mode: %s\n",
		(sta->deflink.smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
		(sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
		"disabled");
1879 | |
1880 | sta_flags = il->stations[idx].sta.station_flags; |
1881 | |
1882 | sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); |
1883 | |
1884 | switch (sta->deflink.smps_mode) { |
1885 | case IEEE80211_SMPS_STATIC: |
1886 | sta_flags |= STA_FLG_MIMO_DIS_MSK; |
1887 | break; |
1888 | case IEEE80211_SMPS_DYNAMIC: |
1889 | sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; |
1890 | break; |
1891 | case IEEE80211_SMPS_OFF: |
1892 | break; |
1893 | default: |
		IL_WARN("Invalid MIMO PS mode %d\n", sta->deflink.smps_mode);
1895 | break; |
1896 | } |
1897 | |
1898 | sta_flags |= |
1899 | cpu_to_le32((u32) sta_ht_inf-> |
1900 | ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); |
1901 | |
1902 | sta_flags |= |
1903 | cpu_to_le32((u32) sta_ht_inf-> |
1904 | ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); |
1905 | |
	if (il_is_ht40_tx_allowed(il, &sta->deflink.ht_cap))
1907 | sta_flags |= STA_FLG_HT40_EN_MSK; |
1908 | else |
1909 | sta_flags &= ~STA_FLG_HT40_EN_MSK; |
1910 | |
1911 | il->stations[idx].sta.station_flags = sta_flags; |
1912 | done: |
1913 | return; |
1914 | } |
1915 | |
1916 | /* |
1917 | * il_prep_station - Prepare station information for addition |
1918 | * |
1919 | * should be called with sta_lock held |
1920 | */ |
1921 | u8 |
1922 | il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap, |
1923 | struct ieee80211_sta *sta) |
1924 | { |
1925 | struct il_station_entry *station; |
1926 | int i; |
1927 | u8 sta_id = IL_INVALID_STATION; |
1928 | u16 rate; |
1929 | |
1930 | if (is_ap) |
1931 | sta_id = IL_AP_ID; |
1932 | else if (is_broadcast_ether_addr(addr)) |
1933 | sta_id = il->hw_params.bcast_id; |
1934 | else |
1935 | for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) { |
			if (ether_addr_equal(il->stations[i].sta.sta.addr,
					     addr)) {
1938 | sta_id = i; |
1939 | break; |
1940 | } |
1941 | |
1942 | if (!il->stations[i].used && |
1943 | sta_id == IL_INVALID_STATION) |
1944 | sta_id = i; |
1945 | } |
1946 | |
1947 | /* |
1948 | * These two conditions have the same outcome, but keep them |
1949 | * separate |
1950 | */ |
1951 | if (unlikely(sta_id == IL_INVALID_STATION)) |
1952 | return sta_id; |
1953 | |
1954 | /* |
1955 | * uCode is not able to deal with multiple requests to add a |
1956 | * station. Keep track if one is in progress so that we do not send |
1957 | * another. |
1958 | */ |
1959 | if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { |
		D_INFO("STA %d already in process of being added.\n", sta_id);
1961 | return sta_id; |
1962 | } |
1963 | |
1964 | if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && |
1965 | (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) && |
	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1968 | sta_id, addr); |
1969 | return sta_id; |
1970 | } |
1971 | |
1972 | station = &il->stations[sta_id]; |
1973 | station->used = IL_STA_DRIVER_ACTIVE; |
	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
1975 | il->num_stations++; |
1976 | |
1977 | /* Set up the C_ADD_STA command to send to device */ |
1978 | memset(&station->sta, 0, sizeof(struct il_addsta_cmd)); |
1979 | memcpy(station->sta.sta.addr, addr, ETH_ALEN); |
1980 | station->sta.mode = 0; |
1981 | station->sta.sta.sta_id = sta_id; |
1982 | station->sta.station_flags = 0; |
1983 | |
1984 | /* |
1985 | * OK to call unconditionally, since local stations (IBSS BSSID |
1986 | * STA and broadcast STA) pass in a NULL sta, and mac80211 |
1987 | * doesn't allow HT IBSS. |
1988 | */ |
	il_set_ht_add_station(il, sta_id, sta);
1990 | |
1991 | /* 3945 only */ |
1992 | rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP; |
1993 | /* Turn on both antennas for the station... */ |
1994 | station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); |
1995 | |
1996 | return sta_id; |
1997 | |
1998 | } |
1999 | EXPORT_SYMBOL_GPL(il_prep_station); |
2000 | |
2001 | #define STA_WAIT_TIMEOUT (HZ/2) |
2002 | |
2003 | /* |
2004 | * il_add_station_common - |
2005 | */ |
2006 | int |
2007 | il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap, |
2008 | struct ieee80211_sta *sta, u8 *sta_id_r) |
2009 | { |
2010 | unsigned long flags_spin; |
2011 | int ret = 0; |
2012 | u8 sta_id; |
2013 | struct il_addsta_cmd sta_cmd; |
2014 | |
2015 | *sta_id_r = 0; |
2016 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
2017 | sta_id = il_prep_station(il, addr, is_ap, sta); |
2018 | if (sta_id == IL_INVALID_STATION) { |
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2021 | return -EINVAL; |
2022 | } |
2023 | |
2024 | /* |
2025 | * uCode is not able to deal with multiple requests to add a |
2026 | * station. Keep track if one is in progress so that we do not send |
2027 | * another. |
2028 | */ |
2029 | if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) { |
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2032 | return -EEXIST; |
2033 | } |
2034 | |
2035 | if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) && |
2036 | (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { |
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
2038 | sta_id, addr); |
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2040 | return -EEXIST; |
2041 | } |
2042 | |
2043 | il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS; |
2044 | memcpy(&sta_cmd, &il->stations[sta_id].sta, |
2045 | sizeof(struct il_addsta_cmd)); |
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2047 | |
2048 | /* Add station to device's station table */ |
2049 | ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); |
2050 | if (ret) { |
2051 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
		IL_ERR("Adding station %pM failed.\n",
2053 | il->stations[sta_id].sta.sta.addr); |
2054 | il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; |
2055 | il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS; |
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2057 | } |
2058 | *sta_id_r = sta_id; |
2059 | return ret; |
2060 | } |
2061 | EXPORT_SYMBOL(il_add_station_common); |
2062 | |
2063 | /* |
2064 | * il_sta_ucode_deactivate - deactivate ucode status for a station |
2065 | * |
2066 | * il->sta_lock must be held |
2067 | */ |
2068 | static void |
2069 | il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id) |
2070 | { |
2071 | /* Ucode must be active and driver must be non active */ |
2072 | if ((il->stations[sta_id]. |
2073 | used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) != |
2074 | IL_STA_UCODE_ACTIVE) |
		IL_ERR("removed non active STA %u\n", sta_id);
2076 | |
2077 | il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE; |
2078 | |
2079 | memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry)); |
	D_ASSOC("Removed STA %u\n", sta_id);
2081 | } |
2082 | |
2083 | static int |
2084 | il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id, |
2085 | bool temporary) |
2086 | { |
2087 | struct il_rx_pkt *pkt; |
2088 | int ret; |
2089 | |
2090 | unsigned long flags_spin; |
2091 | struct il_rem_sta_cmd rm_sta_cmd; |
2092 | |
2093 | struct il_host_cmd cmd = { |
2094 | .id = C_REM_STA, |
2095 | .len = sizeof(struct il_rem_sta_cmd), |
2096 | .flags = CMD_SYNC, |
2097 | .data = &rm_sta_cmd, |
2098 | }; |
2099 | |
2100 | memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); |
2101 | rm_sta_cmd.num_sta = 1; |
2102 | memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); |
2103 | |
2104 | cmd.flags |= CMD_WANT_SKB; |
2105 | |
2106 | ret = il_send_cmd(il, &cmd); |
2107 | |
2108 | if (ret) |
2109 | return ret; |
2110 | |
2111 | pkt = (struct il_rx_pkt *)cmd.reply_page; |
2112 | if (pkt->hdr.flags & IL_CMD_FAILED_MSK) { |
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
2114 | ret = -EIO; |
2115 | } |
2116 | |
2117 | if (!ret) { |
2118 | switch (pkt->u.rem_sta.status) { |
2119 | case REM_STA_SUCCESS_MSK: |
2120 | if (!temporary) { |
2121 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
2122 | il_sta_ucode_deactivate(il, sta_id); |
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
2127 | break; |
2128 | default: |
2129 | ret = -EIO; |
			IL_ERR("C_REM_STA failed\n");
2131 | break; |
2132 | } |
2133 | } |
	il_free_pages(il, cmd.reply_page);
2135 | |
2136 | return ret; |
2137 | } |
2138 | |
2139 | /* |
2140 | * il_remove_station - Remove driver's knowledge of station. |
2141 | */ |
2142 | int |
2143 | il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr) |
2144 | { |
2145 | unsigned long flags; |
2146 | |
2147 | if (!il_is_ready(il)) { |
		D_INFO("Unable to remove station %pM, device not ready.\n",
2149 | addr); |
2150 | /* |
2151 | * It is typical for stations to be removed when we are |
2152 | * going down. Return success since device will be down |
2153 | * soon anyway |
2154 | */ |
2155 | return 0; |
2156 | } |
2157 | |
	D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
2159 | |
2160 | if (WARN_ON(sta_id == IL_INVALID_STATION)) |
2161 | return -EINVAL; |
2162 | |
2163 | spin_lock_irqsave(&il->sta_lock, flags); |
2164 | |
2165 | if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) { |
		D_INFO("Removing %pM but non DRIVER active\n", addr);
2167 | goto out_err; |
2168 | } |
2169 | |
2170 | if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) { |
		D_INFO("Removing %pM but non UCODE active\n", addr);
2172 | goto out_err; |
2173 | } |
2174 | |
2175 | if (il->stations[sta_id].used & IL_STA_LOCAL) { |
		kfree(il->stations[sta_id].lq);
2177 | il->stations[sta_id].lq = NULL; |
2178 | } |
2179 | |
2180 | il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE; |
2181 | |
2182 | il->num_stations--; |
2183 | |
2184 | BUG_ON(il->num_stations < 0); |
2185 | |
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_remove_station(il, addr, sta_id, false);
out_err:
	spin_unlock_irqrestore(&il->sta_lock, flags);
2191 | return -EINVAL; |
2192 | } |
2193 | EXPORT_SYMBOL_GPL(il_remove_station); |
2194 | |
2195 | /* |
2196 | * il_clear_ucode_stations - clear ucode station table bits |
2197 | * |
2198 | * This function clears all the bits in the driver indicating |
2199 | * which stations are active in the ucode. Call when something |
2200 | * other than explicit station management would cause this in |
2201 | * the ucode, e.g. unassociated RXON. |
2202 | */ |
2203 | void |
2204 | il_clear_ucode_stations(struct il_priv *il) |
2205 | { |
2206 | int i; |
2207 | unsigned long flags_spin; |
2208 | bool cleared = false; |
2209 | |
	D_INFO("Clearing ucode stations in driver\n");
2211 | |
2212 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
2213 | for (i = 0; i < il->hw_params.max_stations; i++) { |
2214 | if (il->stations[i].used & IL_STA_UCODE_ACTIVE) { |
			D_INFO("Clearing ucode active for station %d\n", i);
2216 | il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; |
2217 | cleared = true; |
2218 | } |
2219 | } |
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2221 | |
2222 | if (!cleared) |
		D_INFO("No active stations found to be cleared\n");
2224 | } |
2225 | EXPORT_SYMBOL(il_clear_ucode_stations); |
2226 | |
2227 | /* |
2228 | * il_restore_stations() - Restore driver known stations to device |
2229 | * |
 * All stations considered active by the driver, but not present in uCode,
 * are restored.
2232 | * |
2233 | * Function sleeps. |
2234 | */ |
2235 | void |
2236 | il_restore_stations(struct il_priv *il) |
2237 | { |
2238 | struct il_addsta_cmd sta_cmd; |
2239 | struct il_link_quality_cmd lq; |
2240 | unsigned long flags_spin; |
2241 | int i; |
2242 | bool found = false; |
2243 | int ret; |
2244 | bool send_lq; |
2245 | |
2246 | if (!il_is_ready(il)) { |
		D_INFO("Not ready yet, not restoring any stations.\n");
2248 | return; |
2249 | } |
2250 | |
	D_ASSOC("Restoring all known stations ... start.\n");
2252 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
2253 | for (i = 0; i < il->hw_params.max_stations; i++) { |
2254 | if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) && |
2255 | !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) { |
			D_ASSOC("Restoring sta %pM\n",
2257 | il->stations[i].sta.sta.addr); |
2258 | il->stations[i].sta.mode = 0; |
2259 | il->stations[i].used |= IL_STA_UCODE_INPROGRESS; |
2260 | found = true; |
2261 | } |
2262 | } |
2263 | |
2264 | for (i = 0; i < il->hw_params.max_stations; i++) { |
2265 | if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) { |
2266 | memcpy(&sta_cmd, &il->stations[i].sta, |
2267 | sizeof(struct il_addsta_cmd)); |
2268 | send_lq = false; |
2269 | if (il->stations[i].lq) { |
2270 | memcpy(&lq, il->stations[i].lq, |
2271 | sizeof(struct il_link_quality_cmd)); |
2272 | send_lq = true; |
2273 | } |
			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2275 | ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC); |
2276 | if (ret) { |
2277 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
				IL_ERR("Adding station %pM failed.\n",
2279 | il->stations[i].sta.sta.addr); |
2280 | il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE; |
2281 | il->stations[i].used &= |
2282 | ~IL_STA_UCODE_INPROGRESS; |
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
2285 | } |
2286 | /* |
2287 | * Rate scaling has already been initialized, send |
2288 | * current LQ command |
2289 | */ |
2290 | if (send_lq) |
				il_send_lq_cmd(il, &lq, CMD_SYNC, true);
2292 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
2293 | il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS; |
2294 | } |
2295 | } |
2296 | |
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	if (!found)
		D_INFO("Restoring all known stations"
		       " .... no stations to be restored.\n");
	else
		D_INFO("Restoring all known stations" " .... complete.\n");
2303 | } |
2304 | EXPORT_SYMBOL(il_restore_stations); |
2305 | |
2306 | int |
2307 | il_get_free_ucode_key_idx(struct il_priv *il) |
2308 | { |
2309 | int i; |
2310 | |
2311 | for (i = 0; i < il->sta_key_max_num; i++) |
		if (!test_and_set_bit(i, &il->ucode_key_table))
2313 | return i; |
2314 | |
2315 | return WEP_INVALID_OFFSET; |
2316 | } |
2317 | EXPORT_SYMBOL(il_get_free_ucode_key_idx); |
2318 | |
2319 | void |
2320 | il_dealloc_bcast_stations(struct il_priv *il) |
2321 | { |
2322 | unsigned long flags; |
2323 | int i; |
2324 | |
2325 | spin_lock_irqsave(&il->sta_lock, flags); |
2326 | for (i = 0; i < il->hw_params.max_stations; i++) { |
2327 | if (!(il->stations[i].used & IL_STA_BCAST)) |
2328 | continue; |
2329 | |
2330 | il->stations[i].used &= ~IL_STA_UCODE_ACTIVE; |
2331 | il->num_stations--; |
2332 | BUG_ON(il->num_stations < 0); |
		kfree(il->stations[i].lq);
		il->stations[i].lq = NULL;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
2337 | } |
2338 | EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations); |
2339 | |
2340 | #ifdef CONFIG_IWLEGACY_DEBUG |
2341 | static void |
2342 | il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) |
2343 | { |
2344 | int i; |
	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
2347 | lq->general_params.dual_stream_ant_msk); |
2348 | |
2349 | for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) |
		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
2351 | } |
2352 | #else |
2353 | static inline void |
2354 | il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq) |
2355 | { |
2356 | } |
2357 | #endif |
2358 | |
2359 | /* |
2360 | * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity |
2361 | * |
 * It sometimes happens that, when an HT rate has been in use and we
 * lose connectivity with the AP, mac80211 will first tell us that the
2364 | * current channel is not HT anymore before removing the station. In such a |
2365 | * scenario the RXON flags will be updated to indicate we are not |
2366 | * communicating HT anymore, but the LQ command may still contain HT rates. |
2367 | * Test for this to prevent driver from sending LQ command between the time |
2368 | * RXON flags are updated and when LQ command is updated. |
2369 | */ |
2370 | static bool |
2371 | il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq) |
2372 | { |
2373 | int i; |
2374 | |
2375 | if (il->ht.enabled) |
2376 | return true; |
2377 | |
	D_INFO("Channel %u is not an HT channel\n", il->active.channel);
2379 | for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { |
2380 | if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) { |
			D_INFO("idx %d of LQ expects HT channel\n", i);
2382 | return false; |
2383 | } |
2384 | } |
2385 | return true; |
2386 | } |
2387 | |
2388 | /* |
2389 | * il_send_lq_cmd() - Send link quality command |
2390 | * @init: This command is sent as part of station initialization right |
2391 | * after station has been added. |
2392 | * |
2393 | * The link quality command is sent as the last step of station creation. |
2394 | * This is the special case in which init is set and we call a callback in |
2395 | * this case to clear the state indicating that station creation is in |
2396 | * progress. |
2397 | */ |
2398 | int |
2399 | il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq, |
2400 | u8 flags, bool init) |
2401 | { |
2402 | int ret = 0; |
2403 | unsigned long flags_spin; |
2404 | |
2405 | struct il_host_cmd cmd = { |
2406 | .id = C_TX_LINK_QUALITY_CMD, |
2407 | .len = sizeof(struct il_link_quality_cmd), |
2408 | .flags = flags, |
2409 | .data = lq, |
2410 | }; |
2411 | |
2412 | if (WARN_ON(lq->sta_id == IL_INVALID_STATION)) |
2413 | return -EINVAL; |
2414 | |
2415 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
2416 | if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) { |
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2418 | return -EINVAL; |
2419 | } |
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2421 | |
2422 | il_dump_lq_cmd(il, lq); |
2423 | BUG_ON(init && (cmd.flags & CMD_ASYNC)); |
2424 | |
2425 | if (il_is_lq_table_valid(il, lq)) |
2426 | ret = il_send_cmd(il, &cmd); |
2427 | else |
2428 | ret = -EINVAL; |
2429 | |
2430 | if (cmd.flags & CMD_ASYNC) |
2431 | return ret; |
2432 | |
2433 | if (init) { |
2434 | D_INFO("init LQ command complete," |
		       " clearing sta addition status for sta %d\n",
2436 | lq->sta_id); |
2437 | spin_lock_irqsave(&il->sta_lock, flags_spin); |
2438 | il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS; |
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2440 | } |
2441 | return ret; |
2442 | } |
2443 | EXPORT_SYMBOL(il_send_lq_cmd); |
2444 | |
2445 | int |
2446 | il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
2447 | struct ieee80211_sta *sta) |
2448 | { |
2449 | struct il_priv *il = hw->priv; |
2450 | struct il_station_priv_common *sta_common = (void *)sta->drv_priv; |
2451 | int ret; |
2452 | |
2453 | mutex_lock(&il->mutex); |
	D_MAC80211("enter station %pM\n", sta->addr);
2455 | |
2456 | ret = il_remove_station(il, sta_common->sta_id, sta->addr); |
2457 | if (ret) |
		IL_ERR("Error removing station %pM\n", sta->addr);
2459 | |
	D_MAC80211("leave ret %d\n", ret);
	mutex_unlock(&il->mutex);
2462 | |
2463 | return ret; |
2464 | } |
2465 | EXPORT_SYMBOL(il_mac_sta_remove); |
2466 | |
2467 | /************************** RX-FUNCTIONS ****************************/ |
2468 | /* |
2469 | * Rx theory of operation |
2470 | * |
2471 | * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), |
2472 | * each of which point to Receive Buffers to be filled by the NIC. These get |
2473 | * used not only for Rx frames, but for any command response or notification |
2474 | * from the NIC. The driver and NIC manage the Rx buffers by means |
2475 | * of idxes into the circular buffer. |
2476 | * |
2477 | * Rx Queue Indexes |
2478 | * The host/firmware share two idx registers for managing the Rx buffers. |
2479 | * |
2480 | * The READ idx maps to the first position that the firmware may be writing |
2481 | * to -- the driver can read up to (but not including) this position and get |
2482 | * good data. |
2483 | * The READ idx is managed by the firmware once the card is enabled. |
2484 | * |
2485 | * The WRITE idx maps to the last position the driver has read from -- the |
2486 | * position preceding WRITE is the last slot the firmware can place a packet. |
2487 | * |
2488 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if |
2489 | * WRITE = READ. |
2490 | * |
2491 | * During initialization, the host sets up the READ queue position to the first |
2492 | * IDX position, and WRITE to the last (READ - 1 wrapped) |
2493 | * |
2494 | * When the firmware places a packet in a buffer, it will advance the READ idx |
2495 | * and fire the RX interrupt. The driver can then query the READ idx and |
2496 | * process as many packets as possible, moving the WRITE idx forward as it |
2497 | * resets the Rx queue buffers with new memory. |
2498 | * |
2499 | * The management in the driver is as follows: |
2500 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When |
2501 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled |
2502 | * to replenish the iwl->rxq->rx_free. |
2503 | * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the |
2504 | * iwl->rxq is replenished and the READ IDX is updated (updating the |
2505 | * 'processed' and 'read' driver idxes as well) |
2506 | * + A received packet is processed and handed to the kernel network stack, |
2507 | * detached from the iwl->rxq. The driver 'processed' idx is updated. |
2508 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free |
2509 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ |
2510 | * IDX is not incremented and iwl->status(RX_STALLED) is set. If there |
2511 | * were enough free buffers and RX_STALLED is set it is cleared. |
2512 | * |
2513 | * |
2514 | * Driver sequence: |
2515 | * |
2516 | * il_rx_queue_alloc() Allocates rx_free |
2517 | * il_rx_replenish() Replenishes rx_free list from rx_used, and calls |
2518 | * il_rx_queue_restock |
2519 | * il_rx_queue_restock() Moves available buffers from rx_free into Rx |
2520 | * queue, updates firmware pointers, and updates |
2521 | * the WRITE idx. If insufficient rx_free buffers |
2522 | * are available, schedules il_rx_replenish |
2523 | * |
2524 | * -- enable interrupts -- |
2525 | * ISR - il_rx() Detach il_rx_bufs from pool up to the |
2526 | * READ IDX, detaching the SKB from the pool. |
2527 | * Moves the packet buffer from queue to rx_used. |
2528 | * Calls il_rx_queue_restock to refill any empty |
2529 | * slots. |
2530 | * ... |
2531 | * |
2532 | */ |
2533 | |
2534 | /* |
2535 | * il_rx_queue_space - Return number of free slots available in queue. |
2536 | */ |
2537 | int |
2538 | il_rx_queue_space(const struct il_rx_queue *q) |
2539 | { |
2540 | int s = q->read - q->write; |
2541 | if (s <= 0) |
2542 | s += RX_QUEUE_SIZE; |
2543 | /* keep some buffer to not confuse full and empty queue */ |
2544 | s -= 2; |
2545 | if (s < 0) |
2546 | s = 0; |
2547 | return s; |
2548 | } |
2549 | EXPORT_SYMBOL(il_rx_queue_space); |
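
/*
 * Worked example for il_rx_queue_space(), illustrative only and assuming
 * the usual RX_QUEUE_SIZE of 256: with q->read = 10 and q->write = 250,
 * s = 10 - 250 = -240, then s += 256 gives 16, and the 2-slot reserve
 * leaves 14 usable slots. With q->read == q->write the function reports
 * 254 free slots; with q->write == q->read - 1 it reports 0.
 */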
2550 | |
2551 | /* |
2552 | * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue |
2553 | */ |
2554 | void |
2555 | il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q) |
2556 | { |
2557 | unsigned long flags; |
2558 | u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg; |
2559 | u32 reg; |
2560 | |
2561 | spin_lock_irqsave(&q->lock, flags); |
2562 | |
2563 | if (q->need_update == 0) |
2564 | goto exit_unlock; |
2565 | |
2566 | /* If power-saving is in use, make sure device is awake */ |
2567 | if (test_bit(S_POWER_PMI, &il->status)) { |
2568 | reg = _il_rd(il, CSR_UCODE_DRV_GP1); |
2569 | |
2570 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { |
			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
2572 | reg); |
2573 | il_set_bit(il, CSR_GP_CNTRL, |
2574 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
2575 | goto exit_unlock; |
2576 | } |
2577 | |
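		/* Device expects the write idx to be a multiple of 8 */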
		q->write_actual = (q->write & ~0x7);
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2580 | |
2581 | /* Else device is assumed to be awake */ |
2582 | } else { |
2583 | /* Device expects a multiple of 8 */ |
2584 | q->write_actual = (q->write & ~0x7); |
		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2586 | } |
2587 | |
2588 | q->need_update = 0; |
2589 | |
2590 | exit_unlock: |
	spin_unlock_irqrestore(&q->lock, flags);
2592 | } |
2593 | EXPORT_SYMBOL(il_rx_queue_update_write_ptr); |
2594 | |
2595 | int |
2596 | il_rx_queue_alloc(struct il_priv *il) |
2597 | { |
2598 | struct il_rx_queue *rxq = &il->rxq; |
2599 | struct device *dev = &il->pci_dev->dev; |
2600 | int i; |
2601 | |
2602 | spin_lock_init(&rxq->lock); |
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
2605 | |
2606 | /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ |
	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2608 | GFP_KERNEL); |
2609 | if (!rxq->bd) |
2610 | goto err_bd; |
2611 | |
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
2614 | if (!rxq->rb_stts) |
2615 | goto err_rb; |
2616 | |
2617 | /* Fill the rx_used queue with _all_ of the Rx buffers */ |
2618 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) |
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2620 | |
2621 | /* Set us so that we have processed and used all buffers, but have |
2622 | * not restocked the Rx queue with fresh buffers */ |
2623 | rxq->read = rxq->write = 0; |
2624 | rxq->write_actual = 0; |
2625 | rxq->free_count = 0; |
2626 | rxq->need_update = 0; |
2627 | return 0; |
2628 | |
2629 | err_rb: |
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
2632 | err_bd: |
2633 | return -ENOMEM; |
2634 | } |
2635 | EXPORT_SYMBOL(il_rx_queue_alloc); |
2636 | |
2637 | void |
2638 | il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb) |
2639 | { |
2640 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
2641 | struct il_spectrum_notification *report = &(pkt->u.spectrum_notif); |
2642 | |
2643 | if (!report->state) { |
		D_11H("Spectrum Measure Notification: Start\n");
2645 | return; |
2646 | } |
2647 | |
2648 | memcpy(&il->measure_report, report, sizeof(*report)); |
2649 | il->measurement_status |= MEASUREMENT_READY; |
2650 | } |
2651 | EXPORT_SYMBOL(il_hdl_spectrum_measurement); |
2652 | |
2653 | /* |
2654 | * returns non-zero if packet should be dropped |
2655 | */ |
2656 | int |
2657 | il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, |
2658 | u32 decrypt_res, struct ieee80211_rx_status *stats) |
2659 | { |
2660 | u16 fc = le16_to_cpu(hdr->frame_control); |
2661 | |
2662 | /* |
2663 | * All contexts have the same setting here due to it being |
2664 | * a module parameter, so OK to check any context. |
2665 | */ |
2666 | if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) |
2667 | return 0; |
2668 | |
2669 | if (!(fc & IEEE80211_FCTL_PROTECTED)) |
2670 | return 0; |
2671 | |
	D_RX("decrypt_res:0x%x\n", decrypt_res);
2673 | switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { |
2674 | case RX_RES_STATUS_SEC_TYPE_TKIP: |
2675 | /* The uCode has got a bad phase 1 Key, pushes the packet. |
2676 | * Decryption will be done in SW. */ |
2677 | if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == |
2678 | RX_RES_STATUS_BAD_KEY_TTAK) |
2679 | break; |
2680 | fallthrough; |
2681 | |
2682 | case RX_RES_STATUS_SEC_TYPE_WEP: |
2683 | if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == |
2684 | RX_RES_STATUS_BAD_ICV_MIC) { |
2685 | /* bad ICV, the packet is destroyed since the |
2686 | * decryption is inplace, drop it */ |
			D_RX("Packet destroyed\n");
2688 | return -1; |
2689 | } |
2690 | fallthrough; |
2691 | case RX_RES_STATUS_SEC_TYPE_CCMP: |
2692 | if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == |
2693 | RX_RES_STATUS_DECRYPT_OK) { |
			D_RX("hw decrypt successfully!!!\n");
2695 | stats->flag |= RX_FLAG_DECRYPTED; |
2696 | } |
2697 | break; |
2698 | |
2699 | default: |
2700 | break; |
2701 | } |
2702 | return 0; |
2703 | } |
2704 | EXPORT_SYMBOL(il_set_decrypted_flag); |
2705 | |
2706 | /* |
2707 | * il_txq_update_write_ptr - Send new write idx to hardware |
2708 | */ |
2709 | void |
2710 | il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq) |
2711 | { |
2712 | u32 reg = 0; |
2713 | int txq_id = txq->q.id; |
2714 | |
2715 | if (txq->need_update == 0) |
2716 | return; |
2717 | |
2718 | /* if we're trying to save power */ |
2719 | if (test_bit(S_POWER_PMI, &il->status)) { |
2720 | /* wake up nic if it's powered down ... |
2721 | * uCode will wake up, and interrupt us again, so next |
2722 | * time we'll skip this part. */ |
2723 | reg = _il_rd(il, CSR_UCODE_DRV_GP1); |
2724 | |
2725 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { |
			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
2727 | txq_id, reg); |
2728 | il_set_bit(il, CSR_GP_CNTRL, |
2729 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
2730 | return; |
2731 | } |
2732 | |
		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2734 | |
2735 | /* |
2736 | * else not in power-save mode, |
2737 | * uCode will never sleep when we're |
2738 | * trying to tx (during RFKILL, we're not trying to tx). |
2739 | */ |
2740 | } else |
		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2742 | txq->need_update = 0; |
2743 | } |
2744 | EXPORT_SYMBOL(il_txq_update_write_ptr); |
2745 | |
2746 | /* |
2747 | * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's |
2748 | */ |
2749 | void |
2750 | il_tx_queue_unmap(struct il_priv *il, int txq_id) |
2751 | { |
2752 | struct il_tx_queue *txq = &il->txq[txq_id]; |
2753 | struct il_queue *q = &txq->q; |
2754 | |
2755 | if (q->n_bd == 0) |
2756 | return; |
2757 | |
2758 | while (q->write_ptr != q->read_ptr) { |
2759 | il->ops->txq_free_tfd(il, txq); |
		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2761 | } |
2762 | } |
2763 | EXPORT_SYMBOL(il_tx_queue_unmap); |
2764 | |
2765 | /* |
2766 | * il_tx_queue_free - Deallocate DMA queue. |
2767 | * @txq: Transmit queue to deallocate. |
2768 | * |
2769 | * Empty queue by removing and destroying all BD's. |
2770 | * Free all buffers. |
2771 | * 0-fill, but do not free "txq" descriptor structure. |
2772 | */ |
2773 | void |
2774 | il_tx_queue_free(struct il_priv *il, int txq_id) |
2775 | { |
2776 | struct il_tx_queue *txq = &il->txq[txq_id]; |
2777 | struct device *dev = &il->pci_dev->dev; |
2778 | int i; |
2779 | |
2780 | il_tx_queue_unmap(il, txq_id); |
2781 | |
2782 | /* De-alloc array of command/tx buffers */ |
2783 | if (txq->cmd) { |
2784 | for (i = 0; i < TFD_TX_CMD_SLOTS; i++) |
			kfree(txq->cmd[i]);
2786 | } |
2787 | |
2788 | /* De-alloc circular buffer of TFDs */ |
2789 | if (txq->q.n_bd) |
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);
2792 | |
2793 | /* De-alloc array of per-TFD driver data */ |
	kfree(txq->skbs);
2795 | txq->skbs = NULL; |
2796 | |
2797 | /* deallocate arrays */ |
	kfree(txq->cmd);
	kfree(txq->meta);
2800 | txq->cmd = NULL; |
2801 | txq->meta = NULL; |
2802 | |
2803 | /* 0-fill queue descriptor structure */ |
2804 | memset(txq, 0, sizeof(*txq)); |
2805 | } |
2806 | EXPORT_SYMBOL(il_tx_queue_free); |
2807 | |
2808 | /* |
2809 | * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue |
2810 | */ |
2811 | void |
2812 | il_cmd_queue_unmap(struct il_priv *il) |
2813 | { |
2814 | struct il_tx_queue *txq = &il->txq[il->cmd_queue]; |
2815 | struct il_queue *q = &txq->q; |
2816 | int i; |
2817 | |
2818 | if (q->n_bd == 0) |
2819 | return; |
2820 | |
2821 | while (q->read_ptr != q->write_ptr) { |
		i = il_get_cmd_idx(q, q->read_ptr, 0);
2823 | |
2824 | if (txq->meta[i].flags & CMD_MAPPED) { |
2825 | dma_unmap_single(&il->pci_dev->dev, |
2826 | dma_unmap_addr(&txq->meta[i], mapping), |
2827 | dma_unmap_len(&txq->meta[i], len), |
2828 | DMA_BIDIRECTIONAL); |
2829 | txq->meta[i].flags = 0; |
2830 | } |
2831 | |
		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2833 | } |
2834 | |
2835 | i = q->n_win; |
2836 | if (txq->meta[i].flags & CMD_MAPPED) { |
2837 | dma_unmap_single(&il->pci_dev->dev, |
2838 | dma_unmap_addr(&txq->meta[i], mapping), |
2839 | dma_unmap_len(&txq->meta[i], len), |
2840 | DMA_BIDIRECTIONAL); |
2841 | txq->meta[i].flags = 0; |
2842 | } |
2843 | } |
2844 | EXPORT_SYMBOL(il_cmd_queue_unmap); |
2845 | |
2846 | /* |
2847 | * il_cmd_queue_free - Deallocate DMA queue. |
2848 | * |
2849 | * Empty queue by removing and destroying all BD's. |
2850 | * Free all buffers. |
2851 | * 0-fill, but do not free "txq" descriptor structure. |
2852 | */ |
2853 | void |
2854 | il_cmd_queue_free(struct il_priv *il) |
2855 | { |
2856 | struct il_tx_queue *txq = &il->txq[il->cmd_queue]; |
2857 | struct device *dev = &il->pci_dev->dev; |
2858 | int i; |
2859 | |
2860 | il_cmd_queue_unmap(il); |
2861 | |
2862 | /* De-alloc array of command/tx buffers */ |
2863 | if (txq->cmd) { |
2864 | for (i = 0; i <= TFD_CMD_SLOTS; i++) |
			kfree(txq->cmd[i]);
2866 | } |
2867 | |
2868 | /* De-alloc circular buffer of TFDs */ |
2869 | if (txq->q.n_bd) |
		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);
2872 | |
2873 | /* deallocate arrays */ |
	kfree(txq->cmd);
	kfree(txq->meta);
2876 | txq->cmd = NULL; |
2877 | txq->meta = NULL; |
2878 | |
2879 | /* 0-fill queue descriptor structure */ |
2880 | memset(txq, 0, sizeof(*txq)); |
2881 | } |
2882 | EXPORT_SYMBOL(il_cmd_queue_free); |
2883 | |
2884 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** |
2885 | * DMA services |
2886 | * |
2887 | * Theory of operation |
2888 | * |
2889 | * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer |
2890 | * of buffer descriptors, each of which points to one or more data buffers for |
2891 | * the device to read from or fill. Driver and device exchange status of each |
2892 | * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty |
2893 | * entries in each circular buffer, to protect against confusing empty and full |
2894 | * queue states. |
2895 | * |
2896 | * The device reads or writes the data in the queues via the device's several |
2897 | * DMA/FIFO channels. Each queue is mapped to a single DMA channel. |
2898 | * |
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes less than the low mark, the Tx
 * queue is stopped. When reclaiming packets (on the 'tx done' IRQ), if the
 * free space becomes greater than the high mark, the Tx queue is resumed.
2903 | * |
2904 | * See more detailed info in 4965.h. |
2905 | ***************************************************/ |
2906 | |
2907 | int |
2908 | il_queue_space(const struct il_queue *q) |
2909 | { |
2910 | int s = q->read_ptr - q->write_ptr; |
2911 | |
2912 | if (q->read_ptr > q->write_ptr) |
2913 | s -= q->n_bd; |
2914 | |
2915 | if (s <= 0) |
2916 | s += q->n_win; |
2917 | /* keep some reserve to not confuse empty and full situations */ |
2918 | s -= 2; |
2919 | if (s < 0) |
2920 | s = 0; |
2921 | return s; |
2922 | } |
2923 | EXPORT_SYMBOL(il_queue_space); |
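
/*
 * Worked example for il_queue_space(), illustrative only: with
 * q->n_win = 32, q->read_ptr = 5 and q->write_ptr = 20, 15 entries are
 * in flight (20 - 5); s = 5 - 20 = -15, then s += 32 gives 17, and
 * subtracting the 2-entry reserve leaves 15 free slots in the window.
 */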
2924 | |
2925 | |
2926 | /* |
2927 | * il_queue_init - Initialize queue's high/low-water and read/write idxes |
2928 | */ |
2929 | static int |
2930 | il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id) |
2931 | { |
2932 | /* |
2933 | * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise |
2934 | * il_queue_inc_wrap and il_queue_dec_wrap are broken. |
2935 | */ |
2936 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); |
2937 | /* FIXME: remove q->n_bd */ |
2938 | q->n_bd = TFD_QUEUE_SIZE_MAX; |
2939 | |
2940 | q->n_win = slots; |
2941 | q->id = id; |
2942 | |
2943 | /* slots_must be power-of-two size, otherwise |
2944 | * il_get_cmd_idx is broken. */ |
2945 | BUG_ON(!is_power_of_2(slots)); |
2946 | |
2947 | q->low_mark = q->n_win / 4; |
2948 | if (q->low_mark < 4) |
2949 | q->low_mark = 4; |
2950 | |
2951 | q->high_mark = q->n_win / 8; |
2952 | if (q->high_mark < 2) |
2953 | q->high_mark = 2; |
2954 | |
2955 | q->write_ptr = q->read_ptr = 0; |
2956 | |
2957 | return 0; |
2958 | } |
2959 | |
2960 | /* |
2961 | * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue |
2962 | */ |
2963 | static int |
2964 | il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id) |
2965 | { |
2966 | struct device *dev = &il->pci_dev->dev; |
2967 | size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; |
2968 | |
	/* Driver private data, only for Tx (not command) queues,
2970 | * not shared with device. */ |
2971 | if (id != il->cmd_queue) { |
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
				    sizeof(struct sk_buff *),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IL_ERR("Fail to alloc skbs\n");
2977 | goto error; |
2978 | } |
2979 | } else |
2980 | txq->skbs = NULL; |
2981 | |
2982 | /* Circular buffer of transmit frame descriptors (TFDs), |
2983 | * shared with device */ |
2984 | txq->tfds = |
	    dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2986 | if (!txq->tfds) |
2987 | goto error; |
2988 | |
2989 | txq->q.id = id; |
2990 | |
2991 | return 0; |
2992 | |
2993 | error: |
	kfree(txq->skbs);
2995 | txq->skbs = NULL; |
2996 | |
2997 | return -ENOMEM; |
2998 | } |
2999 | |
3000 | /* |
3001 | * il_tx_queue_init - Allocate and initialize one tx/cmd queue |
3002 | */ |
3003 | int |
3004 | il_tx_queue_init(struct il_priv *il, u32 txq_id) |
3005 | { |
3006 | int i, len, ret; |
3007 | int slots, actual_slots; |
3008 | struct il_tx_queue *txq = &il->txq[txq_id]; |
3009 | |
3010 | /* |
3011 | * Alloc buffer array for commands (Tx or other types of commands). |
3012 | * For the command queue (#4/#9), allocate command space + one big |
3013 | * command for scan, since scan command is very huge; the system will |
3014 | * not have two scans at the same time, so only one is needed. |
3015 | * For normal Tx queues (all other queues), no super-size command |
3016 | * space is needed. |
3017 | */ |
3018 | if (txq_id == il->cmd_queue) { |
3019 | slots = TFD_CMD_SLOTS; |
3020 | actual_slots = slots + 1; |
3021 | } else { |
3022 | slots = TFD_TX_CMD_SLOTS; |
3023 | actual_slots = slots; |
3024 | } |
3025 | |
3026 | txq->meta = |
	    kcalloc(actual_slots, sizeof(struct il_cmd_meta), GFP_KERNEL);
	txq->cmd =
	    kcalloc(actual_slots, sizeof(struct il_device_cmd *), GFP_KERNEL);
3030 | |
3031 | if (!txq->meta || !txq->cmd) |
3032 | goto out_free_arrays; |
3033 | |
3034 | len = sizeof(struct il_device_cmd); |
3035 | for (i = 0; i < actual_slots; i++) { |
3036 | /* only happens for cmd queue */ |
3037 | if (i == slots) |
3038 | len = IL_MAX_CMD_SIZE; |
3039 | |
		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
3041 | if (!txq->cmd[i]) |
3042 | goto err; |
3043 | } |
3044 | |
3045 | /* Alloc driver data array and TFD circular buffer */ |
	ret = il_tx_queue_alloc(il, txq, txq_id);
3047 | if (ret) |
3048 | goto err; |
3049 | |
3050 | txq->need_update = 0; |
3051 | |
3052 | /* |
3053 | * For the default queues 0-3, set up the swq_id |
3054 | * already -- all others need to get one later |
3055 | * (if they need one at all). |
3056 | */ |
3057 | if (txq_id < 4) |
		il_set_swq_id(txq, txq_id, txq_id);
3059 | |
3060 | /* Initialize queue's high/low-water marks, and head/tail idxes */ |
	il_queue_init(il, &txq->q, slots, txq_id);
3062 | |
3063 | /* Tell device where to find queue */ |
3064 | il->ops->txq_init(il, txq); |
3065 | |
3066 | return 0; |
3067 | err: |
3068 | for (i = 0; i < actual_slots; i++) |
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	txq->meta = NULL;
	kfree(txq->cmd);
3074 | txq->cmd = NULL; |
3075 | |
3076 | return -ENOMEM; |
3077 | } |
3078 | EXPORT_SYMBOL(il_tx_queue_init); |
3079 | |
3080 | void |
3081 | il_tx_queue_reset(struct il_priv *il, u32 txq_id) |
3082 | { |
3083 | int slots, actual_slots; |
3084 | struct il_tx_queue *txq = &il->txq[txq_id]; |
3085 | |
3086 | if (txq_id == il->cmd_queue) { |
3087 | slots = TFD_CMD_SLOTS; |
3088 | actual_slots = TFD_CMD_SLOTS + 1; |
3089 | } else { |
3090 | slots = TFD_TX_CMD_SLOTS; |
3091 | actual_slots = TFD_TX_CMD_SLOTS; |
3092 | } |
3093 | |
3094 | memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots); |
3095 | txq->need_update = 0; |
3096 | |
3097 | /* Initialize queue's high/low-water marks, and head/tail idxes */ |
	il_queue_init(il, &txq->q, slots, txq_id);
3099 | |
3100 | /* Tell device where to find queue */ |
3101 | il->ops->txq_init(il, txq); |
3102 | } |
3103 | EXPORT_SYMBOL(il_tx_queue_reset); |
3104 | |
3105 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ |
3106 | |
3107 | /* |
3108 | * il_enqueue_hcmd - enqueue a uCode command |
 * @il: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the idx (> 0) of the command in the
 * command queue.
3115 | */ |
3116 | int |
3117 | il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd) |
3118 | { |
3119 | struct il_tx_queue *txq = &il->txq[il->cmd_queue]; |
3120 | struct il_queue *q = &txq->q; |
3121 | struct il_device_cmd *out_cmd; |
3122 | struct il_cmd_meta *out_meta; |
3123 | dma_addr_t phys_addr; |
3124 | unsigned long flags; |
3125 | u32 idx; |
3126 | u16 fix_size; |
3127 | |
3128 | cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len); |
3129 | fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr)); |
3130 | |
3131 | /* If any of the command structures end up being larger than |
3132 | * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then |
3133 | * we will need to increase the size of the TFD entries |
3134 | * Also, check to see if command buffer should not exceed the size |
3135 | * of device_cmd and max_cmd_size. */ |
3136 | BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && |
3137 | !(cmd->flags & CMD_SIZE_HUGE)); |
3138 | BUG_ON(fix_size > IL_MAX_CMD_SIZE); |
3139 | |
3140 | if (il_is_rfkill(il) || il_is_ctkill(il)) { |
		IL_WARN("Not sending command - %s KILL\n",
			il_is_rfkill(il) ? "RF" : "CT");
3143 | return -EIO; |
3144 | } |
3145 | |
3146 | spin_lock_irqsave(&il->hcmd_lock, flags); |
3147 | |
3148 | if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { |
		spin_unlock_irqrestore(&il->hcmd_lock, flags);

		IL_ERR("Restarting adapter due to command queue full\n");
		queue_work(il->workqueue, &il->restart);
3153 | return -ENOSPC; |
3154 | } |
3155 | |
	idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
3157 | out_cmd = txq->cmd[idx]; |
3158 | out_meta = &txq->meta[idx]; |
3159 | |
3160 | if (WARN_ON(out_meta->flags & CMD_MAPPED)) { |
		spin_unlock_irqrestore(&il->hcmd_lock, flags);
3162 | return -ENOSPC; |
3163 | } |
3164 | |
3165 | memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ |
3166 | out_meta->flags = cmd->flags | CMD_MAPPED; |
3167 | if (cmd->flags & CMD_WANT_SKB) |
3168 | out_meta->source = cmd; |
3169 | if (cmd->flags & CMD_ASYNC) |
3170 | out_meta->callback = cmd->callback; |
3171 | |
3172 | out_cmd->hdr.cmd = cmd->id; |
3173 | memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); |
3174 | |
3175 | /* At this point, the out_cmd now has all of the incoming cmd |
3176 | * information */ |
3177 | |
3178 | out_cmd->hdr.flags = 0; |
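	/*
	 * The sequence field packs the command queue number and the write
	 * idx so that il_tx_cmd_complete() can map the response back to
	 * this slot via SEQ_TO_QUEUE()/SEQ_TO_IDX(); SEQ_HUGE_FRAME marks
	 * commands stored in the over-sized slot.
	 */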
3179 | out_cmd->hdr.sequence = |
3180 | cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr)); |
3181 | if (cmd->flags & CMD_SIZE_HUGE) |
3182 | out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; |
3183 | |
3184 | #ifdef CONFIG_IWLEGACY_DEBUG |
3185 | switch (out_cmd->hdr.cmd) { |
3186 | case C_TX_LINK_QUALITY_CMD: |
3187 | case C_SENSITIVITY: |
3188 | D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, " |
			  "%d bytes at %d[%d]:%d\n",
3190 | il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, |
3191 | le16_to_cpu(out_cmd->hdr.sequence), fix_size, |
3192 | q->write_ptr, idx, il->cmd_queue); |
3193 | break; |
3194 | default: |
3195 | D_HC("Sending command %s (#%x), seq: 0x%04X, " |
		     "%d bytes at %d[%d]:%d\n",
3197 | il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd, |
3198 | le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr, |
3199 | idx, il->cmd_queue); |
3200 | } |
3201 | #endif |
3202 | |
3203 | phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size, |
3204 | DMA_BIDIRECTIONAL); |
3205 | if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) { |
3206 | idx = -ENOMEM; |
3207 | goto out; |
3208 | } |
3209 | dma_unmap_addr_set(out_meta, mapping, phys_addr); |
3210 | dma_unmap_len_set(out_meta, len, fix_size); |
3211 | |
3212 | txq->need_update = 1; |
3213 | |
3214 | if (il->ops->txq_update_byte_cnt_tbl) |
3215 | /* Set up entry in queue's byte count circular buffer */ |
3216 | il->ops->txq_update_byte_cnt_tbl(il, txq, 0); |
3217 | |
3218 | il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1, |
3219 | U32_PAD(cmd->len)); |
3220 | |
3221 | /* Increment and update queue's write idx */ |
	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3223 | il_txq_update_write_ptr(il, txq); |
3224 | |
3225 | out: |
	spin_unlock_irqrestore(&il->hcmd_lock, flags);
3227 | return idx; |
3228 | } |
3229 | |
3230 | /* |
3231 | * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd |
3232 | * |
3233 | * When FW advances 'R' idx, all entries between old and new 'R' idx |
3234 | * need to be reclaimed. As result, some free space forms. If there is |
3235 | * enough free space (> low mark), wake the stack that feeds us. |
3236 | */ |
3237 | static void |
3238 | il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx) |
3239 | { |
3240 | struct il_tx_queue *txq = &il->txq[txq_id]; |
3241 | struct il_queue *q = &txq->q; |
3242 | int nfreed = 0; |
3243 | |
	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
3247 | q->write_ptr, q->read_ptr); |
3248 | return; |
3249 | } |
3250 | |
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3253 | |
3254 | if (nfreed++ > 0) { |
			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
			       q->write_ptr, q->read_ptr);
			queue_work(il->workqueue, &il->restart);
3258 | } |
3259 | |
3260 | } |
3261 | } |
3262 | |
3263 | /* |
3264 | * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them |
3265 | * @rxb: Rx buffer to reclaim |
3266 | * |
3267 | * If an Rx buffer has an async callback associated with it the callback |
3268 | * will be executed. The attached skb (if present) will only be freed |
3269 | * if the callback returns 1 |
3270 | */ |
3271 | void |
3272 | il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb) |
3273 | { |
3274 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
3275 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
3276 | int txq_id = SEQ_TO_QUEUE(sequence); |
3277 | int idx = SEQ_TO_IDX(sequence); |
3278 | int cmd_idx; |
3279 | bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); |
3280 | struct il_device_cmd *cmd; |
3281 | struct il_cmd_meta *meta; |
3282 | struct il_tx_queue *txq = &il->txq[il->cmd_queue]; |
3283 | unsigned long flags; |
3284 | |
	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
3288 | if (WARN |
3289 | (txq_id != il->cmd_queue, |
3290 | "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n" , |
3291 | txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr, |
3292 | il->txq[il->cmd_queue].q.write_ptr)) { |
3293 | il_print_hex_error(il, pkt, 32); |
3294 | return; |
3295 | } |
3296 | |
	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
3298 | cmd = txq->cmd[cmd_idx]; |
3299 | meta = &txq->meta[cmd_idx]; |
3300 | |
3301 | txq->time_stamp = jiffies; |
3302 | |
3303 | dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping), |
3304 | dma_unmap_len(meta, len), DMA_BIDIRECTIONAL); |
3305 | |
3306 | /* Input error checking is done when commands are added to queue. */ |
3307 | if (meta->flags & CMD_WANT_SKB) { |
3308 | meta->source->reply_page = (unsigned long)rxb_addr(rxb); |
3309 | rxb->page = NULL; |
3310 | } else if (meta->callback) |
3311 | meta->callback(il, cmd, pkt); |
3312 | |
3313 | spin_lock_irqsave(&il->hcmd_lock, flags); |
3314 | |
3315 | il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx); |
3316 | |
3317 | if (!(meta->flags & CMD_ASYNC)) { |
		clear_bit(S_HCMD_ACTIVE, &il->status);
		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
3320 | il_get_cmd_string(cmd->hdr.cmd)); |
3321 | wake_up(&il->wait_command_queue); |
3322 | } |
3323 | |
3324 | /* Mark as unmapped */ |
3325 | meta->flags = 0; |
3326 | |
	spin_unlock_irqrestore(&il->hcmd_lock, flags);
3328 | } |
3329 | EXPORT_SYMBOL(il_tx_cmd_complete); |
3330 | |
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
3335 | |
3336 | /* |
 * If bt_coex_active is set to true, the uCode will do kill/defer
 * every time the priority line is asserted (BT is sending signals on the
 * priority line in the PCIx).
 * If bt_coex_active is set to false, the uCode will ignore BT activity and
 * perform normal operation.
 *
 * Users might experience transmit issues on some platforms due to WiFi/BT
 * co-existence problems. The possible behaviors are:
 *   Able to scan and find all the available APs
 *   Not able to associate with any AP
 * On those platforms, WiFi communication can be restored by setting the
 * "bt_coex_active" module parameter to "false"
3349 | * |
3350 | * default: bt_coex_active = true (BT_COEX_ENABLE) |
3351 | */ |
3352 | static bool bt_coex_active = true; |
3353 | module_param(bt_coex_active, bool, 0444); |
3354 | MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist" ); |
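     |
     | /*
     | * Illustrative usage (a sketch; it assumes this code is built into a
     | * module named "iwlegacy" -- adjust the name if your build differs).
     | * BT coexistence handling can be disabled at module load time with:
     | *
     | *   modprobe iwlegacy bt_coex_active=0
     | *
     | * The parameter is registered with 0444 permissions, so it can be read
     | * back via /sys/module/<module>/parameters/bt_coex_active but cannot be
     | * changed at runtime.
     | */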
3355 | |
3356 | u32 il_debug_level; |
3357 | EXPORT_SYMBOL(il_debug_level); |
3358 | |
3359 | const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; |
3360 | EXPORT_SYMBOL(il_bcast_addr); |
3361 | |
3362 | #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ |
3363 | #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ |
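     | /*
     | * Worked example (a sketch based on the code below, assuming a device
     | * that reports 2 RX chains on an HT40-capable band): rx_mask[0] and
     | * rx_mask[1] are set to 0xFF, rx_mask[4] advertises MCS 32, and the
     | * highest RX data rate becomes 2 * 150 = 300 Mbps (2 * 72 = 144 Mbps
     | * if only 20 MHz channels were supported).
     | */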
3364 | static void |
3365 | il_init_ht_hw_capab(const struct il_priv *il, |
3366 | struct ieee80211_sta_ht_cap *ht_info, |
3367 | enum nl80211_band band) |
3368 | { |
3369 | u16 max_bit_rate = 0; |
3370 | u8 rx_chains_num = il->hw_params.rx_chains_num; |
3371 | u8 tx_chains_num = il->hw_params.tx_chains_num; |
3372 | |
3373 | ht_info->cap = 0; |
3374 | memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); |
3375 | |
3376 | ht_info->ht_supported = true; |
3377 | |
3378 | ht_info->cap |= IEEE80211_HT_CAP_SGI_20; |
3379 | max_bit_rate = MAX_BIT_RATE_20_MHZ; |
3380 | if (il->hw_params.ht40_channel & BIT(band)) { |
3381 | ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; |
3382 | ht_info->cap |= IEEE80211_HT_CAP_SGI_40; |
3383 | ht_info->mcs.rx_mask[4] = 0x01; |
3384 | max_bit_rate = MAX_BIT_RATE_40_MHZ; |
3385 | } |
3386 | |
3387 | if (il->cfg->mod_params->amsdu_size_8K) |
3388 | ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; |
3389 | |
3390 | ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; |
3391 | ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; |
3392 | |
3393 | ht_info->mcs.rx_mask[0] = 0xFF; |
3394 | if (rx_chains_num >= 2) |
3395 | ht_info->mcs.rx_mask[1] = 0xFF; |
3396 | if (rx_chains_num >= 3) |
3397 | ht_info->mcs.rx_mask[2] = 0xFF; |
3398 | |
3399 | /* Highest supported Rx data rate */ |
3400 | max_bit_rate *= rx_chains_num; |
3401 | WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); |
3402 | ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); |
3403 | |
3404 | /* Tx MCS capabilities */ |
3405 | ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; |
3406 | if (tx_chains_num != rx_chains_num) { |
3407 | ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; |
3408 | ht_info->mcs.tx_params |= |
3409 | ((tx_chains_num - |
3410 | 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); |
3411 | } |
3412 | } |
3413 | |
3414 | /* |
3415 | * il_init_geos - Initialize mac80211's geo/channel info based from eeprom |
3416 | */ |
3417 | int |
3418 | il_init_geos(struct il_priv *il) |
3419 | { |
3420 | struct il_channel_info *ch; |
3421 | struct ieee80211_supported_band *sband; |
3422 | struct ieee80211_channel *channels; |
3423 | struct ieee80211_channel *geo_ch; |
3424 | struct ieee80211_rate *rates; |
3425 | int i = 0; |
3426 | s8 max_tx_power = 0; |
3427 | |
3428 | if (il->bands[NL80211_BAND_2GHZ].n_bitrates || |
3429 | il->bands[NL80211_BAND_5GHZ].n_bitrates) { |
3430 | D_INFO("Geography modes already initialized.\n" ); |
3431 | set_bit(S_GEO_CONFIGURED, &il->status);
3432 | return 0; |
3433 | } |
3434 | |
3435 | channels = |
3436 | kcalloc(il->channel_count, sizeof(struct ieee80211_channel),
3437 | GFP_KERNEL); |
3438 | if (!channels) |
3439 | return -ENOMEM; |
3440 | |
3441 | rates = kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL);
3442 | if (!rates) { |
3443 | kfree(channels);
3444 | return -ENOMEM; |
3445 | } |
3446 | |
3447 | /* 5.2GHz channels start after the 2.4GHz channels */ |
3448 | sband = &il->bands[NL80211_BAND_5GHZ]; |
3449 | sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)]; |
3450 | /* just OFDM */ |
3451 | sband->bitrates = &rates[IL_FIRST_OFDM_RATE]; |
3452 | sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE; |
3453 | |
3454 | if (il->cfg->sku & IL_SKU_N) |
3455 | il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
3456 | |
3457 | sband = &il->bands[NL80211_BAND_2GHZ]; |
3458 | sband->channels = channels; |
3459 | /* OFDM & CCK */ |
3460 | sband->bitrates = rates; |
3461 | sband->n_bitrates = RATE_COUNT_LEGACY; |
3462 | |
3463 | if (il->cfg->sku & IL_SKU_N) |
3464 | il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
3465 | |
3466 | il->ieee_channels = channels; |
3467 | il->ieee_rates = rates; |
3468 | |
3469 | for (i = 0; i < il->channel_count; i++) { |
3470 | ch = &il->channel_info[i]; |
3471 | |
3472 | if (!il_is_channel_valid(ch))
3473 | continue; |
3474 | |
3475 | sband = &il->bands[ch->band]; |
3476 | |
3477 | geo_ch = &sband->channels[sband->n_channels++]; |
3478 | |
3479 | geo_ch->center_freq = |
3480 | ieee80211_channel_to_frequency(ch->channel, ch->band);
3481 | geo_ch->max_power = ch->max_power_avg; |
3482 | geo_ch->max_antenna_gain = 0xff; |
3483 | geo_ch->hw_value = ch->channel; |
3484 | |
3485 | if (il_is_channel_valid(ch)) {
3486 | if (!(ch->flags & EEPROM_CHANNEL_IBSS)) |
3487 | geo_ch->flags |= IEEE80211_CHAN_NO_IR; |
3488 | |
3489 | if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) |
3490 | geo_ch->flags |= IEEE80211_CHAN_NO_IR; |
3491 | |
3492 | if (ch->flags & EEPROM_CHANNEL_RADAR) |
3493 | geo_ch->flags |= IEEE80211_CHAN_RADAR; |
3494 | |
3495 | geo_ch->flags |= ch->ht40_extension_channel; |
3496 | |
3497 | if (ch->max_power_avg > max_tx_power) |
3498 | max_tx_power = ch->max_power_avg; |
3499 | } else { |
3500 | geo_ch->flags |= IEEE80211_CHAN_DISABLED; |
3501 | } |
3502 | |
3503 | D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n" , ch->channel, |
3504 | geo_ch->center_freq, |
3505 | il_is_channel_a_band(ch) ? "5.2" : "2.4" , |
3506 | geo_ch-> |
3507 | flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid" , |
3508 | geo_ch->flags); |
3509 | } |
3510 | |
3511 | il->tx_power_device_lmt = max_tx_power; |
3512 | il->tx_power_user_lmt = max_tx_power; |
3513 | il->tx_power_next = max_tx_power; |
3514 | |
3515 | if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 && |
3516 | (il->cfg->sku & IL_SKU_A)) { |
3517 | IL_INFO("Incorrectly detected BG card as ABG. " |
3518 | "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n" , |
3519 | il->pci_dev->device, il->pci_dev->subsystem_device); |
3520 | il->cfg->sku &= ~IL_SKU_A; |
3521 | } |
3522 | |
3523 | IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n" , |
3524 | il->bands[NL80211_BAND_2GHZ].n_channels, |
3525 | il->bands[NL80211_BAND_5GHZ].n_channels); |
3526 | |
3527 | set_bit(S_GEO_CONFIGURED, &il->status);
3528 | |
3529 | return 0; |
3530 | } |
3531 | EXPORT_SYMBOL(il_init_geos); |
3532 | |
3533 | /* |
3534 | * il_free_geos - undo allocations in il_init_geos |
3535 | */ |
3536 | void |
3537 | il_free_geos(struct il_priv *il) |
3538 | { |
3539 | kfree(il->ieee_channels);
3540 | kfree(il->ieee_rates);
3541 | clear_bit(S_GEO_CONFIGURED, &il->status);
3542 | } |
3543 | EXPORT_SYMBOL(il_free_geos); |
3544 | |
3545 | static bool |
3546 | il_is_channel_extension(struct il_priv *il, enum nl80211_band band, |
3547 | u16 channel, u8 extension_chan_offset) |
3548 | { |
3549 | const struct il_channel_info *ch_info; |
3550 | |
3551 | ch_info = il_get_channel_info(il, band, channel); |
3552 | if (!il_is_channel_valid(ch_info)) |
3553 | return false; |
3554 | |
3555 | if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) |
3556 | return !(ch_info-> |
3557 | ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS); |
3558 | else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) |
3559 | return !(ch_info-> |
3560 | ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS); |
3561 | |
3562 | return false; |
3563 | } |
3564 | |
3565 | bool |
3566 | il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap) |
3567 | { |
3568 | if (!il->ht.enabled || !il->ht.is_40mhz) |
3569 | return false; |
3570 | |
3571 | /* |
3572 | * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
3573 | * the bit will not be set if it is a pure 40 MHz case
3574 | */ |
3575 | if (ht_cap && !ht_cap->ht_supported) |
3576 | return false; |
3577 | |
3578 | #ifdef CONFIG_IWLEGACY_DEBUGFS |
3579 | if (il->disable_ht40) |
3580 | return false; |
3581 | #endif |
3582 | |
3583 | return il_is_channel_extension(il, il->band,
3584 | le16_to_cpu(il->staging.channel), |
3585 | il->ht.extension_chan_offset);
3586 | } |
3587 | EXPORT_SYMBOL(il_is_ht40_tx_allowed); |
3588 | |
3589 | static u16 noinline |
3590 | il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) |
3591 | { |
3592 | u16 new_val; |
3593 | u16 beacon_factor; |
3594 | |
3595 | /* |
3596 | * If mac80211 hasn't given us a beacon interval, program |
3597 | * the default into the device. |
3598 | */ |
3599 | if (!beacon_val) |
3600 | return DEFAULT_BEACON_INTERVAL; |
3601 | |
3602 | /* |
3603 | * If the beacon interval we obtained from the peer |
3604 | * is too large, we'll have to wake up more often |
3605 | * (and in IBSS case, we'll beacon too much) |
3606 | * |
3607 | * For example, if max_beacon_val is 4096, and the |
3608 | * requested beacon interval is 7000, we'll have to |
3609 | * use 3500 to be able to wake up on the beacons. |
3610 | * |
3611 | * This could badly influence beacon detection stats. |
3612 | */ |
3613 | |
3614 | beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; |
3615 | new_val = beacon_val / beacon_factor; |
3616 | |
3617 | if (!new_val) |
3618 | new_val = max_beacon_val; |
3619 | |
3620 | return new_val; |
3621 | } |
3622 | |
3623 | int |
3624 | il_send_rxon_timing(struct il_priv *il) |
3625 | { |
3626 | u64 tsf; |
3627 | s32 interval_tm, rem; |
3628 | struct ieee80211_conf *conf = NULL; |
3629 | u16 beacon_int; |
3630 | struct ieee80211_vif *vif = il->vif; |
3631 | |
3632 | conf = &il->hw->conf; |
3633 | |
3634 | lockdep_assert_held(&il->mutex); |
3635 | |
3636 | memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd)); |
3637 | |
3638 | il->timing.timestamp = cpu_to_le64(il->timestamp); |
3639 | il->timing.listen_interval = cpu_to_le16(conf->listen_interval); |
3640 | |
3641 | beacon_int = vif ? vif->bss_conf.beacon_int : 0; |
3642 | |
3643 | /* |
3644 | * TODO: For IBSS we need to get atim_win from mac80211, |
3645 | * for now just always use 0 |
3646 | */ |
3647 | il->timing.atim_win = 0; |
3648 | |
3649 | beacon_int = |
3650 | il_adjust_beacon_interval(beacon_int,
3651 | il->hw_params.max_beacon_itrvl *
3652 | TIME_UNIT); |
3653 | il->timing.beacon_interval = cpu_to_le16(beacon_int); |
3654 | |
3655 | tsf = il->timestamp; /* tsf is modified by do_div: copy it */
3656 | interval_tm = beacon_int * TIME_UNIT; |
3657 | rem = do_div(tsf, interval_tm); |
3658 | il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); |
3659 | |
3660 | il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1; |
3661 | |
3662 | D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n" , |
3663 | le16_to_cpu(il->timing.beacon_interval), |
3664 | le32_to_cpu(il->timing.beacon_init_val), |
3665 | le16_to_cpu(il->timing.atim_win)); |
3666 | |
3667 | return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing), |
3668 | &il->timing); |
3669 | } |
3670 | EXPORT_SYMBOL(il_send_rxon_timing); |
3671 | |
3672 | void |
3673 | il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt) |
3674 | { |
3675 | struct il_rxon_cmd *rxon = &il->staging; |
3676 | |
3677 | if (hw_decrypt) |
3678 | rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; |
3679 | else |
3680 | rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; |
3681 | |
3682 | } |
3683 | EXPORT_SYMBOL(il_set_rxon_hwcrypto); |
3684 | |
3685 | /* validate RXON structure is valid */ |
3686 | int |
3687 | il_check_rxon_cmd(struct il_priv *il) |
3688 | { |
3689 | struct il_rxon_cmd *rxon = &il->staging; |
3690 | bool error = false; |
3691 | |
3692 | if (rxon->flags & RXON_FLG_BAND_24G_MSK) { |
3693 | if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { |
3694 | IL_WARN("check 2.4G: wrong narrow\n" ); |
3695 | error = true; |
3696 | } |
3697 | if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { |
3698 | IL_WARN("check 2.4G: wrong radar\n" ); |
3699 | error = true; |
3700 | } |
3701 | } else { |
3702 | if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { |
3703 | IL_WARN("check 5.2G: not short slot!\n" ); |
3704 | error = true; |
3705 | } |
3706 | if (rxon->flags & RXON_FLG_CCK_MSK) { |
3707 | IL_WARN("check 5.2G: CCK!\n" ); |
3708 | error = true; |
3709 | } |
3710 | } |
3711 | if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { |
3712 | IL_WARN("mac/bssid mcast!\n" ); |
3713 | error = true; |
3714 | } |
3715 | |
3716 | /* make sure basic rates 6Mbps and 1Mbps are supported */ |
3717 | if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 && |
3718 | (rxon->cck_basic_rates & RATE_1M_MASK) == 0) { |
3719 | IL_WARN("neither 1 nor 6 are basic\n" ); |
3720 | error = true; |
3721 | } |
3722 | |
3723 | if (le16_to_cpu(rxon->assoc_id) > 2007) { |
3724 | IL_WARN("aid > 2007\n" ); |
3725 | error = true; |
3726 | } |
3727 | |
3728 | if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == |
3729 | (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { |
3730 | IL_WARN("CCK and short slot\n" ); |
3731 | error = true; |
3732 | } |
3733 | |
3734 | if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == |
3735 | (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { |
3736 | IL_WARN("CCK and auto detect" ); |
3737 | error = true; |
3738 | } |
3739 | |
3740 | if ((rxon-> |
3741 | flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) == |
3742 | RXON_FLG_TGG_PROTECT_MSK) { |
3743 | IL_WARN("TGg but no auto-detect\n" ); |
3744 | error = true; |
3745 | } |
3746 | |
3747 | if (error) |
3748 | IL_WARN("Tuning to channel %d\n" , le16_to_cpu(rxon->channel)); |
3749 | |
3750 | if (error) { |
3751 | IL_ERR("Invalid RXON\n" ); |
3752 | return -EINVAL; |
3753 | } |
3754 | return 0; |
3755 | } |
3756 | EXPORT_SYMBOL(il_check_rxon_cmd); |
3757 | |
3758 | /* |
3759 | * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed |
3760 | * @il: staging_rxon is compared to active_rxon |
3761 | * |
3762 | * If the RXON structure is changing enough to require a new tune, |
3763 | * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that |
3764 | * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. |
3765 | */ |
3766 | int |
3767 | il_full_rxon_required(struct il_priv *il) |
3768 | { |
3769 | const struct il_rxon_cmd *staging = &il->staging; |
3770 | const struct il_rxon_cmd *active = &il->active; |
3771 | |
3772 | #define CHK(cond) \ |
3773 | if ((cond)) { \ |
3774 | D_INFO("need full RXON - " #cond "\n"); \ |
3775 | return 1; \ |
3776 | } |
3777 | |
3778 | #define CHK_NEQ(c1, c2) \ |
3779 | if ((c1) != (c2)) { \ |
3780 | D_INFO("need full RXON - " \ |
3781 | #c1 " != " #c2 " - %d != %d\n", \ |
3782 | (c1), (c2)); \ |
3783 | return 1; \ |
3784 | } |
3785 | |
3786 | /* These items are only settable from the full RXON command */ |
3787 | CHK(!il_is_associated(il)); |
3788 | CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr)); |
3789 | CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr)); |
3790 | CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr, |
3791 | active->wlap_bssid_addr)); |
3792 | CHK_NEQ(staging->dev_type, active->dev_type); |
3793 | CHK_NEQ(staging->channel, active->channel); |
3794 | CHK_NEQ(staging->air_propagation, active->air_propagation); |
3795 | CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, |
3796 | active->ofdm_ht_single_stream_basic_rates); |
3797 | CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, |
3798 | active->ofdm_ht_dual_stream_basic_rates); |
3799 | CHK_NEQ(staging->assoc_id, active->assoc_id); |
3800 | |
3801 | /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can |
3802 | * be updated with the RXON_ASSOC command -- however only some |
3803 | * flag transitions are allowed using RXON_ASSOC */ |
3804 | |
3805 | /* Check if we are not switching bands */ |
3806 | CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, |
3807 | active->flags & RXON_FLG_BAND_24G_MSK); |
3808 | |
3809 | /* Check if we are switching association toggle */ |
3810 | CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, |
3811 | active->filter_flags & RXON_FILTER_ASSOC_MSK); |
3812 | |
3813 | #undef CHK |
3814 | #undef CHK_NEQ |
3815 | |
3816 | return 0; |
3817 | } |
3818 | EXPORT_SYMBOL(il_full_rxon_required); |
3819 | |
3820 | u8 |
3821 | il_get_lowest_plcp(struct il_priv *il) |
3822 | { |
3823 | /* |
3824 | * Assign the lowest rate -- should really get this from |
3825 | * the beacon skb from mac80211. |
3826 | */ |
3827 | if (il->staging.flags & RXON_FLG_BAND_24G_MSK) |
3828 | return RATE_1M_PLCP; |
3829 | else |
3830 | return RATE_6M_PLCP; |
3831 | } |
3832 | EXPORT_SYMBOL(il_get_lowest_plcp); |
3833 | |
3834 | static void |
3835 | _il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) |
3836 | { |
3837 | struct il_rxon_cmd *rxon = &il->staging; |
3838 | |
3839 | if (!il->ht.enabled) { |
3840 | rxon->flags &= |
3841 | ~(RXON_FLG_CHANNEL_MODE_MSK | |
3842 | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK |
3843 | | RXON_FLG_HT_PROT_MSK); |
3844 | return; |
3845 | } |
3846 | |
3847 | rxon->flags |= |
3848 | cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); |
3849 | |
3850 | /* Set up channel bandwidth: |
3851 | * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ |
3852 | /* clear the HT channel mode before setting the new mode */
3853 | rxon->flags &= |
3854 | ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); |
3855 | if (il_is_ht40_tx_allowed(il, NULL)) { |
3856 | /* pure ht40 */ |
3857 | if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { |
3858 | rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; |
3859 | /* Note: control channel is opposite of extension channel */ |
3860 | switch (il->ht.extension_chan_offset) { |
3861 | case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: |
3862 | rxon->flags &= |
3863 | ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; |
3864 | break; |
3865 | case IEEE80211_HT_PARAM_CHA_SEC_BELOW: |
3866 | rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; |
3867 | break; |
3868 | } |
3869 | } else { |
3870 | /* Note: control channel is opposite of extension channel */ |
3871 | switch (il->ht.extension_chan_offset) { |
3872 | case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: |
3873 | rxon->flags &= |
3874 | ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); |
3875 | rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; |
3876 | break; |
3877 | case IEEE80211_HT_PARAM_CHA_SEC_BELOW: |
3878 | rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; |
3879 | rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; |
3880 | break; |
3881 | case IEEE80211_HT_PARAM_CHA_SEC_NONE: |
3882 | default: |
3883 | /* channel location only valid if in Mixed mode */ |
3884 | IL_ERR("invalid extension channel offset\n" ); |
3885 | break; |
3886 | } |
3887 | } |
3888 | } else { |
3889 | rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; |
3890 | } |
3891 | |
3892 | if (il->ops->set_rxon_chain) |
3893 | il->ops->set_rxon_chain(il); |
3894 | |
3895 | D_ASSOC("rxon flags 0x%X operation mode :0x%X " |
3896 | "extension channel offset 0x%x\n" , le32_to_cpu(rxon->flags), |
3897 | il->ht.protection, il->ht.extension_chan_offset); |
3898 | } |
3899 | |
3900 | void |
3901 | il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf) |
3902 | { |
3903 | _il_set_rxon_ht(il, ht_conf); |
3904 | } |
3905 | EXPORT_SYMBOL(il_set_rxon_ht); |
3906 | |
3907 | /* Return valid, unused, channel for a passive scan to reset the RF */ |
3908 | u8 |
3909 | il_get_single_channel_number(struct il_priv *il, enum nl80211_band band) |
3910 | { |
3911 | const struct il_channel_info *ch_info; |
3912 | int i; |
3913 | u8 channel = 0; |
3914 | u8 min, max; |
3915 | |
3916 | if (band == NL80211_BAND_5GHZ) { |
3917 | min = 14; |
3918 | max = il->channel_count; |
3919 | } else { |
3920 | min = 0; |
3921 | max = 14; |
3922 | } |
3923 | |
3924 | for (i = min; i < max; i++) { |
3925 | channel = il->channel_info[i].channel; |
3926 | if (channel == le16_to_cpu(il->staging.channel)) |
3927 | continue; |
3928 | |
3929 | ch_info = il_get_channel_info(il, band, channel); |
3930 | if (il_is_channel_valid(ch_info)) |
3931 | break; |
3932 | } |
3933 | |
3934 | return channel; |
3935 | } |
3936 | EXPORT_SYMBOL(il_get_single_channel_number); |
3937 | |
3938 | /* |
3939 | * il_set_rxon_channel - Set the band and channel values in staging RXON |
3940 | * @ch: requested channel as a pointer to struct ieee80211_channel |
3941 | *
3942 | * NOTE: Does not commit to the hardware; it sets appropriate bit fields |
3943 | * in the staging RXON flag structure based on the ch->band |
3944 | */ |
3945 | int |
3946 | il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch) |
3947 | { |
3948 | enum nl80211_band band = ch->band; |
3949 | u16 channel = ch->hw_value; |
3950 | |
3951 | if (le16_to_cpu(il->staging.channel) == channel && il->band == band) |
3952 | return 0; |
3953 | |
3954 | il->staging.channel = cpu_to_le16(channel); |
3955 | if (band == NL80211_BAND_5GHZ) |
3956 | il->staging.flags &= ~RXON_FLG_BAND_24G_MSK; |
3957 | else |
3958 | il->staging.flags |= RXON_FLG_BAND_24G_MSK; |
3959 | |
3960 | il->band = band; |
3961 | |
3962 | D_INFO("Staging channel set to %d [%d]\n" , channel, band); |
3963 | |
3964 | return 0; |
3965 | } |
3966 | EXPORT_SYMBOL(il_set_rxon_channel); |
3967 | |
3968 | void |
3969 | il_set_flags_for_band(struct il_priv *il, enum nl80211_band band, |
3970 | struct ieee80211_vif *vif) |
3971 | { |
3972 | if (band == NL80211_BAND_5GHZ) { |
3973 | il->staging.flags &= |
3974 | ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | |
3975 | RXON_FLG_CCK_MSK); |
3976 | il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; |
3977 | } else { |
3978 | /* Copied from il_post_associate() */ |
3979 | if (vif && vif->bss_conf.use_short_slot) |
3980 | il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; |
3981 | else |
3982 | il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; |
3983 | |
3984 | il->staging.flags |= RXON_FLG_BAND_24G_MSK; |
3985 | il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; |
3986 | il->staging.flags &= ~RXON_FLG_CCK_MSK; |
3987 | } |
3988 | } |
3989 | EXPORT_SYMBOL(il_set_flags_for_band); |
3990 | |
3991 | /* |
3992 | * initialize rxon structure with default values from eeprom |
3993 | */ |
3994 | void |
3995 | il_connection_init_rx_config(struct il_priv *il) |
3996 | { |
3997 | const struct il_channel_info *ch_info; |
3998 | |
3999 | memset(&il->staging, 0, sizeof(il->staging)); |
4000 | |
4001 | switch (il->iw_mode) { |
4002 | case NL80211_IFTYPE_UNSPECIFIED: |
4003 | il->staging.dev_type = RXON_DEV_TYPE_ESS; |
4004 | break; |
4005 | case NL80211_IFTYPE_STATION: |
4006 | il->staging.dev_type = RXON_DEV_TYPE_ESS; |
4007 | il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; |
4008 | break; |
4009 | case NL80211_IFTYPE_ADHOC: |
4010 | il->staging.dev_type = RXON_DEV_TYPE_IBSS; |
4011 | il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; |
4012 | il->staging.filter_flags = |
4013 | RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; |
4014 | break; |
4015 | default: |
4016 | IL_ERR("Unsupported interface type %d\n" , il->vif->type); |
4017 | return; |
4018 | } |
4019 | |
4020 | #if 0 |
4021 | /* TODO: Figure out when short_preamble would be set and cache from |
4022 | * that */ |
4023 | if (!hw_to_local(il->hw)->short_preamble) |
4024 | il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; |
4025 | else |
4026 | il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; |
4027 | #endif |
4028 | |
4029 | ch_info = |
4030 | il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel)); |
4031 | |
4032 | if (!ch_info) |
4033 | ch_info = &il->channel_info[0]; |
4034 | |
4035 | il->staging.channel = cpu_to_le16(ch_info->channel); |
4036 | il->band = ch_info->band; |
4037 | |
4038 | il_set_flags_for_band(il, il->band, il->vif); |
4039 | |
4040 | il->staging.ofdm_basic_rates = |
4041 | (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; |
4042 | il->staging.cck_basic_rates = |
4043 | (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; |
4044 | |
4045 | /* clear both MIX and PURE40 mode flag */ |
4046 | il->staging.flags &= |
4047 | ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40); |
4048 | if (il->vif) |
4049 | memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN); |
4050 | |
4051 | il->staging.ofdm_ht_single_stream_basic_rates = 0xff; |
4052 | il->staging.ofdm_ht_dual_stream_basic_rates = 0xff; |
4053 | } |
4054 | EXPORT_SYMBOL(il_connection_init_rx_config); |
4055 | |
4056 | void |
4057 | il_set_rate(struct il_priv *il) |
4058 | { |
4059 | const struct ieee80211_supported_band *hw = NULL; |
4060 | struct ieee80211_rate *rate; |
4061 | int i; |
4062 | |
4063 | hw = il_get_hw_mode(il, il->band);
4064 | if (!hw) { |
4065 | IL_ERR("Failed to set rate: unable to get hw mode\n" ); |
4066 | return; |
4067 | } |
4068 | |
4069 | il->active_rate = 0; |
4070 | |
4071 | for (i = 0; i < hw->n_bitrates; i++) { |
4072 | rate = &(hw->bitrates[i]); |
4073 | if (rate->hw_value < RATE_COUNT_LEGACY) |
4074 | il->active_rate |= (1 << rate->hw_value); |
4075 | } |
4076 | |
4077 | D_RATE("Set active_rate = %0x\n" , il->active_rate); |
4078 | |
4079 | il->staging.cck_basic_rates = |
4080 | (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF; |
4081 | |
4082 | il->staging.ofdm_basic_rates = |
4083 | (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF; |
4084 | } |
4085 | EXPORT_SYMBOL(il_set_rate); |
4086 | |
4087 | void |
4088 | il_chswitch_done(struct il_priv *il, bool is_success) |
4089 | { |
4090 | if (test_bit(S_EXIT_PENDING, &il->status)) |
4091 | return; |
4092 | |
4093 | if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
4094 | ieee80211_chswitch_done(il->vif, is_success, 0);
4095 | } |
4096 | EXPORT_SYMBOL(il_chswitch_done); |
4097 | |
4098 | void |
4099 | il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb) |
4100 | { |
4101 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
4102 | struct il_csa_notification *csa = &(pkt->u.csa_notif); |
4103 | struct il_rxon_cmd *rxon = (void *)&il->active; |
4104 | |
4105 | if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status)) |
4106 | return; |
4107 | |
4108 | if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) { |
4109 | rxon->channel = csa->channel; |
4110 | il->staging.channel = csa->channel; |
4111 | D_11H("CSA notif: channel %d\n" , le16_to_cpu(csa->channel)); |
4112 | il_chswitch_done(il, true); |
4113 | } else { |
4114 | IL_ERR("CSA notif (fail) : channel %d\n" , |
4115 | le16_to_cpu(csa->channel)); |
4116 | il_chswitch_done(il, false); |
4117 | } |
4118 | } |
4119 | EXPORT_SYMBOL(il_hdl_csa); |
4120 | |
4121 | #ifdef CONFIG_IWLEGACY_DEBUG |
4122 | void |
4123 | il_print_rx_config_cmd(struct il_priv *il) |
4124 | { |
4125 | struct il_rxon_cmd *rxon = &il->staging; |
4126 | |
4127 | D_RADIO("RX CONFIG:\n" ); |
4128 | il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); |
4129 | D_RADIO("u16 channel: 0x%x\n" , le16_to_cpu(rxon->channel)); |
4130 | D_RADIO("u32 flags: 0x%08X\n" , le32_to_cpu(rxon->flags)); |
4131 | D_RADIO("u32 filter_flags: 0x%08x\n" , le32_to_cpu(rxon->filter_flags)); |
4132 | D_RADIO("u8 dev_type: 0x%x\n" , rxon->dev_type); |
4133 | D_RADIO("u8 ofdm_basic_rates: 0x%02x\n" , rxon->ofdm_basic_rates); |
4134 | D_RADIO("u8 cck_basic_rates: 0x%02x\n" , rxon->cck_basic_rates); |
4135 | D_RADIO("u8[6] node_addr: %pM\n" , rxon->node_addr); |
4136 | D_RADIO("u8[6] bssid_addr: %pM\n" , rxon->bssid_addr); |
4137 | D_RADIO("u16 assoc_id: 0x%x\n" , le16_to_cpu(rxon->assoc_id)); |
4138 | } |
4139 | EXPORT_SYMBOL(il_print_rx_config_cmd); |
4140 | #endif |
4141 | /* |
4142 | * il_irq_handle_error - called for HW or SW error interrupt from card |
4143 | */ |
4144 | void |
4145 | il_irq_handle_error(struct il_priv *il) |
4146 | { |
4147 | /* Set the FW error flag -- cleared on il_down */ |
4148 | set_bit(S_FW_ERROR, &il->status);
4149 | |
4150 | /* Cancel currently queued command. */ |
4151 | clear_bit(S_HCMD_ACTIVE, &il->status);
4152 | |
4153 | IL_ERR("Loaded firmware version: %s\n" , il->hw->wiphy->fw_version); |
4154 | |
4155 | il->ops->dump_nic_error_log(il); |
4156 | if (il->ops->dump_fh) |
4157 | il->ops->dump_fh(il, NULL, false); |
4158 | #ifdef CONFIG_IWLEGACY_DEBUG |
4159 | if (il_get_debug_level(il) & IL_DL_FW_ERRORS) |
4160 | il_print_rx_config_cmd(il); |
4161 | #endif |
4162 | |
4163 | wake_up(&il->wait_command_queue); |
4164 | |
4165 | /* Keep the restart process from trying to send host |
4166 | * commands by clearing the INIT status bit */ |
4167 | clear_bit(S_READY, &il->status);
4168 | |
4169 | if (!test_bit(S_EXIT_PENDING, &il->status)) { |
4170 | IL_DBG(IL_DL_FW_ERRORS, |
4171 | "Restarting adapter due to uCode error.\n" ); |
4172 | |
4173 | if (il->cfg->mod_params->restart_fw) |
4174 | queue_work(il->workqueue, &il->restart);
4175 | } |
4176 | } |
4177 | EXPORT_SYMBOL(il_irq_handle_error); |
4178 | |
4179 | static int |
4180 | _il_apm_stop_master(struct il_priv *il) |
4181 | { |
4182 | int ret = 0; |
4183 | |
4184 | /* stop device's busmaster DMA activity */ |
4185 | _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); |
4186 | |
4187 | ret = |
4188 | _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, |
4189 | CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); |
4190 | if (ret < 0) |
4191 | IL_WARN("Master Disable Timed Out, 100 usec\n" ); |
4192 | |
4193 | D_INFO("stop master\n" ); |
4194 | |
4195 | return ret; |
4196 | } |
4197 | |
4198 | void |
4199 | _il_apm_stop(struct il_priv *il) |
4200 | { |
4201 | lockdep_assert_held(&il->reg_lock); |
4202 | |
4203 | D_INFO("Stop card, put in low power state\n" ); |
4204 | |
4205 | /* Stop device's DMA activity */ |
4206 | _il_apm_stop_master(il); |
4207 | |
4208 | /* Reset the entire device */ |
4209 | _il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); |
4210 | |
4211 | udelay(10); |
4212 | |
4213 | /* |
4214 | * Clear "initialization complete" bit to move adapter from |
4215 | * D0A* (powered-up Active) --> D0U* (Uninitialized) state. |
4216 | */ |
4217 | _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); |
4218 | } |
4219 | EXPORT_SYMBOL(_il_apm_stop); |
4220 | |
4221 | void |
4222 | il_apm_stop(struct il_priv *il) |
4223 | { |
4224 | unsigned long flags; |
4225 | |
4226 | spin_lock_irqsave(&il->reg_lock, flags); |
4227 | _il_apm_stop(il); |
4228 | spin_unlock_irqrestore(&il->reg_lock, flags);
4229 | } |
4230 | EXPORT_SYMBOL(il_apm_stop); |
4231 | |
4232 | /* |
4233 | * Start up NIC's basic functionality after it has been reset |
4234 | * (e.g. after platform boot, or shutdown via il_apm_stop()) |
4235 | * NOTE: This does not load uCode nor start the embedded processor |
4236 | */ |
4237 | int |
4238 | il_apm_init(struct il_priv *il) |
4239 | { |
4240 | int ret = 0; |
4241 | u16 lctl; |
4242 | |
4243 | D_INFO("Init card's basic functions\n" ); |
4244 | |
4245 | /* |
4246 | * Use "set_bit" below rather than "write", to preserve any hardware |
4247 | * bits already set by default after reset. |
4248 | */ |
4249 | |
4250 | /* Disable L0S exit timer (platform NMI Work/Around) */ |
4251 | il_set_bit(il, CSR_GIO_CHICKEN_BITS, |
4252 | CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); |
4253 | |
4254 | /* |
4255 | * Disable L0s without affecting L1; |
4256 | * don't wait for ICH L0s (ICH bug W/A) |
4257 | */ |
4258 | il_set_bit(il, CSR_GIO_CHICKEN_BITS, |
4259 | CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); |
4260 | |
4261 | /* Set FH wait threshold to maximum (HW error during stress W/A) */ |
4262 | il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); |
4263 | |
4264 | /* |
4265 | * Enable HAP INTA (interrupt from management bus) to |
4266 | * wake device's PCI Express link L1a -> L0s |
4267 | * NOTE: This is no-op for 3945 (non-existent bit) |
4268 | */ |
4269 | il_set_bit(il, CSR_HW_IF_CONFIG_REG, |
4270 | CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); |
4271 | |
4272 | /* |
4273 | * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. |
4274 | * Check if BIOS (or OS) enabled L1-ASPM on this device. |
4275 | * If so (likely), disable L0S, so device moves directly L0->L1; |
4276 | * costs negligible amount of power savings. |
4277 | * If not (unlikely), enable L0S, so there is at least some |
4278 | * power savings, even without L1. |
4279 | */ |
4280 | if (il->cfg->set_l0s) { |
4281 | ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
4282 | if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) { |
4283 | /* L1-ASPM enabled; disable(!) L0S */ |
4284 | il_set_bit(il, CSR_GIO_REG, |
4285 | CSR_GIO_REG_VAL_L0S_ENABLED); |
4286 | D_POWER("L1 Enabled; Disabling L0S\n" ); |
4287 | } else { |
4288 | /* L1-ASPM disabled; enable(!) L0S */ |
4289 | il_clear_bit(il, CSR_GIO_REG, |
4290 | CSR_GIO_REG_VAL_L0S_ENABLED); |
4291 | D_POWER("L1 Disabled; Enabling L0S\n" ); |
4292 | } |
4293 | } |
4294 | |
4295 | /* Configure analog phase-lock-loop before activating to D0A */ |
4296 | if (il->cfg->pll_cfg_val) |
4297 | il_set_bit(il, CSR_ANA_PLL_CFG, |
4298 | il->cfg->pll_cfg_val); |
4299 | |
4300 | /* |
4301 | * Set "initialization complete" bit to move adapter from |
4302 | * D0U* --> D0A* (powered-up active) state. |
4303 | */ |
4304 | il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); |
4305 | |
4306 | /* |
4307 | * Wait for clock stabilization; once stabilized, access to |
4308 | * device-internal resources is supported, e.g. il_wr_prph() |
4309 | * and accesses to uCode SRAM. |
4310 | */ |
4311 | ret = |
4312 | _il_poll_bit(il, CSR_GP_CNTRL, |
4313 | CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, |
4314 | CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); |
4315 | if (ret < 0) { |
4316 | D_INFO("Failed to init the card\n" ); |
4317 | goto out; |
4318 | } |
4319 | |
4320 | /* |
4321 | * Enable DMA and BSM (if used) clocks, wait for them to stabilize. |
4322 | * BSM (Bootstrap State Machine) is only in 3945 and 4965.
4323 | * |
4324 | * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits |
4325 | * do not disable clocks. This preserves any hardware bits already |
4326 | * set by default in "CLK_CTRL_REG" after reset. |
4327 | */ |
4328 | if (il->cfg->use_bsm) |
4329 | il_wr_prph(il, APMG_CLK_EN_REG, |
4330 | APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); |
4331 | else |
4332 | il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); |
4333 | udelay(20); |
4334 | |
4335 | /* Disable L1-Active */ |
4336 | il_set_bits_prph(il, APMG_PCIDEV_STT_REG, |
4337 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); |
4338 | |
4339 | out: |
4340 | return ret; |
4341 | } |
4342 | EXPORT_SYMBOL(il_apm_init); |
4343 | |
4344 | int |
4345 | il_set_tx_power(struct il_priv *il, s8 tx_power, bool force) |
4346 | { |
4347 | int ret; |
4348 | s8 prev_tx_power; |
4349 | bool defer; |
4350 | |
4351 | lockdep_assert_held(&il->mutex); |
4352 | |
4353 | if (il->tx_power_user_lmt == tx_power && !force) |
4354 | return 0; |
4355 | |
4356 | if (!il->ops->send_tx_power) |
4357 | return -EOPNOTSUPP; |
4358 | |
4359 | /* 0 dBm means 1 milliwatt */
4360 | if (tx_power < 0) { |
4361 | IL_WARN("Requested user TXPOWER %d below 1 mW.\n" , tx_power); |
4362 | return -EINVAL; |
4363 | } |
4364 | |
4365 | if (tx_power > il->tx_power_device_lmt) { |
4366 | IL_WARN("Requested user TXPOWER %d above upper limit %d.\n" , |
4367 | tx_power, il->tx_power_device_lmt); |
4368 | return -EINVAL; |
4369 | } |
4370 | |
4371 | if (!il_is_ready_rf(il)) |
4372 | return -EIO; |
4373 | |
4374 | /* scan complete and commit_rxon use the tx_power_next value,
4375 | * so it always needs to be updated with the newest request */
4376 | il->tx_power_next = tx_power; |
4377 | |
4378 | /* do not set tx power when scanning or channel changing */ |
4379 | defer = test_bit(S_SCANNING, &il->status) || |
4380 | memcmp(&il->active, &il->staging, sizeof(il->staging));
4381 | if (defer && !force) { |
4382 | D_INFO("Deferring tx power set\n" ); |
4383 | return 0; |
4384 | } |
4385 | |
4386 | prev_tx_power = il->tx_power_user_lmt; |
4387 | il->tx_power_user_lmt = tx_power; |
4388 | |
4389 | ret = il->ops->send_tx_power(il); |
4390 | |
4391 | /* if we failed to set tx_power, restore the original tx power */
4392 | if (ret) { |
4393 | il->tx_power_user_lmt = prev_tx_power; |
4394 | il->tx_power_next = prev_tx_power; |
4395 | } |
4396 | return ret; |
4397 | } |
4398 | EXPORT_SYMBOL(il_set_tx_power); |
4399 | |
4400 | void |
4401 | il_send_bt_config(struct il_priv *il) |
4402 | { |
4403 | struct il_bt_cmd bt_cmd = { |
4404 | .lead_time = BT_LEAD_TIME_DEF, |
4405 | .max_kill = BT_MAX_KILL_DEF, |
4406 | .kill_ack_mask = 0, |
4407 | .kill_cts_mask = 0, |
4408 | }; |
4409 | |
4410 | if (!bt_coex_active) |
4411 | bt_cmd.flags = BT_COEX_DISABLE; |
4412 | else |
4413 | bt_cmd.flags = BT_COEX_ENABLE; |
4414 | |
4415 | D_INFO("BT coex %s\n" , |
4416 | (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active" ); |
4417 | |
4418 | if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd)) |
4419 | IL_ERR("failed to send BT Coex Config\n" ); |
4420 | } |
4421 | EXPORT_SYMBOL(il_send_bt_config); |
4422 | |
4423 | int |
4424 | il_send_stats_request(struct il_priv *il, u8 flags, bool clear) |
4425 | { |
4426 | struct il_stats_cmd stats_cmd = { |
4427 | .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0, |
4428 | }; |
4429 | |
4430 | if (flags & CMD_ASYNC) |
4431 | return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd), |
4432 | &stats_cmd, NULL); |
4433 | else |
4434 | return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd), |
4435 | &stats_cmd); |
4436 | } |
4437 | EXPORT_SYMBOL(il_send_stats_request); |
4438 | |
4439 | void |
4440 | il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb) |
4441 | { |
4442 | #ifdef CONFIG_IWLEGACY_DEBUG |
4443 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
4444 | struct il_sleep_notification *sleep = &(pkt->u.sleep_notif); |
4445 | D_RX("sleep mode: %d, src: %d\n" , |
4446 | sleep->pm_sleep_mode, sleep->pm_wakeup_src); |
4447 | #endif |
4448 | } |
4449 | EXPORT_SYMBOL(il_hdl_pm_sleep); |
4450 | |
4451 | void |
4452 | il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb) |
4453 | { |
4454 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
4455 | u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK; |
4456 | D_RADIO("Dumping %d bytes of unhandled notification for %s:\n" , len, |
4457 | il_get_cmd_string(pkt->hdr.cmd)); |
4458 | il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len); |
4459 | } |
4460 | EXPORT_SYMBOL(il_hdl_pm_debug_stats); |
4461 | |
4462 | void |
4463 | il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb) |
4464 | { |
4465 | struct il_rx_pkt *pkt = rxb_addr(rxb); |
4466 | |
4467 | IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) " |
4468 | "seq 0x%04X ser 0x%08X\n" , |
4469 | le32_to_cpu(pkt->u.err_resp.error_type), |
4470 | il_get_cmd_string(pkt->u.err_resp.cmd_id), |
4471 | pkt->u.err_resp.cmd_id, |
4472 | le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), |
4473 | le32_to_cpu(pkt->u.err_resp.error_info)); |
4474 | } |
4475 | EXPORT_SYMBOL(il_hdl_error); |
4476 | |
4477 | void |
4478 | il_clear_isr_stats(struct il_priv *il) |
4479 | { |
4480 | memset(&il->isr_stats, 0, sizeof(il->isr_stats)); |
4481 | } |
4482 | |
4483 | int |
4484 | il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
4485 | unsigned int link_id, u16 queue, |
4486 | const struct ieee80211_tx_queue_params *params) |
4487 | { |
4488 | struct il_priv *il = hw->priv; |
4489 | unsigned long flags; |
4490 | int q; |
4491 | |
4492 | D_MAC80211("enter\n" ); |
4493 | |
4494 | if (!il_is_ready_rf(il)) { |
4495 | D_MAC80211("leave - RF not ready\n" ); |
4496 | return -EIO; |
4497 | } |
4498 | |
4499 | if (queue >= AC_NUM) { |
4500 | D_MAC80211("leave - queue >= AC_NUM %d\n" , queue); |
4501 | return 0; |
4502 | } |
4503 | |
4504 | q = AC_NUM - 1 - queue; |
4505 | |
4506 | spin_lock_irqsave(&il->lock, flags); |
4507 | |
4508 | il->qos_data.def_qos_parm.ac[q].cw_min = |
4509 | cpu_to_le16(params->cw_min); |
4510 | il->qos_data.def_qos_parm.ac[q].cw_max = |
4511 | cpu_to_le16(params->cw_max); |
4512 | il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; |
4513 | il->qos_data.def_qos_parm.ac[q].edca_txop = |
4514 | cpu_to_le16((params->txop * 32)); |
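     | /* params->txop is given by mac80211 in units of 32 usec, so the
     | * multiplication above converts it to microseconds for the uCode
     | * (e.g. a txop of 47 becomes 1504 usec). */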
4515 | |
4516 | il->qos_data.def_qos_parm.ac[q].reserved1 = 0; |
4517 | |
4518 | spin_unlock_irqrestore(&il->lock, flags);
4519 | |
4520 | D_MAC80211("leave\n" ); |
4521 | return 0; |
4522 | } |
4523 | EXPORT_SYMBOL(il_mac_conf_tx); |
4524 | |
4525 | int |
4526 | il_mac_tx_last_beacon(struct ieee80211_hw *hw) |
4527 | { |
4528 | struct il_priv *il = hw->priv; |
4529 | int ret; |
4530 | |
4531 | D_MAC80211("enter\n" ); |
4532 | |
4533 | ret = (il->ibss_manager == IL_IBSS_MANAGER); |
4534 | |
4535 | D_MAC80211("leave ret %d\n" , ret); |
4536 | return ret; |
4537 | } |
4538 | EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon); |
4539 | |
4540 | static int |
4541 | il_set_mode(struct il_priv *il) |
4542 | { |
4543 | il_connection_init_rx_config(il); |
4544 | |
4545 | if (il->ops->set_rxon_chain) |
4546 | il->ops->set_rxon_chain(il); |
4547 | |
4548 | return il_commit_rxon(il); |
4549 | } |
4550 | |
4551 | int |
4552 | il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) |
4553 | { |
4554 | struct il_priv *il = hw->priv; |
4555 | int err; |
4556 | bool reset; |
4557 | |
4558 | mutex_lock(&il->mutex); |
4559 | D_MAC80211("enter: type %d, addr %pM\n" , vif->type, vif->addr); |
4560 | |
4561 | if (!il_is_ready_rf(il)) { |
4562 | IL_WARN("Try to add interface when device not ready\n" ); |
4563 | err = -EINVAL; |
4564 | goto out; |
4565 | } |
4566 | |
4567 | /* |
4568 | * We do not support multiple virtual interfaces, but on hardware reset |
4569 | * we have to add the same interface again. |
4570 | */ |
4571 | reset = (il->vif == vif); |
4572 | if (il->vif && !reset) { |
4573 | err = -EOPNOTSUPP; |
4574 | goto out; |
4575 | } |
4576 | |
4577 | il->vif = vif; |
4578 | il->iw_mode = vif->type; |
4579 | |
4580 | err = il_set_mode(il); |
4581 | if (err) { |
4582 | IL_WARN("Fail to set mode %d\n" , vif->type); |
4583 | if (!reset) { |
4584 | il->vif = NULL; |
4585 | il->iw_mode = NL80211_IFTYPE_STATION; |
4586 | } |
4587 | } |
4588 | |
4589 | out: |
4590 | D_MAC80211("leave err %d\n" , err); |
4591 | mutex_unlock(&il->mutex);
4592 | |
4593 | return err; |
4594 | } |
4595 | EXPORT_SYMBOL(il_mac_add_interface); |
4596 | |
4597 | static void |
4598 | il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif) |
4599 | { |
4600 | lockdep_assert_held(&il->mutex); |
4601 | |
4602 | if (il->scan_vif == vif) { |
4603 | il_scan_cancel_timeout(il, 200); |
4604 | il_force_scan_end(il); |
4605 | } |
4606 | |
4607 | il_set_mode(il); |
4608 | } |
4609 | |
4610 | void |
4611 | il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) |
4612 | { |
4613 | struct il_priv *il = hw->priv; |
4614 | |
4615 | mutex_lock(&il->mutex); |
4616 | D_MAC80211("enter: type %d, addr %pM\n" , vif->type, vif->addr); |
4617 | |
4618 | WARN_ON(il->vif != vif); |
4619 | il->vif = NULL; |
4620 | il->iw_mode = NL80211_IFTYPE_UNSPECIFIED; |
4621 | il_teardown_interface(il, vif); |
4622 | eth_zero_addr(il->bssid);
4623 | |
4624 | D_MAC80211("leave\n" ); |
4625 | mutex_unlock(&il->mutex);
4626 | } |
4627 | EXPORT_SYMBOL(il_mac_remove_interface); |
4628 | |
4629 | int |
4630 | il_alloc_txq_mem(struct il_priv *il) |
4631 | { |
4632 | if (!il->txq) |
4633 | il->txq = |
4634 | kcalloc(il->cfg->num_of_queues,
4635 | sizeof(struct il_tx_queue),
4636 | GFP_KERNEL); |
4637 | if (!il->txq) { |
4638 | IL_ERR("Not enough memory for txq\n" ); |
4639 | return -ENOMEM; |
4640 | } |
4641 | return 0; |
4642 | } |
4643 | EXPORT_SYMBOL(il_alloc_txq_mem); |
4644 | |
4645 | void |
4646 | il_free_txq_mem(struct il_priv *il) |
4647 | { |
4648 | kfree(il->txq);
4649 | il->txq = NULL; |
4650 | } |
4651 | EXPORT_SYMBOL(il_free_txq_mem); |
4652 | |
4653 | int |
4654 | il_force_reset(struct il_priv *il, bool external) |
4655 | { |
4656 | struct il_force_reset *force_reset; |
4657 | |
4658 | if (test_bit(S_EXIT_PENDING, &il->status)) |
4659 | return -EINVAL; |
4660 | |
4661 | force_reset = &il->force_reset; |
4662 | force_reset->reset_request_count++; |
4663 | if (!external) { |
4664 | if (force_reset->last_force_reset_jiffies && |
4665 | time_after(force_reset->last_force_reset_jiffies + |
4666 | force_reset->reset_duration, jiffies)) { |
4667 | D_INFO("force reset rejected\n" ); |
4668 | force_reset->reset_reject_count++; |
4669 | return -EAGAIN; |
4670 | } |
4671 | } |
4672 | force_reset->reset_success_count++; |
4673 | force_reset->last_force_reset_jiffies = jiffies; |
4674 | |
4675 | /*
4676 | * If the request is external (e.g. from debugfs), always
4677 | * perform the request regardless of the module parameter
4678 | * setting.
4679 | * If the request is internal (uCode error or driver-detected
4680 | * failure), the fw_restart module parameter needs to be
4681 | * checked before performing a firmware reload.
4682 | */
4683 | |
4684 | if (!external && !il->cfg->mod_params->restart_fw) { |
4685 | D_INFO("Cancel firmware reload based on " |
4686 | "module parameter setting\n" ); |
4687 | return 0; |
4688 | } |
4689 | |
4690 | IL_ERR("On demand firmware reload\n" ); |
4691 | |
4692 | /* Set the FW error flag -- cleared on il_down */ |
4693 | set_bit(S_FW_ERROR, &il->status);
4694 | wake_up(&il->wait_command_queue); |
4695 | /* |
4696 | * Keep the restart process from trying to send host |
4697 | * commands by clearing the INIT status bit |
4698 | */ |
4699 | clear_bit(S_READY, &il->status);
4700 | queue_work(il->workqueue, &il->restart);
4701 | |
4702 | return 0; |
4703 | } |
4704 | EXPORT_SYMBOL(il_force_reset); |
4705 | |
4706 | int |
4707 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
4708 | enum nl80211_iftype newtype, bool newp2p) |
4709 | { |
4710 | struct il_priv *il = hw->priv; |
4711 | int err; |
4712 | |
4713 | mutex_lock(&il->mutex); |
4714 | D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n" , |
4715 | vif->type, vif->addr, newtype, newp2p); |
4716 | |
4717 | if (newp2p) { |
4718 | err = -EOPNOTSUPP; |
4719 | goto out; |
4720 | } |
4721 | |
4722 | if (!il->vif || !il_is_ready_rf(il)) { |
4723 | /* |
4724 | * Huh? But wait ... this can maybe happen when |
4725 | * we're in the middle of a firmware restart! |
4726 | */ |
4727 | err = -EBUSY; |
4728 | goto out; |
4729 | } |
4730 | |
4731 | /* success */ |
4732 | vif->type = newtype; |
4733 | vif->p2p = false; |
4734 | il->iw_mode = newtype; |
4735 | il_teardown_interface(il, vif); |
4736 | err = 0; |
4737 | |
4738 | out: |
4739 | D_MAC80211("leave err %d\n" , err); |
4740 | mutex_unlock(&il->mutex);
4741 | |
4742 | return err; |
4743 | } |
4744 | EXPORT_SYMBOL(il_mac_change_interface); |
4745 | |
4746 | void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
4747 | u32 queues, bool drop) |
4748 | { |
4749 | struct il_priv *il = hw->priv; |
4750 | unsigned long timeout = jiffies + msecs_to_jiffies(500);
4751 | int i; |
4752 | |
4753 | mutex_lock(&il->mutex); |
4754 | D_MAC80211("enter\n" ); |
4755 | |
4756 | if (il->txq == NULL) |
4757 | goto out; |
4758 | |
4759 | for (i = 0; i < il->hw_params.max_txq_num; i++) { |
4760 | struct il_queue *q; |
4761 | |
4762 | if (i == il->cmd_queue) |
4763 | continue; |
4764 | |
4765 | q = &il->txq[i].q; |
4766 | if (q->read_ptr == q->write_ptr) |
4767 | continue; |
4768 | |
4769 | if (time_after(jiffies, timeout)) { |
4770 | IL_ERR("Failed to flush queue %d\n" , q->id); |
4771 | break; |
4772 | } |
4773 | |
4774 | msleep(20);
4775 | } |
4776 | out: |
4777 | D_MAC80211("leave\n" ); |
4778 | mutex_unlock(&il->mutex);
4779 | } |
4780 | EXPORT_SYMBOL(il_mac_flush); |
4781 | |
4782 | /* |
4783 | * On every watchdog tick we check the (latest) time stamp. If it has not
4784 | * changed during the timeout period and the queue is not empty, we reset the firmware.
4785 | */ |
4786 | static int |
4787 | il_check_stuck_queue(struct il_priv *il, int cnt) |
4788 | { |
4789 | struct il_tx_queue *txq = &il->txq[cnt]; |
4790 | struct il_queue *q = &txq->q; |
4791 | unsigned long timeout; |
4792 | unsigned long now = jiffies; |
4793 | int ret; |
4794 | |
4795 | if (q->read_ptr == q->write_ptr) { |
4796 | txq->time_stamp = now; |
4797 | return 0; |
4798 | } |
4799 | |
4800 | timeout = |
4801 | txq->time_stamp + |
4802 | msecs_to_jiffies(il->cfg->wd_timeout);
4803 | |
4804 | if (time_after(now, timeout)) { |
4805 | IL_ERR("Queue %d stuck for %u ms.\n" , q->id, |
4806 | jiffies_to_msecs(now - txq->time_stamp)); |
4807 | ret = il_force_reset(il, false); |
4808 | return (ret == -EAGAIN) ? 0 : 1; |
4809 | } |
4810 | |
4811 | return 0; |
4812 | } |
4813 | |
4814 | /* |
4815 | * Making the watchdog tick a quarter of the timeout assures we will
4816 | * discover a hung queue between timeout and 1.25*timeout
4817 | */ |
4818 | #define IL_WD_TICK(timeout) ((timeout) / 4) |
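     |
     | /*
     | * Worked example (a sketch, assuming wd_timeout is 2000 ms): the watchdog
     | * then ticks every 500 ms, so a queue whose time stamp stops advancing is
     | * flagged by il_check_stuck_queue() at the first tick that falls at least
     | * wd_timeout after the last update, i.e. between 2000 and 2500 ms later.
     | */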
4819 | |
4820 | /* |
4821 | * Watchdog timer callback: we check each tx queue for a stuck condition and,
4822 | * if one is hung, reset the firmware. If everything is fine, just rearm the timer.
4823 | */ |
4824 | void |
4825 | il_bg_watchdog(struct timer_list *t) |
4826 | { |
4827 | struct il_priv *il = from_timer(il, t, watchdog); |
4828 | int cnt; |
4829 | unsigned long timeout; |
4830 | |
4831 | if (test_bit(S_EXIT_PENDING, &il->status)) |
4832 | return; |
4833 | |
4834 | timeout = il->cfg->wd_timeout; |
4835 | if (timeout == 0) |
4836 | return; |
4837 | |
4838 | /* monitor and check for stuck cmd queue */ |
4839 | if (il_check_stuck_queue(il, il->cmd_queue))
4840 | return; |
4841 | |
4842 | /* monitor and check for other stuck queues */ |
4843 | for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) { |
4844 | /* skip as we already checked the command queue */ |
4845 | if (cnt == il->cmd_queue) |
4846 | continue; |
4847 | if (il_check_stuck_queue(il, cnt)) |
4848 | return; |
4849 | } |
4850 | |
4851 | mod_timer(&il->watchdog,
4852 | jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4853 | } |
4854 | EXPORT_SYMBOL(il_bg_watchdog); |
4855 | |
4856 | void |
4857 | il_setup_watchdog(struct il_priv *il) |
4858 | { |
4859 | unsigned int timeout = il->cfg->wd_timeout; |
4860 | |
4861 | if (timeout) |
4862 | mod_timer(&il->watchdog,
4863 | jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4864 | else
4865 | del_timer(&il->watchdog);
4866 | } |
4867 | EXPORT_SYMBOL(il_setup_watchdog); |
4868 | |
4869 | /* |
4870 | * Extended beacon time format:
4871 | * the time in usec is converted into a 32-bit value in extended:internal format,
4872 | * where the extended part is the beacon count and
4873 | * the internal part is the time in usec within one beacon interval.
4874 | */ |
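     | /*
     | * Worked example (a sketch; the actual beacon_time_tsf_bits value is
     | * hardware dependent): with beacon_time_tsf_bits == 22, TIME_UNIT == 1024
     | * and a 100 TU beacon interval (102400 usec), usec = 250000 splits into
     | * 2 whole intervals plus 45200 usec, so il_usecs_to_beacons() returns
     | * (2 << 22) + 45200.
     | */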
4875 | u32 |
4876 | il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval) |
4877 | { |
4878 | u32 quot; |
4879 | u32 rem; |
4880 | u32 interval = beacon_interval * TIME_UNIT; |
4881 | |
4882 | if (!interval || !usec) |
4883 | return 0; |
4884 | |
4885 | quot = |
4886 | (usec / |
4887 | interval) & (il_beacon_time_mask_high(il, |
4888 | il->hw_params.
4889 | beacon_time_tsf_bits) >> il-> |
4890 | hw_params.beacon_time_tsf_bits); |
4891 | rem = |
4892 | (usec % interval) & il_beacon_time_mask_low(il, |
4893 | il->hw_params.
4894 | beacon_time_tsf_bits); |
4895 | |
4896 | return (quot << il->hw_params.beacon_time_tsf_bits) + rem; |
4897 | } |
4898 | EXPORT_SYMBOL(il_usecs_to_beacons); |
4899 | |
4900 | /* base is usually what we get from the uCode with each received frame;
4901 | * it is the same as the HW timer counter counting down
4902 | */ |
4903 | __le32 |
4904 | il_add_beacon_time(struct il_priv *il, u32 base, u32 addon, |
4905 | u32 beacon_interval) |
4906 | { |
4907 | u32 base_low = base & il_beacon_time_mask_low(il, |
4908 | il->hw_params.
4909 | beacon_time_tsf_bits); |
4910 | u32 addon_low = addon & il_beacon_time_mask_low(il, |
4911 | il->hw_params.
4912 | beacon_time_tsf_bits); |
4913 | u32 interval = beacon_interval * TIME_UNIT; |
4914 | u32 res = (base & il_beacon_time_mask_high(il, |
4915 | il->hw_params.
4916 | beacon_time_tsf_bits)) + |
4917 | (addon & il_beacon_time_mask_high(il, |
4918 | il->hw_params.
4919 | beacon_time_tsf_bits)); |
4920 | |
4921 | if (base_low > addon_low) |
4922 | res += base_low - addon_low; |
4923 | else if (base_low < addon_low) { |
4924 | res += interval + base_low - addon_low; |
4925 | res += (1 << il->hw_params.beacon_time_tsf_bits); |
4926 | } else |
4927 | res += (1 << il->hw_params.beacon_time_tsf_bits); |
4928 | |
4929 | return cpu_to_le32(res); |
4930 | } |
4931 | EXPORT_SYMBOL(il_add_beacon_time); |
4932 | |
4933 | #ifdef CONFIG_PM_SLEEP |
4934 | |
4935 | static int |
4936 | il_pci_suspend(struct device *device) |
4937 | { |
4938 | struct il_priv *il = dev_get_drvdata(device);
4939 | |
4940 | /* |
4941 | * This function is called when the system goes into suspend state.
4942 | * mac80211 will call il_mac_stop() from the mac80211 suspend function
4943 | * first, but since il_mac_stop() has no knowledge of who the caller is,
4944 | * it will not call apm_ops.stop() to stop the DMA operation.
4945 | * Call apm_ops.stop() here to make sure the DMA is stopped.
4946 | */ |
4947 | il_apm_stop(il); |
4948 | |
4949 | return 0; |
4950 | } |
4951 | |
4952 | static int |
4953 | il_pci_resume(struct device *device) |
4954 | { |
4955 | struct pci_dev *pdev = to_pci_dev(device); |
4956 | struct il_priv *il = pci_get_drvdata(pdev); |
4957 | bool hw_rfkill = false; |
4958 | |
4959 | /* |
4960 | * We disable the RETRY_TIMEOUT register (0x41) to keep |
4961 | * PCI Tx retries from interfering with C3 CPU state. |
4962 | */ |
4963 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4964 | |
4965 | il_enable_interrupts(il); |
4966 | |
4967 | if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) |
4968 | hw_rfkill = true; |
4969 | |
4970 | if (hw_rfkill) |
4971 | set_bit(S_RFKILL, &il->status);
4972 | else
4973 | clear_bit(S_RFKILL, &il->status);
4974 |
4975 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
4976 | |
4977 | return 0; |
4978 | } |
4979 | |
4980 | SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume); |
4981 | EXPORT_SYMBOL(il_pm_ops); |
4982 | |
4983 | #endif /* CONFIG_PM_SLEEP */ |
4984 | |
4985 | static void |
4986 | il_update_qos(struct il_priv *il) |
4987 | { |
4988 | if (test_bit(S_EXIT_PENDING, &il->status)) |
4989 | return; |
4990 | |
4991 | il->qos_data.def_qos_parm.qos_flags = 0; |
4992 | |
4993 | if (il->qos_data.qos_active) |
4994 | il->qos_data.def_qos_parm.qos_flags |= |
4995 | QOS_PARAM_FLG_UPDATE_EDCA_MSK; |
4996 | |
4997 | if (il->ht.enabled) |
4998 | il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; |
4999 | |
5000 | D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n" , |
5001 | il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags); |
5002 | |
5003 | il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd), |
5004 | &il->qos_data.def_qos_parm, NULL); |
5005 | } |
5006 | |
5007 | /* |
5008 | * il_mac_config - mac80211 config callback |
5009 | */ |
5010 | int |
5011 | il_mac_config(struct ieee80211_hw *hw, u32 changed) |
5012 | { |
5013 | struct il_priv *il = hw->priv; |
5014 | const struct il_channel_info *ch_info; |
5015 | struct ieee80211_conf *conf = &hw->conf; |
5016 | struct ieee80211_channel *channel = conf->chandef.chan; |
5017 | struct il_ht_config *ht_conf = &il->current_ht_config; |
5018 | unsigned long flags = 0; |
5019 | int ret = 0; |
5020 | u16 ch; |
5021 | int scan_active = 0; |
5022 | bool ht_changed = false; |
5023 | |
5024 | mutex_lock(&il->mutex); |
5025 | D_MAC80211("enter: channel %d changed 0x%X\n" , channel->hw_value, |
5026 | changed); |
5027 | |
5028 | if (unlikely(test_bit(S_SCANNING, &il->status))) { |
5029 | scan_active = 1; |
5030 | D_MAC80211("scan active\n" ); |
5031 | } |
5032 | |
5033 | if (changed & |
5034 | (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) { |
5035 | /* mac80211 uses static for non-HT which is what we want */ |
5036 | il->current_ht_config.smps = conf->smps_mode; |
5037 | |
5038 | /* |
5039 | * Recalculate chain counts. |
5040 | * |
5041 | * If monitor mode is enabled then mac80211 will |
5042 | * set up the SM PS mode to OFF if an HT channel is |
5043 | * configured. |
5044 | */ |
5045 | if (il->ops->set_rxon_chain) |
5046 | il->ops->set_rxon_chain(il); |
5047 | } |
5048 | |
	/* During scanning, mac80211 delays the channel setting until the
	 * scan finishes, and then calls us again with changed = 0.
	 */
5052 | if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { |
5053 | |
5054 | if (scan_active) |
5055 | goto set_ch_out; |
5056 | |
5057 | ch = channel->hw_value; |
5058 | ch_info = il_get_channel_info(il, channel->band, ch); |
5059 | if (!il_is_channel_valid(ch_info)) { |
5060 | D_MAC80211("leave - invalid channel\n" ); |
5061 | ret = -EINVAL; |
5062 | goto set_ch_out; |
5063 | } |
5064 | |
5065 | if (il->iw_mode == NL80211_IFTYPE_ADHOC && |
		    !il_is_channel_ibss(ch_info)) {
5067 | D_MAC80211("leave - not IBSS channel\n" ); |
5068 | ret = -EINVAL; |
5069 | goto set_ch_out; |
5070 | } |
5071 | |
5072 | spin_lock_irqsave(&il->lock, flags); |
5073 | |
5074 | /* Configure HT40 channels */ |
5075 | if (il->ht.enabled != conf_is_ht(conf)) { |
5076 | il->ht.enabled = conf_is_ht(conf); |
5077 | ht_changed = true; |
5078 | } |
5079 | if (il->ht.enabled) { |
5080 | if (conf_is_ht40_minus(conf)) { |
5081 | il->ht.extension_chan_offset = |
5082 | IEEE80211_HT_PARAM_CHA_SEC_BELOW; |
5083 | il->ht.is_40mhz = true; |
5084 | } else if (conf_is_ht40_plus(conf)) { |
5085 | il->ht.extension_chan_offset = |
5086 | IEEE80211_HT_PARAM_CHA_SEC_ABOVE; |
5087 | il->ht.is_40mhz = true; |
5088 | } else { |
5089 | il->ht.extension_chan_offset = |
5090 | IEEE80211_HT_PARAM_CHA_SEC_NONE; |
5091 | il->ht.is_40mhz = false; |
5092 | } |
5093 | } else |
5094 | il->ht.is_40mhz = false; |
5095 | |
5096 | /* |
5097 | * Default to no protection. Protection mode will |
5098 | * later be set from BSS config in il_ht_conf |
5099 | */ |
5100 | il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; |
5101 | |
		/* If we are switching from HT to 2.4 GHz, clear any HT-related
		 * flags since 2.4 GHz does not support HT */
5105 | if ((le16_to_cpu(il->staging.channel) != ch)) |
5106 | il->staging.flags = 0; |
5107 | |
5108 | il_set_rxon_channel(il, channel); |
5109 | il_set_rxon_ht(il, ht_conf); |
5110 | |
5111 | il_set_flags_for_band(il, channel->band, il->vif); |
5112 | |
		spin_unlock_irqrestore(&il->lock, flags);
5114 | |
5115 | if (il->ops->update_bcast_stations) |
5116 | ret = il->ops->update_bcast_stations(il); |
5117 | |
5118 | set_ch_out: |
5119 | /* The list of supported rates and rate mask can be different |
5120 | * for each band; since the band may have changed, reset |
5121 | * the rate mask to what mac80211 lists */ |
5122 | il_set_rate(il); |
5123 | } |
5124 | |
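	/* Power save / idle changes: update the power mode; the warning below
	 * reflects that enabling PS can be problematic on this hardware. */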
5125 | if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) { |
5126 | il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS); |
5127 | if (!il->power_data.ps_disabled) |
5128 | IL_WARN_ONCE("Enabling power save might cause firmware crashes\n" ); |
5129 | ret = il_power_update_mode(il, false); |
5130 | if (ret) |
5131 | D_MAC80211("Error setting sleep level\n" ); |
5132 | } |
5133 | |
5134 | if (changed & IEEE80211_CONF_CHANGE_POWER) { |
5135 | D_MAC80211("TX Power old=%d new=%d\n" , il->tx_power_user_lmt, |
5136 | conf->power_level); |
5137 | |
5138 | il_set_tx_power(il, conf->power_level, false); |
5139 | } |
5140 | |
5141 | if (!il_is_ready(il)) { |
5142 | D_MAC80211("leave - not ready\n" ); |
5143 | goto out; |
5144 | } |
5145 | |
5146 | if (scan_active) |
5147 | goto out; |
5148 | |
	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
5150 | il_commit_rxon(il); |
5151 | else |
5152 | D_INFO("Not re-sending same RXON configuration.\n" ); |
5153 | if (ht_changed) |
5154 | il_update_qos(il); |
5155 | |
5156 | out: |
5157 | D_MAC80211("leave ret %d\n" , ret); |
	mutex_unlock(&il->mutex);
5159 | |
5160 | return ret; |
5161 | } |
5162 | EXPORT_SYMBOL(il_mac_config); |
5163 | |
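/*
 * il_mac_reset_tsf - mac80211 reset_tsf handler
 *
 * Drops the cached (IBSS) beacon and timestamp, cancels any scan in
 * progress and clears the ASSOC filter flag so that the association
 * process can be restarted.
 */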
5164 | void |
5165 | il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) |
5166 | { |
5167 | struct il_priv *il = hw->priv; |
5168 | unsigned long flags; |
5169 | |
5170 | mutex_lock(&il->mutex); |
5171 | D_MAC80211("enter: type %d, addr %pM\n" , vif->type, vif->addr); |
5172 | |
5173 | spin_lock_irqsave(&il->lock, flags); |
5174 | |
5175 | memset(&il->current_ht_config, 0, sizeof(struct il_ht_config)); |
5176 | |
	/* new association: get rid of the ibss beacon skb */
	dev_consume_skb_irq(il->beacon_skb);
5179 | il->beacon_skb = NULL; |
5180 | il->timestamp = 0; |
5181 | |
	spin_unlock_irqrestore(&il->lock, flags);
5183 | |
5184 | il_scan_cancel_timeout(il, 100); |
5185 | if (!il_is_ready_rf(il)) { |
5186 | D_MAC80211("leave - not ready\n" ); |
		mutex_unlock(&il->mutex);
5188 | return; |
5189 | } |
5190 | |
5191 | /* we are restarting association process */ |
5192 | il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
5193 | il_commit_rxon(il); |
5194 | |
5195 | il_set_rate(il); |
5196 | |
5197 | D_MAC80211("leave\n" ); |
	mutex_unlock(&il->mutex);
5199 | } |
5200 | EXPORT_SYMBOL(il_mac_reset_tsf); |
5201 | |
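/*
 * il_ht_conf - derive HT protection and chain requirements from the BSS
 *
 * Records the HT protection mode and non-greenfield STA presence, and
 * decides whether a single RX chain is sufficient (e.g. when the peer
 * only supports one spatial stream).
 */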
5202 | static void |
5203 | il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif) |
5204 | { |
5205 | struct il_ht_config *ht_conf = &il->current_ht_config; |
5206 | struct ieee80211_sta *sta; |
5207 | struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; |
5208 | |
5209 | D_ASSOC("enter:\n" ); |
5210 | |
5211 | if (!il->ht.enabled) |
5212 | return; |
5213 | |
5214 | il->ht.protection = |
5215 | bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; |
5216 | il->ht.non_gf_sta_present = |
5217 | !!(bss_conf-> |
5218 | ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); |
5219 | |
5220 | ht_conf->single_chain_sufficient = false; |
5221 | |
5222 | switch (vif->type) { |
5223 | case NL80211_IFTYPE_STATION: |
5224 | rcu_read_lock(); |
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
5226 | if (sta) { |
5227 | struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; |
5228 | int maxstreams; |
5229 | |
5230 | maxstreams = |
5231 | (ht_cap->mcs. |
5232 | tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) |
5233 | >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; |
5234 | maxstreams += 1; |
5235 | |
5236 | if (ht_cap->mcs.rx_mask[1] == 0 && |
5237 | ht_cap->mcs.rx_mask[2] == 0) |
5238 | ht_conf->single_chain_sufficient = true; |
5239 | if (maxstreams <= 1) |
5240 | ht_conf->single_chain_sufficient = true; |
5241 | } else { |
			/*
			 * This can only happen through a race, when the AP
			 * disconnects us while we're still setting up the
			 * connection; in that case mac80211 will soon tell
			 * us about it.
			 */
5248 | ht_conf->single_chain_sufficient = true; |
5249 | } |
5250 | rcu_read_unlock(); |
5251 | break; |
5252 | case NL80211_IFTYPE_ADHOC: |
5253 | ht_conf->single_chain_sufficient = true; |
5254 | break; |
5255 | default: |
5256 | break; |
5257 | } |
5258 | |
5259 | D_ASSOC("leave\n" ); |
5260 | } |
5261 | |
5262 | static inline void |
5263 | il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif) |
5264 | { |
5265 | /* |
5266 | * inform the ucode that there is no longer an |
5267 | * association and that no more packets should be |
5268 | * sent |
5269 | */ |
5270 | il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
5271 | il->staging.assoc_id = 0; |
5272 | il_commit_rxon(il); |
5273 | } |
5274 | |
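/*
 * il_beacon_update - fetch a new beacon from mac80211 and cache it
 *
 * Replaces the cached beacon skb, records the beacon timestamp and, if
 * the RF is ready, invokes the device-specific post_associate hook.
 */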
5275 | static void |
5276 | il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) |
5277 | { |
5278 | struct il_priv *il = hw->priv; |
5279 | unsigned long flags; |
5280 | __le64 timestamp; |
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif, 0);
5282 | |
5283 | if (!skb) |
5284 | return; |
5285 | |
5286 | D_MAC80211("enter\n" ); |
5287 | |
5288 | lockdep_assert_held(&il->mutex); |
5289 | |
5290 | if (!il->beacon_enabled) { |
5291 | IL_ERR("update beacon with no beaconing enabled\n" ); |
5292 | dev_kfree_skb(skb); |
5293 | return; |
5294 | } |
5295 | |
5296 | spin_lock_irqsave(&il->lock, flags); |
	dev_consume_skb_irq(il->beacon_skb);
5298 | il->beacon_skb = skb; |
5299 | |
5300 | timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; |
5301 | il->timestamp = le64_to_cpu(timestamp); |
5302 | |
5303 | D_MAC80211("leave\n" ); |
	spin_unlock_irqrestore(&il->lock, flags);
5305 | |
5306 | if (!il_is_ready_rf(il)) { |
5307 | D_MAC80211("leave - RF not ready\n" ); |
5308 | return; |
5309 | } |
5310 | |
5311 | il->ops->post_associate(il); |
5312 | } |
5313 | |
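/*
 * il_mac_bss_info_changed - mac80211 bss_info_changed handler
 *
 * Translates BSS configuration changes (QoS, beaconing, BSSID, ERP, HT,
 * association state, IBSS membership) into RXON/ucode updates.
 */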
5314 | void |
5315 | il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
5316 | struct ieee80211_bss_conf *bss_conf, u64 changes) |
5317 | { |
5318 | struct il_priv *il = hw->priv; |
5319 | int ret; |
5320 | |
5321 | mutex_lock(&il->mutex); |
5322 | D_MAC80211("enter: changes 0x%llx\n" , changes); |
5323 | |
5324 | if (!il_is_alive(il)) { |
5325 | D_MAC80211("leave - not alive\n" ); |
		mutex_unlock(&il->mutex);
5327 | return; |
5328 | } |
5329 | |
5330 | if (changes & BSS_CHANGED_QOS) { |
5331 | unsigned long flags; |
5332 | |
5333 | spin_lock_irqsave(&il->lock, flags); |
5334 | il->qos_data.qos_active = bss_conf->qos; |
5335 | il_update_qos(il); |
		spin_unlock_irqrestore(&il->lock, flags);
5337 | } |
5338 | |
5339 | if (changes & BSS_CHANGED_BEACON_ENABLED) { |
5340 | /* FIXME: can we remove beacon_enabled ? */ |
5341 | if (vif->bss_conf.enable_beacon) |
5342 | il->beacon_enabled = true; |
5343 | else |
5344 | il->beacon_enabled = false; |
5345 | } |
5346 | |
5347 | if (changes & BSS_CHANGED_BSSID) { |
5348 | D_MAC80211("BSSID %pM\n" , bss_conf->bssid); |
5349 | |
		/*
		 * On a passive channel we wait with blocked queues to see if
		 * there is traffic on that channel. If no frame is received
		 * (very unlikely, since the scan already detected an AP on
		 * that channel, but theoretically possible), the mac80211
		 * association procedure will time out and mac80211 will call
		 * us with an all-zero bssid. We have to unblock the queues in
		 * that case.
		 */
		if (is_zero_ether_addr(bss_conf->bssid))
5359 | il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE); |
5360 | |
		/*
		 * If there is currently an HW scan going on in the background,
		 * we need to cancel it; otherwise we are sometimes not able
		 * to authenticate (FIXME: why?).
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			D_MAC80211("leave - scan abort failed\n");
			mutex_unlock(&il->mutex);
5369 | return; |
5370 | } |
5371 | |
5372 | /* mac80211 only sets assoc when in STATION mode */ |
5373 | memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); |
5374 | |
5375 | /* FIXME: currently needed in a few places */ |
5376 | memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); |
5377 | } |
5378 | |
5379 | /* |
5380 | * This needs to be after setting the BSSID in case |
5381 | * mac80211 decides to do both changes at once because |
5382 | * it will invoke post_associate. |
5383 | */ |
5384 | if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON)) |
5385 | il_beacon_update(hw, vif); |
5386 | |
5387 | if (changes & BSS_CHANGED_ERP_PREAMBLE) { |
5388 | D_MAC80211("ERP_PREAMBLE %d\n" , bss_conf->use_short_preamble); |
5389 | if (bss_conf->use_short_preamble) |
5390 | il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; |
5391 | else |
5392 | il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; |
5393 | } |
5394 | |
5395 | if (changes & BSS_CHANGED_ERP_CTS_PROT) { |
5396 | D_MAC80211("ERP_CTS %d\n" , bss_conf->use_cts_prot); |
5397 | if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ) |
5398 | il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; |
5399 | else |
5400 | il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; |
5401 | if (bss_conf->use_cts_prot) |
5402 | il->staging.flags |= RXON_FLG_SELF_CTS_EN; |
5403 | else |
5404 | il->staging.flags &= ~RXON_FLG_SELF_CTS_EN; |
5405 | } |
5406 | |
5407 | if (changes & BSS_CHANGED_BASIC_RATES) { |
5408 | /* XXX use this information |
5409 | * |
5410 | * To do that, remove code from il_set_rate() and put something |
5411 | * like this here: |
5412 | * |
5413 | if (A-band) |
5414 | il->staging.ofdm_basic_rates = |
5415 | bss_conf->basic_rates; |
5416 | else |
5417 | il->staging.ofdm_basic_rates = |
5418 | bss_conf->basic_rates >> 4; |
5419 | il->staging.cck_basic_rates = |
5420 | bss_conf->basic_rates & 0xF; |
5421 | */ |
5422 | } |
5423 | |
5424 | if (changes & BSS_CHANGED_HT) { |
5425 | il_ht_conf(il, vif); |
5426 | |
5427 | if (il->ops->set_rxon_chain) |
5428 | il->ops->set_rxon_chain(il); |
5429 | } |
5430 | |
5431 | if (changes & BSS_CHANGED_ASSOC) { |
5432 | D_MAC80211("ASSOC %d\n" , vif->cfg.assoc); |
5433 | if (vif->cfg.assoc) { |
5434 | il->timestamp = bss_conf->sync_tsf; |
5435 | |
5436 | if (!il_is_rfkill(il)) |
5437 | il->ops->post_associate(il); |
5438 | } else |
5439 | il_set_no_assoc(il, vif); |
5440 | } |
5441 | |
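	/* If anything changed while we are associated, refresh the RXON
	 * association data in the ucode and mirror the staging config into
	 * the active one on success. */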
5442 | if (changes && il_is_associated(il) && vif->cfg.aid) { |
5443 | D_MAC80211("Changes (%#llx) while associated\n" , changes); |
5444 | ret = il_send_rxon_assoc(il); |
5445 | if (!ret) { |
5446 | /* Sync active_rxon with latest change. */ |
5447 | memcpy((void *)&il->active, &il->staging, |
5448 | sizeof(struct il_rxon_cmd)); |
5449 | } |
5450 | } |
5451 | |
5452 | if (changes & BSS_CHANGED_BEACON_ENABLED) { |
5453 | if (vif->bss_conf.enable_beacon) { |
5454 | memcpy(il->staging.bssid_addr, bss_conf->bssid, |
5455 | ETH_ALEN); |
5456 | memcpy(il->bssid, bss_conf->bssid, ETH_ALEN); |
5457 | il->ops->config_ap(il); |
5458 | } else |
5459 | il_set_no_assoc(il, vif); |
5460 | } |
5461 | |
5462 | if (changes & BSS_CHANGED_IBSS) { |
5463 | ret = il->ops->manage_ibss_station(il, vif, |
5464 | vif->cfg.ibss_joined); |
5465 | if (ret) |
5466 | IL_ERR("failed to %s IBSS station %pM\n" , |
5467 | vif->cfg.ibss_joined ? "add" : "remove" , |
5468 | bss_conf->bssid); |
5469 | } |
5470 | |
5471 | D_MAC80211("leave\n" ); |
	mutex_unlock(&il->mutex);
5473 | } |
5474 | EXPORT_SYMBOL(il_mac_bss_info_changed); |
5475 | |
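/*
 * il_isr - hardware interrupt handler
 *
 * Disables (but does not clear) interrupts, reads the pending CSR and FH
 * interrupt status, and defers the actual servicing to the irq tasklet.
 */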
5476 | irqreturn_t |
5477 | il_isr(int irq, void *data) |
5478 | { |
5479 | struct il_priv *il = data; |
5480 | u32 inta, inta_mask; |
5481 | u32 inta_fh; |
5482 | unsigned long flags; |
5483 | if (!il) |
5484 | return IRQ_NONE; |
5485 | |
5486 | spin_lock_irqsave(&il->lock, flags); |
5487 | |
5488 | /* Disable (but don't clear!) interrupts here to avoid |
5489 | * back-to-back ISRs and sporadic interrupts from our NIC. |
5490 | * If we have something to service, the tasklet will re-enable ints. |
5491 | * If we *don't* have something, we'll re-enable before leaving here. */ |
5492 | inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */ |
	_il_wr(il, CSR_INT_MASK, 0x00000000);
5494 | |
5495 | /* Discover which interrupts are active/pending */ |
5496 | inta = _il_rd(il, CSR_INT); |
5497 | inta_fh = _il_rd(il, CSR_FH_INT_STATUS); |
5498 | |
5499 | /* Ignore interrupt if there's nothing in NIC to service. |
5500 | * This may be due to IRQ shared with another device, |
5501 | * or due to sporadic interrupts thrown from our NIC. */ |
5502 | if (!inta && !inta_fh) { |
5503 | D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n" ); |
5504 | goto none; |
5505 | } |
5506 | |
5507 | if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) { |
5508 | /* Hardware disappeared. It might have already raised |
5509 | * an interrupt */ |
5510 | IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n" , inta); |
5511 | goto unplugged; |
5512 | } |
5513 | |
5514 | D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n" , inta, inta_mask, |
5515 | inta_fh); |
5516 | |
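	/* Mask off the scheduler (SCD) bit before deciding whether the
	 * tasklet needs to be scheduled. */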
5517 | inta &= ~CSR_INT_BIT_SCD; |
5518 | |
5519 | /* il_irq_tasklet() will service interrupts and re-enable them */ |
5520 | if (likely(inta || inta_fh)) |
		tasklet_schedule(&il->irq_tasklet);
5522 | |
5523 | unplugged: |
	spin_unlock_irqrestore(&il->lock, flags);
5525 | return IRQ_HANDLED; |
5526 | |
5527 | none: |
	/* Re-enable interrupts here since we don't have anything to service;
	 * only re-enable if they were disabled by this irq. */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
5533 | return IRQ_NONE; |
5534 | } |
5535 | EXPORT_SYMBOL(il_isr); |
5536 | |
/*
 * il_tx_cmd_protection: Set RTS/CTS protection in the TX command.
 * This is the only such function that 3945 and 4965 share.
 */
5541 | void |
5542 | il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info, |
5543 | __le16 fc, __le32 *tx_flags) |
5544 | { |
5545 | if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { |
5546 | *tx_flags |= TX_CMD_FLG_RTS_MSK; |
5547 | *tx_flags &= ~TX_CMD_FLG_CTS_MSK; |
5548 | *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; |
5549 | |
5550 | if (!ieee80211_is_mgmt(fc)) |
5551 | return; |
5552 | |
5553 | switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { |
5554 | case cpu_to_le16(IEEE80211_STYPE_AUTH): |
5555 | case cpu_to_le16(IEEE80211_STYPE_DEAUTH): |
5556 | case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): |
5557 | case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): |
5558 | *tx_flags &= ~TX_CMD_FLG_RTS_MSK; |
5559 | *tx_flags |= TX_CMD_FLG_CTS_MSK; |
5560 | break; |
5561 | } |
5562 | } else if (info->control.rates[0]. |
5563 | flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { |
5564 | *tx_flags &= ~TX_CMD_FLG_RTS_MSK; |
5565 | *tx_flags |= TX_CMD_FLG_CTS_MSK; |
5566 | *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; |
5567 | } |
5568 | } |
5569 | EXPORT_SYMBOL(il_tx_cmd_protection); |
5570 | |