1 | /* |
2 | * Copyright (c) 2004-2011 Atheros Communications Inc. |
3 | * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. |
4 | * |
5 | * Permission to use, copy, modify, and/or distribute this software for any |
6 | * purpose with or without fee is hereby granted, provided that the above |
7 | * copyright notice and this permission notice appear in all copies. |
8 | * |
9 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
10 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
11 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
12 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
13 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
14 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
15 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
16 | */ |
17 | |
18 | #include <linux/module.h> |
19 | #include <linux/mmc/card.h> |
20 | #include <linux/mmc/mmc.h> |
21 | #include <linux/mmc/host.h> |
22 | #include <linux/mmc/sdio_func.h> |
23 | #include <linux/mmc/sdio_ids.h> |
24 | #include <linux/mmc/sdio.h> |
25 | #include <linux/mmc/sd.h> |
26 | #include "hif.h" |
27 | #include "hif-ops.h" |
28 | #include "target.h" |
29 | #include "debug.h" |
30 | #include "cfg80211.h" |
31 | #include "trace.h" |
32 | |
33 | struct ath6kl_sdio { |
34 | struct sdio_func *func; |
35 | |
36 | /* protects access to bus_req_freeq */ |
37 | spinlock_t lock; |
38 | |
39 | /* free list */ |
40 | struct list_head bus_req_freeq; |
41 | |
42 | /* available bus requests */ |
43 | struct bus_request bus_req[BUS_REQUEST_MAX_NUM]; |
44 | |
45 | struct ath6kl *ar; |
46 | |
47 | u8 *dma_buffer; |
48 | |
49 | /* protects access to dma_buffer */ |
50 | struct mutex dma_buffer_mutex; |
51 | |
52 | /* scatter request list head */ |
53 | struct list_head scat_req; |
54 | |
55 | atomic_t irq_handling; |
56 | wait_queue_head_t irq_wq; |
57 | |
58 | /* protects access to scat_req */ |
59 | spinlock_t scat_lock; |
60 | |
61 | bool scatter_enabled; |
62 | |
63 | bool is_disabled; |
64 | const struct sdio_device_id *id; |
65 | struct work_struct wr_async_work; |
66 | struct list_head wr_asyncq; |
67 | |
68 | /* protects access to wr_asyncq */ |
69 | spinlock_t wr_async_lock; |
70 | }; |
71 | |
72 | #define CMD53_ARG_READ 0 |
73 | #define CMD53_ARG_WRITE 1 |
74 | #define CMD53_ARG_BLOCK_BASIS 1 |
75 | #define CMD53_ARG_FIXED_ADDRESS 0 |
76 | #define CMD53_ARG_INCR_ADDRESS 1 |
77 | |
78 | static int ath6kl_sdio_config(struct ath6kl *ar); |
79 | |
80 | static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar) |
81 | { |
82 | return ar->hif_priv; |
83 | } |
84 | |
85 | /* |
86 | * Macro to check if DMA buffer is WORD-aligned and DMA-able. |
87 | * Most host controllers assume the buffer is DMA'able and will |
88 | * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid |
89 | * check fails on stack memory. |
90 | */ |
91 | static inline bool buf_needs_bounce(u8 *buf) |
92 | { |
93 | return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf); |
94 | } |
95 | |
96 | static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar) |
97 | { |
98 | struct ath6kl_mbox_info *mbox_info = &ar->mbox_info; |
99 | |
100 | /* EP1 has an extended range */ |
101 | mbox_info->htc_addr = HIF_MBOX_BASE_ADDR; |
102 | mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR; |
103 | mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH; |
104 | mbox_info->block_size = HIF_MBOX_BLOCK_SIZE; |
105 | mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR; |
106 | mbox_info->gmbox_sz = HIF_GMBOX_WIDTH; |
107 | } |
108 | |
109 | static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func, |
110 | u8 mode, u8 opcode, u32 addr, |
111 | u16 blksz) |
112 | { |
113 | *arg = (((rw & 1) << 31) | |
114 | ((func & 0x7) << 28) | |
115 | ((mode & 1) << 27) | |
116 | ((opcode & 1) << 26) | |
117 | ((addr & 0x1FFFF) << 9) | |
118 | (blksz & 0x1FF)); |
119 | } |
120 | |
121 | static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw, |
122 | unsigned int address, |
123 | unsigned char val) |
124 | { |
125 | const u8 func = 0; |
126 | |
127 | *arg = ((write & 1) << 31) | |
128 | ((func & 0x7) << 28) | |
129 | ((raw & 1) << 27) | |
130 | (1 << 26) | |
131 | ((address & 0x1FFFF) << 9) | |
132 | (1 << 8) | |
133 | (val & 0xFF); |
134 | } |
135 | |
136 | static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card, |
137 | unsigned int address, |
138 | unsigned char byte) |
139 | { |
140 | struct mmc_command io_cmd; |
141 | |
142 | memset(&io_cmd, 0, sizeof(io_cmd)); |
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
148 | } |
149 | |
150 | static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr, |
151 | u8 *buf, u32 len) |
152 | { |
153 | int ret = 0; |
154 | |
155 | sdio_claim_host(func); |
156 | |
157 | if (request & HIF_WRITE) { |
158 | /* FIXME: looks like ugly workaround for something */ |
159 | if (addr >= HIF_MBOX_BASE_ADDR && |
160 | addr <= HIF_MBOX_END_ADDR) |
161 | addr += (HIF_MBOX_WIDTH - len); |
162 | |
163 | /* FIXME: this also looks like ugly workaround */ |
164 | if (addr == HIF_MBOX0_EXT_BASE_ADDR) |
165 | addr += HIF_MBOX0_EXT_WIDTH - len; |
166 | |
167 | if (request & HIF_FIXED_ADDRESS) |
			ret = sdio_writesb(func, addr, buf, len);
		else
			ret = sdio_memcpy_toio(func, addr, buf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(func, buf, addr, len);
		else
			ret = sdio_memcpy_fromio(func, buf, addr, len);
176 | } |
177 | |
178 | sdio_release_host(func); |
179 | |
	ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
		   request & HIF_WRITE ? "wr" : "rd", addr,
		   request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
	ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);

	trace_ath6kl_sdio(addr, request, buf, len);
186 | |
187 | return ret; |
188 | } |
189 | |
190 | static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio) |
191 | { |
192 | struct bus_request *bus_req; |
193 | |
	spin_lock_bh(&ar_sdio->lock);

	if (list_empty(&ar_sdio->bus_req_freeq)) {
		spin_unlock_bh(&ar_sdio->lock);
		return NULL;
	}

	bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
				   struct bus_request, list);
	list_del(&bus_req->list);

	spin_unlock_bh(&ar_sdio->lock);
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);
208 | |
209 | return bus_req; |
210 | } |
211 | |
212 | static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio, |
213 | struct bus_request *bus_req) |
214 | { |
	ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
		   __func__, bus_req);

	spin_lock_bh(&ar_sdio->lock);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_bh(&ar_sdio->lock);
221 | } |
222 | |
223 | static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req, |
224 | struct mmc_data *data) |
225 | { |
226 | struct scatterlist *sg; |
227 | int i; |
228 | |
229 | data->blksz = HIF_MBOX_BLOCK_SIZE; |
230 | data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE; |
231 | |
	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d), (tot:%d,sg:%d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);
237 | |
238 | data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE : |
239 | MMC_DATA_READ; |
240 | |
241 | /* fill SG entries */ |
242 | sg = scat_req->sgentries; |
243 | sg_init_table(sg, scat_req->scat_entries); |
244 | |
245 | /* assemble SG list */ |
246 | for (i = 0; i < scat_req->scat_entries; i++, sg++) { |
		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
253 | } |
254 | |
255 | /* set scatter-gather table for request */ |
256 | data->sg = scat_req->sgentries; |
257 | data->sg_len = scat_req->scat_entries; |
258 | } |
259 | |
260 | static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio, |
261 | struct bus_request *req) |
262 | { |
263 | struct mmc_request mmc_req; |
264 | struct mmc_command cmd; |
265 | struct mmc_data data; |
266 | struct hif_scatter_req *scat_req; |
267 | u8 opcode, rw; |
268 | int status, len; |
269 | |
270 | scat_req = req->scat_req; |
271 | |
272 | if (scat_req->virt_scat) { |
273 | len = scat_req->len; |
274 | if (scat_req->req & HIF_BLOCK_BASIS) |
275 | len = round_down(len, HIF_MBOX_BLOCK_SIZE); |
276 | |
		status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
					scat_req->addr, scat_req->virt_dma_buf,
					len);
280 | goto scat_complete; |
281 | } |
282 | |
283 | memset(&mmc_req, 0, sizeof(struct mmc_request)); |
284 | memset(&cmd, 0, sizeof(struct mmc_command)); |
285 | memset(&data, 0, sizeof(struct mmc_data)); |
286 | |
	ath6kl_sdio_setup_scat_data(scat_req, &data);
288 | |
289 | opcode = (scat_req->req & HIF_FIXED_ADDRESS) ? |
290 | CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS; |
291 | |
292 | rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ; |
293 | |
294 | /* Fixup the address so that the last byte will fall on MBOX EOM */ |
295 | if (scat_req->req & HIF_WRITE) { |
296 | if (scat_req->addr == HIF_MBOX_BASE_ADDR) |
297 | scat_req->addr += HIF_MBOX_WIDTH - scat_req->len; |
298 | else |
299 | /* Uses extended address range */ |
300 | scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len; |
301 | } |
302 | |
303 | /* set command argument */ |
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);
307 | |
308 | cmd.opcode = SD_IO_RW_EXTENDED; |
309 | cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC; |
310 | |
311 | mmc_req.cmd = &cmd; |
312 | mmc_req.data = &data; |
313 | |
	sdio_claim_host(ar_sdio->func);

	mmc_set_data_timeout(&data, ar_sdio->func->card);

	trace_ath6kl_sdio_scat(scat_req->addr,
			       scat_req->req,
			       scat_req->len,
			       scat_req->scat_entries,
			       scat_req->scat_list);

	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	sdio_release_host(ar_sdio->func);
328 | |
329 | status = cmd.error ? cmd.error : data.error; |
330 | |
331 | scat_complete: |
332 | scat_req->status = status; |
333 | |
334 | if (scat_req->status) |
335 | ath6kl_err(fmt: "Scatter write request failed:%d\n" , |
336 | scat_req->status); |
337 | |
338 | if (scat_req->req & HIF_ASYNCHRONOUS) |
339 | scat_req->complete(ar_sdio->ar->htc_target, scat_req); |
340 | |
341 | return status; |
342 | } |
343 | |
344 | static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio, |
345 | int n_scat_entry, int n_scat_req, |
346 | bool virt_scat) |
347 | { |
348 | struct hif_scatter_req *s_req; |
349 | struct bus_request *bus_req; |
350 | int i, scat_req_sz, scat_list_sz, size; |
351 | u8 *virt_buf; |
352 | |
353 | scat_list_sz = n_scat_entry * sizeof(struct hif_scatter_item); |
354 | scat_req_sz = sizeof(*s_req) + scat_list_sz; |
355 | |
356 | if (!virt_scat) |
357 | size = sizeof(struct scatterlist) * n_scat_entry; |
358 | else |
359 | size = 2 * L1_CACHE_BYTES + |
360 | ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; |
361 | |
362 | for (i = 0; i < n_scat_req; i++) { |
363 | /* allocate the scatter request */ |
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
365 | if (!s_req) |
366 | return -ENOMEM; |
367 | |
368 | if (virt_scat) { |
369 | virt_buf = kzalloc(size, GFP_KERNEL); |
370 | if (!virt_buf) { |
				kfree(s_req);
372 | return -ENOMEM; |
373 | } |
374 | |
375 | s_req->virt_dma_buf = |
376 | (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf); |
377 | } else { |
378 | /* allocate sglist */ |
379 | s_req->sgentries = kzalloc(size, GFP_KERNEL); |
380 | |
381 | if (!s_req->sgentries) { |
				kfree(s_req);
383 | return -ENOMEM; |
384 | } |
385 | } |
386 | |
387 | /* allocate a bus request for this scatter request */ |
388 | bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); |
389 | if (!bus_req) { |
			kfree(s_req->sgentries);
			kfree(s_req->virt_dma_buf);
			kfree(s_req);
393 | return -ENOMEM; |
394 | } |
395 | |
396 | /* assign the scatter request to this bus request */ |
397 | bus_req->scat_req = s_req; |
398 | s_req->busrequest = bus_req; |
399 | |
400 | s_req->virt_scat = virt_scat; |
401 | |
402 | /* add it to the scatter pool */ |
		hif_scatter_req_add(ar_sdio->ar, s_req);
404 | } |
405 | |
406 | return 0; |
407 | } |
408 | |
409 | static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf, |
410 | u32 len, u32 request) |
411 | { |
412 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
413 | u8 *tbuf = NULL; |
414 | int ret; |
415 | bool bounced = false; |
416 | |
417 | if (request & HIF_BLOCK_BASIS) |
418 | len = round_down(len, HIF_MBOX_BLOCK_SIZE); |
419 | |
420 | if (buf_needs_bounce(buf)) { |
421 | if (!ar_sdio->dma_buffer) |
422 | return -ENOMEM; |
423 | mutex_lock(&ar_sdio->dma_buffer_mutex); |
424 | tbuf = ar_sdio->dma_buffer; |
425 | |
426 | if (request & HIF_WRITE) |
427 | memcpy(tbuf, buf, len); |
428 | |
429 | bounced = true; |
430 | } else { |
431 | tbuf = buf; |
432 | } |
433 | |
	ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
	if ((request & HIF_READ) && bounced)
		memcpy(buf, tbuf, len);

	if (bounced)
		mutex_unlock(&ar_sdio->dma_buffer_mutex);
440 | |
441 | return ret; |
442 | } |
443 | |
444 | static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio, |
445 | struct bus_request *req) |
446 | { |
447 | if (req->scat_req) { |
448 | ath6kl_sdio_scat_rw(ar_sdio, req); |
449 | } else { |
450 | void *context; |
451 | int status; |
452 | |
		status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
						     req->buffer, req->length,
						     req->request);
		context = req->packet;
		ath6kl_sdio_free_bus_req(ar_sdio, req);
		ath6kl_hif_rw_comp_handler(context, status);
459 | } |
460 | } |
461 | |
462 | static void ath6kl_sdio_write_async_work(struct work_struct *work) |
463 | { |
464 | struct ath6kl_sdio *ar_sdio; |
465 | struct bus_request *req, *tmp_req; |
466 | |
467 | ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work); |
468 | |
	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_bh(&ar_sdio->wr_async_lock);
	}
	spin_unlock_bh(&ar_sdio->wr_async_lock);
477 | } |
478 | |
479 | static void ath6kl_sdio_irq_handler(struct sdio_func *func) |
480 | { |
481 | int status; |
482 | struct ath6kl_sdio *ar_sdio; |
483 | |
484 | ath6kl_dbg(mask: ATH6KL_DBG_SDIO, fmt: "irq\n" ); |
485 | |
486 | ar_sdio = sdio_get_drvdata(func); |
487 | atomic_set(v: &ar_sdio->irq_handling, i: 1); |
488 | /* |
489 | * Release the host during interrups so we can pick it back up when |
490 | * we process commands. |
491 | */ |
492 | sdio_release_host(func: ar_sdio->func); |
493 | |
494 | status = ath6kl_hif_intr_bh_handler(ar: ar_sdio->ar); |
495 | sdio_claim_host(func: ar_sdio->func); |
496 | |
497 | atomic_set(v: &ar_sdio->irq_handling, i: 0); |
498 | wake_up(&ar_sdio->irq_wq); |
499 | |
500 | WARN_ON(status && status != -ECANCELED); |
501 | } |
502 | |
503 | static int ath6kl_sdio_power_on(struct ath6kl *ar) |
504 | { |
505 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
506 | struct sdio_func *func = ar_sdio->func; |
507 | int ret = 0; |
508 | |
509 | if (!ar_sdio->is_disabled) |
510 | return 0; |
511 | |
	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret) {
		ath6kl_err("Unable to enable sdio func: %d\n", ret);
519 | sdio_release_host(func); |
520 | return ret; |
521 | } |
522 | |
523 | sdio_release_host(func); |
524 | |
525 | /* |
526 | * Wait for hardware to initialise. It should take a lot less than |
527 | * 10 ms but let's be conservative here. |
528 | */ |
	msleep(10);

	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
534 | goto out; |
535 | } |
536 | |
537 | ar_sdio->is_disabled = false; |
538 | |
539 | out: |
540 | return ret; |
541 | } |
542 | |
543 | static int ath6kl_sdio_power_off(struct ath6kl *ar) |
544 | { |
545 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
546 | int ret; |
547 | |
548 | if (ar_sdio->is_disabled) |
549 | return 0; |
550 | |
	ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");

	/* Disable the card */
	sdio_claim_host(ar_sdio->func);
	ret = sdio_disable_func(ar_sdio->func);
	sdio_release_host(ar_sdio->func);
557 | |
558 | if (ret) |
559 | return ret; |
560 | |
561 | ar_sdio->is_disabled = true; |
562 | |
563 | return ret; |
564 | } |
565 | |
566 | static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer, |
567 | u32 length, u32 request, |
568 | struct htc_packet *packet) |
569 | { |
570 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
571 | struct bus_request *bus_req; |
572 | |
573 | bus_req = ath6kl_sdio_alloc_busreq(ar_sdio); |
574 | |
575 | if (WARN_ON_ONCE(!bus_req)) |
576 | return -ENOMEM; |
577 | |
578 | bus_req->address = address; |
579 | bus_req->buffer = buffer; |
580 | bus_req->length = length; |
581 | bus_req->request = request; |
582 | bus_req->packet = packet; |
583 | |
	spin_lock_bh(&ar_sdio->wr_async_lock);
	list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
	spin_unlock_bh(&ar_sdio->wr_async_lock);
	queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
588 | |
589 | return 0; |
590 | } |
591 | |
592 | static void ath6kl_sdio_irq_enable(struct ath6kl *ar) |
593 | { |
594 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
595 | int ret; |
596 | |
	sdio_claim_host(ar_sdio->func);

	/* Register the isr */
	ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
	if (ret)
		ath6kl_err("Failed to claim sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
605 | } |
606 | |
607 | static bool ath6kl_sdio_is_on_irq(struct ath6kl *ar) |
608 | { |
609 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
610 | |
	return !atomic_read(&ar_sdio->irq_handling);
612 | } |
613 | |
614 | static void ath6kl_sdio_irq_disable(struct ath6kl *ar) |
615 | { |
616 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
617 | int ret; |
618 | |
	sdio_claim_host(ar_sdio->func);

	if (atomic_read(&ar_sdio->irq_handling)) {
		sdio_release_host(ar_sdio->func);

		ret = wait_event_interruptible(ar_sdio->irq_wq,
					       ath6kl_sdio_is_on_irq(ar));
		if (ret)
			return;

		sdio_claim_host(ar_sdio->func);
	}

	ret = sdio_release_irq(ar_sdio->func);
	if (ret)
		ath6kl_err("Failed to release sdio irq: %d\n", ret);

	sdio_release_host(ar_sdio->func);
637 | } |
638 | |
639 | static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar) |
640 | { |
641 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
642 | struct hif_scatter_req *node = NULL; |
643 | |
	spin_lock_bh(&ar_sdio->scat_lock);

	if (!list_empty(&ar_sdio->scat_req)) {
		node = list_first_entry(&ar_sdio->scat_req,
					struct hif_scatter_req, list);
		list_del(&node->list);

		node->scat_q_depth = get_queue_depth(&ar_sdio->scat_req);
	}

	spin_unlock_bh(&ar_sdio->scat_lock);
655 | |
656 | return node; |
657 | } |
658 | |
659 | static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar, |
660 | struct hif_scatter_req *s_req) |
661 | { |
662 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
663 | |
	spin_lock_bh(&ar_sdio->scat_lock);

	list_add_tail(&s_req->list, &ar_sdio->scat_req);

	spin_unlock_bh(&ar_sdio->scat_lock);
669 | } |
670 | |
671 | /* scatter gather read write request */ |
672 | static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar, |
673 | struct hif_scatter_req *scat_req) |
674 | { |
675 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
676 | u32 request = scat_req->req; |
677 | int status = 0; |
678 | |
679 | if (!scat_req->len) |
680 | return -EINVAL; |
681 | |
	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
	} else {
		spin_lock_bh(&ar_sdio->wr_async_lock);
		list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_bh(&ar_sdio->wr_async_lock);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
693 | } |
694 | |
695 | return status; |
696 | } |
697 | |
698 | /* clean up scatter support */ |
699 | static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar) |
700 | { |
701 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
702 | struct hif_scatter_req *s_req, *tmp_req; |
703 | |
704 | /* empty the free list */ |
	spin_lock_bh(&ar_sdio->scat_lock);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_bh(&ar_sdio->scat_lock);

		/*
		 * FIXME: should we also call completion handler with
		 * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
		 * that the packet is properly freed?
		 */
		if (s_req->busrequest) {
			s_req->busrequest->scat_req = NULL;
			ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
		}
		kfree(s_req->virt_dma_buf);
		kfree(s_req->sgentries);
		kfree(s_req);

		spin_lock_bh(&ar_sdio->scat_lock);
	}
	spin_unlock_bh(&ar_sdio->scat_lock);
726 | |
727 | ar_sdio->scatter_enabled = false; |
728 | } |
729 | |
730 | /* setup of HIF scatter resources */ |
731 | static int ath6kl_sdio_enable_scatter(struct ath6kl *ar) |
732 | { |
733 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
734 | struct htc_target *target = ar->htc_target; |
735 | int ret = 0; |
736 | bool virt_scat = false; |
737 | |
738 | if (ar_sdio->scatter_enabled) |
739 | return 0; |
740 | |
741 | ar_sdio->scatter_enabled = true; |
742 | |
743 | /* check if host supports scatter and it meets our requirements */ |
744 | if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { |
745 | ath6kl_err(fmt: "host only supports scatter of :%d entries, need: %d\n" , |
746 | ar_sdio->func->card->host->max_segs, |
747 | MAX_SCATTER_ENTRIES_PER_REQ); |
748 | virt_scat = true; |
749 | } |
750 | |
751 | if (!virt_scat) { |
752 | ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, |
753 | MAX_SCATTER_ENTRIES_PER_REQ, |
754 | MAX_SCATTER_REQUESTS, virt_scat); |
755 | |
756 | if (!ret) { |
			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "hif-scatter enabled requests %d entries %d\n",
				   MAX_SCATTER_REQUESTS,
				   MAX_SCATTER_ENTRIES_PER_REQ);
761 | |
762 | target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ; |
763 | target->max_xfer_szper_scatreq = |
764 | MAX_SCATTER_REQ_TRANSFER_SIZE; |
765 | } else { |
766 | ath6kl_sdio_cleanup_scatter(ar); |
			ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
768 | } |
769 | } |
770 | |
771 | if (virt_scat || ret) { |
772 | ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio, |
773 | ATH6KL_SCATTER_ENTRIES_PER_REQ, |
774 | ATH6KL_SCATTER_REQS, virt_scat); |
775 | |
776 | if (ret) { |
777 | ath6kl_err(fmt: "failed to alloc virtual scatter resources !\n" ); |
778 | ath6kl_sdio_cleanup_scatter(ar); |
779 | return ret; |
780 | } |
781 | |
		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "virtual scatter enabled requests %d entries %d\n",
			   ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
785 | |
786 | target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ; |
787 | target->max_xfer_szper_scatreq = |
788 | ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER; |
789 | } |
790 | |
791 | return 0; |
792 | } |
793 | |
794 | static int ath6kl_sdio_config(struct ath6kl *ar) |
795 | { |
796 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
797 | struct sdio_func *func = ar_sdio->func; |
798 | int ret; |
799 | |
800 | sdio_claim_host(func); |
801 | |
802 | if (ar_sdio->id->device >= SDIO_DEVICE_ID_ATHEROS_AR6003_00) { |
803 | /* enable 4-bit ASYNC interrupt on AR6003 or later */ |
		ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
						      CCCR_SDIO_IRQ_MODE_REG,
						      SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
807 | if (ret) { |
808 | ath6kl_err(fmt: "Failed to enable 4-bit async irq mode %d\n" , |
809 | ret); |
810 | goto out; |
811 | } |
812 | |
		ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
814 | } |
815 | |
816 | /* give us some time to enable, in ms */ |
817 | func->enable_timeout = 100; |
818 | |
819 | ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE); |
820 | if (ret) { |
821 | ath6kl_err(fmt: "Set sdio block size %d failed: %d)\n" , |
822 | HIF_MBOX_BLOCK_SIZE, ret); |
823 | goto out; |
824 | } |
825 | |
826 | out: |
827 | sdio_release_host(func); |
828 | |
829 | return ret; |
830 | } |
831 | |
832 | static int ath6kl_set_sdio_pm_caps(struct ath6kl *ar) |
833 | { |
834 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
835 | struct sdio_func *func = ar_sdio->func; |
836 | mmc_pm_flag_t flags; |
837 | int ret; |
838 | |
839 | flags = sdio_get_host_pm_caps(func); |
840 | |
	ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
842 | |
843 | if (!(flags & MMC_PM_WAKE_SDIO_IRQ) || |
844 | !(flags & MMC_PM_KEEP_POWER)) |
845 | return -EINVAL; |
846 | |
847 | ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); |
848 | if (ret) { |
849 | ath6kl_err(fmt: "set sdio keep pwr flag failed: %d\n" , ret); |
850 | return ret; |
851 | } |
852 | |
853 | /* sdio irq wakes up host */ |
854 | ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); |
855 | if (ret) |
856 | ath6kl_err(fmt: "set sdio wake irq flag failed: %d\n" , ret); |
857 | |
858 | return ret; |
859 | } |
860 | |
861 | static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) |
862 | { |
863 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
864 | struct sdio_func *func = ar_sdio->func; |
865 | mmc_pm_flag_t flags; |
866 | bool try_deepsleep = false; |
867 | int ret; |
868 | |
869 | if (ar->suspend_mode == WLAN_POWER_STATE_WOW || |
870 | (!ar->suspend_mode && wow)) { |
871 | ret = ath6kl_set_sdio_pm_caps(ar); |
872 | if (ret) |
873 | goto cut_pwr; |
874 | |
		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
		if (ret && ret != -ENOTCONN)
			ath6kl_err("wow suspend failed: %d\n", ret);
878 | |
879 | if (ret && |
880 | (!ar->wow_suspend_mode || |
881 | ar->wow_suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP)) |
882 | try_deepsleep = true; |
883 | else if (ret && |
884 | ar->wow_suspend_mode == WLAN_POWER_STATE_CUT_PWR) |
885 | goto cut_pwr; |
886 | if (!ret) |
887 | return 0; |
888 | } |
889 | |
890 | if (ar->suspend_mode == WLAN_POWER_STATE_DEEP_SLEEP || |
891 | !ar->suspend_mode || try_deepsleep) { |
892 | flags = sdio_get_host_pm_caps(func); |
893 | if (!(flags & MMC_PM_KEEP_POWER)) |
894 | goto cut_pwr; |
895 | |
896 | ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); |
897 | if (ret) |
898 | goto cut_pwr; |
899 | |
900 | /* |
901 | * Workaround to support Deep Sleep with MSM, set the host pm |
902 | * flag as MMC_PM_WAKE_SDIO_IRQ to allow SDCC deiver to disable |
903 | * the sdc2_clock and internally allows MSM to enter |
904 | * TCXO shutdown properly. |
905 | */ |
906 | if ((flags & MMC_PM_WAKE_SDIO_IRQ)) { |
907 | ret = sdio_set_host_pm_flags(func, |
908 | MMC_PM_WAKE_SDIO_IRQ); |
909 | if (ret) |
910 | goto cut_pwr; |
911 | } |
912 | |
		ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
					      NULL);
915 | if (ret) |
916 | goto cut_pwr; |
917 | |
918 | return 0; |
919 | } |
920 | |
921 | cut_pwr: |
922 | if (func->card && func->card->host) |
923 | func->card->host->pm_flags &= ~MMC_PM_KEEP_POWER; |
924 | |
	return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
926 | } |
927 | |
928 | static int ath6kl_sdio_resume(struct ath6kl *ar) |
929 | { |
930 | switch (ar->state) { |
931 | case ATH6KL_STATE_OFF: |
932 | case ATH6KL_STATE_CUTPOWER: |
		ath6kl_dbg(ATH6KL_DBG_SUSPEND,
			   "sdio resume configuring sdio\n");
935 | |
936 | /* need to set sdio settings after power is cut from sdio */ |
937 | ath6kl_sdio_config(ar); |
938 | break; |
939 | |
940 | case ATH6KL_STATE_ON: |
941 | break; |
942 | |
943 | case ATH6KL_STATE_DEEPSLEEP: |
944 | break; |
945 | |
946 | case ATH6KL_STATE_WOW: |
947 | break; |
948 | |
949 | case ATH6KL_STATE_SUSPENDING: |
950 | break; |
951 | |
952 | case ATH6KL_STATE_RESUMING: |
953 | break; |
954 | |
955 | case ATH6KL_STATE_RECOVERY: |
956 | break; |
957 | } |
958 | |
959 | ath6kl_cfg80211_resume(ar); |
960 | |
961 | return 0; |
962 | } |
963 | |
/* set the window address register (using 4-byte register access). */
965 | static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr) |
966 | { |
967 | int status; |
968 | u8 addr_val[4]; |
969 | s32 i; |
970 | |
971 | /* |
972 | * Write bytes 1,2,3 of the register to set the upper address bytes, |
973 | * the LSB is written last to initiate the access cycle |
974 | */ |
975 | |
976 | for (i = 1; i <= 3; i++) { |
977 | /* |
978 | * Fill the buffer with the address byte value we want to |
979 | * hit 4 times. |
980 | */ |
981 | memset(addr_val, ((u8 *)&addr)[i], 4); |
982 | |
983 | /* |
984 | * Hit each byte of the register address with a 4-byte |
985 | * write operation to the same address, this is a harmless |
986 | * operation. |
987 | */ |
		status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
						     4, HIF_WR_SYNC_BYTE_FIX);
990 | if (status) |
991 | break; |
992 | } |
993 | |
994 | if (status) { |
995 | ath6kl_err(fmt: "%s: failed to write initial bytes of 0x%x to window reg: 0x%X\n" , |
996 | __func__, addr, reg_addr); |
997 | return status; |
998 | } |
999 | |
1000 | /* |
1001 | * Write the address register again, this time write the whole |
1002 | * 4-byte value. The effect here is that the LSB write causes the |
1003 | * cycle to start, the extra 3 byte write to bytes 1,2,3 has no |
1004 | * effect since we are writing the same values again |
1005 | */ |
	status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
					     4, HIF_WR_SYNC_BYTE_INC);
1008 | |
1009 | if (status) { |
1010 | ath6kl_err(fmt: "%s: failed to write 0x%x to window reg: 0x%X\n" , |
1011 | __func__, addr, reg_addr); |
1012 | return status; |
1013 | } |
1014 | |
1015 | return 0; |
1016 | } |
1017 | |
1018 | static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data) |
1019 | { |
1020 | int status; |
1021 | |
1022 | /* set window register to start read cycle */ |
	status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
					address);
1025 | |
1026 | if (status) |
1027 | return status; |
1028 | |
1029 | /* read the data */ |
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
					     (u8 *)data, sizeof(u32),
					     HIF_RD_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to read from window data addr\n",
			   __func__);
1035 | return status; |
1036 | } |
1037 | |
1038 | return status; |
1039 | } |
1040 | |
1041 | static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address, |
1042 | __le32 data) |
1043 | { |
1044 | int status; |
1045 | u32 val = (__force u32) data; |
1046 | |
1047 | /* set write data */ |
	status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
					     (u8 *) &val, sizeof(u32),
					     HIF_WR_SYNC_BYTE_INC);
	if (status) {
		ath6kl_err("%s: failed to write 0x%x to window data addr\n",
			   __func__, data);
1053 | return status; |
1054 | } |
1055 | |
1056 | /* set window register, which starts the write cycle */ |
	return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
				      address);
1059 | } |
1060 | |
1061 | static int ath6kl_sdio_bmi_credits(struct ath6kl *ar) |
1062 | { |
1063 | u32 addr; |
1064 | unsigned long timeout; |
1065 | int ret; |
1066 | |
1067 | ar->bmi.cmd_credits = 0; |
1068 | |
1069 | /* Read the counter register to get the command credits */ |
1070 | addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4; |
1071 | |
1072 | timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); |
1073 | while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) { |
1074 | /* |
1075 | * Hit the credit counter with a 4-byte access, the first byte |
1076 | * read will hit the counter and cause a decrement, while the |
		 * remaining 3 bytes have no effect. The rationale behind this
1078 | * is to make all HIF accesses 4-byte aligned. |
1079 | */ |
		ret = ath6kl_sdio_read_write_sync(ar, addr,
						  (u8 *)&ar->bmi.cmd_credits, 4,
						  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("Unable to decrement the command credit count register: %d\n",
				   ret);
1086 | return ret; |
1087 | } |
1088 | |
1089 | /* The counter is only 8 bits. |
1090 | * Ignore anything in the upper 3 bytes |
1091 | */ |
1092 | ar->bmi.cmd_credits &= 0xFF; |
1093 | } |
1094 | |
1095 | if (!ar->bmi.cmd_credits) { |
1096 | ath6kl_err(fmt: "bmi communication timeout\n" ); |
1097 | return -ETIMEDOUT; |
1098 | } |
1099 | |
1100 | return 0; |
1101 | } |
1102 | |
1103 | static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar) |
1104 | { |
1105 | unsigned long timeout; |
1106 | u32 rx_word = 0; |
1107 | int ret = 0; |
1108 | |
1109 | timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT); |
1110 | while ((time_before(jiffies, timeout)) && !rx_word) { |
		ret = ath6kl_sdio_read_write_sync(ar,
						  RX_LOOKAHEAD_VALID_ADDRESS,
						  (u8 *)&rx_word, sizeof(rx_word),
						  HIF_RD_SYNC_BYTE_INC);
		if (ret) {
			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
1117 | return ret; |
1118 | } |
1119 | |
1120 | /* all we really want is one bit */ |
1121 | rx_word &= (1 << ENDPOINT1); |
1122 | } |
1123 | |
1124 | if (!rx_word) { |
1125 | ath6kl_err(fmt: "bmi_recv_buf FIFO empty\n" ); |
1126 | return -EINVAL; |
1127 | } |
1128 | |
1129 | return ret; |
1130 | } |
1131 | |
1132 | static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len) |
1133 | { |
1134 | int ret; |
1135 | u32 addr; |
1136 | |
1137 | ret = ath6kl_sdio_bmi_credits(ar); |
1138 | if (ret) |
1139 | return ret; |
1140 | |
1141 | addr = ar->mbox_info.htc_addr; |
1142 | |
1143 | ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, |
1144 | HIF_WR_SYNC_BYTE_INC); |
1145 | if (ret) { |
1146 | ath6kl_err(fmt: "unable to send the bmi data to the device\n" ); |
1147 | return ret; |
1148 | } |
1149 | |
1150 | return 0; |
1151 | } |
1152 | |
1153 | static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) |
1154 | { |
1155 | int ret; |
1156 | u32 addr; |
1157 | |
1158 | /* |
1159 | * During normal bootup, small reads may be required. |
1160 | * Rather than issue an HIF Read and then wait as the Target |
1161 | * adds successive bytes to the FIFO, we wait here until |
1162 | * we know that response data is available. |
1163 | * |
1164 | * This allows us to cleanly timeout on an unexpected |
1165 | * Target failure rather than risk problems at the HIF level. |
1166 | * In particular, this avoids SDIO timeouts and possibly garbage |
1167 | * data on some host controllers. And on an interconnect |
1168 | * such as Compact Flash (as well as some SDIO masters) which |
1169 | * does not provide any indication on data timeout, it avoids |
1170 | * a potential hang or garbage response. |
1171 | * |
1172 | * Synchronization is more difficult for reads larger than the |
1173 | * size of the MBOX FIFO (128B), because the Target is unable |
1174 | * to push the 129th byte of data until AFTER the Host posts an |
1175 | * HIF Read and removes some FIFO data. So for large reads the |
1176 | * Host proceeds to post an HIF Read BEFORE all the data is |
1177 | * actually available to read. Fortunately, large BMI reads do |
1178 | * not occur in practice -- they're supported for debug/development. |
1179 | * |
1180 | * So Host/Target BMI synchronization is divided into these cases: |
1181 | * CASE 1: length < 4 |
1182 | * Should not happen |
1183 | * |
1184 | * CASE 2: 4 <= length <= 128 |
1185 | * Wait for first 4 bytes to be in FIFO |
1186 | * If CONSERVATIVE_BMI_READ is enabled, also wait for |
1187 | * a BMI command credit, which indicates that the ENTIRE |
1188 | * response is available in the FIFO |
1189 | * |
1190 | * CASE 3: length > 128 |
1191 | * Wait for the first 4 bytes to be in FIFO |
1192 | * |
1193 | * For most uses, a small timeout should be sufficient and we will |
1194 | * usually see a response quickly; but there may be some unusual |
	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
1196 | * For now, we use an unbounded busy loop while waiting for |
1197 | * BMI_EXECUTE. |
1198 | * |
1199 | * If BMI_EXECUTE ever needs to support longer-latency execution, |
1200 | * especially in production, this code needs to be enhanced to sleep |
1201 | * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently |
1202 | * a function of Host processor speed. |
1203 | */ |
1204 | if (len >= 4) { /* NB: Currently, always true */ |
1205 | ret = ath6kl_bmi_get_rx_lkahd(ar); |
1206 | if (ret) |
1207 | return ret; |
1208 | } |
1209 | |
1210 | addr = ar->mbox_info.htc_addr; |
1211 | ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, |
1212 | HIF_RD_SYNC_BYTE_INC); |
1213 | if (ret) { |
1214 | ath6kl_err(fmt: "Unable to read the bmi data from the device: %d\n" , |
1215 | ret); |
1216 | return ret; |
1217 | } |
1218 | |
1219 | return 0; |
1220 | } |
1221 | |
1222 | static void ath6kl_sdio_stop(struct ath6kl *ar) |
1223 | { |
1224 | struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); |
1225 | struct bus_request *req, *tmp_req; |
1226 | void *context; |
1227 | |
1228 | /* FIXME: make sure that wq is not queued again */ |
1229 | |
	cancel_work_sync(&ar_sdio->wr_async_work);

	spin_lock_bh(&ar_sdio->wr_async_lock);

	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);

		if (req->scat_req) {
			/* this is a scatter gather request */
			req->scat_req->status = -ECANCELED;
			req->scat_req->complete(ar_sdio->ar->htc_target,
						req->scat_req);
		} else {
			context = req->packet;
			ath6kl_sdio_free_bus_req(ar_sdio, req);
			ath6kl_hif_rw_comp_handler(context, -ECANCELED);
		}
	}

	spin_unlock_bh(&ar_sdio->wr_async_lock);
1250 | |
1251 | WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4); |
1252 | } |
1253 | |
1254 | static const struct ath6kl_hif_ops ath6kl_sdio_ops = { |
1255 | .read_write_sync = ath6kl_sdio_read_write_sync, |
1256 | .write_async = ath6kl_sdio_write_async, |
1257 | .irq_enable = ath6kl_sdio_irq_enable, |
1258 | .irq_disable = ath6kl_sdio_irq_disable, |
1259 | .scatter_req_get = ath6kl_sdio_scatter_req_get, |
1260 | .scatter_req_add = ath6kl_sdio_scatter_req_add, |
1261 | .enable_scatter = ath6kl_sdio_enable_scatter, |
1262 | .scat_req_rw = ath6kl_sdio_async_rw_scatter, |
1263 | .cleanup_scatter = ath6kl_sdio_cleanup_scatter, |
1264 | .suspend = ath6kl_sdio_suspend, |
1265 | .resume = ath6kl_sdio_resume, |
1266 | .diag_read32 = ath6kl_sdio_diag_read32, |
1267 | .diag_write32 = ath6kl_sdio_diag_write32, |
1268 | .bmi_read = ath6kl_sdio_bmi_read, |
1269 | .bmi_write = ath6kl_sdio_bmi_write, |
1270 | .power_on = ath6kl_sdio_power_on, |
1271 | .power_off = ath6kl_sdio_power_off, |
1272 | .stop = ath6kl_sdio_stop, |
1273 | }; |
1274 | |
1275 | #ifdef CONFIG_PM_SLEEP |
1276 | |
1277 | /* |
1278 | * Empty handlers so that mmc subsystem doesn't remove us entirely during |
1279 | * suspend. We instead follow cfg80211 suspend/resume handlers. |
1280 | */ |
1281 | static int ath6kl_sdio_pm_suspend(struct device *device) |
1282 | { |
1283 | ath6kl_dbg(mask: ATH6KL_DBG_SUSPEND, fmt: "sdio pm suspend\n" ); |
1284 | |
1285 | return 0; |
1286 | } |
1287 | |
1288 | static int ath6kl_sdio_pm_resume(struct device *device) |
1289 | { |
1290 | ath6kl_dbg(mask: ATH6KL_DBG_SUSPEND, fmt: "sdio pm resume\n" ); |
1291 | |
1292 | return 0; |
1293 | } |
1294 | |
1295 | static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend, |
1296 | ath6kl_sdio_pm_resume); |
1297 | |
1298 | #define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops) |
1299 | |
1300 | #else |
1301 | |
1302 | #define ATH6KL_SDIO_PM_OPS NULL |
1303 | |
1304 | #endif /* CONFIG_PM_SLEEP */ |
1305 | |
1306 | static int ath6kl_sdio_probe(struct sdio_func *func, |
1307 | const struct sdio_device_id *id) |
1308 | { |
1309 | int ret; |
1310 | struct ath6kl_sdio *ar_sdio; |
1311 | struct ath6kl *ar; |
1312 | int count; |
1313 | |
	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
		   func->num, func->vendor, func->device,
		   func->max_blksize, func->cur_blksize);

	ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
1320 | if (!ar_sdio) |
1321 | return -ENOMEM; |
1322 | |
1323 | ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL); |
1324 | if (!ar_sdio->dma_buffer) { |
1325 | ret = -ENOMEM; |
1326 | goto err_hif; |
1327 | } |
1328 | |
1329 | ar_sdio->func = func; |
1330 | sdio_set_drvdata(func, ar_sdio); |
1331 | |
1332 | ar_sdio->id = id; |
1333 | ar_sdio->is_disabled = true; |
1334 | |
1335 | spin_lock_init(&ar_sdio->lock); |
1336 | spin_lock_init(&ar_sdio->scat_lock); |
1337 | spin_lock_init(&ar_sdio->wr_async_lock); |
1338 | mutex_init(&ar_sdio->dma_buffer_mutex); |
1339 | |
	INIT_LIST_HEAD(&ar_sdio->scat_req);
	INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
	INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
1343 | |
1344 | INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work); |
1345 | |
1346 | init_waitqueue_head(&ar_sdio->irq_wq); |
1347 | |
1348 | for (count = 0; count < BUS_REQUEST_MAX_NUM; count++) |
		ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
1350 | |
	ar = ath6kl_core_create(&ar_sdio->func->dev);
	if (!ar) {
		ath6kl_err("Failed to alloc ath6kl core\n");
1354 | ret = -ENOMEM; |
1355 | goto err_dma; |
1356 | } |
1357 | |
1358 | ar_sdio->ar = ar; |
1359 | ar->hif_type = ATH6KL_HIF_TYPE_SDIO; |
1360 | ar->hif_priv = ar_sdio; |
1361 | ar->hif_ops = &ath6kl_sdio_ops; |
1362 | ar->bmi.max_data_size = 256; |
1363 | |
1364 | ath6kl_sdio_set_mbox_info(ar); |
1365 | |
	ret = ath6kl_sdio_config(ar);
	if (ret) {
		ath6kl_err("Failed to config sdio: %d\n", ret);
		goto err_core_alloc;
	}

	ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core\n");
1375 | goto err_core_alloc; |
1376 | } |
1377 | |
1378 | return ret; |
1379 | |
1380 | err_core_alloc: |
	ath6kl_core_destroy(ar_sdio->ar);
err_dma:
	kfree(ar_sdio->dma_buffer);
err_hif:
	kfree(ar_sdio);
1386 | |
1387 | return ret; |
1388 | } |
1389 | |
1390 | static void ath6kl_sdio_remove(struct sdio_func *func) |
1391 | { |
1392 | struct ath6kl_sdio *ar_sdio; |
1393 | |
	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "sdio removed func %d vendor 0x%x device 0x%x\n",
1396 | func->num, func->vendor, func->device); |
1397 | |
1398 | ar_sdio = sdio_get_drvdata(func); |
1399 | |
	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_core_cleanup(ar_sdio->ar);
	ath6kl_core_destroy(ar_sdio->ar);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
1408 | } |
1409 | |
1410 | static const struct sdio_device_id ath6kl_sdio_devices[] = { |
1411 | {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_00)}, |
1412 | {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6003_01)}, |
1413 | {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_00)}, |
1414 | {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_01)}, |
1415 | {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_02)}, |
1416 | {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_18)}, |
1417 | {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6004_19)}, |
1418 | {}, |
1419 | }; |
1420 | |
1421 | MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices); |
1422 | |
1423 | static struct sdio_driver ath6kl_sdio_driver = { |
1424 | .name = "ath6kl_sdio" , |
1425 | .id_table = ath6kl_sdio_devices, |
1426 | .probe = ath6kl_sdio_probe, |
1427 | .remove = ath6kl_sdio_remove, |
1428 | .drv.pm = ATH6KL_SDIO_PM_OPS, |
1429 | }; |
1430 | |
1431 | static int __init ath6kl_sdio_init(void) |
1432 | { |
1433 | int ret; |
1434 | |
1435 | ret = sdio_register_driver(&ath6kl_sdio_driver); |
1436 | if (ret) |
1437 | ath6kl_err(fmt: "sdio driver registration failed: %d\n" , ret); |
1438 | |
1439 | return ret; |
1440 | } |
1441 | |
1442 | static void __exit ath6kl_sdio_exit(void) |
1443 | { |
1444 | sdio_unregister_driver(&ath6kl_sdio_driver); |
1445 | } |
1446 | |
1447 | module_init(ath6kl_sdio_init); |
1448 | module_exit(ath6kl_sdio_exit); |
1449 | |
1450 | MODULE_AUTHOR("Atheros Communications, Inc." ); |
1451 | MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices" ); |
1452 | MODULE_LICENSE("Dual BSD/GPL" ); |
1453 | |
1454 | MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_OTP_FILE); |
1455 | MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_FIRMWARE_FILE); |
1456 | MODULE_FIRMWARE(AR6003_HW_2_0_FW_DIR "/" AR6003_HW_2_0_PATCH_FILE); |
1457 | MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE); |
1458 | MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE); |
1459 | MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_OTP_FILE); |
1460 | MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_FIRMWARE_FILE); |
1461 | MODULE_FIRMWARE(AR6003_HW_2_1_1_FW_DIR "/" AR6003_HW_2_1_1_PATCH_FILE); |
1462 | MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE); |
1463 | MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE); |
1464 | MODULE_FIRMWARE(AR6004_HW_1_0_FW_DIR "/" AR6004_HW_1_0_FIRMWARE_FILE); |
1465 | MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE); |
1466 | MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE); |
1467 | MODULE_FIRMWARE(AR6004_HW_1_1_FW_DIR "/" AR6004_HW_1_1_FIRMWARE_FILE); |
1468 | MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE); |
1469 | MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE); |
1470 | MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE); |
1471 | MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE); |
1472 | MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE); |
1473 | MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE); |
1474 | MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE); |
1475 | MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE); |
1476 | |