1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. |
4 | * Intel Management Engine Interface (Intel MEI) Linux driver |
5 | */ |
6 | |
7 | #include <linux/sched/signal.h> |
8 | #include <linux/wait.h> |
9 | #include <linux/delay.h> |
10 | #include <linux/slab.h> |
11 | #include <linux/pm_runtime.h> |
12 | #include <linux/dma-mapping.h> |
13 | |
14 | #include <linux/mei.h> |
15 | |
16 | #include "mei_dev.h" |
17 | #include "hbm.h" |
18 | #include "client.h" |
19 | |
20 | /** |
21 | * mei_me_cl_init - initialize me client |
22 | * |
23 | * @me_cl: me client |
24 | */ |
25 | void mei_me_cl_init(struct mei_me_client *me_cl) |
26 | { |
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
29 | } |
30 | |
31 | /** |
32 | * mei_me_cl_get - increases me client refcount |
33 | * |
34 | * @me_cl: me client |
35 | * |
36 | * Locking: called under "dev->device_lock" lock |
37 | * |
38 | * Return: me client or NULL |
39 | */ |
40 | struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl) |
41 | { |
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
43 | return me_cl; |
44 | |
45 | return NULL; |
46 | } |
47 | |
48 | /** |
49 | * mei_me_cl_release - free me client |
50 | * |
51 | * @ref: me_client refcount |
52 | * |
53 | * Locking: called under "dev->device_lock" lock |
54 | */ |
55 | static void mei_me_cl_release(struct kref *ref) |
56 | { |
57 | struct mei_me_client *me_cl = |
58 | container_of(ref, struct mei_me_client, refcnt); |
59 | |
	kfree(me_cl);
61 | } |
62 | |
63 | /** |
64 | * mei_me_cl_put - decrease me client refcount and free client if necessary |
65 | * |
66 | * @me_cl: me client |
67 | * |
68 | * Locking: called under "dev->device_lock" lock |
69 | */ |
70 | void mei_me_cl_put(struct mei_me_client *me_cl) |
71 | { |
72 | if (me_cl) |
		kref_put(&me_cl->refcnt, mei_me_cl_release);
74 | } |
75 | |
76 | /** |
77 | * __mei_me_cl_del - delete me client from the list and decrease |
78 | * reference counter |
79 | * |
80 | * @dev: mei device |
81 | * @me_cl: me client |
82 | * |
83 | * Locking: dev->me_clients_rwsem |
84 | */ |
85 | static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl) |
86 | { |
87 | if (!me_cl) |
88 | return; |
89 | |
	list_del_init(&me_cl->list);
91 | mei_me_cl_put(me_cl); |
92 | } |
93 | |
94 | /** |
95 | * mei_me_cl_del - delete me client from the list and decrease |
96 | * reference counter |
97 | * |
98 | * @dev: mei device |
99 | * @me_cl: me client |
100 | */ |
101 | void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl) |
102 | { |
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
106 | } |
107 | |
108 | /** |
109 | * mei_me_cl_add - add me client to the list |
110 | * |
111 | * @dev: mei device |
112 | * @me_cl: me client |
113 | */ |
114 | void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl) |
115 | { |
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
119 | } |
120 | |
121 | /** |
122 | * __mei_me_cl_by_uuid - locate me client by uuid |
123 | * increases ref count |
124 | * |
125 | * @dev: mei device |
126 | * @uuid: me client uuid |
127 | * |
128 | * Return: me client or NULL if not found |
129 | * |
130 | * Locking: dev->me_clients_rwsem |
131 | */ |
132 | static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev, |
133 | const uuid_le *uuid) |
134 | { |
135 | struct mei_me_client *me_cl; |
136 | const uuid_le *pn; |
137 | |
138 | WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem)); |
139 | |
140 | list_for_each_entry(me_cl, &dev->me_clients, list) { |
141 | pn = &me_cl->props.protocol_name; |
		if (uuid_le_cmp(*uuid, *pn) == 0)
143 | return mei_me_cl_get(me_cl); |
144 | } |
145 | |
146 | return NULL; |
147 | } |
148 | |
149 | /** |
150 | * mei_me_cl_by_uuid - locate me client by uuid |
151 | * increases ref count |
152 | * |
153 | * @dev: mei device |
154 | * @uuid: me client uuid |
155 | * |
156 | * Return: me client or NULL if not found |
157 | * |
158 | * Locking: dev->me_clients_rwsem |
159 | */ |
160 | struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev, |
161 | const uuid_le *uuid) |
162 | { |
163 | struct mei_me_client *me_cl; |
164 | |
	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);
168 | |
169 | return me_cl; |
170 | } |
171 | |
172 | /** |
173 | * mei_me_cl_by_id - locate me client by client id |
174 | * increases ref count |
175 | * |
176 | * @dev: the device structure |
177 | * @client_id: me client id |
178 | * |
179 | * Return: me client or NULL if not found |
180 | * |
181 | * Locking: dev->me_clients_rwsem |
182 | */ |
183 | struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id) |
184 | { |
185 | |
186 | struct mei_me_client *__me_cl, *me_cl = NULL; |
187 | |
	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);
196 | |
197 | return me_cl; |
198 | } |
199 | |
200 | /** |
201 | * __mei_me_cl_by_uuid_id - locate me client by client id and uuid |
202 | * increases ref count |
203 | * |
204 | * @dev: the device structure |
205 | * @uuid: me client uuid |
206 | * @client_id: me client id |
207 | * |
208 | * Return: me client or null if not found |
209 | * |
210 | * Locking: dev->me_clients_rwsem |
211 | */ |
212 | static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev, |
213 | const uuid_le *uuid, u8 client_id) |
214 | { |
215 | struct mei_me_client *me_cl; |
216 | const uuid_le *pn; |
217 | |
218 | WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem)); |
219 | |
220 | list_for_each_entry(me_cl, &dev->me_clients, list) { |
221 | pn = &me_cl->props.protocol_name; |
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
223 | me_cl->client_id == client_id) |
224 | return mei_me_cl_get(me_cl); |
225 | } |
226 | |
227 | return NULL; |
228 | } |
229 | |
230 | |
231 | /** |
232 | * mei_me_cl_by_uuid_id - locate me client by client id and uuid |
233 | * increases ref count |
234 | * |
235 | * @dev: the device structure |
236 | * @uuid: me client uuid |
237 | * @client_id: me client id |
238 | * |
239 | * Return: me client or null if not found |
240 | */ |
241 | struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, |
242 | const uuid_le *uuid, u8 client_id) |
243 | { |
244 | struct mei_me_client *me_cl; |
245 | |
	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);
249 | |
250 | return me_cl; |
251 | } |
252 | |
253 | /** |
254 | * mei_me_cl_rm_by_uuid - remove all me clients matching uuid |
255 | * |
256 | * @dev: the device structure |
257 | * @uuid: me client uuid |
258 | * |
259 | * Locking: called under "dev->device_lock" lock |
260 | */ |
261 | void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid) |
262 | { |
263 | struct mei_me_client *me_cl; |
264 | |
	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
272 | } |
273 | |
274 | /** |
275 | * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id |
276 | * |
277 | * @dev: the device structure |
278 | * @uuid: me client uuid |
279 | * @id: me client id |
280 | * |
281 | * Locking: called under "dev->device_lock" lock |
282 | */ |
283 | void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id) |
284 | { |
285 | struct mei_me_client *me_cl; |
286 | |
	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
294 | } |
295 | |
296 | /** |
297 | * mei_me_cl_rm_all - remove all me clients |
298 | * |
299 | * @dev: the device structure |
300 | * |
301 | * Locking: called under "dev->device_lock" lock |
302 | */ |
303 | void mei_me_cl_rm_all(struct mei_device *dev) |
304 | { |
305 | struct mei_me_client *me_cl, *next; |
306 | |
	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
311 | } |
312 | |
313 | /** |
314 | * mei_io_cb_free - free mei_cb_private related memory |
315 | * |
316 | * @cb: mei callback struct |
317 | */ |
318 | void mei_io_cb_free(struct mei_cl_cb *cb) |
319 | { |
320 | if (cb == NULL) |
321 | return; |
322 | |
	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb->ext_hdr);
	kfree(cb);
327 | } |
328 | |
329 | /** |
330 | * mei_tx_cb_enqueue - queue tx callback |
331 | * |
332 | * @cb: mei callback struct |
333 | * @head: an instance of list to queue on |
334 | * |
335 | * Locking: called under "dev->device_lock" lock |
336 | */ |
337 | static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb, |
338 | struct list_head *head) |
339 | { |
	list_add_tail(&cb->list, head);
341 | cb->cl->tx_cb_queued++; |
342 | } |
343 | |
344 | /** |
345 | * mei_tx_cb_dequeue - dequeue tx callback |
346 | * |
347 | * @cb: mei callback struct to dequeue and free |
348 | * |
349 | * Locking: called under "dev->device_lock" lock |
350 | */ |
351 | static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb) |
352 | { |
353 | if (!WARN_ON(cb->cl->tx_cb_queued == 0)) |
354 | cb->cl->tx_cb_queued--; |
355 | |
356 | mei_io_cb_free(cb); |
357 | } |
358 | |
359 | /** |
360 | * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp |
361 | * |
362 | * @cl: mei client |
363 | * @fp: pointer to file structure |
364 | * |
365 | * Locking: called under "dev->device_lock" lock |
366 | */ |
367 | static void mei_cl_set_read_by_fp(const struct mei_cl *cl, |
368 | const struct file *fp) |
369 | { |
370 | struct mei_cl_vtag *cl_vtag; |
371 | |
372 | list_for_each_entry(cl_vtag, &cl->vtag_map, list) { |
373 | if (cl_vtag->fp == fp) { |
374 | cl_vtag->pending_read = true; |
375 | return; |
376 | } |
377 | } |
378 | } |
379 | |
380 | /** |
381 | * mei_io_cb_init - allocate and initialize io callback |
382 | * |
383 | * @cl: mei client |
384 | * @type: operation type |
385 | * @fp: pointer to file structure |
386 | * |
387 | * Return: mei_cl_cb pointer or NULL; |
388 | */ |
389 | static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, |
390 | enum mei_cb_file_ops type, |
391 | const struct file *fp) |
392 | { |
393 | struct mei_cl_cb *cb; |
394 | |
	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
400 | cb->fp = fp; |
401 | cb->cl = cl; |
402 | cb->buf_idx = 0; |
403 | cb->fop_type = type; |
404 | cb->vtag = 0; |
405 | cb->ext_hdr = NULL; |
406 | |
407 | return cb; |
408 | } |
409 | |
410 | /** |
411 | * mei_io_list_flush_cl - removes cbs belonging to the cl. |
412 | * |
413 | * @head: an instance of our list structure |
414 | * @cl: host client |
415 | */ |
416 | static void mei_io_list_flush_cl(struct list_head *head, |
417 | const struct mei_cl *cl) |
418 | { |
419 | struct mei_cl_cb *cb, *next; |
420 | |
421 | list_for_each_entry_safe(cb, next, head, list) { |
422 | if (cl == cb->cl) { |
			list_del_init(&cb->list);
424 | if (cb->fop_type == MEI_FOP_READ) |
425 | mei_io_cb_free(cb); |
426 | } |
427 | } |
428 | } |
429 | |
430 | /** |
431 | * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them |
432 | * |
433 | * @head: An instance of our list structure |
434 | * @cl: host client |
435 | * @fp: file pointer (matching cb file object), may be NULL |
436 | */ |
437 | static void mei_io_tx_list_free_cl(struct list_head *head, |
438 | const struct mei_cl *cl, |
439 | const struct file *fp) |
440 | { |
441 | struct mei_cl_cb *cb, *next; |
442 | |
443 | list_for_each_entry_safe(cb, next, head, list) { |
444 | if (cl == cb->cl && (!fp || fp == cb->fp)) |
445 | mei_tx_cb_dequeue(cb); |
446 | } |
447 | } |
448 | |
449 | /** |
450 | * mei_io_list_free_fp - free cb from a list that matches file pointer |
451 | * |
452 | * @head: io list |
453 | * @fp: file pointer (matching cb file object), may be NULL |
454 | */ |
455 | static void mei_io_list_free_fp(struct list_head *head, const struct file *fp) |
456 | { |
457 | struct mei_cl_cb *cb, *next; |
458 | |
459 | list_for_each_entry_safe(cb, next, head, list) |
460 | if (!fp || fp == cb->fp) |
461 | mei_io_cb_free(cb); |
462 | } |
463 | |
464 | /** |
465 | * mei_cl_free_pending - free pending cb |
466 | * |
467 | * @cl: host client |
468 | */ |
469 | static void mei_cl_free_pending(struct mei_cl *cl) |
470 | { |
471 | struct mei_cl_cb *cb; |
472 | |
473 | cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list); |
474 | mei_io_cb_free(cb); |
475 | } |
476 | |
477 | /** |
478 | * mei_cl_alloc_cb - a convenient wrapper for allocating read cb |
479 | * |
480 | * @cl: host client |
481 | * @length: size of the buffer |
482 | * @fop_type: operation type |
483 | * @fp: associated file pointer (might be NULL) |
484 | * |
485 | * Return: cb on success and NULL on failure |
486 | */ |
487 | struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length, |
488 | enum mei_cb_file_ops fop_type, |
489 | const struct file *fp) |
490 | { |
491 | struct mei_cl_cb *cb; |
492 | |
	cb = mei_io_cb_init(cl, fop_type, fp);
494 | if (!cb) |
495 | return NULL; |
496 | |
497 | if (length == 0) |
498 | return cb; |
499 | |
500 | cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL); |
501 | if (!cb->buf.data) { |
502 | mei_io_cb_free(cb); |
503 | return NULL; |
504 | } |
505 | cb->buf.size = length; |
506 | |
507 | return cb; |
508 | } |
509 | |
510 | /** |
511 | * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating |
512 | * and enqueuing of the control commands cb |
513 | * |
514 | * @cl: host client |
515 | * @length: size of the buffer |
516 | * @fop_type: operation type |
517 | * @fp: associated file pointer (might be NULL) |
518 | * |
519 | * Return: cb on success and NULL on failure |
520 | * Locking: called under "dev->device_lock" lock |
521 | */ |
522 | struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, |
523 | enum mei_cb_file_ops fop_type, |
524 | const struct file *fp) |
525 | { |
526 | struct mei_cl_cb *cb; |
527 | |
528 | /* for RX always allocate at least client's mtu */ |
529 | if (length) |
530 | length = max_t(size_t, length, mei_cl_mtu(cl)); |
531 | |
532 | cb = mei_cl_alloc_cb(cl, length, fop_type, fp); |
533 | if (!cb) |
534 | return NULL; |
535 | |
	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
537 | return cb; |
538 | } |
539 | |
540 | /** |
541 | * mei_cl_read_cb - find this cl's callback in the read list |
542 | * for a specific file |
543 | * |
544 | * @cl: host client |
545 | * @fp: file pointer (matching cb file object), may be NULL |
546 | * |
547 | * Return: cb on success, NULL if cb is not found |
548 | */ |
549 | struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp) |
550 | { |
551 | struct mei_cl_cb *cb; |
552 | struct mei_cl_cb *ret_cb = NULL; |
553 | |
	spin_lock(&cl->rd_completed_lock);
	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp) {
			ret_cb = cb;
			break;
		}
	spin_unlock(&cl->rd_completed_lock);
561 | return ret_cb; |
562 | } |
563 | |
564 | /** |
565 | * mei_cl_flush_queues - flushes queue lists belonging to cl. |
566 | * |
567 | * @cl: host client |
568 | * @fp: file pointer (matching cb file object), may be NULL |
569 | * |
570 | * Return: 0 on success, -EINVAL if cl or cl->dev is NULL. |
571 | */ |
572 | int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp) |
573 | { |
574 | struct mei_device *dev; |
575 | |
576 | if (WARN_ON(!cl || !cl->dev)) |
577 | return -EINVAL; |
578 | |
579 | dev = cl->dev; |
580 | |
	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
	/* free pending and control cb only in final flush */
	if (!fp) {
		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
		mei_cl_free_pending(cl);
	}
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);
593 | |
594 | return 0; |
595 | } |
596 | |
597 | /** |
598 | * mei_cl_init - initializes cl. |
599 | * |
600 | * @cl: host client to be initialized |
601 | * @dev: mei device |
602 | */ |
603 | static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev) |
604 | { |
605 | memset(cl, 0, sizeof(*cl)); |
606 | init_waitqueue_head(&cl->wait); |
607 | init_waitqueue_head(&cl->rx_wait); |
608 | init_waitqueue_head(&cl->tx_wait); |
609 | init_waitqueue_head(&cl->ev_wait); |
	INIT_LIST_HEAD(&cl->vtag_map);
	spin_lock_init(&cl->rd_completed_lock);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
615 | cl->writing_state = MEI_IDLE; |
616 | cl->state = MEI_FILE_UNINITIALIZED; |
617 | cl->dev = dev; |
618 | } |
619 | |
620 | /** |
621 | * mei_cl_allocate - allocates cl structure and sets it up. |
622 | * |
623 | * @dev: mei device |
 * Return: The allocated host client or NULL on failure
625 | */ |
626 | struct mei_cl *mei_cl_allocate(struct mei_device *dev) |
627 | { |
628 | struct mei_cl *cl; |
629 | |
	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
631 | if (!cl) |
632 | return NULL; |
633 | |
634 | mei_cl_init(cl, dev); |
635 | |
636 | return cl; |
637 | } |
638 | |
639 | /** |
640 | * mei_cl_link - allocate host id in the host map |
641 | * |
642 | * @cl: host client |
643 | * |
644 | * Return: 0 on success |
645 | * -EINVAL on incorrect values |
646 | * -EMFILE if open count exceeded. |
647 | */ |
648 | int mei_cl_link(struct mei_cl *cl) |
649 | { |
650 | struct mei_device *dev; |
651 | int id; |
652 | |
653 | if (WARN_ON(!cl || !cl->dev)) |
654 | return -EINVAL; |
655 | |
656 | dev = cl->dev; |
657 | |
	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
667 | return -EMFILE; |
668 | } |
669 | |
670 | dev->open_handle_count++; |
671 | |
672 | cl->host_client_id = id; |
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
680 | return 0; |
681 | } |
682 | |
683 | /** |
684 | * mei_cl_unlink - remove host client from the list |
685 | * |
686 | * @cl: host client |
687 | * |
688 | * Return: always 0 |
689 | */ |
690 | int mei_cl_unlink(struct mei_cl *cl) |
691 | { |
692 | struct mei_device *dev; |
693 | |
694 | /* don't shout on error exit path */ |
695 | if (!cl) |
696 | return 0; |
697 | |
698 | if (WARN_ON(!cl->dev)) |
699 | return 0; |
700 | |
701 | dev = cl->dev; |
702 | |
	cl_dbg(dev, cl, "unlink client");
704 | |
705 | if (cl->state == MEI_FILE_UNINITIALIZED) |
706 | return 0; |
707 | |
708 | if (dev->open_handle_count > 0) |
709 | dev->open_handle_count--; |
710 | |
711 | /* never clear the 0 bit */ |
712 | if (cl->host_client_id) |
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);
716 | |
717 | cl->state = MEI_FILE_UNINITIALIZED; |
718 | cl->writing_state = MEI_IDLE; |
719 | |
720 | WARN_ON(!list_empty(&cl->rd_completed) || |
721 | !list_empty(&cl->rd_pending) || |
722 | !list_empty(&cl->link)); |
723 | |
724 | return 0; |
725 | } |
726 | |
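/**
 * mei_host_client_init - host client initialization
 *
 * @dev: the device structure
 *
 * Set the device into enabled state, schedule a rescan of the clients
 * on the MEI bus, and request runtime autosuspend.
 */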
727 | void mei_host_client_init(struct mei_device *dev) |
728 | { |
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
737 | } |
738 | |
739 | /** |
740 | * mei_hbuf_acquire - try to acquire host buffer |
741 | * |
742 | * @dev: the device structure |
743 | * Return: true if host buffer was acquired |
744 | */ |
745 | bool mei_hbuf_acquire(struct mei_device *dev) |
746 | { |
747 | if (mei_pg_state(dev) == MEI_PG_ON || |
748 | mei_pg_in_transition(dev)) { |
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
755 | return false; |
756 | } |
757 | |
758 | dev->hbuf_is_ready = false; |
759 | |
760 | return true; |
761 | } |
762 | |
763 | /** |
764 | * mei_cl_wake_all - wake up readers, writers and event waiters so |
765 | * they can be interrupted |
766 | * |
767 | * @cl: host client |
768 | */ |
769 | static void mei_cl_wake_all(struct mei_cl *cl) |
770 | { |
771 | struct mei_device *dev = cl->dev; |
772 | |
773 | /* synchronized under device mutex */ |
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
792 | } |
793 | } |
794 | |
795 | /** |
796 | * mei_cl_set_disconnected - set disconnected state and clear |
797 | * associated states and resources |
798 | * |
799 | * @cl: host client |
800 | */ |
801 | static void mei_cl_set_disconnected(struct mei_cl *cl) |
802 | { |
803 | struct mei_device *dev = cl->dev; |
804 | |
805 | if (cl->state == MEI_FILE_DISCONNECTED || |
806 | cl->state <= MEI_FILE_INITIALIZING) |
807 | return; |
808 | |
809 | cl->state = MEI_FILE_DISCONNECTED; |
	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
814 | mei_cl_wake_all(cl); |
815 | cl->rx_flow_ctrl_creds = 0; |
816 | cl->tx_flow_ctrl_creds = 0; |
817 | cl->timer_count = 0; |
818 | |
819 | if (!cl->me_cl) |
820 | return; |
821 | |
822 | if (!WARN_ON(cl->me_cl->connect_count == 0)) |
823 | cl->me_cl->connect_count--; |
824 | |
825 | if (cl->me_cl->connect_count == 0) |
826 | cl->me_cl->tx_flow_ctrl_creds = 0; |
827 | |
	mei_me_cl_put(cl->me_cl);
829 | cl->me_cl = NULL; |
830 | } |
831 | |
832 | static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl) |
833 | { |
834 | if (!mei_me_cl_get(me_cl)) |
835 | return -ENOENT; |
836 | |
837 | /* only one connection is allowed for fixed address clients */ |
838 | if (me_cl->props.fixed_address) { |
839 | if (me_cl->connect_count) { |
840 | mei_me_cl_put(me_cl); |
841 | return -EBUSY; |
842 | } |
843 | } |
844 | |
845 | cl->me_cl = me_cl; |
846 | cl->state = MEI_FILE_CONNECTING; |
847 | cl->me_cl->connect_count++; |
848 | |
849 | return 0; |
850 | } |
851 | |
852 | /* |
853 | * mei_cl_send_disconnect - send disconnect request |
854 | * |
855 | * @cl: host client |
856 | * @cb: callback block |
857 | * |
858 | * Return: 0, OK; otherwise, error. |
859 | */ |
860 | static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb) |
861 | { |
862 | struct mei_device *dev; |
863 | int ret; |
864 | |
865 | dev = cl->dev; |
866 | |
867 | ret = mei_hbm_cl_disconnect_req(dev, cl); |
868 | cl->status = ret; |
869 | if (ret) { |
870 | cl->state = MEI_FILE_DISCONNECT_REPLY; |
871 | return ret; |
872 | } |
873 | |
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
875 | cl->timer_count = dev->timeouts.connect; |
876 | mei_schedule_stall_timer(dev); |
877 | |
878 | return 0; |
879 | } |
880 | |
881 | /** |
882 | * mei_cl_irq_disconnect - processes close related operation from |
883 | * interrupt thread context - send disconnect request |
884 | * |
885 | * @cl: client |
886 | * @cb: callback block. |
887 | * @cmpl_list: complete list. |
888 | * |
889 | * Return: 0, OK; otherwise, error. |
890 | */ |
891 | int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb, |
892 | struct list_head *cmpl_list) |
893 | { |
894 | struct mei_device *dev = cl->dev; |
895 | u32 msg_slots; |
896 | int slots; |
897 | int ret; |
898 | |
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
900 | slots = mei_hbuf_empty_slots(dev); |
901 | if (slots < 0) |
902 | return -EOVERFLOW; |
903 | |
904 | if ((u32)slots < msg_slots) |
905 | return -EMSGSIZE; |
906 | |
907 | ret = mei_cl_send_disconnect(cl, cb); |
908 | if (ret) |
		list_move_tail(&cb->list, cmpl_list);
910 | |
911 | return ret; |
912 | } |
913 | |
914 | /** |
915 | * __mei_cl_disconnect - disconnect host client from the me one |
916 | * internal function runtime pm has to be already acquired |
917 | * |
918 | * @cl: host client |
919 | * |
920 | * Return: 0 on success, <0 on failure. |
921 | */ |
922 | static int __mei_cl_disconnect(struct mei_cl *cl) |
923 | { |
924 | struct mei_device *dev; |
925 | struct mei_cl_cb *cb; |
926 | int rets; |
927 | |
928 | dev = cl->dev; |
929 | |
930 | cl->state = MEI_FILE_DISCONNECTING; |
931 | |
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
933 | if (!cb) { |
934 | rets = -ENOMEM; |
935 | goto out; |
936 | } |
937 | |
938 | if (mei_hbuf_acquire(dev)) { |
939 | rets = mei_cl_send_disconnect(cl, cb); |
940 | if (rets) { |
			cl_err(dev, cl, "failed to disconnect.\n");
942 | goto out; |
943 | } |
944 | } |
945 | |
	mutex_unlock(&dev->device_lock);
947 | wait_event_timeout(cl->wait, |
948 | cl->state == MEI_FILE_DISCONNECT_REPLY || |
949 | cl->state == MEI_FILE_DISCONNECTED, |
950 | dev->timeouts.cl_connect); |
951 | mutex_lock(&dev->device_lock); |
952 | |
953 | rets = cl->status; |
954 | if (cl->state != MEI_FILE_DISCONNECT_REPLY && |
955 | cl->state != MEI_FILE_DISCONNECTED) { |
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
957 | rets = -ETIME; |
958 | } |
959 | |
960 | out: |
961 | /* we disconnect also on error */ |
962 | mei_cl_set_disconnected(cl); |
963 | if (!rets) |
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
965 | |
966 | mei_io_cb_free(cb); |
967 | return rets; |
968 | } |
969 | |
970 | /** |
971 | * mei_cl_disconnect - disconnect host client from the me one |
972 | * |
973 | * @cl: host client |
974 | * |
975 | * Locking: called under "dev->device_lock" lock |
976 | * |
977 | * Return: 0 on success, <0 on failure. |
978 | */ |
979 | int mei_cl_disconnect(struct mei_cl *cl) |
980 | { |
981 | struct mei_device *dev; |
982 | int rets; |
983 | |
984 | if (WARN_ON(!cl || !cl->dev)) |
985 | return -ENODEV; |
986 | |
987 | dev = cl->dev; |
988 | |
	cl_dbg(dev, cl, "disconnecting");
990 | |
991 | if (!mei_cl_is_connected(cl)) |
992 | return 0; |
993 | |
994 | if (mei_cl_is_fixed_address(cl)) { |
995 | mei_cl_set_disconnected(cl); |
996 | return 0; |
997 | } |
998 | |
999 | if (dev->dev_state == MEI_DEV_POWERING_DOWN || |
1000 | dev->dev_state == MEI_DEV_POWER_DOWN) { |
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
1002 | mei_cl_set_disconnected(cl); |
1003 | return 0; |
1004 | } |
1005 | |
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1010 | return rets; |
1011 | } |
1012 | |
1013 | rets = __mei_cl_disconnect(cl); |
1014 | |
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
1018 | |
1019 | return rets; |
1020 | } |
1021 | |
1022 | |
1023 | /** |
1024 | * mei_cl_is_other_connecting - checks if other |
1025 | * client with the same me client id is connecting |
1026 | * |
1027 | * @cl: private data of the file object |
1028 | * |
 * Return: true if another client is connecting, false otherwise.
1030 | */ |
1031 | static bool mei_cl_is_other_connecting(struct mei_cl *cl) |
1032 | { |
1033 | struct mei_device *dev; |
1034 | struct mei_cl_cb *cb; |
1035 | |
1036 | dev = cl->dev; |
1037 | |
1038 | list_for_each_entry(cb, &dev->ctrl_rd_list, list) { |
1039 | if (cb->fop_type == MEI_FOP_CONNECT && |
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
1041 | return true; |
1042 | } |
1043 | |
1044 | return false; |
1045 | } |
1046 | |
1047 | /** |
1048 | * mei_cl_send_connect - send connect request |
1049 | * |
1050 | * @cl: host client |
1051 | * @cb: callback block |
1052 | * |
1053 | * Return: 0, OK; otherwise, error. |
1054 | */ |
1055 | static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb) |
1056 | { |
1057 | struct mei_device *dev; |
1058 | int ret; |
1059 | |
1060 | dev = cl->dev; |
1061 | |
1062 | ret = mei_hbm_cl_connect_req(dev, cl); |
1063 | cl->status = ret; |
1064 | if (ret) { |
1065 | cl->state = MEI_FILE_DISCONNECT_REPLY; |
1066 | return ret; |
1067 | } |
1068 | |
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
1070 | cl->timer_count = dev->timeouts.connect; |
1071 | mei_schedule_stall_timer(dev); |
1072 | return 0; |
1073 | } |
1074 | |
1075 | /** |
1076 | * mei_cl_irq_connect - send connect request in irq_thread context |
1077 | * |
1078 | * @cl: host client |
1079 | * @cb: callback block |
1080 | * @cmpl_list: complete list |
1081 | * |
1082 | * Return: 0, OK; otherwise, error. |
1083 | */ |
1084 | int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, |
1085 | struct list_head *cmpl_list) |
1086 | { |
1087 | struct mei_device *dev = cl->dev; |
1088 | u32 msg_slots; |
1089 | int slots; |
1090 | int rets; |
1091 | |
1092 | if (mei_cl_is_other_connecting(cl)) |
1093 | return 0; |
1094 | |
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1096 | slots = mei_hbuf_empty_slots(dev); |
1097 | if (slots < 0) |
1098 | return -EOVERFLOW; |
1099 | |
1100 | if ((u32)slots < msg_slots) |
1101 | return -EMSGSIZE; |
1102 | |
1103 | rets = mei_cl_send_connect(cl, cb); |
1104 | if (rets) |
		list_move_tail(&cb->list, cmpl_list);
1106 | |
1107 | return rets; |
1108 | } |
1109 | |
1110 | /** |
1111 | * mei_cl_connect - connect host client to the me one |
1112 | * |
1113 | * @cl: host client |
1114 | * @me_cl: me client |
1115 | * @fp: pointer to file structure |
1116 | * |
1117 | * Locking: called under "dev->device_lock" lock |
1118 | * |
1119 | * Return: 0 on success, <0 on failure. |
1120 | */ |
1121 | int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, |
1122 | const struct file *fp) |
1123 | { |
1124 | struct mei_device *dev; |
1125 | struct mei_cl_cb *cb; |
1126 | int rets; |
1127 | |
1128 | if (WARN_ON(!cl || !cl->dev || !me_cl)) |
1129 | return -ENODEV; |
1130 | |
1131 | dev = cl->dev; |
1132 | |
1133 | rets = mei_cl_set_connecting(cl, me_cl); |
1134 | if (rets) |
1135 | goto nortpm; |
1136 | |
1137 | if (mei_cl_is_fixed_address(cl)) { |
1138 | cl->state = MEI_FILE_CONNECTED; |
1139 | rets = 0; |
1140 | goto nortpm; |
1141 | } |
1142 | |
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1147 | goto nortpm; |
1148 | } |
1149 | |
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
1151 | if (!cb) { |
1152 | rets = -ENOMEM; |
1153 | goto out; |
1154 | } |
1155 | |
1156 | /* run hbuf acquire last so we don't have to undo */ |
1157 | if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) { |
1158 | rets = mei_cl_send_connect(cl, cb); |
1159 | if (rets) |
1160 | goto out; |
1161 | } |
1162 | |
	mutex_unlock(&dev->device_lock);
1164 | wait_event_timeout(cl->wait, |
1165 | (cl->state == MEI_FILE_CONNECTED || |
1166 | cl->state == MEI_FILE_DISCONNECTED || |
1167 | cl->state == MEI_FILE_DISCONNECT_REQUIRED || |
1168 | cl->state == MEI_FILE_DISCONNECT_REPLY), |
1169 | dev->timeouts.cl_connect); |
1170 | mutex_lock(&dev->device_lock); |
1171 | |
1172 | if (!mei_cl_is_connected(cl)) { |
1173 | if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) { |
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
1179 | __mei_cl_disconnect(cl); |
1180 | rets = -EFAULT; |
1181 | goto out; |
1182 | } |
1183 | |
1184 | /* timeout or something went really wrong */ |
1185 | if (!cl->status) |
1186 | cl->status = -EFAULT; |
1187 | } |
1188 | |
1189 | rets = cl->status; |
1190 | out: |
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
1194 | |
1195 | mei_io_cb_free(cb); |
1196 | |
1197 | nortpm: |
1198 | if (!mei_cl_is_connected(cl)) |
1199 | mei_cl_set_disconnected(cl); |
1200 | |
1201 | return rets; |
1202 | } |
1203 | |
1204 | /** |
1205 | * mei_cl_alloc_linked - allocate and link host client |
1206 | * |
1207 | * @dev: the device structure |
1208 | * |
1209 | * Return: cl on success ERR_PTR on failure |
1210 | */ |
1211 | struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev) |
1212 | { |
1213 | struct mei_cl *cl; |
1214 | int ret; |
1215 | |
1216 | cl = mei_cl_allocate(dev); |
1217 | if (!cl) { |
1218 | ret = -ENOMEM; |
1219 | goto err; |
1220 | } |
1221 | |
1222 | ret = mei_cl_link(cl); |
1223 | if (ret) |
1224 | goto err; |
1225 | |
1226 | return cl; |
1227 | err: |
	kfree(cl);
	return ERR_PTR(ret);
1230 | } |
1231 | |
1232 | /** |
1233 | * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl. |
1234 | * |
1235 | * @cl: host client |
1236 | * |
1237 | * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise. |
1238 | */ |
1239 | static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl) |
1240 | { |
1241 | if (WARN_ON(!cl || !cl->me_cl)) |
1242 | return -EINVAL; |
1243 | |
1244 | if (cl->tx_flow_ctrl_creds > 0) |
1245 | return 1; |
1246 | |
1247 | if (mei_cl_is_fixed_address(cl)) |
1248 | return 1; |
1249 | |
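	/* clients with a single receive buffer draw tx credits from the
	 * counter shared on the me client rather than the per host client one
	 */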
1250 | if (mei_cl_is_single_recv_buf(cl)) { |
1251 | if (cl->me_cl->tx_flow_ctrl_creds > 0) |
1252 | return 1; |
1253 | } |
1254 | return 0; |
1255 | } |
1256 | |
1257 | /** |
1258 | * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits |
1259 | * for a client |
1260 | * |
1261 | * @cl: host client |
1262 | * |
1263 | * Return: |
1264 | * 0 on success |
1265 | * -EINVAL when ctrl credits are <= 0 |
1266 | */ |
1267 | static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl) |
1268 | { |
1269 | if (WARN_ON(!cl || !cl->me_cl)) |
1270 | return -EINVAL; |
1271 | |
1272 | if (mei_cl_is_fixed_address(cl)) |
1273 | return 0; |
1274 | |
1275 | if (mei_cl_is_single_recv_buf(cl)) { |
1276 | if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0)) |
1277 | return -EINVAL; |
1278 | cl->me_cl->tx_flow_ctrl_creds--; |
1279 | } else { |
1280 | if (WARN_ON(cl->tx_flow_ctrl_creds <= 0)) |
1281 | return -EINVAL; |
1282 | cl->tx_flow_ctrl_creds--; |
1283 | } |
1284 | return 0; |
1285 | } |
1286 | |
1287 | /** |
1288 | * mei_cl_vtag_alloc - allocate and fill the vtag structure |
1289 | * |
1290 | * @fp: pointer to file structure |
1291 | * @vtag: vm tag |
1292 | * |
1293 | * Return: |
1294 | * * Pointer to allocated struct - on success |
1295 | * * ERR_PTR(-ENOMEM) on memory allocation failure |
1296 | */ |
1297 | struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag) |
1298 | { |
1299 | struct mei_cl_vtag *cl_vtag; |
1300 | |
	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
	if (!cl_vtag)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cl_vtag->list);
1306 | cl_vtag->vtag = vtag; |
1307 | cl_vtag->fp = fp; |
1308 | |
1309 | return cl_vtag; |
1310 | } |
1311 | |
1312 | /** |
1313 | * mei_cl_fp_by_vtag - obtain the file pointer by vtag |
1314 | * |
1315 | * @cl: host client |
1316 | * @vtag: virtual tag |
1317 | * |
1318 | * Return: |
1319 | * * A file pointer - on success |
1320 | * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list |
1321 | */ |
1322 | const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag) |
1323 | { |
1324 | struct mei_cl_vtag *vtag_l; |
1325 | |
1326 | list_for_each_entry(vtag_l, &cl->vtag_map, list) |
1327 | /* The client on bus has one fixed fp */ |
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
1329 | vtag_l->vtag == vtag) |
1330 | return vtag_l->fp; |
1331 | |
	return ERR_PTR(-ENOENT);
1333 | } |
1334 | |
1335 | /** |
1336 | * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag |
1337 | * |
1338 | * @cl: host client |
1339 | * @vtag: vm tag |
1340 | */ |
1341 | static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag) |
1342 | { |
1343 | struct mei_cl_vtag *vtag_l; |
1344 | |
1345 | list_for_each_entry(vtag_l, &cl->vtag_map, list) { |
1346 | /* The client on bus has one fixed vtag map */ |
		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
1348 | vtag_l->vtag == vtag) { |
1349 | vtag_l->pending_read = false; |
1350 | break; |
1351 | } |
1352 | } |
1353 | } |
1354 | |
1355 | /** |
1356 | * mei_cl_read_vtag_add_fc - add flow control for next pending reader |
1357 | * in the vtag list |
1358 | * |
1359 | * @cl: host client |
1360 | */ |
1361 | static void mei_cl_read_vtag_add_fc(struct mei_cl *cl) |
1362 | { |
1363 | struct mei_cl_vtag *cl_vtag; |
1364 | |
1365 | list_for_each_entry(cl_vtag, &cl->vtag_map, list) { |
1366 | if (cl_vtag->pending_read) { |
			if (mei_cl_enqueue_ctrl_wr_cb(cl,
						      mei_cl_mtu(cl),
						      MEI_FOP_READ,
						      cl_vtag->fp))
1371 | cl->rx_flow_ctrl_creds++; |
1372 | break; |
1373 | } |
1374 | } |
1375 | } |
1376 | |
1377 | /** |
1378 | * mei_cl_vt_support_check - check if client support vtags |
1379 | * |
1380 | * @cl: host client |
1381 | * |
1382 | * Return: |
1383 | * * 0 - supported, or not connected at all |
1384 | * * -EOPNOTSUPP - vtags are not supported by client |
1385 | */ |
1386 | int mei_cl_vt_support_check(const struct mei_cl *cl) |
1387 | { |
1388 | struct mei_device *dev = cl->dev; |
1389 | |
1390 | if (!dev->hbm_f_vt_supported) |
1391 | return -EOPNOTSUPP; |
1392 | |
1393 | if (!cl->me_cl) |
1394 | return 0; |
1395 | |
1396 | return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP; |
1397 | } |
1398 | |
1399 | /** |
1400 | * mei_cl_add_rd_completed - add read completed callback to list with lock |
1401 | * and vtag check |
1402 | * |
1403 | * @cl: host client |
1404 | * @cb: callback block |
1405 | * |
1406 | */ |
1407 | void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb) |
1408 | { |
1409 | const struct file *fp; |
1410 | |
1411 | if (!mei_cl_vt_support_check(cl)) { |
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
1420 | mei_cl_read_vtag_add_fc(cl); |
1421 | } |
1422 | |
	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
1426 | } |
1427 | |
1428 | /** |
1429 | * mei_cl_del_rd_completed - free read completed callback with lock |
1430 | * |
1431 | * @cl: host client |
1432 | * @cb: callback block |
1433 | * |
1434 | */ |
1435 | void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb) |
1436 | { |
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
1440 | } |
1441 | |
1442 | /** |
1443 | * mei_cl_notify_fop2req - convert fop to proper request |
1444 | * |
1445 | * @fop: client notification start response command |
1446 | * |
1447 | * Return: MEI_HBM_NOTIFICATION_START/STOP |
1448 | */ |
1449 | u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop) |
1450 | { |
1451 | if (fop == MEI_FOP_NOTIFY_START) |
1452 | return MEI_HBM_NOTIFICATION_START; |
1453 | else |
1454 | return MEI_HBM_NOTIFICATION_STOP; |
1455 | } |
1456 | |
1457 | /** |
 * mei_cl_notify_req2fop - convert notification request to file operation type
1459 | * |
1460 | * @req: hbm notification request type |
1461 | * |
1462 | * Return: MEI_FOP_NOTIFY_START/STOP |
1463 | */ |
1464 | enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req) |
1465 | { |
1466 | if (req == MEI_HBM_NOTIFICATION_START) |
1467 | return MEI_FOP_NOTIFY_START; |
1468 | else |
1469 | return MEI_FOP_NOTIFY_STOP; |
1470 | } |
1471 | |
1472 | /** |
1473 | * mei_cl_irq_notify - send notification request in irq_thread context |
1474 | * |
1475 | * @cl: client |
1476 | * @cb: callback block. |
1477 | * @cmpl_list: complete list. |
1478 | * |
 * Return: 0 on success and error otherwise.
1480 | */ |
1481 | int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb, |
1482 | struct list_head *cmpl_list) |
1483 | { |
1484 | struct mei_device *dev = cl->dev; |
1485 | u32 msg_slots; |
1486 | int slots; |
1487 | int ret; |
1488 | bool request; |
1489 | |
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1491 | slots = mei_hbuf_empty_slots(dev); |
1492 | if (slots < 0) |
1493 | return -EOVERFLOW; |
1494 | |
1495 | if ((u32)slots < msg_slots) |
1496 | return -EMSGSIZE; |
1497 | |
	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
1507 | return 0; |
1508 | } |
1509 | |
1510 | /** |
1511 | * mei_cl_notify_request - send notification stop/start request |
1512 | * |
1513 | * @cl: host client |
1514 | * @fp: associate request with file |
1515 | * @request: 1 for start or 0 for stop |
1516 | * |
1517 | * Locking: called under "dev->device_lock" lock |
1518 | * |
 * Return: 0 on success and error otherwise.
1520 | */ |
1521 | int mei_cl_notify_request(struct mei_cl *cl, |
1522 | const struct file *fp, u8 request) |
1523 | { |
1524 | struct mei_device *dev; |
1525 | struct mei_cl_cb *cb; |
1526 | enum mei_cb_file_ops fop_type; |
1527 | int rets; |
1528 | |
1529 | if (WARN_ON(!cl || !cl->dev)) |
1530 | return -ENODEV; |
1531 | |
1532 | dev = cl->dev; |
1533 | |
1534 | if (!dev->hbm_f_ev_supported) { |
		cl_dbg(dev, cl, "notifications not supported\n");
1536 | return -EOPNOTSUPP; |
1537 | } |
1538 | |
1539 | if (!mei_cl_is_connected(cl)) |
1540 | return -ENODEV; |
1541 | |
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1546 | return rets; |
1547 | } |
1548 | |
	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
1551 | if (!cb) { |
1552 | rets = -ENOMEM; |
1553 | goto out; |
1554 | } |
1555 | |
1556 | if (mei_hbuf_acquire(dev)) { |
1557 | if (mei_hbm_cl_notify_req(dev, cl, request)) { |
1558 | rets = -ENODEV; |
1559 | goto out; |
1560 | } |
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
1565 | wait_event_timeout(cl->wait, |
1566 | cl->notify_en == request || |
1567 | cl->status || |
1568 | !mei_cl_is_connected(cl), |
1569 | dev->timeouts.cl_connect); |
1570 | mutex_lock(&dev->device_lock); |
1571 | |
1572 | if (cl->notify_en != request && !cl->status) |
1573 | cl->status = -EFAULT; |
1574 | |
1575 | rets = cl->status; |
1576 | |
1577 | out: |
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
1581 | |
1582 | mei_io_cb_free(cb); |
1583 | return rets; |
1584 | } |
1585 | |
1586 | /** |
1587 | * mei_cl_notify - raise notification |
1588 | * |
1589 | * @cl: host client |
1590 | * |
1591 | * Locking: called under "dev->device_lock" lock |
1592 | */ |
1593 | void mei_cl_notify(struct mei_cl *cl) |
1594 | { |
1595 | struct mei_device *dev; |
1596 | |
1597 | if (!cl || !cl->dev) |
1598 | return; |
1599 | |
1600 | dev = cl->dev; |
1601 | |
1602 | if (!cl->notify_en) |
1603 | return; |
1604 | |
	cl_dbg(dev, cl, "notify event");
1606 | cl->notify_ev = true; |
1607 | if (!mei_cl_bus_notify_event(cl)) |
1608 | wake_up_interruptible(&cl->ev_wait); |
1609 | |
1610 | if (cl->ev_async) |
1611 | kill_fasync(&cl->ev_async, SIGIO, POLL_PRI); |
1612 | |
1613 | } |
1614 | |
1615 | /** |
1616 | * mei_cl_notify_get - get or wait for notification event |
1617 | * |
1618 | * @cl: host client |
1619 | * @block: this request is blocking |
1620 | * @notify_ev: true if notification event was received |
1621 | * |
1622 | * Locking: called under "dev->device_lock" lock |
1623 | * |
 * Return: 0 on success and error otherwise.
1625 | */ |
1626 | int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev) |
1627 | { |
1628 | struct mei_device *dev; |
1629 | int rets; |
1630 | |
1631 | *notify_ev = false; |
1632 | |
1633 | if (WARN_ON(!cl || !cl->dev)) |
1634 | return -ENODEV; |
1635 | |
1636 | dev = cl->dev; |
1637 | |
1638 | if (!dev->hbm_f_ev_supported) { |
		cl_dbg(dev, cl, "notifications not supported\n");
1640 | return -EOPNOTSUPP; |
1641 | } |
1642 | |
1643 | if (!mei_cl_is_connected(cl)) |
1644 | return -ENODEV; |
1645 | |
1646 | if (cl->notify_ev) |
1647 | goto out; |
1648 | |
1649 | if (!block) |
1650 | return -EAGAIN; |
1651 | |
	mutex_unlock(&dev->device_lock);
1653 | rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev); |
1654 | mutex_lock(&dev->device_lock); |
1655 | |
1656 | if (rets < 0) |
1657 | return rets; |
1658 | |
1659 | out: |
1660 | *notify_ev = cl->notify_ev; |
1661 | cl->notify_ev = false; |
1662 | return 0; |
1663 | } |
1664 | |
1665 | /** |
1666 | * mei_cl_read_start - the start read client message function. |
1667 | * |
1668 | * @cl: host client |
1669 | * @length: number of bytes to read |
1670 | * @fp: pointer to file structure |
1671 | * |
1672 | * Return: 0 on success, <0 on failure. |
1673 | */ |
1674 | int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp) |
1675 | { |
1676 | struct mei_device *dev; |
1677 | struct mei_cl_cb *cb; |
1678 | int rets; |
1679 | |
1680 | if (WARN_ON(!cl || !cl->dev)) |
1681 | return -ENODEV; |
1682 | |
1683 | dev = cl->dev; |
1684 | |
1685 | if (!mei_cl_is_connected(cl)) |
1686 | return -ENODEV; |
1687 | |
	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
1690 | return -ENOTTY; |
1691 | } |
1692 | |
1693 | if (mei_cl_is_fixed_address(cl)) |
1694 | return 0; |
1695 | |
1696 | /* HW currently supports only one pending read */ |
1697 | if (cl->rx_flow_ctrl_creds) { |
1698 | mei_cl_set_read_by_fp(cl, fp); |
1699 | return -EBUSY; |
1700 | } |
1701 | |
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
1703 | if (!cb) |
1704 | return -ENOMEM; |
1705 | |
1706 | mei_cl_set_read_by_fp(cl, fp); |
1707 | |
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1712 | goto nortpm; |
1713 | } |
1714 | |
1715 | rets = 0; |
1716 | if (mei_hbuf_acquire(dev)) { |
1717 | rets = mei_hbm_cl_flow_control_req(dev, cl); |
1718 | if (rets < 0) |
1719 | goto out; |
1720 | |
		list_move_tail(&cb->list, &cl->rd_pending);
1722 | } |
1723 | cl->rx_flow_ctrl_creds++; |
1724 | |
1725 | out: |
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
1729 | nortpm: |
1730 | if (rets) |
1731 | mei_io_cb_free(cb); |
1732 | |
1733 | return rets; |
1734 | } |
1735 | |
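/* mei_ext_hdr_set_vtag - fill a vtag extended header; return its length in slots */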
1736 | static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag) |
1737 | { |
1738 | struct mei_ext_hdr_vtag *vtag_hdr = ext; |
1739 | |
1740 | vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG; |
	vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
1742 | vtag_hdr->vtag = vtag; |
1743 | vtag_hdr->reserved = 0; |
1744 | return vtag_hdr->hdr.length; |
1745 | } |
1746 | |
1747 | static inline bool mei_ext_hdr_is_gsc(struct mei_ext_hdr *ext) |
1748 | { |
1749 | return ext && ext->type == MEI_EXT_HDR_GSC; |
1750 | } |
1751 | |
1752 | static inline u8 mei_ext_hdr_set_gsc(struct mei_ext_hdr *ext, struct mei_ext_hdr *gsc_hdr) |
1753 | { |
1754 | memcpy(ext, gsc_hdr, mei_ext_hdr_len(gsc_hdr)); |
1755 | return ext->length; |
1756 | } |
1757 | |
1758 | /** |
1759 | * mei_msg_hdr_init - allocate and initialize mei message header |
1760 | * |
1761 | * @cb: message callback structure |
1762 | * |
1763 | * Return: a pointer to initialized header or ERR_PTR on failure |
1764 | */ |
1765 | static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb) |
1766 | { |
1767 | size_t hdr_len; |
1768 | struct mei_ext_meta_hdr *meta; |
1769 | struct mei_msg_hdr *mei_hdr; |
1770 | bool is_ext, is_hbm, is_gsc, is_vtag; |
1771 | struct mei_ext_hdr *next_ext; |
1772 | |
1773 | if (!cb) |
		return ERR_PTR(-EINVAL);
1775 | |
1776 | /* Extended header for vtag is attached only on the first fragment */ |
1777 | is_vtag = (cb->vtag && cb->buf_idx == 0); |
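	/* me client id 0 is reserved for HBM messages */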
1778 | is_hbm = cb->cl->me_cl->client_id == 0; |
	is_gsc = ((!is_hbm) && cb->cl->dev->hbm_f_gsc_supported && mei_ext_hdr_is_gsc(cb->ext_hdr));
1780 | is_ext = is_vtag || is_gsc; |
1781 | |
1782 | /* Compute extended header size */ |
1783 | hdr_len = sizeof(*mei_hdr); |
1784 | |
1785 | if (!is_ext) |
1786 | goto setup_hdr; |
1787 | |
1788 | hdr_len += sizeof(*meta); |
1789 | if (is_vtag) |
1790 | hdr_len += sizeof(struct mei_ext_hdr_vtag); |
1791 | |
1792 | if (is_gsc) |
		hdr_len += mei_ext_hdr_len(cb->ext_hdr);
1794 | |
1795 | setup_hdr: |
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);
1799 | |
	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
1802 | mei_hdr->internal = cb->internal; |
1803 | mei_hdr->extended = is_ext; |
1804 | |
1805 | if (!is_ext) |
1806 | goto out; |
1807 | |
1808 | meta = (struct mei_ext_meta_hdr *)mei_hdr->extension; |
1809 | meta->size = 0; |
1810 | next_ext = (struct mei_ext_hdr *)meta->hdrs; |
1811 | if (is_vtag) { |
1812 | meta->count++; |
		meta->size += mei_ext_hdr_set_vtag(next_ext, cb->vtag);
		next_ext = mei_ext_next(next_ext);
1815 | } |
1816 | |
1817 | if (is_gsc) { |
1818 | meta->count++; |
		meta->size += mei_ext_hdr_set_gsc(next_ext, cb->ext_hdr);
		next_ext = mei_ext_next(next_ext);
1821 | } |
1822 | |
1823 | out: |
1824 | mei_hdr->length = hdr_len - sizeof(*mei_hdr); |
1825 | return mei_hdr; |
1826 | } |
1827 | |
1828 | /** |
1829 | * mei_cl_irq_write - write a message to device |
1830 | * from the interrupt thread context |
1831 | * |
1832 | * @cl: client |
1833 | * @cb: callback block. |
1834 | * @cmpl_list: complete list. |
1835 | * |
1836 | * Return: 0, OK; otherwise error. |
1837 | */ |
1838 | int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, |
1839 | struct list_head *cmpl_list) |
1840 | { |
1841 | struct mei_device *dev; |
1842 | struct mei_msg_data *buf; |
1843 | struct mei_msg_hdr *mei_hdr = NULL; |
1844 | size_t hdr_len; |
1845 | size_t hbuf_len, dr_len; |
1846 | size_t buf_len = 0; |
1847 | size_t data_len; |
1848 | int hbuf_slots; |
1849 | u32 dr_slots; |
1850 | u32 dma_len; |
1851 | int rets; |
1852 | bool first_chunk; |
1853 | const void *data = NULL; |
1854 | |
1855 | if (WARN_ON(!cl || !cl->dev)) |
1856 | return -ENODEV; |
1857 | |
1858 | dev = cl->dev; |
1859 | |
1860 | buf = &cb->buf; |
1861 | |
1862 | first_chunk = cb->buf_idx == 0; |
1863 | |
1864 | rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1; |
1865 | if (rets < 0) |
1866 | goto err; |
1867 | |
1868 | if (rets == 0) { |
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1870 | return 0; |
1871 | } |
1872 | |
1873 | if (buf->data) { |
1874 | buf_len = buf->size - cb->buf_idx; |
1875 | data = buf->data + cb->buf_idx; |
1876 | } |
1877 | hbuf_slots = mei_hbuf_empty_slots(dev); |
1878 | if (hbuf_slots < 0) { |
1879 | rets = -EOVERFLOW; |
1880 | goto err; |
1881 | } |
1882 | |
	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);
1886 | |
1887 | mei_hdr = mei_msg_hdr_init(cb); |
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
1891 | goto err; |
1892 | } |
1893 | |
1894 | hdr_len = sizeof(*mei_hdr) + mei_hdr->length; |
1895 | |
1896 | /** |
1897 | * Split the message only if we can write the whole host buffer |
1898 | * otherwise wait for next time the host buffer is empty. |
1899 | */ |
1900 | if (hdr_len + buf_len <= hbuf_len) { |
1901 | data_len = buf_len; |
1902 | mei_hdr->msg_complete = 1; |
1903 | } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { |
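		/* the payload is sent over the DMA ring; only its length
		 * (dma_len) travels in the regular host buffer
		 */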
1904 | mei_hdr->dma_ring = 1; |
1905 | if (buf_len > dr_len) |
1906 | buf_len = dr_len; |
1907 | else |
1908 | mei_hdr->msg_complete = 1; |
1909 | |
1910 | data_len = sizeof(dma_len); |
1911 | dma_len = buf_len; |
1912 | data = &dma_len; |
1913 | } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) { |
1914 | buf_len = hbuf_len - hdr_len; |
1915 | data_len = buf_len; |
1916 | } else { |
		kfree(mei_hdr);
1918 | return 0; |
1919 | } |
1920 | mei_hdr->length += data_len; |
1921 | |
1922 | if (mei_hdr->dma_ring && buf->data) |
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
1925 | |
1926 | if (rets) |
1927 | goto err; |
1928 | |
1929 | cl->status = 0; |
1930 | cl->writing_state = MEI_WRITING; |
1931 | cb->buf_idx += buf_len; |
1932 | |
1933 | if (first_chunk) { |
1934 | if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) { |
1935 | rets = -EIO; |
1936 | goto err; |
1937 | } |
1938 | } |
1939 | |
1940 | if (mei_hdr->msg_complete) |
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
1944 | return 0; |
1945 | |
1946 | err: |
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
1950 | return rets; |
1951 | } |
1952 | |
1953 | /** |
1954 | * mei_cl_write - submit a write cb to mei device |
1955 | * assumes device_lock is locked |
1956 | * |
1957 | * @cl: host client |
1958 | * @cb: write callback with filled data |
1959 | * @timeout: send timeout in milliseconds. |
 *           Effective only for blocking writes (cb->blocking is set).
 *           Set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
1962 | * |
1963 | * Return: number of bytes sent on success, <0 on failure. |
1964 | */ |
1965 | ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout) |
1966 | { |
1967 | struct mei_device *dev; |
1968 | struct mei_msg_data *buf; |
1969 | struct mei_msg_hdr *mei_hdr = NULL; |
1970 | size_t hdr_len; |
1971 | size_t hbuf_len, dr_len; |
1972 | size_t buf_len; |
1973 | size_t data_len; |
1974 | int hbuf_slots; |
1975 | u32 dr_slots; |
1976 | u32 dma_len; |
1977 | ssize_t rets; |
1978 | bool blocking; |
1979 | const void *data; |
1980 | |
1981 | if (WARN_ON(!cl || !cl->dev)) |
1982 | return -ENODEV; |
1983 | |
1984 | if (WARN_ON(!cb)) |
1985 | return -EINVAL; |
1986 | |
1987 | dev = cl->dev; |
1988 | |
1989 | buf = &cb->buf; |
1990 | buf_len = buf->size; |
1991 | |
	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
1993 | |
1994 | blocking = cb->blocking; |
1995 | data = buf->data; |
1996 | |
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
2001 | goto free; |
2002 | } |
2003 | |
2004 | cb->buf_idx = 0; |
2005 | cl->writing_state = MEI_IDLE; |
2006 | |
2007 | |
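	/*
	 * Try to send the first chunk here; if flow control credits or the
	 * host buffer are unavailable, the cb is queued below and the
	 * remainder is sent from the interrupt thread (mei_cl_irq_write).
	 */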
2008 | rets = mei_cl_tx_flow_ctrl_creds(cl); |
2009 | if (rets < 0) |
2010 | goto err; |
2011 | |
2012 | mei_hdr = mei_msg_hdr_init(cb); |
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
2015 | mei_hdr = NULL; |
2016 | goto err; |
2017 | } |
2018 | |
2019 | hdr_len = sizeof(*mei_hdr) + mei_hdr->length; |
2020 | |
2021 | if (rets == 0) { |
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
2023 | rets = buf_len; |
2024 | goto out; |
2025 | } |
2026 | |
2027 | if (!mei_hbuf_acquire(dev)) { |
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
2029 | rets = buf_len; |
2030 | goto out; |
2031 | } |
2032 | |
2033 | hbuf_slots = mei_hbuf_empty_slots(dev); |
2034 | if (hbuf_slots < 0) { |
2035 | buf_len = -EOVERFLOW; |
2036 | goto out; |
2037 | } |
2038 | |
	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);
2042 | |
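	/*
	 * Pick a transmission strategy: send the whole message inline if it
	 * fits in the host buffer, use the DMA ring when it has room,
	 * otherwise start with a partial chunk that fills the host buffer.
	 */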
2043 | if (hdr_len + buf_len <= hbuf_len) { |
2044 | data_len = buf_len; |
2045 | mei_hdr->msg_complete = 1; |
2046 | } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) { |
2047 | mei_hdr->dma_ring = 1; |
2048 | if (buf_len > dr_len) |
2049 | buf_len = dr_len; |
2050 | else |
2051 | mei_hdr->msg_complete = 1; |
2052 | |
2053 | data_len = sizeof(dma_len); |
2054 | dma_len = buf_len; |
2055 | data = &dma_len; |
2056 | } else { |
2057 | buf_len = hbuf_len - hdr_len; |
2058 | data_len = buf_len; |
2059 | } |
2060 | |
2061 | mei_hdr->length += data_len; |
2062 | |
2063 | if (mei_hdr->dma_ring && buf->data) |
		mei_dma_ring_write(dev, buf->data, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
2066 | |
2067 | if (rets) |
2068 | goto err; |
2069 | |
2070 | rets = mei_cl_tx_flow_ctrl_creds_reduce(cl); |
2071 | if (rets) |
2072 | goto err; |
2073 | |
2074 | cl->writing_state = MEI_WRITING; |
2075 | cb->buf_idx = buf_len; |
2076 | /* restore return value */ |
2077 | buf_len = buf->size; |
2078 | |
2079 | out: |
2080 | if (mei_hdr->msg_complete) |
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);
2084 | |
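	/*
	 * The cb now belongs to one of the tx queues; clear the local
	 * pointer so it is not freed in the cleanup path below.
	 */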
2085 | cb = NULL; |
2086 | if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { |
2087 | |
		mutex_unlock(&dev->device_lock);
2089 | rets = wait_event_interruptible_timeout(cl->tx_wait, |
2090 | cl->writing_state == MEI_WRITE_COMPLETE || |
2091 | (!mei_cl_is_connected(cl)), |
2092 | msecs_to_jiffies(timeout)); |
2093 | mutex_lock(&dev->device_lock); |
2094 | /* clean all queue on timeout as something fatal happened */ |
2095 | if (rets == 0) { |
2096 | rets = -ETIME; |
			mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
			mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
2099 | } |
		/* wait_event_interruptible_timeout() returns -ERESTARTSYS */
2101 | if (rets > 0) |
2102 | rets = 0; |
2103 | if (rets) { |
2104 | if (signal_pending(current)) |
2105 | rets = -EINTR; |
2106 | goto err; |
2107 | } |
2108 | if (cl->writing_state != MEI_WRITE_COMPLETE) { |
2109 | rets = -EFAULT; |
2110 | goto err; |
2111 | } |
2112 | } |
2113 | |
2114 | rets = buf_len; |
2115 | err: |
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
2119 | free: |
2120 | mei_io_cb_free(cb); |
2121 | |
	kfree(mei_hdr);
2123 | |
2124 | return rets; |
2125 | } |
2126 | |
2127 | /** |
2128 | * mei_cl_complete - processes completed operation for a client |
2129 | * |
2130 | * @cl: private data of the file object. |
2131 | * @cb: callback block. |
2132 | */ |
2133 | void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb) |
2134 | { |
2135 | struct mei_device *dev = cl->dev; |
2136 | |
2137 | switch (cb->fop_type) { |
2138 | case MEI_FOP_WRITE: |
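		/*
		 * A write has completed: wake a blocking writer if one is
		 * waiting, otherwise let the device autosuspend.
		 */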
2139 | mei_tx_cb_dequeue(cb); |
2140 | cl->writing_state = MEI_WRITE_COMPLETE; |
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
2146 | } |
2147 | break; |
2148 | |
2149 | case MEI_FOP_READ: |
2150 | mei_cl_add_rd_completed(cl, cb); |
2151 | if (!mei_cl_is_fixed_address(cl) && |
2152 | !WARN_ON(!cl->rx_flow_ctrl_creds)) |
2153 | cl->rx_flow_ctrl_creds--; |
2154 | if (!mei_cl_bus_rx_event(cl)) |
2155 | wake_up_interruptible(&cl->rx_wait); |
2156 | break; |
2157 | |
2158 | case MEI_FOP_CONNECT: |
2159 | case MEI_FOP_DISCONNECT: |
2160 | case MEI_FOP_NOTIFY_STOP: |
2161 | case MEI_FOP_NOTIFY_START: |
2162 | case MEI_FOP_DMA_MAP: |
2163 | case MEI_FOP_DMA_UNMAP: |
		if (waitqueue_active(&cl->wait))
2165 | wake_up(&cl->wait); |
2166 | |
2167 | break; |
2168 | case MEI_FOP_DISCONNECT_RSP: |
2169 | mei_io_cb_free(cb); |
2170 | mei_cl_set_disconnected(cl); |
2171 | break; |
2172 | default: |
2173 | BUG_ON(0); |
2174 | } |
2175 | } |
2176 | |
2177 | |
2178 | /** |
2179 | * mei_cl_all_disconnect - disconnect forcefully all connected clients |
2180 | * |
2181 | * @dev: mei device |
2182 | */ |
2183 | void mei_cl_all_disconnect(struct mei_device *dev) |
2184 | { |
2185 | struct mei_cl *cl; |
2186 | |
2187 | list_for_each_entry(cl, &dev->file_list, link) |
2188 | mei_cl_set_disconnected(cl); |
2189 | } |
2190 | EXPORT_SYMBOL_GPL(mei_cl_all_disconnect); |
2191 | |
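/**
 * mei_cl_dma_map_find - find the client owning a dma buffer by its id
 *
 * @dev: mei device
 * @buffer_id: dma buffer id
 *
 * Return: mei client with a matching buffer id or NULL if none is found
 */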
2192 | static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id) |
2193 | { |
2194 | struct mei_cl *cl; |
2195 | |
2196 | list_for_each_entry(cl, &dev->file_list, link) |
2197 | if (cl->dma.buffer_id == buffer_id) |
2198 | return cl; |
2199 | return NULL; |
2200 | } |
2201 | |
2202 | /** |
2203 | * mei_cl_irq_dma_map - send client dma map request in irq_thread context |
2204 | * |
2205 | * @cl: client |
2206 | * @cb: callback block. |
2207 | * @cmpl_list: complete list. |
2208 | * |
 * Return: 0 on success and an error code otherwise.
2210 | */ |
2211 | int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb, |
2212 | struct list_head *cmpl_list) |
2213 | { |
2214 | struct mei_device *dev = cl->dev; |
2215 | u32 msg_slots; |
2216 | int slots; |
2217 | int ret; |
2218 | |
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
2220 | slots = mei_hbuf_empty_slots(dev); |
2221 | if (slots < 0) |
2222 | return -EOVERFLOW; |
2223 | |
2224 | if ((u32)slots < msg_slots) |
2225 | return -EMSGSIZE; |
2226 | |
2227 | ret = mei_hbm_cl_dma_map_req(dev, cl); |
2228 | if (ret) { |
2229 | cl->status = ret; |
		list_move_tail(&cb->list, cmpl_list);
2231 | return ret; |
2232 | } |
2233 | |
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
2235 | return 0; |
2236 | } |
2237 | |
2238 | /** |
2239 | * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context |
2240 | * |
2241 | * @cl: client |
2242 | * @cb: callback block. |
2243 | * @cmpl_list: complete list. |
2244 | * |
 * Return: 0 on success and an error code otherwise.
2246 | */ |
2247 | int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb, |
2248 | struct list_head *cmpl_list) |
2249 | { |
2250 | struct mei_device *dev = cl->dev; |
2251 | u32 msg_slots; |
2252 | int slots; |
2253 | int ret; |
2254 | |
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
2256 | slots = mei_hbuf_empty_slots(dev); |
2257 | if (slots < 0) |
2258 | return -EOVERFLOW; |
2259 | |
2260 | if ((u32)slots < msg_slots) |
2261 | return -EMSGSIZE; |
2262 | |
2263 | ret = mei_hbm_cl_dma_unmap_req(dev, cl); |
2264 | if (ret) { |
2265 | cl->status = ret; |
		list_move_tail(&cb->list, cmpl_list);
2267 | return ret; |
2268 | } |
2269 | |
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
2271 | return 0; |
2272 | } |
2273 | |
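/**
 * mei_cl_dma_alloc - allocate a managed coherent dma buffer for the client
 *
 * @cl: host client
 * @buf_id: dma buffer id
 * @size: size of the buffer in bytes
 *
 * Return: 0 on success, -ENOMEM if the allocation failed
 */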
2274 | static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size) |
2275 | { |
	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
					    &cl->dma.daddr, GFP_KERNEL);
2278 | if (!cl->dma.vaddr) |
2279 | return -ENOMEM; |
2280 | |
2281 | cl->dma.buffer_id = buf_id; |
2282 | cl->dma.size = size; |
2283 | |
2284 | return 0; |
2285 | } |
2286 | |
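/**
 * mei_cl_dma_free - release the client dma buffer and reset the bookkeeping
 *
 * @cl: host client
 */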
2287 | static void mei_cl_dma_free(struct mei_cl *cl) |
2288 | { |
2289 | cl->dma.buffer_id = 0; |
	dmam_free_coherent(cl->dev->dev,
			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
2292 | cl->dma.size = 0; |
2293 | cl->dma.vaddr = NULL; |
2294 | cl->dma.daddr = 0; |
2295 | } |
2296 | |
2297 | /** |
2298 | * mei_cl_dma_alloc_and_map - send client dma map request |
2299 | * |
2300 | * @cl: host client |
2301 | * @fp: pointer to file structure |
2302 | * @buffer_id: id of the mapped buffer |
2303 | * @size: size of the buffer |
2304 | * |
2305 | * Locking: called under "dev->device_lock" lock |
2306 | * |
 * Return:
 * * 0 on success
 * * -ENODEV
 * * -EINVAL
 * * -EOPNOTSUPP
 * * -EPROTO
 * * -ENOMEM
2313 | */ |
2314 | int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp, |
2315 | u8 buffer_id, size_t size) |
2316 | { |
2317 | struct mei_device *dev; |
2318 | struct mei_cl_cb *cb; |
2319 | int rets; |
2320 | |
2321 | if (WARN_ON(!cl || !cl->dev)) |
2322 | return -ENODEV; |
2323 | |
2324 | dev = cl->dev; |
2325 | |
2326 | if (!dev->hbm_f_cd_supported) { |
		cl_dbg(dev, cl, "client dma is not supported\n");
2328 | return -EOPNOTSUPP; |
2329 | } |
2330 | |
2331 | if (buffer_id == 0) |
2332 | return -EINVAL; |
2333 | |
2334 | if (mei_cl_is_connected(cl)) |
2335 | return -EPROTO; |
2336 | |
2337 | if (cl->dma_mapped) |
2338 | return -EPROTO; |
2339 | |
2340 | if (mei_cl_dma_map_find(dev, buffer_id)) { |
		cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
		       cl->dma.buffer_id);
2343 | return -EPROTO; |
2344 | } |
2345 | |
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
2350 | return rets; |
2351 | } |
2352 | |
	rets = mei_cl_dma_alloc(cl, buffer_id, size);
	if (rets) {
		pm_runtime_put_noidle(dev->dev);
2356 | return rets; |
2357 | } |
2358 | |
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
2360 | if (!cb) { |
2361 | rets = -ENOMEM; |
2362 | goto out; |
2363 | } |
2364 | |
2365 | if (mei_hbuf_acquire(dev)) { |
2366 | if (mei_hbm_cl_dma_map_req(dev, cl)) { |
2367 | rets = -ENODEV; |
2368 | goto out; |
2369 | } |
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
2371 | } |
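	/*
	 * If the host buffer could not be acquired, the request stays on the
	 * control write queue and is sent later by mei_cl_irq_dma_map().
	 */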
2372 | |
2373 | cl->status = 0; |
2374 | |
	mutex_unlock(&dev->device_lock);
2376 | wait_event_timeout(cl->wait, |
2377 | cl->dma_mapped || cl->status, |
2378 | dev->timeouts.cl_connect); |
2379 | mutex_lock(&dev->device_lock); |
2380 | |
2381 | if (!cl->dma_mapped && !cl->status) |
2382 | cl->status = -EFAULT; |
2383 | |
2384 | rets = cl->status; |
2385 | |
2386 | out: |
2387 | if (rets) |
2388 | mei_cl_dma_free(cl); |
2389 | |
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
2393 | |
2394 | mei_io_cb_free(cb); |
2395 | return rets; |
2396 | } |
2397 | |
2398 | /** |
2399 | * mei_cl_dma_unmap - send client dma unmap request |
2400 | * |
2401 | * @cl: host client |
2402 | * @fp: pointer to file structure |
2403 | * |
2404 | * Locking: called under "dev->device_lock" lock |
2405 | * |
 * Return: 0 on success and an error code otherwise.
2407 | */ |
2408 | int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp) |
2409 | { |
2410 | struct mei_device *dev; |
2411 | struct mei_cl_cb *cb; |
2412 | int rets; |
2413 | |
2414 | if (WARN_ON(!cl || !cl->dev)) |
2415 | return -ENODEV; |
2416 | |
2417 | dev = cl->dev; |
2418 | |
2419 | if (!dev->hbm_f_cd_supported) { |
		cl_dbg(dev, cl, "client dma is not supported\n");
2421 | return -EOPNOTSUPP; |
2422 | } |
2423 | |
2424 | /* do not allow unmap for connected client */ |
2425 | if (mei_cl_is_connected(cl)) |
2426 | return -EPROTO; |
2427 | |
2428 | if (!cl->dma_mapped) |
2429 | return -EPROTO; |
2430 | |
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
2435 | return rets; |
2436 | } |
2437 | |
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
2439 | if (!cb) { |
2440 | rets = -ENOMEM; |
2441 | goto out; |
2442 | } |
2443 | |
2444 | if (mei_hbuf_acquire(dev)) { |
2445 | if (mei_hbm_cl_dma_unmap_req(dev, cl)) { |
2446 | rets = -ENODEV; |
2447 | goto out; |
2448 | } |
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
2450 | } |
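	/*
	 * As with the map request above, an unsent request is left queued
	 * and is picked up later by mei_cl_irq_dma_unmap().
	 */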
2451 | |
2452 | cl->status = 0; |
2453 | |
	mutex_unlock(&dev->device_lock);
2455 | wait_event_timeout(cl->wait, |
2456 | !cl->dma_mapped || cl->status, |
2457 | dev->timeouts.cl_connect); |
2458 | mutex_lock(&dev->device_lock); |
2459 | |
2460 | if (cl->dma_mapped && !cl->status) |
2461 | cl->status = -EFAULT; |
2462 | |
2463 | rets = cl->status; |
2464 | |
2465 | if (!rets) |
2466 | mei_cl_dma_free(cl); |
2467 | out: |
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
2471 | |
2472 | mei_io_cb_free(cb); |
2473 | return rets; |
2474 | } |
2475 | |