1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Greybus Firmware Management Protocol Driver. |
4 | * |
5 | * Copyright 2016 Google Inc. |
6 | * Copyright 2016 Linaro Ltd. |
7 | */ |
8 | |
9 | #include <linux/cdev.h> |
10 | #include <linux/completion.h> |
11 | #include <linux/firmware.h> |
12 | #include <linux/fs.h> |
13 | #include <linux/idr.h> |
14 | #include <linux/ioctl.h> |
15 | #include <linux/uaccess.h> |
16 | #include <linux/greybus.h> |
17 | |
18 | #include "firmware.h" |
19 | #include "greybus_firmware.h" |
20 | |
21 | #define FW_MGMT_TIMEOUT_MS 1000 |
22 | |
/* Per-connection state for one firmware-management CPort */
struct fw_mgmt {
	struct device *parent;		/* bundle device, used for logging */
	struct gb_connection *connection;
	struct kref kref;		/* lifetime of this structure */
	struct list_head node;		/* entry in global fw_mgmt_list */

	/* Common id-map for interface and backend firmware requests */
	struct ida id_map;
	struct mutex mutex;		/* serializes ioctls, protects ->disabled */
	struct completion completion;	/* signalled by async loaded/updated requests */
	struct cdev cdev;
	struct device *class_device;
	dev_t dev_num;
	unsigned int timeout_jiffies;	/* per-operation wait, settable via ioctl */
	bool disabled; /* connection getting disabled */

	/* Interface Firmware specific fields */
	bool mode_switch_started;	/* once set, all further ioctls are rejected */
	bool intf_fw_loaded;		/* required before a mode-switch is allowed */
	u8 intf_fw_request_id;		/* non-zero while a load request is pending */
	u8 intf_fw_status;
	u16 intf_fw_major;
	u16 intf_fw_minor;

	/* Backend Firmware specific fields */
	u8 backend_fw_request_id;	/* non-zero while an update request is pending */
	u8 backend_fw_status;
};
51 | |
52 | /* |
53 | * Number of minor devices this driver supports. |
54 | * There will be exactly one required per Interface. |
55 | */ |
56 | #define NUM_MINORS U8_MAX |
57 | |
58 | static const struct class fw_mgmt_class = { |
59 | .name = "gb_fw_mgmt" , |
60 | }; |
61 | |
62 | static dev_t fw_mgmt_dev_num; |
63 | static DEFINE_IDA(fw_mgmt_minors_map); |
64 | static LIST_HEAD(fw_mgmt_list); |
65 | static DEFINE_MUTEX(list_mutex); |
66 | |
67 | static void fw_mgmt_kref_release(struct kref *kref) |
68 | { |
69 | struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref); |
70 | |
71 | ida_destroy(ida: &fw_mgmt->id_map); |
72 | kfree(objp: fw_mgmt); |
73 | } |
74 | |
75 | /* |
76 | * All users of fw_mgmt take a reference (from within list_mutex lock), before |
77 | * they get a pointer to play with. And the structure will be freed only after |
78 | * the last user has put the reference to it. |
79 | */ |
80 | static void put_fw_mgmt(struct fw_mgmt *fw_mgmt) |
81 | { |
82 | kref_put(kref: &fw_mgmt->kref, release: fw_mgmt_kref_release); |
83 | } |
84 | |
85 | /* Caller must call put_fw_mgmt() after using struct fw_mgmt */ |
86 | static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev) |
87 | { |
88 | struct fw_mgmt *fw_mgmt; |
89 | |
90 | mutex_lock(&list_mutex); |
91 | |
92 | list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) { |
93 | if (&fw_mgmt->cdev == cdev) { |
94 | kref_get(kref: &fw_mgmt->kref); |
95 | goto unlock; |
96 | } |
97 | } |
98 | |
99 | fw_mgmt = NULL; |
100 | |
101 | unlock: |
102 | mutex_unlock(lock: &list_mutex); |
103 | |
104 | return fw_mgmt; |
105 | } |
106 | |
107 | static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt, |
108 | struct fw_mgmt_ioc_get_intf_version *fw_info) |
109 | { |
110 | struct gb_connection *connection = fw_mgmt->connection; |
111 | struct gb_fw_mgmt_interface_fw_version_response response; |
112 | int ret; |
113 | |
114 | ret = gb_operation_sync(connection, |
115 | GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, request_size: 0, |
116 | response: &response, response_size: sizeof(response)); |
117 | if (ret) { |
118 | dev_err(fw_mgmt->parent, |
119 | "failed to get interface firmware version (%d)\n" , ret); |
120 | return ret; |
121 | } |
122 | |
123 | fw_info->major = le16_to_cpu(response.major); |
124 | fw_info->minor = le16_to_cpu(response.minor); |
125 | |
126 | strncpy(p: fw_info->firmware_tag, q: response.firmware_tag, |
127 | GB_FIRMWARE_TAG_MAX_SIZE); |
128 | |
129 | /* |
130 | * The firmware-tag should be NULL terminated, otherwise throw error but |
131 | * don't fail. |
132 | */ |
133 | if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') { |
134 | dev_err(fw_mgmt->parent, |
135 | "fw-version: firmware-tag is not NULL terminated\n" ); |
136 | fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0'; |
137 | } |
138 | |
139 | return 0; |
140 | } |
141 | |
142 | static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt, |
143 | u8 load_method, const char *tag) |
144 | { |
145 | struct gb_fw_mgmt_load_and_validate_fw_request request; |
146 | int ret; |
147 | |
148 | if (load_method != GB_FW_LOAD_METHOD_UNIPRO && |
149 | load_method != GB_FW_LOAD_METHOD_INTERNAL) { |
150 | dev_err(fw_mgmt->parent, |
151 | "invalid load-method (%d)\n" , load_method); |
152 | return -EINVAL; |
153 | } |
154 | |
155 | request.load_method = load_method; |
156 | strncpy(p: request.firmware_tag, q: tag, GB_FIRMWARE_TAG_MAX_SIZE); |
157 | |
158 | /* |
159 | * The firmware-tag should be NULL terminated, otherwise throw error and |
160 | * fail. |
161 | */ |
162 | if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') { |
163 | dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n" ); |
164 | return -EINVAL; |
165 | } |
166 | |
167 | /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */ |
168 | ret = ida_alloc_range(&fw_mgmt->id_map, min: 1, max: 255, GFP_KERNEL); |
169 | if (ret < 0) { |
170 | dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n" , |
171 | ret); |
172 | return ret; |
173 | } |
174 | |
175 | fw_mgmt->intf_fw_request_id = ret; |
176 | fw_mgmt->intf_fw_loaded = false; |
177 | request.request_id = ret; |
178 | |
179 | ret = gb_operation_sync(connection: fw_mgmt->connection, |
180 | GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, request: &request, |
181 | request_size: sizeof(request), NULL, response_size: 0); |
182 | if (ret) { |
183 | ida_free(&fw_mgmt->id_map, id: fw_mgmt->intf_fw_request_id); |
184 | fw_mgmt->intf_fw_request_id = 0; |
185 | dev_err(fw_mgmt->parent, |
186 | "load and validate firmware request failed (%d)\n" , |
187 | ret); |
188 | return ret; |
189 | } |
190 | |
191 | return 0; |
192 | } |
193 | |
194 | static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op) |
195 | { |
196 | struct gb_connection *connection = op->connection; |
197 | struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection); |
198 | struct gb_fw_mgmt_loaded_fw_request *request; |
199 | |
200 | /* No pending load and validate request ? */ |
201 | if (!fw_mgmt->intf_fw_request_id) { |
202 | dev_err(fw_mgmt->parent, |
203 | "unexpected firmware loaded request received\n" ); |
204 | return -ENODEV; |
205 | } |
206 | |
207 | if (op->request->payload_size != sizeof(*request)) { |
208 | dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n" , |
209 | op->request->payload_size, sizeof(*request)); |
210 | return -EINVAL; |
211 | } |
212 | |
213 | request = op->request->payload; |
214 | |
215 | /* Invalid request-id ? */ |
216 | if (request->request_id != fw_mgmt->intf_fw_request_id) { |
217 | dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n" , |
218 | fw_mgmt->intf_fw_request_id, request->request_id); |
219 | return -ENODEV; |
220 | } |
221 | |
222 | ida_free(&fw_mgmt->id_map, id: fw_mgmt->intf_fw_request_id); |
223 | fw_mgmt->intf_fw_request_id = 0; |
224 | fw_mgmt->intf_fw_status = request->status; |
225 | fw_mgmt->intf_fw_major = le16_to_cpu(request->major); |
226 | fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor); |
227 | |
228 | if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED) |
229 | dev_err(fw_mgmt->parent, |
230 | "failed to load interface firmware, status:%02x\n" , |
231 | fw_mgmt->intf_fw_status); |
232 | else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED) |
233 | dev_err(fw_mgmt->parent, |
234 | "failed to validate interface firmware, status:%02x\n" , |
235 | fw_mgmt->intf_fw_status); |
236 | else |
237 | fw_mgmt->intf_fw_loaded = true; |
238 | |
239 | complete(&fw_mgmt->completion); |
240 | |
241 | return 0; |
242 | } |
243 | |
244 | static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt, |
245 | struct fw_mgmt_ioc_get_backend_version *fw_info) |
246 | { |
247 | struct gb_connection *connection = fw_mgmt->connection; |
248 | struct gb_fw_mgmt_backend_fw_version_request request; |
249 | struct gb_fw_mgmt_backend_fw_version_response response; |
250 | int ret; |
251 | |
252 | strncpy(p: request.firmware_tag, q: fw_info->firmware_tag, |
253 | GB_FIRMWARE_TAG_MAX_SIZE); |
254 | |
255 | /* |
256 | * The firmware-tag should be NULL terminated, otherwise throw error and |
257 | * fail. |
258 | */ |
259 | if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') { |
260 | dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n" ); |
261 | return -EINVAL; |
262 | } |
263 | |
264 | ret = gb_operation_sync(connection, |
265 | GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, request: &request, |
266 | request_size: sizeof(request), response: &response, response_size: sizeof(response)); |
267 | if (ret) { |
268 | dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n" , |
269 | fw_info->firmware_tag, ret); |
270 | return ret; |
271 | } |
272 | |
273 | fw_info->status = response.status; |
274 | |
275 | /* Reset version as that should be non-zero only for success case */ |
276 | fw_info->major = 0; |
277 | fw_info->minor = 0; |
278 | |
279 | switch (fw_info->status) { |
280 | case GB_FW_BACKEND_VERSION_STATUS_SUCCESS: |
281 | fw_info->major = le16_to_cpu(response.major); |
282 | fw_info->minor = le16_to_cpu(response.minor); |
283 | break; |
284 | case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE: |
285 | case GB_FW_BACKEND_VERSION_STATUS_RETRY: |
286 | break; |
287 | case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED: |
288 | dev_err(fw_mgmt->parent, |
289 | "Firmware with tag %s is not supported by Interface\n" , |
290 | fw_info->firmware_tag); |
291 | break; |
292 | default: |
293 | dev_err(fw_mgmt->parent, "Invalid status received: %u\n" , |
294 | fw_info->status); |
295 | } |
296 | |
297 | return 0; |
298 | } |
299 | |
300 | static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt, |
301 | char *tag) |
302 | { |
303 | struct gb_fw_mgmt_backend_fw_update_request request; |
304 | int ret; |
305 | |
306 | strncpy(p: request.firmware_tag, q: tag, GB_FIRMWARE_TAG_MAX_SIZE); |
307 | |
308 | /* |
309 | * The firmware-tag should be NULL terminated, otherwise throw error and |
310 | * fail. |
311 | */ |
312 | if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') { |
313 | dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n" ); |
314 | return -EINVAL; |
315 | } |
316 | |
317 | /* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */ |
318 | ret = ida_alloc_range(&fw_mgmt->id_map, min: 1, max: 255, GFP_KERNEL); |
319 | if (ret < 0) { |
320 | dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n" , |
321 | ret); |
322 | return ret; |
323 | } |
324 | |
325 | fw_mgmt->backend_fw_request_id = ret; |
326 | request.request_id = ret; |
327 | |
328 | ret = gb_operation_sync(connection: fw_mgmt->connection, |
329 | GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, request: &request, |
330 | request_size: sizeof(request), NULL, response_size: 0); |
331 | if (ret) { |
332 | ida_free(&fw_mgmt->id_map, id: fw_mgmt->backend_fw_request_id); |
333 | fw_mgmt->backend_fw_request_id = 0; |
334 | dev_err(fw_mgmt->parent, |
335 | "backend %s firmware update request failed (%d)\n" , tag, |
336 | ret); |
337 | return ret; |
338 | } |
339 | |
340 | return 0; |
341 | } |
342 | |
343 | static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op) |
344 | { |
345 | struct gb_connection *connection = op->connection; |
346 | struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection); |
347 | struct gb_fw_mgmt_backend_fw_updated_request *request; |
348 | |
349 | /* No pending load and validate request ? */ |
350 | if (!fw_mgmt->backend_fw_request_id) { |
351 | dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n" ); |
352 | return -ENODEV; |
353 | } |
354 | |
355 | if (op->request->payload_size != sizeof(*request)) { |
356 | dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n" , |
357 | op->request->payload_size, sizeof(*request)); |
358 | return -EINVAL; |
359 | } |
360 | |
361 | request = op->request->payload; |
362 | |
363 | /* Invalid request-id ? */ |
364 | if (request->request_id != fw_mgmt->backend_fw_request_id) { |
365 | dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n" , |
366 | fw_mgmt->backend_fw_request_id, request->request_id); |
367 | return -ENODEV; |
368 | } |
369 | |
370 | ida_free(&fw_mgmt->id_map, id: fw_mgmt->backend_fw_request_id); |
371 | fw_mgmt->backend_fw_request_id = 0; |
372 | fw_mgmt->backend_fw_status = request->status; |
373 | |
374 | if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) && |
375 | (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY)) |
376 | dev_err(fw_mgmt->parent, |
377 | "failed to load backend firmware: %02x\n" , |
378 | fw_mgmt->backend_fw_status); |
379 | |
380 | complete(&fw_mgmt->completion); |
381 | |
382 | return 0; |
383 | } |
384 | |
385 | /* Char device fops */ |
386 | |
387 | static int fw_mgmt_open(struct inode *inode, struct file *file) |
388 | { |
389 | struct fw_mgmt *fw_mgmt = get_fw_mgmt(cdev: inode->i_cdev); |
390 | |
391 | /* fw_mgmt structure can't get freed until file descriptor is closed */ |
392 | if (fw_mgmt) { |
393 | file->private_data = fw_mgmt; |
394 | return 0; |
395 | } |
396 | |
397 | return -ENODEV; |
398 | } |
399 | |
400 | static int fw_mgmt_release(struct inode *inode, struct file *file) |
401 | { |
402 | struct fw_mgmt *fw_mgmt = file->private_data; |
403 | |
404 | put_fw_mgmt(fw_mgmt); |
405 | return 0; |
406 | } |
407 | |
408 | static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd, |
409 | void __user *buf) |
410 | { |
411 | struct fw_mgmt_ioc_get_intf_version intf_fw_info; |
412 | struct fw_mgmt_ioc_get_backend_version backend_fw_info; |
413 | struct fw_mgmt_ioc_intf_load_and_validate intf_load; |
414 | struct fw_mgmt_ioc_backend_fw_update backend_update; |
415 | unsigned int timeout; |
416 | int ret; |
417 | |
418 | /* Reject any operations after mode-switch has started */ |
419 | if (fw_mgmt->mode_switch_started) |
420 | return -EBUSY; |
421 | |
422 | switch (cmd) { |
423 | case FW_MGMT_IOC_GET_INTF_FW: |
424 | ret = fw_mgmt_interface_fw_version_operation(fw_mgmt, |
425 | fw_info: &intf_fw_info); |
426 | if (ret) |
427 | return ret; |
428 | |
429 | if (copy_to_user(to: buf, from: &intf_fw_info, n: sizeof(intf_fw_info))) |
430 | return -EFAULT; |
431 | |
432 | return 0; |
433 | case FW_MGMT_IOC_GET_BACKEND_FW: |
434 | if (copy_from_user(to: &backend_fw_info, from: buf, |
435 | n: sizeof(backend_fw_info))) |
436 | return -EFAULT; |
437 | |
438 | ret = fw_mgmt_backend_fw_version_operation(fw_mgmt, |
439 | fw_info: &backend_fw_info); |
440 | if (ret) |
441 | return ret; |
442 | |
443 | if (copy_to_user(to: buf, from: &backend_fw_info, |
444 | n: sizeof(backend_fw_info))) |
445 | return -EFAULT; |
446 | |
447 | return 0; |
448 | case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE: |
449 | if (copy_from_user(to: &intf_load, from: buf, n: sizeof(intf_load))) |
450 | return -EFAULT; |
451 | |
452 | ret = fw_mgmt_load_and_validate_operation(fw_mgmt, |
453 | load_method: intf_load.load_method, tag: intf_load.firmware_tag); |
454 | if (ret) |
455 | return ret; |
456 | |
457 | if (!wait_for_completion_timeout(x: &fw_mgmt->completion, |
458 | timeout: fw_mgmt->timeout_jiffies)) { |
459 | dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n" ); |
460 | return -ETIMEDOUT; |
461 | } |
462 | |
463 | intf_load.status = fw_mgmt->intf_fw_status; |
464 | intf_load.major = fw_mgmt->intf_fw_major; |
465 | intf_load.minor = fw_mgmt->intf_fw_minor; |
466 | |
467 | if (copy_to_user(to: buf, from: &intf_load, n: sizeof(intf_load))) |
468 | return -EFAULT; |
469 | |
470 | return 0; |
471 | case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE: |
472 | if (copy_from_user(to: &backend_update, from: buf, |
473 | n: sizeof(backend_update))) |
474 | return -EFAULT; |
475 | |
476 | ret = fw_mgmt_backend_fw_update_operation(fw_mgmt, |
477 | tag: backend_update.firmware_tag); |
478 | if (ret) |
479 | return ret; |
480 | |
481 | if (!wait_for_completion_timeout(x: &fw_mgmt->completion, |
482 | timeout: fw_mgmt->timeout_jiffies)) { |
483 | dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n" ); |
484 | return -ETIMEDOUT; |
485 | } |
486 | |
487 | backend_update.status = fw_mgmt->backend_fw_status; |
488 | |
489 | if (copy_to_user(to: buf, from: &backend_update, n: sizeof(backend_update))) |
490 | return -EFAULT; |
491 | |
492 | return 0; |
493 | case FW_MGMT_IOC_SET_TIMEOUT_MS: |
494 | if (get_user(timeout, (unsigned int __user *)buf)) |
495 | return -EFAULT; |
496 | |
497 | if (!timeout) { |
498 | dev_err(fw_mgmt->parent, "timeout can't be zero\n" ); |
499 | return -EINVAL; |
500 | } |
501 | |
502 | fw_mgmt->timeout_jiffies = msecs_to_jiffies(m: timeout); |
503 | |
504 | return 0; |
505 | case FW_MGMT_IOC_MODE_SWITCH: |
506 | if (!fw_mgmt->intf_fw_loaded) { |
507 | dev_err(fw_mgmt->parent, |
508 | "Firmware not loaded for mode-switch\n" ); |
509 | return -EPERM; |
510 | } |
511 | |
512 | /* |
513 | * Disallow new ioctls as the fw-core bundle driver is going to |
514 | * get disconnected soon and the character device will get |
515 | * removed. |
516 | */ |
517 | fw_mgmt->mode_switch_started = true; |
518 | |
519 | ret = gb_interface_request_mode_switch(intf: fw_mgmt->connection->intf); |
520 | if (ret) { |
521 | dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n" , |
522 | ret); |
523 | fw_mgmt->mode_switch_started = false; |
524 | return ret; |
525 | } |
526 | |
527 | return 0; |
528 | default: |
529 | return -ENOTTY; |
530 | } |
531 | } |
532 | |
533 | static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd, |
534 | unsigned long arg) |
535 | { |
536 | struct fw_mgmt *fw_mgmt = file->private_data; |
537 | struct gb_bundle *bundle = fw_mgmt->connection->bundle; |
538 | int ret = -ENODEV; |
539 | |
540 | /* |
541 | * Serialize ioctls. |
542 | * |
543 | * We don't want the user to do few operations in parallel. For example, |
544 | * updating Interface firmware in parallel for the same Interface. There |
545 | * is no need to do things in parallel for speed and we can avoid having |
546 | * complicated code for now. |
547 | * |
548 | * This is also used to protect ->disabled, which is used to check if |
549 | * the connection is getting disconnected, so that we don't start any |
550 | * new operations. |
551 | */ |
552 | mutex_lock(&fw_mgmt->mutex); |
553 | if (!fw_mgmt->disabled) { |
554 | ret = gb_pm_runtime_get_sync(bundle); |
555 | if (!ret) { |
556 | ret = fw_mgmt_ioctl(fw_mgmt, cmd, buf: (void __user *)arg); |
557 | gb_pm_runtime_put_autosuspend(bundle); |
558 | } |
559 | } |
560 | mutex_unlock(lock: &fw_mgmt->mutex); |
561 | |
562 | return ret; |
563 | } |
564 | |
/* Char-device operations; ioctl is the only data path to userspace */
static const struct file_operations fw_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = fw_mgmt_open,
	.release = fw_mgmt_release,
	.unlocked_ioctl = fw_mgmt_ioctl_unlocked,
};
571 | |
572 | int gb_fw_mgmt_request_handler(struct gb_operation *op) |
573 | { |
574 | u8 type = op->type; |
575 | |
576 | switch (type) { |
577 | case GB_FW_MGMT_TYPE_LOADED_FW: |
578 | return fw_mgmt_interface_fw_loaded_operation(op); |
579 | case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED: |
580 | return fw_mgmt_backend_fw_updated_operation(op); |
581 | default: |
582 | dev_err(&op->connection->bundle->dev, |
583 | "unsupported request: %u\n" , type); |
584 | return -EINVAL; |
585 | } |
586 | } |
587 | |
588 | int gb_fw_mgmt_connection_init(struct gb_connection *connection) |
589 | { |
590 | struct fw_mgmt *fw_mgmt; |
591 | int ret, minor; |
592 | |
593 | if (!connection) |
594 | return 0; |
595 | |
596 | fw_mgmt = kzalloc(size: sizeof(*fw_mgmt), GFP_KERNEL); |
597 | if (!fw_mgmt) |
598 | return -ENOMEM; |
599 | |
600 | fw_mgmt->parent = &connection->bundle->dev; |
601 | fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS); |
602 | fw_mgmt->connection = connection; |
603 | |
604 | gb_connection_set_data(connection, data: fw_mgmt); |
605 | init_completion(x: &fw_mgmt->completion); |
606 | ida_init(ida: &fw_mgmt->id_map); |
607 | mutex_init(&fw_mgmt->mutex); |
608 | kref_init(kref: &fw_mgmt->kref); |
609 | |
610 | mutex_lock(&list_mutex); |
611 | list_add(new: &fw_mgmt->node, head: &fw_mgmt_list); |
612 | mutex_unlock(lock: &list_mutex); |
613 | |
614 | ret = gb_connection_enable(connection); |
615 | if (ret) |
616 | goto err_list_del; |
617 | |
618 | minor = ida_alloc_max(ida: &fw_mgmt_minors_map, NUM_MINORS - 1, GFP_KERNEL); |
619 | if (minor < 0) { |
620 | ret = minor; |
621 | goto err_connection_disable; |
622 | } |
623 | |
624 | /* Add a char device to allow userspace to interact with fw-mgmt */ |
625 | fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor); |
626 | cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops); |
627 | |
628 | ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1); |
629 | if (ret) |
630 | goto err_remove_ida; |
631 | |
632 | /* Add a soft link to the previously added char-dev within the bundle */ |
633 | fw_mgmt->class_device = device_create(cls: &fw_mgmt_class, parent: fw_mgmt->parent, |
634 | devt: fw_mgmt->dev_num, NULL, |
635 | fmt: "gb-fw-mgmt-%d" , minor); |
636 | if (IS_ERR(ptr: fw_mgmt->class_device)) { |
637 | ret = PTR_ERR(ptr: fw_mgmt->class_device); |
638 | goto err_del_cdev; |
639 | } |
640 | |
641 | return 0; |
642 | |
643 | err_del_cdev: |
644 | cdev_del(&fw_mgmt->cdev); |
645 | err_remove_ida: |
646 | ida_free(&fw_mgmt_minors_map, id: minor); |
647 | err_connection_disable: |
648 | gb_connection_disable(connection); |
649 | err_list_del: |
650 | mutex_lock(&list_mutex); |
651 | list_del(entry: &fw_mgmt->node); |
652 | mutex_unlock(lock: &list_mutex); |
653 | |
654 | put_fw_mgmt(fw_mgmt); |
655 | |
656 | return ret; |
657 | } |
658 | |
659 | void gb_fw_mgmt_connection_exit(struct gb_connection *connection) |
660 | { |
661 | struct fw_mgmt *fw_mgmt; |
662 | |
663 | if (!connection) |
664 | return; |
665 | |
666 | fw_mgmt = gb_connection_get_data(connection); |
667 | |
668 | device_destroy(cls: &fw_mgmt_class, devt: fw_mgmt->dev_num); |
669 | cdev_del(&fw_mgmt->cdev); |
670 | ida_free(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num)); |
671 | |
672 | /* |
673 | * Disallow any new ioctl operations on the char device and wait for |
674 | * existing ones to finish. |
675 | */ |
676 | mutex_lock(&fw_mgmt->mutex); |
677 | fw_mgmt->disabled = true; |
678 | mutex_unlock(lock: &fw_mgmt->mutex); |
679 | |
680 | /* All pending greybus operations should have finished by now */ |
681 | gb_connection_disable(connection: fw_mgmt->connection); |
682 | |
683 | /* Disallow new users to get access to the fw_mgmt structure */ |
684 | mutex_lock(&list_mutex); |
685 | list_del(entry: &fw_mgmt->node); |
686 | mutex_unlock(lock: &list_mutex); |
687 | |
688 | /* |
689 | * All current users of fw_mgmt would have taken a reference to it by |
690 | * now, we can drop our reference and wait the last user will get |
691 | * fw_mgmt freed. |
692 | */ |
693 | put_fw_mgmt(fw_mgmt); |
694 | } |
695 | |
696 | int fw_mgmt_init(void) |
697 | { |
698 | int ret; |
699 | |
700 | ret = class_register(class: &fw_mgmt_class); |
701 | if (ret) |
702 | return ret; |
703 | |
704 | ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS, |
705 | "gb_fw_mgmt" ); |
706 | if (ret) |
707 | goto err_remove_class; |
708 | |
709 | return 0; |
710 | |
711 | err_remove_class: |
712 | class_unregister(class: &fw_mgmt_class); |
713 | return ret; |
714 | } |
715 | |
716 | void fw_mgmt_exit(void) |
717 | { |
718 | unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS); |
719 | class_unregister(class: &fw_mgmt_class); |
720 | ida_destroy(ida: &fw_mgmt_minors_map); |
721 | } |
722 | |