// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Component Authentication Protocol (CAP) Driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 */

#include <linux/greybus.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#include "greybus_authentication.h"
#include "firmware.h"

#define CAP_TIMEOUT_MS		1000

/*
 * Number of minor devices this driver supports.
 * There will be exactly one required per Interface.
 */
#define NUM_MINORS		U8_MAX

struct gb_cap {
	struct device		*parent;
	struct gb_connection	*connection;
	struct kref		kref;
	struct list_head	node;
	bool			disabled; /* connection getting disabled */

	struct mutex		mutex;
	struct cdev		cdev;
	struct device		*class_device;
	dev_t			dev_num;
};

static const struct class cap_class = {
	.name		= "gb_authenticate",
};

static dev_t cap_dev_num;
static DEFINE_IDA(cap_minors_map);
static LIST_HEAD(cap_list);
static DEFINE_MUTEX(list_mutex);

static void cap_kref_release(struct kref *kref)
{
	struct gb_cap *cap = container_of(kref, struct gb_cap, kref);

	kfree(cap);
}

/*
 * All users of cap take a reference (while holding list_mutex) before they
 * get a pointer to play with. The structure is freed only after the last
 * user has put its reference.
 */
static void put_cap(struct gb_cap *cap)
{
	kref_put(&cap->kref, cap_kref_release);
}

/* Caller must call put_cap() after using struct gb_cap */
static struct gb_cap *get_cap(struct cdev *cdev)
{
	struct gb_cap *cap;

	mutex_lock(&list_mutex);

	list_for_each_entry(cap, &cap_list, node) {
		if (&cap->cdev == cdev) {
			kref_get(&cap->kref);
			goto unlock;
		}
	}

	cap = NULL;

unlock:
	mutex_unlock(&list_mutex);

	return cap;
}

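/*
 * Fetch the module's Endpoint Unique ID over the CAP connection and copy
 * it into @euid. Returns 0 on success or a negative errno.
 */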
static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_endpoint_uid_response response;
	int ret;

	ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
		return ret;
	}

	memcpy(euid, response.uid, sizeof(response.uid));

	return 0;
}

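/*
 * Request an IMS certificate of the given class and id from the module.
 * The certificate data and its length are returned through @certificate
 * and @size, the protocol result code through @result. Returns 0 on
 * success or a negative errno on transport failure.
 */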
static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
				   u8 *certificate, u32 *size, u8 *result)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_ims_certificate_request *request;
	struct gb_cap_get_ims_certificate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection,
				       GB_CAP_TYPE_GET_IMS_CERTIFICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->certificate_class = cpu_to_le32(class);
	request->certificate_id = cpu_to_le32(id);

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*size = op->response->payload_size - sizeof(*response);
	memcpy(certificate, response->certificate, *size);

done:
	gb_operation_put(op);
	return ret;
}

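/*
 * Run the authentication exchange: send an endpoint UID and a challenge
 * to the module, and collect the result code, challenge response and
 * signature from its reply. Returns 0 on success or a negative errno.
 */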
static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
			    u8 *challenge, u8 *result, u8 *auth_response,
			    u32 *signature_size, u8 *signature)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_authenticate_request *request;
	struct gb_cap_authenticate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->auth_type = cpu_to_le32(auth_type);
	memcpy(request->uid, uid, sizeof(request->uid));
	memcpy(request->challenge, challenge, sizeof(request->challenge));

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*signature_size = op->response->payload_size - sizeof(*response);
	memcpy(auth_response, response->response, sizeof(response->response));
	memcpy(signature, response->signature, *signature_size);

done:
	gb_operation_put(op);
	return ret;
}

/* Char device fops */

static int cap_open(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = get_cap(inode->i_cdev);

	/* cap structure can't get freed until file descriptor is closed */
	if (cap) {
		file->private_data = cap;
		return 0;
	}

	return -ENODEV;
}

static int cap_release(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = file->private_data;

	put_cap(cap);
	return 0;
}

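/*
 * Ioctl dispatcher: copy the argument structure from user space where
 * needed, run the matching CAP operation and copy the results back.
 */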
static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
		     void __user *buf)
{
	struct cap_ioc_get_endpoint_uid endpoint_uid;
	struct cap_ioc_get_ims_certificate *ims_cert;
	struct cap_ioc_authenticate *authenticate;
	size_t size;
	int ret;

	switch (cmd) {
	case CAP_IOC_GET_ENDPOINT_UID:
		ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
		if (ret)
			return ret;

		if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
			return -EFAULT;

		return 0;
	case CAP_IOC_GET_IMS_CERTIFICATE:
		size = sizeof(*ims_cert);
		ims_cert = memdup_user(buf, size);
		if (IS_ERR(ims_cert))
			return PTR_ERR(ims_cert);

		ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
					      ims_cert->certificate_id,
					      ims_cert->certificate,
					      &ims_cert->cert_size,
					      &ims_cert->result_code);
		if (!ret && copy_to_user(buf, ims_cert, size))
			ret = -EFAULT;
		kfree(ims_cert);

		return ret;
	case CAP_IOC_AUTHENTICATE:
		size = sizeof(*authenticate);
		authenticate = memdup_user(buf, size);
		if (IS_ERR(authenticate))
			return PTR_ERR(authenticate);

		ret = cap_authenticate(cap, authenticate->auth_type,
				       authenticate->uid,
				       authenticate->challenge,
				       &authenticate->result_code,
				       authenticate->response,
				       &authenticate->signature_size,
				       authenticate->signature);
		if (!ret && copy_to_user(buf, authenticate, size))
			ret = -EFAULT;
		kfree(authenticate);

		return ret;
	default:
		return -ENOTTY;
	}
}

static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct gb_cap *cap = file->private_data;
	struct gb_bundle *bundle = cap->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to do multiple authentication operations in
	 * parallel.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&cap->mutex);
	if (!cap->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = cap_ioctl(cap, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&cap->mutex);

	return ret;
}

static const struct file_operations cap_fops = {
	.owner		= THIS_MODULE,
	.open		= cap_open,
	.release	= cap_release,
	.unlocked_ioctl	= cap_ioctl_unlocked,
};

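/*
 * Illustrative user-space sketch (not part of this driver): querying the
 * Endpoint UID through the character device created below. The ioctl name
 * and structure come from greybus_authentication.h; the /dev path assumes
 * a udev-created node for the "gb-authenticate-%d" device.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include "greybus_authentication.h"
 *
 *	int fd = open("/dev/gb-authenticate-0", O_RDWR);
 *	struct cap_ioc_get_endpoint_uid euid = {};
 *	int ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &euid);
 *
 *	// On success (ret == 0), euid.uid holds the Endpoint Unique ID.
 *	close(fd);
 */

/*
 * Set up the CAP connection for a bundle: allocate and register the gb_cap
 * structure, enable the connection and expose a character device
 * ("gb-authenticate-<minor>") that user space drives via the ioctls above.
 */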
int gb_cap_connection_init(struct gb_connection *connection)
{
	struct gb_cap *cap;
	int ret, minor;

	if (!connection)
		return 0;

	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return -ENOMEM;

	cap->parent = &connection->bundle->dev;
	cap->connection = connection;
	mutex_init(&cap->mutex);
	gb_connection_set_data(connection, cap);
	kref_init(&cap->kref);

	mutex_lock(&list_mutex);
	list_add(&cap->node, &cap_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_alloc_max(&cap_minors_map, NUM_MINORS - 1, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with cap */
	cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
	cdev_init(&cap->cdev, &cap_fops);

	ret = cdev_add(&cap->cdev, cap->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	cap->class_device = device_create(&cap_class, cap->parent, cap->dev_num,
					  NULL, "gb-authenticate-%d", minor);
	if (IS_ERR(cap->class_device)) {
		ret = PTR_ERR(cap->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&cap->cdev);
err_remove_ida:
	ida_free(&cap_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	put_cap(cap);

	return ret;
}

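/*
 * Tear down a CAP connection: remove the character device, block new
 * ioctls, disable the connection and drop the initial reference to the
 * gb_cap structure.
 */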
void gb_cap_connection_exit(struct gb_connection *connection)
{
	struct gb_cap *cap;

	if (!connection)
		return;

	cap = gb_connection_get_data(connection);

	device_destroy(&cap_class, cap->dev_num);
	cdev_del(&cap->cdev);
	ida_free(&cap_minors_map, MINOR(cap->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&cap->mutex);
	cap->disabled = true;
	mutex_unlock(&cap->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(cap->connection);

	/* Disallow new users to get access to the cap structure */
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of cap have already taken a reference to it by
	 * now. Drop our reference here; the structure is freed once the last
	 * user puts theirs.
	 */
	put_cap(cap);
}

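/*
 * Module-wide setup: register the "gb_authenticate" class and reserve a
 * character-device region with NUM_MINORS minors. Undone by cap_exit().
 */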
int cap_init(void)
{
	int ret;

	ret = class_register(&cap_class);
	if (ret)
		return ret;

	ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
				  "gb_authenticate");
	if (ret)
		goto err_remove_class;

	return 0;

err_remove_class:
	class_unregister(&cap_class);
	return ret;
}

void cap_exit(void)
{
	unregister_chrdev_region(cap_dev_num, NUM_MINORS);
	class_unregister(&cap_class);
	ida_destroy(&cap_minors_map);
}