// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Component Authentication Protocol (CAP) Driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 */

#include <linux/greybus.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#include "greybus_authentication.h"
#include "firmware.h"

#define CAP_TIMEOUT_MS		1000

/*
 * Number of minor devices this driver supports.
 * Exactly one is required per Interface.
 */
#define NUM_MINORS		U8_MAX

struct gb_cap {
	struct device		*parent;
	struct gb_connection	*connection;
	struct kref		kref;
	struct list_head	node;
	bool			disabled; /* connection getting disabled */

	struct mutex		mutex;
	struct cdev		cdev;
	struct device		*class_device;
	dev_t			dev_num;
};

static struct class *cap_class;
static dev_t cap_dev_num;
static DEFINE_IDA(cap_minors_map);
static LIST_HEAD(cap_list);
static DEFINE_MUTEX(list_mutex);

static void cap_kref_release(struct kref *kref)
{
	struct gb_cap *cap = container_of(kref, struct gb_cap, kref);

	kfree(cap);
}

/*
 * All users of cap take a reference (while holding list_mutex) before they
 * get a pointer to play with. The structure is freed only after the last
 * user has put its reference.
 */
static void put_cap(struct gb_cap *cap)
{
	kref_put(&cap->kref, cap_kref_release);
}

/* Caller must call put_cap() after using struct gb_cap */
static struct gb_cap *get_cap(struct cdev *cdev)
{
	struct gb_cap *cap;

	mutex_lock(&list_mutex);

	list_for_each_entry(cap, &cap_list, node) {
		if (&cap->cdev == cdev) {
			kref_get(&cap->kref);
			goto unlock;
		}
	}

	cap = NULL;

unlock:
	mutex_unlock(&list_mutex);

	return cap;
}

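/*
 * Fetch the Endpoint Unique ID from the module with a synchronous Greybus
 * operation. @euid must have room for sizeof(response.uid) bytes.
 */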
static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_endpoint_uid_response response;
	int ret;

	ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
		return ret;
	}

	memcpy(euid, response.uid, sizeof(response.uid));

	return 0;
}

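/*
 * Request an IMS certificate of the given class and id. The response carries
 * a result code and a variable-length certificate; the certificate length is
 * returned through @size.
 */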
static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
				   u8 *certificate, u32 *size, u8 *result)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_ims_certificate_request *request;
	struct gb_cap_get_ims_certificate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection,
				       GB_CAP_TYPE_GET_IMS_CERTIFICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->certificate_class = cpu_to_le32(class);
	request->certificate_id = cpu_to_le32(id);

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*size = op->response->payload_size - sizeof(*response);
	memcpy(certificate, response->certificate, *size);

done:
	gb_operation_put(op);
	return ret;
}

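/*
 * Run an authentication operation: send the UID and challenge, and return
 * the result code, the fixed-size response blob and a variable-length
 * signature whose length is returned through @signature_size.
 */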
static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
			    u8 *challenge, u8 *result, u8 *auth_response,
			    u32 *signature_size, u8 *signature)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_authenticate_request *request;
	struct gb_cap_authenticate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->auth_type = cpu_to_le32(auth_type);
	memcpy(request->uid, uid, sizeof(request->uid));
	memcpy(request->challenge, challenge, sizeof(request->challenge));

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*signature_size = op->response->payload_size - sizeof(*response);
	memcpy(auth_response, response->response, sizeof(response->response));
	memcpy(signature, response->signature, *signature_size);

done:
	gb_operation_put(op);
	return ret;
}

/* Char device fops */

static int cap_open(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = get_cap(inode->i_cdev);

	/* cap structure can't get freed until file descriptor is closed */
	if (cap) {
		file->private_data = cap;
		return 0;
	}

	return -ENODEV;
}

static int cap_release(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = file->private_data;

	put_cap(cap);
	return 0;
}

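/*
 * Ioctl handler proper; called from cap_ioctl_unlocked() with cap->mutex
 * held and the bundle runtime-resumed.
 */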
static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
		     void __user *buf)
{
	struct cap_ioc_get_endpoint_uid endpoint_uid;
	struct cap_ioc_get_ims_certificate *ims_cert;
	struct cap_ioc_authenticate *authenticate;
	size_t size;
	int ret;

	switch (cmd) {
	case CAP_IOC_GET_ENDPOINT_UID:
		ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
		if (ret)
			return ret;

		if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
			return -EFAULT;

		return 0;
	case CAP_IOC_GET_IMS_CERTIFICATE:
		size = sizeof(*ims_cert);
		ims_cert = memdup_user(buf, size);
		if (IS_ERR(ims_cert))
			return PTR_ERR(ims_cert);

		ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
					      ims_cert->certificate_id,
					      ims_cert->certificate,
					      &ims_cert->cert_size,
					      &ims_cert->result_code);
		if (!ret && copy_to_user(buf, ims_cert, size))
			ret = -EFAULT;
		kfree(ims_cert);

		return ret;
	case CAP_IOC_AUTHENTICATE:
		size = sizeof(*authenticate);
		authenticate = memdup_user(buf, size);
		if (IS_ERR(authenticate))
			return PTR_ERR(authenticate);

		ret = cap_authenticate(cap, authenticate->auth_type,
				       authenticate->uid,
				       authenticate->challenge,
				       &authenticate->result_code,
				       authenticate->response,
				       &authenticate->signature_size,
				       authenticate->signature);
		if (!ret && copy_to_user(buf, authenticate, size))
			ret = -EFAULT;
		kfree(authenticate);

		return ret;
	default:
		return -ENOTTY;
	}
}

static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct gb_cap *cap = file->private_data;
	struct gb_bundle *bundle = cap->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to do multiple authentication operations in
	 * parallel.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&cap->mutex);
	if (!cap->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = cap_ioctl(cap, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&cap->mutex);

	return ret;
}

static const struct file_operations cap_fops = {
	.owner		= THIS_MODULE,
	.open		= cap_open,
	.release	= cap_release,
	.unlocked_ioctl	= cap_ioctl_unlocked,
};
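/*
 * Illustrative userspace sketch (not part of this driver), assuming udev has
 * created /dev/gb-authenticate-0 for this interface and that the ioctl
 * definitions come from greybus_authentication.h:
 *
 *	struct cap_ioc_get_endpoint_uid euid;
 *	int fd = open("/dev/gb-authenticate-0", O_RDWR);
 *
 *	if (fd >= 0 && !ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &euid))
 *		;	// euid.uid now holds the Endpoint Unique ID
 */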

int gb_cap_connection_init(struct gb_connection *connection)
{
	struct gb_cap *cap;
	int ret, minor;

	if (!connection)
		return 0;

	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return -ENOMEM;

	cap->parent = &connection->bundle->dev;
	cap->connection = connection;
	mutex_init(&cap->mutex);
	gb_connection_set_data(connection, cap);
	kref_init(&cap->kref);

	mutex_lock(&list_mutex);
	list_add(&cap->node, &cap_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with cap */
	cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
	cdev_init(&cap->cdev, &cap_fops);

	ret = cdev_add(&cap->cdev, cap->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
					  NULL, "gb-authenticate-%d", minor);
	if (IS_ERR(cap->class_device)) {
		ret = PTR_ERR(cap->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&cap->cdev);
err_remove_ida:
	ida_simple_remove(&cap_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	put_cap(cap);

	return ret;
}

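/*
 * Tear down the char device, prevent new ioctls from starting, disable the
 * connection and drop the initial reference to the cap structure.
 */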
void gb_cap_connection_exit(struct gb_connection *connection)
{
	struct gb_cap *cap;

	if (!connection)
		return;

	cap = gb_connection_get_data(connection);

	device_destroy(cap_class, cap->dev_num);
	cdev_del(&cap->cdev);
	ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&cap->mutex);
	cap->disabled = true;
	mutex_unlock(&cap->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(cap->connection);

	/* Disallow new users to get access to the cap structure */
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of cap would have taken a reference to it by
	 * now, so we can drop our reference; cap is freed once the last
	 * user puts its reference.
	 */
	put_cap(cap);
}

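/*
 * One-time module setup: create the "gb_authenticate" class and reserve the
 * char device region shared by all CAP connections.
 */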
int cap_init(void)
{
	int ret;

	cap_class = class_create(THIS_MODULE, "gb_authenticate");
	if (IS_ERR(cap_class))
		return PTR_ERR(cap_class);

	ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
				  "gb_authenticate");
	if (ret)
		goto err_remove_class;

	return 0;

err_remove_class:
	class_destroy(cap_class);
	return ret;
}

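/* Undo cap_init(): release the char device region, class and minors map. */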
void cap_exit(void)
{
	unregister_chrdev_region(cap_dev_num, NUM_MINORS);
	class_destroy(cap_class);
	ida_destroy(&cap_minors_map);
}