// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/major.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <linux/kobject.h>
#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/tty.h>

#include "internal.h"

static struct kobj_map *cdev_map;

static DEFINE_MUTEX(chrdevs_lock);

#define CHRDEV_MAJOR_HASH_SIZE 255

static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];

/* index in the above */
static inline int major_to_index(unsigned major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS

void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	mutex_lock(&chrdevs_lock);
	for (cd = chrdevs[major_to_index(offset)]; cd; cd = cd->next) {
		if (cd->major == offset)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
	}
	mutex_unlock(&chrdevs_lock);
}

#endif /* CONFIG_PROC_FS */
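
/*
 * Illustrative only: chrdev_show() backs the "Character devices:" half of
 * /proc/devices. With a driver registered as "mydrv" on major 240 (a
 * hypothetical name and number), the "%3d %s" format above would produce
 * lines such as:
 *
 *	  1 mem
 *	  5 tty
 *	240 mydrv
 */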

static int find_dynamic_major(void)
{
	int i;
	struct char_device_struct *cd;

	for (i = ARRAY_SIZE(chrdevs)-1; i >= CHRDEV_MAJOR_DYN_END; i--) {
		if (chrdevs[i] == NULL)
			return i;
	}

	for (i = CHRDEV_MAJOR_DYN_EXT_START;
	     i >= CHRDEV_MAJOR_DYN_EXT_END; i--) {
		for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
			if (cd->major == i)
				break;

		if (cd == NULL)
			return i;
	}

	return -EBUSY;
}

/*
 * Register a single major with a specified minor range.
 *
 * If major == 0 this function will dynamically allocate an unused major.
 * If major > 0 this function will attempt to reserve the range of minors
 * with given major.
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, *curr, *prev = NULL;
	int ret;
	int i;

	if (major >= CHRDEV_MAJOR_MAX) {
		pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
		       name, major, CHRDEV_MAJOR_MAX-1);
		return ERR_PTR(-EINVAL);
	}

	if (minorct > MINORMASK + 1 - baseminor) {
		pr_err("CHRDEV \"%s\" minor range requested (%u-%u) is out of range of maximum range (%u-%u) for a single major\n",
		       name, baseminor, baseminor + minorct - 1, 0, MINORMASK);
		return ERR_PTR(-EINVAL);
	}

	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	if (major == 0) {
		ret = find_dynamic_major();
		if (ret < 0) {
			pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
			       name);
			goto out;
		}
		major = ret;
	}

	ret = -EBUSY;
	i = major_to_index(major);
	for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
		if (curr->major < major)
			continue;

		if (curr->major > major)
			break;

		if (curr->baseminor + curr->minorct <= baseminor)
			continue;

		if (curr->baseminor >= baseminor + minorct)
			break;

		goto out;
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	if (!prev) {
		cd->next = curr;
		chrdevs[i] = cd;
	} else {
		cd->next = prev->next;
		prev->next = cd;
	}

	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}

static struct char_device_struct *
__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
{
	struct char_device_struct *cd = NULL, **cp;
	int i = major_to_index(major);

	mutex_lock(&chrdevs_lock);
	for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
		if ((*cp)->major == major &&
		    (*cp)->baseminor == baseminor &&
		    (*cp)->minorct == minorct)
			break;
	if (*cp) {
		cd = *cp;
		*cp = cd->next;
	}
	mutex_unlock(&chrdevs_lock);
	return cd;
}

/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
					      next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
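
/*
 * Illustrative sketch (not part of this file): a driver that owns a fixed,
 * pre-assigned major would typically reserve its numbers like this, where
 * MYDRV_MAJOR and "mydrv" are hypothetical:
 *
 *	dev_t base = MKDEV(MYDRV_MAJOR, 0);
 *
 *	err = register_chrdev_region(base, 16, "mydrv");
 *	if (err)
 *		return err;
 *	...
 *	unregister_chrdev_region(base, 16);
 */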

/**
 * alloc_chrdev_region() - register a range of char device numbers
 * @dev: output parameter for first assigned number
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: the name of the associated device or driver
 *
 * Allocates a range of char device numbers.  The major number will be
 * chosen dynamically, and returned (along with the first minor number)
 * in @dev.  Returns zero or a negative error code.
 */
int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
			const char *name)
{
	struct char_device_struct *cd;
	cd = __register_chrdev_region(0, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	*dev = MKDEV(cd->major, cd->baseminor);
	return 0;
}
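
/*
 * Illustrative sketch (hypothetical "mydrv" driver): the usual dynamic
 * pattern is to let the kernel pick the major and then feed the returned
 * dev_t into cdev_add():
 *
 *	dev_t devt;
 *
 *	err = alloc_chrdev_region(&devt, 0, 4, "mydrv");
 *	if (err)
 *		return err;
 *	pr_info("mydrv: got major %d\n", MAJOR(devt));
 *	...
 *	unregister_chrdev_region(devt, 4);
 */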

/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with these devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 *
 * The name of this device has nothing to do with the name of the device in
 * /dev. It only helps to keep track of the different owners of devices. If
 * your module name has only one type of devices it's ok to use e.g. the name
 * of the module here.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}
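
/*
 * Illustrative sketch: __register_chrdev() is normally reached through the
 * register_chrdev() wrapper in <linux/fs.h>, which claims a minor range of
 * one major in a single call (names below are hypothetical):
 *
 *	major = register_chrdev(0, "mydrv", &mydrv_fops);
 *	if (major < 0)
 *		return major;
 *	...
 *	unregister_chrdev(major, "mydrv");
 */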

/**
 * unregister_chrdev_region() - unregister a range of device numbers
 * @from: the first in the range of numbers to unregister
 * @count: the number of device numbers to unregister
 *
 * This function will unregister a range of @count device numbers,
 * starting with @from.  The caller should normally be the one who
 * allocated those numbers in the first place...
 */
void unregister_chrdev_region(dev_t from, unsigned count)
{
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
}
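
/*
 * Illustrative note: register_chrdev_region() and unregister_chrdev_region()
 * walk the requested span one major at a time, so a range that crosses a
 * major boundary is split into per-major reservations. For example (purely
 * hypothetical numbers), a request starting at MKDEV(120, MINORMASK - 1)
 * with a count of 4 is handled as two __register_chrdev_region() calls:
 * two minors on major 120 and two on major 121.
 */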

/**
 * __unregister_chrdev - unregister and destroy a cdev
 * @major: major device number
 * @baseminor: first of the range of minor numbers
 * @count: the number of minor numbers this cdev is occupying
 * @name: name of this range of devices
 *
 * Unregister and destroy the cdev occupying the region described by
 * @major, @baseminor and @count.  This function undoes what
 * __register_chrdev() did.
 */
void __unregister_chrdev(unsigned int major, unsigned int baseminor,
			 unsigned int count, const char *name)
{
	struct char_device_struct *cd;

	cd = __unregister_chrdev_region(major, baseminor, count);
	if (cd && cd->cdev)
		cdev_del(cd->cdev);
	kfree(cd);
}

static DEFINE_SPINLOCK(cdev_lock);

static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get_unless_zero(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}

void cdev_put(struct cdev *p)
{
	if (p) {
		struct module *owner = p->owner;
		kobject_put(&p->kobj);
		module_put(owner);
	}
}

/*
 * Called every time a character special file is opened
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *fops;
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* Check i_cdev again in case somebody beat us to it while
		   we dropped the lock. */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	fops = fops_get(p->ops);
	if (!fops)
		goto out_cdev_put;

	replace_fops(filp, fops);
	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}

void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&cdev_lock);
}

static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}

/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
	.llseek = noop_llseek,
};

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
	struct cdev *p = data;
	return &p->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
	struct cdev *p = data;
	return cdev_get(p) ? 0 : -1;
}

/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers corresponding to this
 *         device
 *
 * cdev_add() adds the device represented by @p to the system, making it
 * live immediately.  A negative error code is returned on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	int error;

	p->dev = dev;
	p->count = count;

	if (WARN_ON(dev == WHITEOUT_DEV)) {
		error = -EBUSY;
		goto err;
	}

	error = kobj_map(cdev_map, dev, count, NULL,
			 exact_match, exact_lock, p);
	if (error)
		goto err;

	kobject_get(p->kobj.parent);

	return 0;

err:
	kfree_const(p->kobj.name);
	p->kobj.name = NULL;
	return error;
}
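
/*
 * Illustrative sketch (hypothetical names): cdev_add() is the point at which
 * userspace can start opening the device, so the fops and any private state
 * must be ready before the call:
 *
 *	static struct cdev my_cdev;
 *
 *	cdev_init(&my_cdev, &my_fops);
 *	my_cdev.owner = THIS_MODULE;
 *	err = cdev_add(&my_cdev, devt, 1);
 *	if (err)
 *		goto err_unregister_region;
 */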

/**
 * cdev_set_parent() - set the parent kobject for a char device
 * @p: the cdev structure
 * @kobj: the kobject to take a reference to
 *
 * cdev_set_parent() sets a parent kobject which will be referenced
 * appropriately so the parent is not freed before the cdev. This
 * should be called before cdev_add.
 */
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
{
	WARN_ON(!kobj->state_initialized);
	p->kobj.parent = kobj;
}

/**
 * cdev_device_add() - add a char device and its corresponding
 *	struct device, linking them together
 * @dev: the device structure
 * @cdev: the cdev structure
 *
 * cdev_device_add() adds the char device represented by @cdev to the system,
 * just as cdev_add() does. It then adds @dev to the system using device_add().
 * The dev_t for the char device will be taken from the struct device which
 * needs to be initialized first. This helper function correctly takes a
 * reference to the parent device so the parent will not get released until
 * all references to the cdev are released.
 *
 * This helper uses dev->devt for the device number. If it is not set
 * it will not add the cdev and it will be equivalent to device_add.
 *
 * This function should be used whenever the struct cdev and the
 * struct device are members of the same structure whose lifetime is
 * managed by the struct device.
 *
 * NOTE: Callers must assume that userspace was able to open the cdev and
 * can call cdev fops callbacks at any time, even if this function fails.
 */
int cdev_device_add(struct cdev *cdev, struct device *dev)
{
	int rc = 0;

	if (dev->devt) {
		cdev_set_parent(cdev, &dev->kobj);

		rc = cdev_add(cdev, dev->devt, 1);
		if (rc)
			return rc;
	}

	rc = device_add(dev);
	if (rc && dev->devt)
		cdev_del(cdev);

	return rc;
}
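
/*
 * Illustrative sketch (hypothetical "mydrv" structure): the intended pattern
 * is a driver-private object that embeds both halves and is torn down
 * through the struct device's lifetime:
 *
 *	struct mydrv_device {
 *		struct device dev;
 *		struct cdev cdev;
 *	};
 *
 *	device_initialize(&mydev->dev);
 *	mydev->dev.devt = devt;
 *	cdev_init(&mydev->cdev, &mydrv_fops);
 *	err = cdev_device_add(&mydev->cdev, &mydev->dev);
 *	...
 *	cdev_device_del(&mydev->cdev, &mydev->dev);
 */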

/**
 * cdev_device_del() - inverse of cdev_device_add
 * @dev: the device structure
 * @cdev: the cdev structure
 *
 * cdev_device_del() is a helper function to call cdev_del and device_del.
 * It should be used whenever cdev_device_add is used.
 *
 * If dev->devt is not set it will not remove the cdev and will be equivalent
 * to device_del.
 *
 * NOTE: This guarantees that associated sysfs callbacks are not running
 * or runnable, however any cdevs already open will remain and their fops
 * will still be callable even after this function returns.
 */
void cdev_device_del(struct cdev *cdev, struct device *dev)
{
	device_del(dev);
	if (dev->devt)
		cdev_del(cdev);
}

static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}

/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * cdev_del() removes @p from the system, possibly freeing the structure
 * itself.
 *
 * NOTE: This guarantees that cdev device will no longer be able to be
 * opened, however any cdevs already open will remain and their fops will
 * still be callable even after cdev_del returns.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}


static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kobject_put(parent);
}

static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kfree(p);
	kobject_put(parent);
}

static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};

/**
 * cdev_alloc() - allocate a cdev structure
 *
 * Allocates and returns a cdev structure, or NULL on failure.
 */
struct cdev *cdev_alloc(void)
{
	struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
	if (p) {
		INIT_LIST_HEAD(&p->list);
		kobject_init(&p->kobj, &ktype_cdev_dynamic);
	}
	return p;
}
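
/*
 * Illustrative note: cdev_alloc() pairs the cdev with ktype_cdev_dynamic, so
 * the structure is freed automatically when its last reference drops, while
 * cdev_init() uses ktype_cdev_default for cdevs embedded in a larger,
 * caller-owned structure. A minimal dynamic-allocation sketch (hypothetical
 * fops name):
 *
 *	struct cdev *c = cdev_alloc();
 *
 *	if (!c)
 *		return -ENOMEM;
 *	c->owner = THIS_MODULE;
 *	c->ops = &mydrv_fops;
 *	err = cdev_add(c, devt, 1);
 */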

/**
 * cdev_init() - initialize a cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, remembering @fops, making it ready to add to the
 * system with cdev_add().
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}

static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}

void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}


/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_set_parent);
EXPORT_SYMBOL(cdev_device_add);
EXPORT_SYMBOL(cdev_device_del);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);