1
2
3
4
5
6
7#include <linux/init.h>
8#include <linux/fs.h>
9#include <linux/kdev_t.h>
10#include <linux/slab.h>
11#include <linux/string.h>
12
13#include <linux/major.h>
14#include <linux/errno.h>
15#include <linux/module.h>
16#include <linux/seq_file.h>
17
18#include <linux/kobject.h>
19#include <linux/kobj_map.h>
20#include <linux/cdev.h>
21#include <linux/mutex.h>
22#include <linux/backing-dev.h>
23#include <linux/tty.h>
24
25#include "internal.h"
26
/* Map from dev_t to the cdev servicing it; populated by cdev_add(). */
static struct kobj_map *cdev_map;

/* Protects the chrdevs[] hash table below. */
static DEFINE_MUTEX(chrdevs_lock);

/*
 * Registered char-device number regions, hashed by major
 * (see major_to_index()).  Each bucket is a singly linked list kept
 * ordered by (major, baseminor) — see __register_chrdev_region().
 */
static struct char_device_struct {
	struct char_device_struct *next;	/* next region in this hash bucket */
	unsigned int major;
	unsigned int baseminor;	/* first minor of the region */
	int minorct;		/* number of minors in the region */
	char name[64];		/* driver name, shown in /proc/devices */
	struct cdev *cdev;	/* set only by __register_chrdev() */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
39
40
41static inline int major_to_index(unsigned major)
42{
43 return major % CHRDEV_MAJOR_HASH_SIZE;
44}
45
46#ifdef CONFIG_PROC_FS
47
48void chrdev_show(struct seq_file *f, off_t offset)
49{
50 struct char_device_struct *cd;
51
52 if (offset < CHRDEV_MAJOR_HASH_SIZE) {
53 mutex_lock(&chrdevs_lock);
54 for (cd = chrdevs[offset]; cd; cd = cd->next)
55 seq_printf(f, "%3d %s\n", cd->major, cd->name);
56 mutex_unlock(&chrdevs_lock);
57 }
58}
59
60#endif
61
62
63
64
65
66
67
68
69
70
71
72
73static struct char_device_struct *
74__register_chrdev_region(unsigned int major, unsigned int baseminor,
75 int minorct, const char *name)
76{
77 struct char_device_struct *cd, **cp;
78 int ret = 0;
79 int i;
80
81 cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
82 if (cd == NULL)
83 return ERR_PTR(-ENOMEM);
84
85 mutex_lock(&chrdevs_lock);
86
87
88 if (major == 0) {
89 for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
90 if (chrdevs[i] == NULL)
91 break;
92 }
93
94 if (i < CHRDEV_MAJOR_DYN_END)
95 pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range\n",
96 name, i);
97
98 if (i == 0) {
99 ret = -EBUSY;
100 goto out;
101 }
102 major = i;
103 }
104
105 cd->major = major;
106 cd->baseminor = baseminor;
107 cd->minorct = minorct;
108 strlcpy(cd->name, name, sizeof(cd->name));
109
110 i = major_to_index(major);
111
112 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
113 if ((*cp)->major > major ||
114 ((*cp)->major == major &&
115 (((*cp)->baseminor >= baseminor) ||
116 ((*cp)->baseminor + (*cp)->minorct > baseminor))))
117 break;
118
119
120 if (*cp && (*cp)->major == major) {
121 int old_min = (*cp)->baseminor;
122 int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
123 int new_min = baseminor;
124 int new_max = baseminor + minorct - 1;
125
126
127 if (new_max >= old_min && new_max <= old_max) {
128 ret = -EBUSY;
129 goto out;
130 }
131
132
133 if (new_min <= old_max && new_min >= old_min) {
134 ret = -EBUSY;
135 goto out;
136 }
137 }
138
139 cd->next = *cp;
140 *cp = cd;
141 mutex_unlock(&chrdevs_lock);
142 return cd;
143out:
144 mutex_unlock(&chrdevs_lock);
145 kfree(cd);
146 return ERR_PTR(ret);
147}
148
149static struct char_device_struct *
150__unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
151{
152 struct char_device_struct *cd = NULL, **cp;
153 int i = major_to_index(major);
154
155 mutex_lock(&chrdevs_lock);
156 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
157 if ((*cp)->major == major &&
158 (*cp)->baseminor == baseminor &&
159 (*cp)->minorct == minorct)
160 break;
161 if (*cp) {
162 cd = *cp;
163 *cp = cd->next;
164 }
165 mutex_unlock(&chrdevs_lock);
166 return cd;
167}
168
169
170
171
172
173
174
175
176
177
/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	/* A range may span several majors: register one chunk per major. */
	for (n = from; n < to; n = next) {
		/* next = first number of the following major, clamped to @to */
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/* Roll back every chunk registered before the failure point @n. */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
202
203
204
205
206
207
208
209
210
211
212
213
214int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
215 const char *name)
216{
217 struct char_device_struct *cd;
218 cd = __register_chrdev_region(0, baseminor, count, name);
219 if (IS_ERR(cd))
220 return PTR_ERR(cd);
221 *dev = MKDEV(cd->major, cd->baseminor);
222 return 0;
223}
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
/**
 * __register_chrdev() - create and register a cdev occupying a range of minors
 * @major: major device number or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations associated with this devices
 *
 * If @major == 0 this function will dynamically allocate a major and return
 * its number.
 *
 * If @major > 0 this function will attempt to reserve a device with the given
 * major number and will return zero on success.
 *
 * Returns a -ve errno on failure.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	/* reserve the device-number region first */
	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	/* remember the cdev so __unregister_chrdev() can cdev_del() it */
	cd->cdev = cdev;

	/* dynamic allocation reports the chosen major to the caller */
	return major ? 0 : cd->major;
out:
	/* drop the cdev's only reference; its release frees it */
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}
279
280
281
282
283
284
285
286
287
288
289void unregister_chrdev_region(dev_t from, unsigned count)
290{
291 dev_t to = from + count;
292 dev_t n, next;
293
294 for (n = from; n < to; n = next) {
295 next = MKDEV(MAJOR(n)+1, 0);
296 if (next > to)
297 next = to;
298 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
299 }
300}
301
302
303
304
305
306
307
308
309
310
311
312
313void __unregister_chrdev(unsigned int major, unsigned int baseminor,
314 unsigned int count, const char *name)
315{
316 struct char_device_struct *cd;
317
318 cd = __unregister_chrdev_region(major, baseminor, count);
319 if (cd && cd->cdev)
320 cdev_del(cd->cdev);
321 kfree(cd);
322}
323
324static DEFINE_SPINLOCK(cdev_lock);
325
/*
 * Take a reference on @p for an open.  The owning module is pinned
 * first so its code cannot be unloaded while we hold the cdev; if the
 * subsequent kobject_get() fails, the module reference is dropped
 * again.  Returns the cdev's kobject on success, NULL on failure.
 */
static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get(&p->kobj);
	if (!kobj)
		module_put(owner);
	return kobj;
}
338
339void cdev_put(struct cdev *p)
340{
341 if (p) {
342 struct module *owner = p->owner;
343 kobject_put(&p->kobj);
344 module_put(owner);
345 }
346}
347
348
349
350
/*
 * Called when a character special file is opened: find the cdev for
 * inode->i_rdev, cache it in inode->i_cdev, and switch filp over to
 * the driver's file_operations.
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *fops;
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		/* kobj_lookup() may sleep (module loading), so drop the lock */
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);

		/*
		 * Re-check: another opener may have cached a cdev while the
		 * lock was dropped.  kobj_lookup() already took a reference
		 * on @new (via exact_lock()).
		 */
		p = inode->i_cdev;
		if (!p) {
			/* cache our cdev; keep the reference from the lookup */
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	/* drop the lookup reference if we lost the race (NULL is a no-op) */
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	fops = fops_get(p->ops);
	if (!fops)
		goto out_cdev_put;

	/* replace def_chr_fops with the driver's fops, then open for real */
	replace_fops(filp, fops);
	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

 out_cdev_put:
	cdev_put(p);
	return ret;
}
403
404void cd_forget(struct inode *inode)
405{
406 spin_lock(&cdev_lock);
407 list_del_init(&inode->i_devices);
408 inode->i_cdev = NULL;
409 inode->i_mapping = &inode->i_data;
410 spin_unlock(&cdev_lock);
411}
412
/*
 * Detach every inode still pointing at @cdev, clearing their i_cdev
 * cache.  Called from the cdev kobject release functions so no stale
 * inode->i_cdev pointers outlive the cdev.
 */
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
424
425
426
427
428
429
/*
 * Dummy default file operations for character special files: the only
 * thing they do is provide the open() that then swaps in the correct
 * driver operations for the device being opened (see chrdev_open()).
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
	.llseek = noop_llseek,
};
434
435static struct kobject *exact_match(dev_t dev, int *part, void *data)
436{
437 struct cdev *p = data;
438 return &p->kobj;
439}
440
441static int exact_lock(dev_t dev, void *data)
442{
443 struct cdev *p = data;
444 return cdev_get(p) ? 0 : -1;
445}
446
447
448
449
450
451
452
453
454
455
456
457int cdev_add(struct cdev *p, dev_t dev, unsigned count)
458{
459 int error;
460
461 p->dev = dev;
462 p->count = count;
463
464 error = kobj_map(cdev_map, dev, count, NULL,
465 exact_match, exact_lock, p);
466 if (error)
467 return error;
468
469 kobject_get(p->kobj.parent);
470
471 return 0;
472}
473
474
475
476
477
478
479
480
481
482
/**
 * cdev_set_parent() - set the parent kobject for a char device
 * @p: the cdev structure
 * @kobj: the kobject to take a reference to
 *
 * Must be called before cdev_add(): cdev_add() takes the actual
 * reference on the parent (see kobject_get() there), so @kobj must
 * already be initialized.
 */
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
{
	WARN_ON(!kobj->state_initialized);
	p->kobj.parent = kobj;
}
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512int cdev_device_add(struct cdev *cdev, struct device *dev)
513{
514 int rc = 0;
515
516 if (dev->devt) {
517 cdev_set_parent(cdev, &dev->kobj);
518
519 rc = cdev_add(cdev, dev->devt, 1);
520 if (rc)
521 return rc;
522 }
523
524 rc = device_add(dev);
525 if (rc)
526 cdev_del(cdev);
527
528 return rc;
529}
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
/**
 * cdev_device_del() - inverse of cdev_device_add()
 * @cdev: the cdev structure
 * @dev: the device structure
 *
 * The device is deleted first so no new opens can arrive; the cdev is
 * removed only if cdev_device_add() added one (i.e. dev->devt is set).
 */
void cdev_device_del(struct cdev *cdev, struct device *dev)
{
	device_del(dev);
	if (dev->devt)
		cdev_del(cdev);
}
552
/* Remove @count device numbers starting at @dev from the cdev map. */
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}
557
558
559
560
561
562
563
564
565
566
567
568
/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * Unmaps the device numbers and drops the cdev's reference, possibly
 * freeing the structure itself (via its kobject release).
 *
 * NOTE: already-open files keep their own reference (taken in
 * chrdev_open()), so the structure may outlive this call; only new
 * opens are prevented.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
574
575
/*
 * kobject release for cdevs set up with cdev_init(): the structure is
 * caller-owned (often embedded), so it is purged but NOT freed here.
 */
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	/* drop the parent reference taken in cdev_add() */
	kobject_put(parent);
}
584
/*
 * kobject release for cdevs allocated with cdev_alloc(): same as the
 * default release, but the structure itself is freed here too.
 */
static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kfree(p);
	/* drop the parent reference taken in cdev_add() */
	kobject_put(parent);
}
594
/* kobj_type for caller-owned cdevs (cdev_init()) — no kfree on release */
static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

/* kobj_type for cdev_alloc()'d cdevs — release also frees the struct */
static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};
602
603
604
605
606
607
608struct cdev *cdev_alloc(void)
609{
610 struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
611 if (p) {
612 INIT_LIST_HEAD(&p->list);
613 kobject_init(&p->kobj, &ktype_cdev_dynamic);
614 }
615 return p;
616}
617
618
619
620
621
622
623
624
625
/**
 * cdev_init() - initialize a caller-owned cdev structure
 * @cdev: the structure to initialize
 * @fops: the file_operations for this device
 *
 * Initializes @cdev, making it ready to add to the system with
 * cdev_add().  Unlike cdev_alloc(), the memory belongs to the caller
 * and is not freed on release (see ktype_cdev_default).
 */
void cdev_init(struct cdev *cdev, const struct file_operations *fops)
{
	/* zero first: everything below relies on a clean structure */
	memset(cdev, 0, sizeof *cdev);
	INIT_LIST_HEAD(&cdev->list);
	kobject_init(&cdev->kobj, &ktype_cdev_default);
	cdev->ops = fops;
}
633
/*
 * Fallback probe for unmapped device numbers: try to load a driver
 * module by alias before giving up (the open will then retry lookup).
 */
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
641
/* Boot-time setup: create the dev_t -> cdev map with base_probe fallback. */
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
646
647
648
649EXPORT_SYMBOL(register_chrdev_region);
650EXPORT_SYMBOL(unregister_chrdev_region);
651EXPORT_SYMBOL(alloc_chrdev_region);
652EXPORT_SYMBOL(cdev_init);
653EXPORT_SYMBOL(cdev_alloc);
654EXPORT_SYMBOL(cdev_del);
655EXPORT_SYMBOL(cdev_add);
656EXPORT_SYMBOL(cdev_set_parent);
657EXPORT_SYMBOL(cdev_device_add);
658EXPORT_SYMBOL(cdev_device_del);
659EXPORT_SYMBOL(__register_chrdev);
660EXPORT_SYMBOL(__unregister_chrdev);
661