1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/kernel.h>
19#include <linux/device.h>
20#include <linux/slab.h>
21#include <linux/fs.h>
22#include <linux/errno.h>
23#include <linux/types.h>
24#include <linux/fcntl.h>
25#include <linux/poll.h>
26#include <linux/init.h>
27#include <linux/ioctl.h>
28#include <linux/cdev.h>
29#include <linux/sched/signal.h>
30#include <linux/uuid.h>
31#include <linux/compat.h>
32#include <linux/jiffies.h>
33#include <linux/interrupt.h>
34
35#include <linux/mei.h>
36
37#include "mei_dev.h"
38#include "client.h"
39
40
41
42
43
44
45
46
47
48static int mei_open(struct inode *inode, struct file *file)
49{
50 struct mei_device *dev;
51 struct mei_cl *cl;
52
53 int err;
54
55 dev = container_of(inode->i_cdev, struct mei_device, cdev);
56 if (!dev)
57 return -ENODEV;
58
59 mutex_lock(&dev->device_lock);
60
61 if (dev->dev_state != MEI_DEV_ENABLED) {
62 dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
63 mei_dev_state_str(dev->dev_state));
64 err = -ENODEV;
65 goto err_unlock;
66 }
67
68 cl = mei_cl_alloc_linked(dev);
69 if (IS_ERR(cl)) {
70 err = PTR_ERR(cl);
71 goto err_unlock;
72 }
73
74 cl->fp = file;
75 file->private_data = cl;
76
77 mutex_unlock(&dev->device_lock);
78
79 return nonseekable_open(inode, file);
80
81err_unlock:
82 mutex_unlock(&dev->device_lock);
83 return err;
84}
85
86
87
88
89
90
91
92
93
/**
 * mei_release - the release function (called on last close of the fd)
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	rets = mei_cl_disconnect(cl);

	/* flush queues even if disconnect failed, so no cb keeps a
	 * reference to this file after it is gone */
	mei_cl_flush_queues(cl, file);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);

	file->private_data = NULL;

	kfree(cl);

	mutex_unlock(&dev->device_lock);
	return rets;
}
121
122
123
124
125
126
127
128
129
130
131
132
/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	struct mei_cl_cb *cb = NULL;
	bool nonblock = !!(file->f_flags & O_NONBLOCK);
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (ubuf == NULL) {
		rets = -EMSGSIZE;
		goto out;
	}

	/* a read completed earlier may already be waiting for this file */
	cb = mei_cl_read_cb(cl, file);
	if (cb)
		goto copy_buffer;

	/* no pending completed read: any stale offset is meaningless */
	if (*offset > 0)
		*offset = 0;

	/* -EBUSY means a read is already in flight; wait for it below */
	rets = mei_cl_read_start(cl, length, file);
	if (rets && rets != -EBUSY) {
		cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
		goto out;
	}

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* drop device_lock while sleeping; wake on completion or disconnect */
	mutex_unlock(&dev->device_lock);
	if (wait_event_interruptible(cl->rx_wait,
				     !list_empty(&cl->rd_completed) ||
				     !mei_cl_is_connected(cl))) {
		if (signal_pending(current))
			return -EINTR;
		return -ERESTARTSYS;
	}
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy_buffer:
	/* now copy the completed buffer to user space */
	if (cb->status) {
		rets = cb->status;
		cl_dbg(dev, cl, "read operation failed %zd\n", rets);
		goto free;
	}

	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
	       cb->buf.size, cb->buf_idx, *offset);
	if (*offset >= cb->buf_idx) {
		rets = 0;
		goto free;
	}

	/* copy at most the remainder of the completed message */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
		dev_dbg(dev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;

	/* not all data was consumed: keep the cb for the next read */
	if (*offset < cb->buf_idx)
		goto out;

free:
	mei_io_cb_free(cb);
	*offset = 0;

out:
	cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
243
244
245
246
247
248
249
250
251
252
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer (unused)
 *
 * Return: >=0 data length on success , <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_err(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}

	/* the FW-side client may have been removed while we were connected */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	/* a single write must fit into one MEI message */
	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	/* tx flow control: wait (lock dropped) until the queue drains,
	 * a write completes, or the client disconnects */
	while (cl->tx_cb_queued >= dev->tx_queue_limit) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
					(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	rets = copy_from_user(cb->buf.data, ubuf, length);
	if (rets) {
		dev_dbg(dev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		mei_io_cb_free(cb);
		goto out;
	}

	/* on success mei_cl_write owns and eventually frees the cb */
	rets = mei_cl_write(cl, cb);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
334
335
336
337
338
339
340
341
342
343
344
/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on error
 */
static int mei_ioctl_connect_client(struct file *file,
			struct mei_connect_client_data *data)
{
	struct mei_device *dev;
	struct mei_client *client;
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int rets;

	cl = file->private_data;
	dev = cl->dev;

	if (dev->dev_state != MEI_DEV_ENABLED)
		return -ENODEV;

	/* only a fresh or disconnected client may connect */
	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED)
		return -EBUSY;

	/* find ME client we're trying to connect to */
	me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
	if (!me_cl) {
		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			&data->in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	if (me_cl->props.fixed_address) {
		/* fixed-address clients are gated either by the module
		 * override or by FW fixed-address support */
		bool forbidden = dev->override_fixed_address ?
			 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
		if (forbidden) {
			dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
				&data->in_client_uuid);
			rets = -ENOTTY;
			goto end;
		}
	}

	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
			me_cl->client_id);
	dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
			me_cl->props.protocol_version);
	dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
			me_cl->props.max_msg_length);

	/* prepare the output buffer */
	client = &data->out_client_properties;
	client->max_msg_length = me_cl->props.max_msg_length;
	client->protocol_version = me_cl->props.protocol_version;
	dev_dbg(dev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, me_cl, file);

end:
	mei_me_cl_put(me_cl);
	return rets;
}
403
404
405
406
407
408
409
410
411
412
413static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
414{
415 struct mei_cl *cl = file->private_data;
416
417 if (request != MEI_HBM_NOTIFICATION_START &&
418 request != MEI_HBM_NOTIFICATION_STOP)
419 return -EINVAL;
420
421 return mei_cl_notify_request(cl, file, (u8)request);
422}
423
424
425
426
427
428
429
430
431
432static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
433{
434 struct mei_cl *cl = file->private_data;
435 bool notify_ev;
436 bool block = (file->f_flags & O_NONBLOCK) == 0;
437 int rets;
438
439 rets = mei_cl_notify_get(cl, block, ¬ify_ev);
440 if (rets)
441 return rets;
442
443 *notify_get = notify_ev ? 1 : 0;
444 return 0;
445}
446
447
448
449
450
451
452
453
454
455
456static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
457{
458 struct mei_device *dev;
459 struct mei_cl *cl = file->private_data;
460 struct mei_connect_client_data connect_data;
461 u32 notify_get, notify_req;
462 int rets;
463
464
465 if (WARN_ON(!cl || !cl->dev))
466 return -ENODEV;
467
468 dev = cl->dev;
469
470 dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);
471
472 mutex_lock(&dev->device_lock);
473 if (dev->dev_state != MEI_DEV_ENABLED) {
474 rets = -ENODEV;
475 goto out;
476 }
477
478 switch (cmd) {
479 case IOCTL_MEI_CONNECT_CLIENT:
480 dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
481 if (copy_from_user(&connect_data, (char __user *)data,
482 sizeof(struct mei_connect_client_data))) {
483 dev_dbg(dev->dev, "failed to copy data from userland\n");
484 rets = -EFAULT;
485 goto out;
486 }
487
488 rets = mei_ioctl_connect_client(file, &connect_data);
489 if (rets)
490 goto out;
491
492
493 if (copy_to_user((char __user *)data, &connect_data,
494 sizeof(struct mei_connect_client_data))) {
495 dev_dbg(dev->dev, "failed to copy data to userland\n");
496 rets = -EFAULT;
497 goto out;
498 }
499
500 break;
501
502 case IOCTL_MEI_NOTIFY_SET:
503 dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
504 if (copy_from_user(¬ify_req,
505 (char __user *)data, sizeof(notify_req))) {
506 dev_dbg(dev->dev, "failed to copy data from userland\n");
507 rets = -EFAULT;
508 goto out;
509 }
510 rets = mei_ioctl_client_notify_request(file, notify_req);
511 break;
512
513 case IOCTL_MEI_NOTIFY_GET:
514 dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
515 rets = mei_ioctl_client_notify_get(file, ¬ify_get);
516 if (rets)
517 goto out;
518
519 dev_dbg(dev->dev, "copy connect data to user\n");
520 if (copy_to_user((char __user *)data,
521 ¬ify_get, sizeof(notify_get))) {
522 dev_dbg(dev->dev, "failed to copy data to userland\n");
523 rets = -EFAULT;
524 goto out;
525
526 }
527 break;
528
529 default:
530 rets = -ENOIOCTLCMD;
531 }
532
533out:
534 mutex_unlock(&dev->device_lock);
535 return rets;
536}
537
538
539
540
541
542
543
544
545
546
#ifdef CONFIG_COMPAT
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success , <0 on error
 */
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif /* CONFIG_COMPAT */
554
555
556
557
558
559
560
561
562
563
/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static __poll_t mei_poll(struct file *file, poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	__poll_t mask = 0;
	bool notify_en;

	if (WARN_ON(!cl || !cl->dev))
		return EPOLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	/* EPOLLPRI is used for event notifications, when enabled */
	notify_en = cl->notify_en && (req_events & EPOLLPRI);

	if (dev->dev_state != MEI_DEV_ENABLED ||
	    !mei_cl_is_connected(cl)) {
		mask = EPOLLERR;
		goto out;
	}

	if (notify_en) {
		poll_wait(file, &cl->ev_wait, wait);
		if (cl->notify_ev)
			mask |= EPOLLPRI;
	}

	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		poll_wait(file, &cl->rx_wait, wait);

		if (!list_empty(&cl->rd_completed))
			mask |= EPOLLIN | EPOLLRDNORM;
		else
			/* no data yet: kick off a read so a later poll
			 * can report readiness (result intentionally
			 * ignored here) */
			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
	}

	if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
		poll_wait(file, &cl->tx_wait, wait);
		if (cl->tx_cb_queued < dev->tx_queue_limit)
			mask |= EPOLLOUT | EPOLLWRNORM;
	}

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
612
613
614
615
616
617
618
619
620static bool mei_cl_is_write_queued(struct mei_cl *cl)
621{
622 struct mei_device *dev = cl->dev;
623 struct mei_cl_cb *cb;
624
625 list_for_each_entry(cb, &dev->write_list, list)
626 if (cb->cl == cl)
627 return true;
628 list_for_each_entry(cb, &dev->write_waiting_list, list)
629 if (cb->cl == cl)
630 return true;
631 return false;
632}
633
634
635
636
637
638
639
640
641
642
643
/**
 * mei_fsync - the fsync handler: wait until all queued writes complete
 *
 * @fp:       pointer to file structure
 * @start:    unused
 * @end:      unused
 * @datasync: unused
 *
 * Return: 0 on success, -ENODEV if client is not connected
 */
static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
{
	struct mei_cl *cl = fp->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* re-check after each wakeup; device_lock is dropped while waiting */
	while (mei_cl_is_write_queued(cl)) {
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				!mei_cl_is_connected(cl));
		mutex_lock(&dev->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}
	rets = 0;
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
683
684
685
686
687
688
689
690
691
692
693
694
/**
 * mei_fasync - asynchronous io support
 *
 * @fd: file descriptor
 * @file: pointer to file structure
 * @band: band bitmap
 *
 * Return: negative on error,
 *         0 if it did no changes,
 *         and positive a process was added or deleted
 */
static int mei_fasync(int fd, struct file *file, int band)
{

	struct mei_cl *cl = file->private_data;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	return fasync_helper(fd, file, band, &cl->ev_async);
}
705
706
707
708
709
710
711
712
713
714
715static ssize_t fw_status_show(struct device *device,
716 struct device_attribute *attr, char *buf)
717{
718 struct mei_device *dev = dev_get_drvdata(device);
719 struct mei_fw_status fw_status;
720 int err, i;
721 ssize_t cnt = 0;
722
723 mutex_lock(&dev->device_lock);
724 err = mei_fw_status(dev, &fw_status);
725 mutex_unlock(&dev->device_lock);
726 if (err) {
727 dev_err(device, "read fw_status error = %d\n", err);
728 return err;
729 }
730
731 for (i = 0; i < fw_status.count; i++)
732 cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
733 fw_status.status[i]);
734 return cnt;
735}
736static DEVICE_ATTR_RO(fw_status);
737
738
739
740
741
742
743
744
745
746
/**
 * hbm_ver_show - display HBM protocol version negotiated with FW
 *
 * @device: the device structure
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct hbm_version ver;

	/* snapshot under lock, format after unlocking */
	mutex_lock(&dev->device_lock);
	ver = dev->version;
	mutex_unlock(&dev->device_lock);

	return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
}
static DEVICE_ATTR_RO(hbm_ver);
760
761
762
763
764
765
766
767
768
769
/**
 * hbm_ver_drv_show - display HBM protocol version supported by the driver
 *
 * @device: the device structure
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_drv_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
}
static DEVICE_ATTR_RO(hbm_ver_drv);
776
777static ssize_t tx_queue_limit_show(struct device *device,
778 struct device_attribute *attr, char *buf)
779{
780 struct mei_device *dev = dev_get_drvdata(device);
781 u8 size = 0;
782
783 mutex_lock(&dev->device_lock);
784 size = dev->tx_queue_limit;
785 mutex_unlock(&dev->device_lock);
786
787 return snprintf(buf, PAGE_SIZE, "%u\n", size);
788}
789
790static ssize_t tx_queue_limit_store(struct device *device,
791 struct device_attribute *attr,
792 const char *buf, size_t count)
793{
794 struct mei_device *dev = dev_get_drvdata(device);
795 u8 limit;
796 unsigned int inp;
797 int err;
798
799 err = kstrtouint(buf, 10, &inp);
800 if (err)
801 return err;
802 if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
803 return -EINVAL;
804 limit = inp;
805
806 mutex_lock(&dev->device_lock);
807 dev->tx_queue_limit = limit;
808 mutex_unlock(&dev->device_lock);
809
810 return count;
811}
812static DEVICE_ATTR_RW(tx_queue_limit);
813
814
815
816
817
818
819
820
821
822
823static ssize_t fw_ver_show(struct device *device,
824 struct device_attribute *attr, char *buf)
825{
826 struct mei_device *dev = dev_get_drvdata(device);
827 struct mei_fw_version *ver;
828 ssize_t cnt = 0;
829 int i;
830
831 ver = dev->fw_ver;
832
833 for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
834 cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
835 ver[i].platform, ver[i].major, ver[i].minor,
836 ver[i].hotfix, ver[i].buildno);
837 return cnt;
838}
839static DEVICE_ATTR_RO(fw_ver);
840
/* sysfs attributes exposed on the mei class device */
static struct attribute *mei_attrs[] = {
	&dev_attr_fw_status.attr,
	&dev_attr_hbm_ver.attr,
	&dev_attr_hbm_ver_drv.attr,
	&dev_attr_tx_queue_limit.attr,
	&dev_attr_fw_ver.attr,
	NULL
};
ATTRIBUTE_GROUPS(mei);
850
851
852
853
/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.fsync = mei_fsync,
	.fasync = mei_fasync,
	.llseek = no_llseek
};
869
/* mei char device class and the reserved char device-number region */
static struct class *mei_class;
static dev_t mei_devt;
/* maximum number of mei char devices (one minor each) */
#define MEI_MAX_DEVS MINORMASK
/* protects minor number allocations in mei_idr */
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);
875
876
877
878
879
880
881
882
883static int mei_minor_get(struct mei_device *dev)
884{
885 int ret;
886
887 mutex_lock(&mei_minor_lock);
888 ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
889 if (ret >= 0)
890 dev->minor = ret;
891 else if (ret == -ENOSPC)
892 dev_err(dev->dev, "too many mei devices\n");
893
894 mutex_unlock(&mei_minor_lock);
895 return ret;
896}
897
898
899
900
901
902
/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev:  device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
	mutex_lock(&mei_minor_lock);
	idr_remove(&mei_idr, dev->minor);
	mutex_unlock(&mei_minor_lock);
}
909
/**
 * mei_register - register the mei device: char device, sysfs class
 *                device and debugfs entries
 *
 * @dev: mei device structure
 * @parent: the parent (bus) device
 *
 * Return: 0 on success, <0 on error
 */
int mei_register(struct mei_device *dev, struct device *parent)
{
	struct device *clsdev; /* class device */
	int ret, devno;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	/* Fill in the data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);
	cdev_init(&dev->cdev, &mei_fops);
	dev->cdev.owner = parent->driver->owner;

	/* Add the device */
	ret = cdev_add(&dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_dev_add;
	}

	clsdev = device_create_with_groups(mei_class, parent, devno,
					   dev, mei_groups,
					   "mei%d", dev->minor);

	if (IS_ERR(clsdev)) {
		dev_err(parent, "unable to create device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		ret = PTR_ERR(clsdev);
		goto err_dev_create;
	}

	ret = mei_dbgfs_register(dev, dev_name(clsdev));
	if (ret) {
		dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
		goto err_dev_dbgfs;
	}

	return 0;

	/* unwind in reverse order of the steps above */
err_dev_dbgfs:
	device_destroy(mei_class, devno);
err_dev_create:
	cdev_del(&dev->cdev);
err_dev_add:
	mei_minor_free(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);
960
961void mei_deregister(struct mei_device *dev)
962{
963 int devno;
964
965 devno = dev->cdev.dev;
966 cdev_del(&dev->cdev);
967
968 mei_dbgfs_deregister(dev);
969
970 device_destroy(mei_class, devno);
971
972 mei_minor_free(dev);
973}
974EXPORT_SYMBOL_GPL(mei_deregister);
975
/* module init: create the class, reserve the chrdev region and
 * initialize the mei client bus; unwind in reverse order on failure */
static int __init mei_init(void)
{
	int ret;

	mei_class = class_create(THIS_MODULE, "mei");
	if (IS_ERR(mei_class)) {
		pr_err("couldn't create class\n");
		ret = PTR_ERR(mei_class);
		goto err;
	}

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_destroy(mei_class);
err:
	return ret;
}
1008
/* module exit: release the resources acquired in mei_init() */
static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}
1015
/* module entry/exit points and metadata */
module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");
1022
1023