// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) Intel Corporation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
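
/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */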
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	dev = container_of(inode->i_cdev, struct mei_device, cdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		err = -ENODEV;
		goto err_unlock;
	}

	cl = mei_cl_alloc_linked(dev);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		goto err_unlock;
	}

	cl->fp = file;
	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	return err;
}
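
/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */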
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	rets = mei_cl_disconnect(cl);

	mei_cl_flush_queues(cl, file);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);

	file->private_data = NULL;

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
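
/**
 * mei_read - the read function
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */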
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	struct mei_cl_cb *cb = NULL;
	bool nonblock = !!(file->f_flags & O_NONBLOCK);
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (ubuf == NULL) {
		rets = -EMSGSIZE;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (cb)
		goto copy_buffer;

	/* no pending completed buffer: reset any stale offset and start a new read */
	if (*offset > 0)
		*offset = 0;

	rets = mei_cl_read_start(cl, length, file);
	if (rets && rets != -EBUSY) {
		cl_dbg(dev, cl, "mei start read failure status = %d\n", rets);
		goto out;
	}

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	if (rets == -EBUSY &&
	    !mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, file)) {
		rets = -ENOMEM;
		goto out;
	}

	do {
		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
					     (!list_empty(&cl->rd_completed)) ||
					     (!mei_cl_is_connected(cl)))) {

			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}

		cb = mei_cl_read_cb(cl, file);
	} while (!cb);

copy_buffer:
	/* now copy the data to user space */
	if (cb->status) {
		rets = cb->status;
		cl_dbg(dev, cl, "read operation failed %d\n", rets);
		goto free;
	}

	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
	       cb->buf.size, cb->buf_idx, *offset);
	if (*offset >= cb->buf_idx) {
		rets = 0;
		goto free;
	}

	/* clamp the read length to the data remaining in the completed buffer */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
		dev_dbg(dev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;

	/* not all data was read, keep the cb for the next read */
	if (*offset < cb->buf_idx)
		goto out;

free:
	mei_io_cb_free(cb);
	*offset = 0;

out:
	cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
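
/**
 * mei_write - the write function
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */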
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_err(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}

	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	*offset = 0;
	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	rets = copy_from_user(cb->buf.data, ubuf, length);
	if (rets) {
		dev_dbg(dev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		mei_io_cb_free(cb);
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(cl, cb);
		if (!rets)
			rets = length;
		goto out;
	}

	rets = mei_cl_write(cl, cb, false);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
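
/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: private data of the file object
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */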
static int mei_ioctl_connect_client(struct file *file,
				    struct mei_connect_client_data *data)
{
	struct mei_device *dev;
	struct mei_client *client;
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int rets;

	cl = file->private_data;
	dev = cl->dev;

	if (dev->dev_state != MEI_DEV_ENABLED)
		return -ENODEV;

	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED)
		return -EBUSY;

	/* find the ME client we are trying to connect to */
	me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
	if (!me_cl) {
		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			&data->in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	if (me_cl->props.fixed_address) {
		bool forbidden = dev->override_fixed_address ?
			!dev->allow_fixed_address : !dev->hbm_f_fa_supported;
		if (forbidden) {
			dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
				&data->in_client_uuid);
			rets = -ENOTTY;
			goto end;
		}
	}

	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
		me_cl->client_id);
	dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
		me_cl->props.protocol_version);
	dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
		me_cl->props.max_msg_length);

	/* if we're connecting to the amthif client, reuse the
	 * already established connection
	 */
	if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
		dev_dbg(dev->dev, "FW Client is amthi\n");
		if (!mei_cl_is_connected(&dev->iamthif_cl)) {
			rets = -ENODEV;
			goto end;
		}
		mei_cl_unlink(cl);

		kfree(cl);
		cl = NULL;
		dev->iamthif_open_count++;
		file->private_data = &dev->iamthif_cl;

		client = &data->out_client_properties;
		client->max_msg_length = me_cl->props.max_msg_length;
		client->protocol_version = me_cl->props.protocol_version;
		rets = dev->iamthif_cl.status;

		goto end;
	}

	/* prepare the output buffer */
	client = &data->out_client_properties;
	client->max_msg_length = me_cl->props.max_msg_length;
	client->protocol_version = me_cl->props.protocol_version;
	dev_dbg(dev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, me_cl, file);

end:
	mei_me_cl_put(me_cl);
	return rets;
}
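
/**
 * mei_ioctl_client_notify_request -
 *     propagate event notification request to client
 *
 * @file: pointer to file structure
 * @request: MEI_HBM_NOTIFICATION_START or MEI_HBM_NOTIFICATION_STOP
 *
 * Return: 0 on success, <0 on error
 */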
static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
	struct mei_cl *cl = file->private_data;

	if (request != MEI_HBM_NOTIFICATION_START &&
	    request != MEI_HBM_NOTIFICATION_STOP)
		return -EINVAL;

	return mei_cl_notify_request(cl, file, (u8)request);
}
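
/**
 * mei_ioctl_client_notify_get - wait for notification request
 *
 * @file: pointer to file structure
 * @notify_get: on return, 1 if an event notification was pending, 0 otherwise
 *
 * Return: 0 on success, <0 on error
 */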
static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
	struct mei_cl *cl = file->private_data;
	bool notify_ev;
	bool block = (file->f_flags & O_NONBLOCK) == 0;
	int rets;

	rets = mei_cl_notify_get(cl, block, &notify_ev);
	if (rets)
		return rets;

	*notify_get = notify_ev ? 1 : 0;
	return 0;
}
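
/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */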
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data connect_data;
	u32 notify_get, notify_req;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
		if (copy_from_user(&connect_data, (char __user *)data,
				   sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		rets = mei_ioctl_connect_client(file, &connect_data);
		if (rets)
			goto out;

		/* if all is ok, copy the data back to user space */
		if (copy_to_user((char __user *)data, &connect_data,
				 sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_NOTIFY_SET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
		if (copy_from_user(&notify_req,
				   (char __user *)data, sizeof(notify_req))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		rets = mei_ioctl_client_notify_request(file, notify_req);
		break;

	case IOCTL_MEI_NOTIFY_GET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
		rets = mei_ioctl_client_notify_get(file, &notify_get);
		if (rets)
			goto out;

		dev_dbg(dev->dev, "copy notify_get data to user\n");
		if (copy_to_user((char __user *)data,
				 &notify_get, sizeof(notify_get))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}
		break;

	default:
		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
		rets = -ENOIOCTLCMD;
	}

out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
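
/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * Return: 0 on success, <0 on error
 */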
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif
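
/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll wait structure
 *
 * Return: poll mask
 */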
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;
	bool notify_en;

	if (WARN_ON(!cl || !cl->dev))
		return POLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	notify_en = cl->notify_en && (req_events & POLLPRI);

	if (dev->dev_state != MEI_DEV_ENABLED ||
	    !mei_cl_is_connected(cl)) {
		mask = POLLERR;
		goto out;
	}

	if (notify_en) {
		poll_wait(file, &cl->ev_wait, wait);
		if (cl->notify_ev)
			mask |= POLLPRI;
	}

	if (cl == &dev->iamthif_cl) {
		mask |= mei_amthif_poll(file, wait);
		goto out;
	}

	if (req_events & (POLLIN | POLLRDNORM)) {
		poll_wait(file, &cl->rx_wait, wait);

		if (!list_empty(&cl->rd_completed))
			mask |= POLLIN | POLLRDNORM;
		else
			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
	}

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
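
/**
 * mei_fasync - asynchronous io support
 *
 * @fd: file descriptor
 * @file: pointer to file structure
 * @band: band bitmap
 *
 * Return: negative on error,
 *         0 if it did no changes,
 *         and positive a process was added or deleted
 */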
static int mei_fasync(int fd, struct file *file, int band)
{

	struct mei_cl *cl = file->private_data;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	return fasync_helper(fd, file, band, &cl->ev_async);
}
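
/**
 * fw_status_show - mei device fw_status attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf: char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */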
static ssize_t fw_status_show(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct mei_fw_status fw_status;
	int err, i;
	ssize_t cnt = 0;

	mutex_lock(&dev->device_lock);
	err = mei_fw_status(dev, &fw_status);
	mutex_unlock(&dev->device_lock);
	if (err) {
		dev_err(device, "read fw_status error = %d\n", err);
		return err;
	}

	for (i = 0; i < fw_status.count; i++)
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
				 fw_status.status[i]);
	return cnt;
}
static DEVICE_ATTR_RO(fw_status);

static struct attribute *mei_attrs[] = {
	&dev_attr_fw_status.attr,
	NULL
};
ATTRIBUTE_GROUPS(mei);
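
/*
 * file operations structure used for the mei char device
 */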
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.fasync = mei_fasync,
	.llseek = no_llseek
};

static struct class *mei_class;
static dev_t mei_devt;
#define MEI_MAX_DEVS MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);
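
/**
 * mei_minor_get - obtain next free device minor number
 *
 * @dev: device pointer
 *
 * Return: allocated minor, or -ENOSPC if no free minor left
 */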
static int mei_minor_get(struct mei_device *dev)
{
	int ret;

	mutex_lock(&mei_minor_lock);
	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
	if (ret >= 0)
		dev->minor = ret;
	else if (ret == -ENOSPC)
		dev_err(dev->dev, "too many mei devices\n");

	mutex_unlock(&mei_minor_lock);
	return ret;
}
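
/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev: device pointer
 */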
static void mei_minor_free(struct mei_device *dev)
{
	mutex_lock(&mei_minor_lock);
	idr_remove(&mei_idr, dev->minor);
	mutex_unlock(&mei_minor_lock);
}

int mei_register(struct mei_device *dev, struct device *parent)
{
	struct device *clsdev;
	int ret, devno;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	/* fill in the char device data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);
	cdev_init(&dev->cdev, &mei_fops);
	dev->cdev.owner = parent->driver->owner;

	/* add the char device */
	ret = cdev_add(&dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_dev_add;
	}

	clsdev = device_create_with_groups(mei_class, parent, devno,
					   dev, mei_groups,
					   "mei%d", dev->minor);

	if (IS_ERR(clsdev)) {
		dev_err(parent, "unable to create device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		ret = PTR_ERR(clsdev);
		goto err_dev_create;
	}

	ret = mei_dbgfs_register(dev, dev_name(clsdev));
	if (ret) {
		dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
		goto err_dev_dbgfs;
	}

	return 0;

err_dev_dbgfs:
	device_destroy(mei_class, devno);
err_dev_create:
	cdev_del(&dev->cdev);
err_dev_add:
	mei_minor_free(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);

void mei_deregister(struct mei_device *dev)
{
	int devno;

	devno = dev->cdev.dev;
	cdev_del(&dev->cdev);

	mei_dbgfs_deregister(dev);

	device_destroy(mei_class, devno);

	mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);

static int __init mei_init(void)
{
	int ret;

	mei_class = class_create(THIS_MODULE, "mei");
	if (IS_ERR(mei_class)) {
		pr_err("couldn't create class\n");
		ret = PTR_ERR(mei_class);
		goto err;
	}

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_destroy(mei_class);
err:
	return ret;
}

static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");