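/*
 * IBM Power Systems Virtual Management Channel (VMC) support.
 *
 * (Descriptive header; the original copyright block was lost in
 * extraction. See the MODULE_* macros at the bottom of this file for
 * authorship and the GPL v2 license.)
 */
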
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/sched/signal.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/vio.h>

#include "ibmvmc.h"

#define IBMVMC_DRIVER_VERSION "1.0"

static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);

static const char ibmvmc_driver_name[] = "ibmvmc";

static struct ibmvmc_struct ibmvmc;
static struct ibmvmc_hmc hmcs[MAX_HMCS];
static struct crq_server_adapter ibmvmc_adapter;

static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
static int ibmvmc_max_hmcs = DEFAULT_HMCS;
static int ibmvmc_max_mtu = DEFAULT_MTU;

static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
			       u64 dliobn, u64 dlioba)
{
	long rc = 0;

	/* Ensure all writes to the VMC buffer are visible before the RDMA */
	dma_wmb();
	pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
		 length, sliobn, slioba, dliobn, dlioba);
	rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
				dliobn, dlioba);
	pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);

	return rc;
}

static inline void h_free_crq(uint32_t unit_address)
{
	long rc = 0;

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
}

/**
 * h_request_vmc - request a hypervisor virtual management channel device
 * @vmc_index: drc index of the vmc device created
 *
 * Requests the hypervisor create a new virtual management channel device,
 * allowing this partition to send hypervisor virtualization control
 * commands. Retries while the hypervisor reports busy.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static inline long h_request_vmc(u32 *vmc_index)
{
	long rc = 0;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		rc = plpar_hcall(H_REQUEST_VMC, retbuf);
		pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
		*vmc_index = retbuf[0];
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	return rc;
}

/**
 * ibmvmc_handle_event - Interrupt handler
 * @irq:	number of irq to handle, not used
 * @dev_instance: crq_server_adapter that received the interrupt
 *
 * Disables interrupts and schedules ibmvmc_task.
 *
 * Return:
 *	IRQ_HANDLED - Always
 */
static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)dev_instance;

	vio_disable_interrupts(to_vio_dev(adapter->dev));
	tasklet_schedule(&adapter->work_task);

	return IRQ_HANDLED;
}

/**
 * ibmvmc_release_crq_queue - Release CRQ Queue
 * @adapter:	crq_server_adapter struct
 *
 * Frees the irq, stops the reset thread, closes the CRQ, and releases the
 * DMA mapping and backing page of the queue.
 */
static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;

	free_irq(vdev->irq, (void *)adapter);
	tasklet_kill(&adapter->work_task);

	if (adapter->reset_task)
		kthread_stop(adapter->reset_task);

	h_free_crq(vdev->unit_address);
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}

/**
 * ibmvmc_reset_crq_queue - Reset CRQ Queue
 * @adapter:	crq_server_adapter struct
 *
 * This function calls h_free_crq and then re-registers the CRQ, doing all
 * the bookkeeping to get us back to where we can communicate.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;

	/* Close the CRQ */
	h_free_crq(vdev->unit_address);

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == 2)
		/* Adapter is good, but other end is not ready */
		dev_warn(adapter->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);

	return rc;
}

/**
 * crq_queue_next_crq - Returns the next entry in message queue
 * @queue:	crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
{
	struct ibmvmc_crq_msg *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;

		/* Ensure the read of the valid bit occurs before reading any
		 * other bits of the CRQ entry
		 */
		dma_rmb();
	} else {
		crq = NULL;
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}

/**
 * ibmvmc_send_crq - Send CRQ
 * @adapter:	crq_server_adapter struct
 * @word1:	first 64 bits of the CRQ message
 * @word2:	second 64 bits of the CRQ message
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
			    u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	long rc = 0;

	dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
		vdev->unit_address, word1, word2);

	/* Ensure the command buffer is flushed to memory before handing it
	 * over to the other side, to prevent it from fetching stale data.
	 */
	dma_wmb();
	rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
	dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);

	return rc;
}

/**
 * alloc_dma_buffer - Create DMA Buffer
 * @vdev:	vio_dev struct
 * @size:	Size of the buffer in bytes
 * @dma_handle:	Set to the DMA address of the buffer
 *
 * Allocates a zeroed buffer and maps it for DMA.
 *
 * Return:
 *	Pointer to the buffer on success, NULL on failure
 */
static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
			      dma_addr_t *dma_handle)
{
	/* allocate memory */
	void *buffer = kzalloc(size, GFP_KERNEL);

	if (!buffer) {
		*dma_handle = 0;
		return NULL;
	}

	/* DMA map */
	*dma_handle = dma_map_single(&vdev->dev, buffer, size,
				     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&vdev->dev, *dma_handle)) {
		*dma_handle = 0;
		kzfree(buffer);
		return NULL;
	}

	return buffer;
}

/**
 * free_dma_buffer - Free DMA Buffer
 * @vdev:	vio_dev struct
 * @size:	Size of the buffer in bytes
 * @vaddr:	Address of the buffer
 * @dma_handle:	DMA address of the buffer
 *
 * Unmaps the DMA address and frees the buffer.
 */
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	/* DMA unmap */
	dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* deallocate memory */
	kzfree(vaddr);
}

/**
 * ibmvmc_get_valid_hmc_buffer - Retrieve a valid HMC buffer
 * @hmc_index:	HMC index
 *
 * Return:
 *	Pointer to a free, valid, Alpha-owned buffer, marked in-use;
 *	NULL if none is available
 */
static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index)
		return NULL;

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid && buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}

/**
 * ibmvmc_get_free_hmc_buffer - Retrieve a free HMC buffer
 * @adapter:	crq_server_adapter struct
 * @hmc_index:	HMC index
 *
 * Unlike ibmvmc_get_valid_hmc_buffer, this does not require the buffer to
 * be valid, so it can also return buffers with no DMA storage attached.
 *
 * Return:
 *	Pointer to a free, Alpha-owned buffer, marked in-use;
 *	NULL if none is available
 */
static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
							u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
			 hmc_index);
		return NULL;
	}

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}

/**
 * ibmvmc_free_hmc_buffer - Free an HMC buffer
 * @hmc:	ibmvmc_hmc struct
 * @buffer:	ibmvmc_buffer struct
 *
 * Marks the buffer as free again, under the HMC lock.
 */
static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
				   struct ibmvmc_buffer *buffer)
{
	unsigned long flags;

	spin_lock_irqsave(&hmc->lock, flags);
	buffer->free = 1;
	spin_unlock_irqrestore(&hmc->lock, flags);
}

/**
 * ibmvmc_count_hmc_buffers - Count HMC buffers
 * @hmc_index:	HMC index
 * @valid:	Set to the number of valid buffers
 * @free:	Set to the number of free buffers
 */
static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
				     unsigned int *free)
{
	struct ibmvmc_buffer *buffer;
	unsigned long i;
	unsigned long flags;

	if (hmc_index > ibmvmc.max_hmc_index)
		return;

	if (!valid || !free)
		return;

	*valid = 0;
	*free = 0;

	buffer = hmcs[hmc_index].buffer;
	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			*valid = *valid + 1;
			if (buffer[i].free)
				*free = *free + 1;
		}
	}

	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
}

/**
 * ibmvmc_get_free_hmc - Get a free HMC
 *
 * Return:
 *	Pointer to an available HMC connection, transitioned to the
 *	initial state; NULL if none are free
 */
static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
{
	unsigned long i;
	unsigned long flags;

	/*
	 * Find an available HMC connection.
	 */
	for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
		spin_lock_irqsave(&hmcs[i].lock, flags);
		if (hmcs[i].state == ibmhmc_state_free) {
			hmcs[i].index = i;
			hmcs[i].state = ibmhmc_state_initial;
			spin_unlock_irqrestore(&hmcs[i].lock, flags);
			return &hmcs[i];
		}
		spin_unlock_irqrestore(&hmcs[i].lock, flags);
	}

	return NULL;
}

/**
 * ibmvmc_return_hmc - Return an HMC connection
 * @hmc:		ibmvmc_hmc struct
 * @release_readers:	Whether to wake up and invalidate attached readers
 *
 * This function releases the HMC connection back into the pool, freeing
 * all of its DMA buffers in the process.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
{
	struct ibmvmc_buffer *buffer;
	struct crq_server_adapter *adapter;
	struct vio_dev *vdev;
	unsigned long i;
	unsigned long flags;

	if (!hmc || !hmc->adapter)
		return -EIO;

	if (release_readers) {
		if (hmc->file_session) {
			struct ibmvmc_file_session *session = hmc->file_session;

			session->valid = 0;
			wake_up_interruptible(&ibmvmc_read_wait);
		}
	}

	adapter = hmc->adapter;
	vdev = to_vio_dev(adapter->dev);

	spin_lock_irqsave(&hmc->lock, flags);
	hmc->index = 0;
	hmc->state = ibmhmc_state_free;
	hmc->queue_head = 0;
	hmc->queue_tail = 0;
	buffer = hmc->buffer;
	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			free_dma_buffer(vdev,
					ibmvmc.max_mtu,
					buffer[i].real_addr_local,
					buffer[i].dma_addr_local);
			dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
		}
		memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));

		hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
	}

	spin_unlock_irqrestore(&hmc->lock, flags);

	return 0;
}

/**
 * ibmvmc_send_open - Interface Open
 * @buffer:	ibmvmc_buffer struct holding the HMC ID
 * @hmc:	ibmvmc_hmc struct
 *
 * This command is sent by the management partition as the result of a
 * management partition device request. It causes the hypervisor to prepare
 * a set of data buffers for the management application connection indicated
 * by the HMC index. The buffer (holding the HMC ID) is first copied to the
 * hypervisor with h_copy_rdma, then the open message is sent on the CRQ and
 * the connection transitions to the opening state.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
			    struct ibmvmc_hmc *hmc)
{
	struct ibmvmc_crq_msg crq_msg;
	struct crq_server_adapter *adapter;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	if (!hmc || !hmc->adapter)
		return -EIO;

	adapter = hmc->adapter;

	dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
		(unsigned long)buffer->size, (unsigned long)adapter->liobn,
		(unsigned long)buffer->dma_addr_local,
		(unsigned long)adapter->riobn,
		(unsigned long)buffer->dma_addr_remote);

	rc = h_copy_rdma(buffer->size,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
			rc);
		return -EIO;
	}

	hmc->state = ibmhmc_state_opening;

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_OPEN;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

/**
 * ibmvmc_send_close - Interface Close
 * @hmc:	ibmvmc_hmc struct
 *
 * This command is sent by the management partition to terminate a
 * management application to hypervisor connection. When this message is
 * sent, the management partition has quiesced all I/O operations to all
 * buffers associated with this management application connection.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
{
	struct ibmvmc_crq_msg crq_msg;
	struct crq_server_adapter *adapter;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	if (!hmc || !hmc->adapter)
		return -EIO;

	adapter = hmc->adapter;

	dev_info(adapter->dev, "CRQ send: close\n");

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_CLOSE;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.rsvd = 0;
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

/**
 * ibmvmc_send_capabilities - Send VMC Capabilities
 * @adapter:	crq_server_adapter struct
 *
 * The capabilities message is an administrative message sent after the CRQ
 * initialization sequence of messages. It is used to exchange VMC
 * capabilities (max MTU, buffer pool size, HMC count, protocol version)
 * between the management partition and the hypervisor.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
{
	struct ibmvmc_admin_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_CAP;
	crq_msg.status = 0;
	crq_msg.rsvd[0] = 0;
	crq_msg.rsvd[1] = 0;
	crq_msg.max_hmc = ibmvmc_max_hmcs;
	crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
	crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
	crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
	crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	ibmvmc.state = ibmvmc_state_capabilities;

	return 0;
}

/**
 * ibmvmc_send_add_buffer_resp - Add Buffer Response
 * @adapter:	crq_server_adapter struct
 * @status:	Status field
 * @hmc_session: HMC Session field
 * @hmc_index:	HMC Index field
 * @buffer_id:	Buffer Id field
 *
 * This command is sent by the management partition to the hypervisor in
 * response to the Add Buffer message.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
				       u8 status, u8 hmc_session,
				       u8 hmc_index, u16 buffer_id)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_ADD_BUF_RESP;
	crq_msg.status = status;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc_session;
	crq_msg.hmc_index = hmc_index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return 0;
}

/**
 * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
 * @adapter:	crq_server_adapter struct
 * @status:	Status field
 * @hmc_session: HMC Session field
 * @hmc_index:	HMC Index field
 * @buffer_id:	Buffer Id field
 *
 * This command is sent by the management partition to the hypervisor in
 * response to the Remove Buffer message.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
				       u8 status, u8 hmc_session,
				       u8 hmc_index, u16 buffer_id)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_REM_BUF_RESP;
	crq_msg.status = status;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc_session;
	crq_msg.hmc_index = hmc_index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return 0;
}

/**
 * ibmvmc_send_msg - Signal Message
 * @adapter:	crq_server_adapter struct
 * @buffer:	ibmvmc_buffer struct holding the message
 * @hmc:	ibmvmc_hmc struct
 * @msg_len:	message length field
 *
 * This command is sent between the management partition and the hypervisor
 * in order to signal the arrival of an HMC protocol message. It is used for
 * all traffic between the management application and the hypervisor,
 * regardless of who initiated the communication. The payload is first
 * copied to the hypervisor with h_copy_rdma, then ownership of the buffer
 * passes to the hypervisor.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_buffer *buffer,
			   struct ibmvmc_hmc *hmc, int msg_len)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
	rc = h_copy_rdma(msg_len,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
			rc);
		return rc;
	}

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_SIGNAL;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.var3.msg_len = cpu_to_be32(msg_len);
	dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
		be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));

	buffer->owner = VMC_BUF_OWNER_HV;
	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

/**
 * ibmvmc_open - Open Session
 * @inode:	inode struct
 * @file:	file struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_open(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;

	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)inode, (unsigned long)file,
		 ibmvmc.state);

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->file = file;
	file->private_data = session;

	return 0;
}

/**
 * ibmvmc_close - Close Session
 * @inode:	inode struct
 * @file:	file struct
 *
 * Sends the close message for any open HMC connection and frees the
 * session.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_close(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	int rc = 0;
	unsigned long flags;

	pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)file, ibmvmc.state);

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (hmc) {
		if (!hmc->adapter)
			return -EIO;

		if (ibmvmc.state == ibmvmc_state_failed) {
			dev_warn(hmc->adapter->dev, "close: state_failed\n");
			return -EIO;
		}

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->state >= ibmhmc_state_opening) {
			rc = ibmvmc_send_close(hmc);
			if (rc)
				dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
		}
		spin_unlock_irqrestore(&hmc->lock, flags);
	}

	kzfree(session);

	return rc;
}

/**
 * ibmvmc_read - Read
 * @file:	file struct
 * @buf:	Character buffer
 * @nbytes:	Size in bytes
 * @ppos:	Offset
 *
 * Blocks until a message is queued for the session's HMC connection, then
 * copies the oldest queued buffer out to userspace and frees it.
 *
 * Return:
 *	Number of bytes read on success, negative errno on failure
 */
static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	struct crq_server_adapter *adapter;
	struct ibmvmc_buffer *buffer;
	ssize_t n;
	ssize_t retval = 0;
	unsigned long flags;
	DEFINE_WAIT(wait);

	pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
		 (unsigned long)file, (unsigned long)buf,
		 (unsigned long)nbytes);

	if (nbytes == 0)
		return 0;

	if (nbytes > ibmvmc.max_mtu) {
		pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
			(unsigned int)nbytes);
		return -EINVAL;
	}

	session = file->private_data;
	if (!session) {
		pr_warn("ibmvmc: read: no session\n");
		return -EIO;
	}

	hmc = session->hmc;
	if (!hmc) {
		pr_warn("ibmvmc: read: no hmc\n");
		return -EIO;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		pr_warn("ibmvmc: read: no adapter\n");
		return -EIO;
	}

	do {
		prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->queue_tail != hmc->queue_head)
			/* Data is available - exit with the lock still held */
			break;

		spin_unlock_irqrestore(&hmc->lock, flags);

		if (!session->valid) {
			retval = -EBADFD;
			goto out;
		}
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		}

		schedule();

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
	} while (1);

	buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
	hmc->queue_tail++;
	if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
		hmc->queue_tail = 0;
	spin_unlock_irqrestore(&hmc->lock, flags);

	nbytes = min_t(size_t, nbytes, buffer->msg_len);
	n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
	dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
	ibmvmc_free_hmc_buffer(hmc, buffer);
	retval = nbytes;

	if (n) {
		dev_warn(adapter->dev, "read: copy to user failed.\n");
		retval = -EFAULT;
	}

out:
	finish_wait(&ibmvmc_read_wait, &wait);
	dev_dbg(adapter->dev, "read: out %ld\n", retval);
	return retval;
}

/**
 * ibmvmc_poll - Poll
 * @file:	file struct
 * @wait:	Poll Table
 *
 * Return:
 *	POLLIN | POLLRDNORM if data is queued for the session's HMC
 *	connection, 0 otherwise
 */
static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	unsigned int mask = 0;

	session = file->private_data;
	if (!session)
		return 0;

	hmc = session->hmc;
	if (!hmc)
		return 0;

	poll_wait(file, &ibmvmc_read_wait, wait);

	if (hmc->queue_head != hmc->queue_tail)
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

/**
 * ibmvmc_write - Write
 * @file:	file struct
 * @buffer:	Character buffer
 * @count:	Count field
 * @ppos:	Offset
 *
 * Copies a message from userspace into a free VMC buffer and signals the
 * hypervisor that the message is ready to be fetched.
 *
 * Return:
 *	Number of bytes written on success, negative errno on failure
 */
static ssize_t ibmvmc_write(struct file *file, const char *buffer,
			    size_t count, loff_t *ppos)
{
	struct inode *inode;
	struct ibmvmc_buffer *vmc_buffer;
	struct ibmvmc_file_session *session;
	struct crq_server_adapter *adapter;
	struct ibmvmc_hmc *hmc;
	unsigned char *buf;
	unsigned long flags;
	size_t bytes;
	const char *p = buffer;
	size_t c = count;
	int ret = 0;

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (!hmc)
		return -EIO;

	spin_lock_irqsave(&hmc->lock, flags);
	if (hmc->state == ibmhmc_state_free) {
		/* HMC connection is not valid (possibly was reset under us). */
		ret = -EIO;
		goto out;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		ret = -EIO;
		goto out;
	}

	if (count > ibmvmc.max_mtu) {
		dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
			 (unsigned long)count);
		ret = -EIO;
		goto out;
	}

	/* Waiting for the open_resp message - retry */
	if (hmc->state == ibmhmc_state_opening) {
		ret = -EBUSY;
		goto out;
	}

	/* Make sure the ioctl() was called and the open msg sent, and that
	 * the HMC connection has not failed.
	 */
	if (hmc->state != ibmhmc_state_ready) {
		ret = -EIO;
		goto out;
	}

	vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	if (!vmc_buffer) {
		/* No buffer available for the msg send, or we have not yet
		 * completed the open/open_resp sequence. Retry until this is
		 * complete.
		 */
		ret = -EBUSY;
		goto out;
	}
	if (!vmc_buffer->real_addr_local) {
		dev_err(adapter->dev, "no buffer storage assigned\n");
		ret = -EIO;
		goto out;
	}
	buf = vmc_buffer->real_addr_local;

	while (c > 0) {
		bytes = min_t(size_t, c, vmc_buffer->size);

		bytes -= copy_from_user(buf, p, bytes);
		if (!bytes) {
			ret = -EFAULT;
			goto out;
		}
		c -= bytes;
		p += bytes;
	}
	if (p == buffer)
		goto out;

	inode = file_inode(file);
	inode->i_mtime = current_time(inode);
	mark_inode_dirty(inode);

	dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
		(unsigned long)file, (unsigned long)count);

	ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
	ret = p - buffer;
out:
	spin_unlock_irqrestore(&hmc->lock, flags);
	return (ssize_t)(ret);
}

/**
 * ibmvmc_setup_hmc - Setup the HMC
 * @session:	ibmvmc_file_session struct
 *
 * Reserves a free HMC connection for this file session.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
{
	struct ibmvmc_hmc *hmc;
	unsigned int valid, free, index;

	if (ibmvmc.state == ibmvmc_state_failed) {
		pr_warn("ibmvmc: Reserve HMC: state_failed\n");
		return -EIO;
	}

	if (ibmvmc.state < ibmvmc_state_ready) {
		pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
		return -EAGAIN;
	}

	/* Device is busy until capabilities have been exchanged and we
	 * have a generic buffer for each possible HMC connection.
	 */
	for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
		valid = 0;
		ibmvmc_count_hmc_buffers(index, &valid, &free);
		if (valid == 0) {
			pr_warn("ibmvmc: buffers not ready for index %d\n",
				index);
			return -ENOBUFS;
		}
	}

	/* Get an hmc object, and transition to ibmhmc_state_initial */
	hmc = ibmvmc_get_free_hmc();
	if (!hmc) {
		pr_warn("%s: free hmc not found\n", __func__);
		return -EBUSY;
	}

	hmc->session = hmc->session + 1;
	if (hmc->session == 0xff)
		hmc->session = 1;

	session->hmc = hmc;
	hmc->adapter = &ibmvmc_adapter;
	hmc->file_session = session;
	session->valid = 1;

	return 0;
}

/**
 * ibmvmc_ioctl_sethmcid - Set HMC ID
 * @session:	ibmvmc_file_session struct
 * @new_hmc_id:	HMC id field
 *
 * IOCTL command to set up the HMC ID. Reserves an HMC connection if one is
 * not already attached to the session, then sends the open message.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
				  unsigned char __user *new_hmc_id)
{
	struct ibmvmc_hmc *hmc;
	struct ibmvmc_buffer *buffer;
	size_t bytes;
	char print_buffer[HMC_ID_LEN + 1];
	unsigned long flags;
	long rc = 0;

	/* Reserve HMC session */
	hmc = session->hmc;
	if (!hmc) {
		rc = ibmvmc_setup_hmc(session);
		if (rc)
			return rc;

		hmc = session->hmc;
		if (!hmc) {
			pr_err("ibmvmc: setup_hmc success but no hmc\n");
			return -EIO;
		}
	}

	if (hmc->state != ibmhmc_state_initial) {
		pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
			hmc->state);
		return -EIO;
	}

	bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
	if (bytes)
		return -EFAULT;

	/* Send Open Session command */
	spin_lock_irqsave(&hmc->lock, flags);
	buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	spin_unlock_irqrestore(&hmc->lock, flags);

	if (!buffer || !buffer->real_addr_local) {
		pr_warn("ibmvmc: sethmcid: no buffer available\n");
		return -EIO;
	}

	/* Make sure buffer is NULL terminated before trying to print it */
	memset(print_buffer, 0, HMC_ID_LEN + 1);
	strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
	pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);

	memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
	/* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
	rc = ibmvmc_send_open(buffer, hmc);

	return rc;
}

/**
 * ibmvmc_ioctl_query - Query
 * @session:	ibmvmc_file_session struct
 * @ret_struct:	ibmvmc_query_struct to fill in for the caller
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
			       struct ibmvmc_query_struct __user *ret_struct)
{
	struct ibmvmc_query_struct query_struct;
	size_t bytes;

	memset(&query_struct, 0, sizeof(query_struct));
	query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
	query_struct.state = ibmvmc.state;
	query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;

	bytes = copy_to_user(ret_struct, &query_struct,
			     sizeof(query_struct));
	if (bytes)
		return -EFAULT;

	return 0;
}

/**
 * ibmvmc_ioctl_requestvmc - Request VMC
 * @session:	ibmvmc_file_session struct
 * @ret_vmc_index: VMC drc index returned to the caller
 *
 * Asks the hypervisor to create a VMC device via h_request_vmc and maps
 * the hcall result onto an errno for userspace.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
				    u32 __user *ret_vmc_index)
{
	size_t bytes;
	long rc;
	u32 vmc_drc_index;

	/* Call to request the VMC device from the hypervisor */
	rc = h_request_vmc(&vmc_drc_index);
	pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);

	if (rc == H_SUCCESS) {
		rc = 0;
	} else if (rc == H_FUNCTION) {
		pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
		return -EPERM;
	} else if (rc == H_AUTHORITY) {
		pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
		return -EPERM;
	} else if (rc == H_HARDWARE) {
		pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
		return -EIO;
	} else if (rc == H_RESOURCE) {
		pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
		return -ENODEV;
	} else if (rc == H_NOT_AVAILABLE) {
		pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
		return -EPERM;
	} else if (rc == H_PARAMETER) {
		pr_err("ibmvmc: requestvmc: invalid parameter\n");
		return -EINVAL;
	}

	/* Success, set the vmc index in the global struct */
	ibmvmc.vmc_drc_index = vmc_drc_index;

	bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
			     sizeof(*ret_vmc_index));
	if (bytes) {
		pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
		return -EFAULT;
	}
	return rc;
}

/**
 * ibmvmc_ioctl - IOCTL
 * @file:	file information
 * @cmd:	cmd field
 * @arg:	Argument field
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct ibmvmc_file_session *session = file->private_data;

	pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
		 (unsigned long)file, cmd, arg,
		 (unsigned long)session);

	if (!session) {
		pr_warn("ibmvmc: ioctl: no session\n");
		return -EIO;
	}

	switch (cmd) {
	case VMC_IOCTL_SETHMCID:
		return ibmvmc_ioctl_sethmcid(session,
			(unsigned char __user *)arg);
	case VMC_IOCTL_QUERY:
		return ibmvmc_ioctl_query(session,
			(struct ibmvmc_query_struct __user *)arg);
	case VMC_IOCTL_REQUESTVMC:
		return ibmvmc_ioctl_requestvmc(session,
			(unsigned int __user *)arg);
	default:
		pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
		return -EINVAL;
	}
}

static const struct file_operations ibmvmc_fops = {
	.owner		= THIS_MODULE,
	.read		= ibmvmc_read,
	.write		= ibmvmc_write,
	.poll		= ibmvmc_poll,
	.unlocked_ioctl	= ibmvmc_ioctl,
	.open		= ibmvmc_open,
	.release	= ibmvmc_close,
};
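
/*
 * Illustrative userspace flow (a sketch, not part of the driver; it
 * assumes the misc device is exposed as /dev/ibmvmc and that the
 * VMC_IOCTL_* values and HMC_ID_LEN are available to the application):
 *
 *	fd = open("/dev/ibmvmc", O_RDWR);
 *	ioctl(fd, VMC_IOCTL_QUERY, &query);	// check query.have_vmc/state
 *	ioctl(fd, VMC_IOCTL_SETHMCID, hmc_id);	// reserve HMC, send open msg
 *	write(fd, cmd, cmd_len);		// send an HMC command
 *	read(fd, resp, resp_max);		// block for the HV response
 *	close(fd);				// sends the close message
 */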

/**
 * ibmvmc_add_buffer - Add Buffer
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct
 *
 * This message transfers a buffer from hypervisor ownership to management
 * partition ownership. The LIOBA is obtained from the virtual TCE table
 * associated with the hypervisor side of the virtual device, and points to
 * a buffer of size MTU (as established in the capabilities exchange).
 *
 * Typical flow for adding buffers:
 * 1. A new management application connection is opened by the management
 *	partition.
 * 2. The hypervisor assigns new buffers for the traffic associated with
 *	that connection.
 * 3. The hypervisor sends VMC Add Buffer messages to the management
 *	partition, informing it of the new buffers.
 * 4. The hypervisor sends an HMC protocol message (to the management
 *	application) notifying it of the new buffers. This informs the
 *	application that it has buffers available for sending HMC
 *	commands.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = &hmcs[hmc_index].buffer[buffer_id];

	if (buffer->real_addr_local || buffer->dma_addr_local) {
		dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
			 (unsigned long)buffer_id);
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
						   ibmvmc.max_mtu,
						   &buffer->dma_addr_local);

	if (!buffer->real_addr_local) {
		dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
	buffer->size = ibmvmc.max_mtu;
	buffer->owner = crq->var1.owner;
	buffer->free = 1;
	/* Must ensure valid==1 is observable only after all other fields are */
	dma_wmb();
	buffer->valid = 1;
	buffer->id = buffer_id;

	dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
	dev_dbg(adapter->dev, "   index: %d, session: %d, buffer: 0x%x, owner: %d\n",
		hmc_index, hmc_session, buffer_id, buffer->owner);
	dev_dbg(adapter->dev, "   local: 0x%x, remote: 0x%x\n",
		(u32)buffer->dma_addr_local,
		(u32)buffer->dma_addr_remote);
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}

/**
 * ibmvmc_rem_buffer - Remove Buffer
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct
 *
 * This message requests an HMC buffer to be transferred from management
 * partition ownership to hypervisor ownership. The management partition
 * may not be able to satisfy the request at a particular point in time if
 * all its buffers are in use: it requires a depth of at least one inbound
 * buffer to allow management application commands to flow to the
 * hypervisor, so it is an interface error for either side to request a
 * buffer when only one remains in the pool.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id = 0;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
			 hmc_index);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
	if (!buffer) {
		dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
					    hmc_session, hmc_index,
					    VMC_INVALID_BUFFER_ID);
		return -1;
	}

	buffer_id = buffer->id;

	if (buffer->valid)
		free_dma_buffer(to_vio_dev(adapter->dev),
				ibmvmc.max_mtu,
				buffer->real_addr_local,
				buffer->dma_addr_local);

	memset(buffer, 0, sizeof(struct ibmvmc_buffer));
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
	ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}

/**
 * ibmvmc_recv_msg - Receive a Signal message from the hypervisor
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct
 *
 * RDMAs the message payload into the local buffer and queues it for
 * delivery to the reader of the corresponding HMC connection.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_hmc *hmc;
	unsigned long msg_len;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	/* Hypervisor writes CRQs directly into our memory in big endian */
	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
		be64_to_cpu(*((unsigned long *)crq)),
		be64_to_cpu(*(((unsigned long *)crq) + 1)));

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);
	msg_len = be32_to_cpu(crq->var3.msg_len);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	hmc = &hmcs[hmc_index];
	spin_lock_irqsave(&hmc->lock, flags);

	if (hmc->state == ibmhmc_state_free) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
			hmc->state);
		/* HMC connection is not valid (possibly was reset under us). */
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	buffer = &hmc->buffer[buffer_id];

	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
		dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n",
			buffer->valid, buffer->owner);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* RDMA the data into the partition. */
	rc = h_copy_rdma(msg_len,
			 adapter->riobn,
			 buffer->dma_addr_remote,
			 adapter->liobn,
			 buffer->dma_addr_local);

	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
		(unsigned int)msg_len, (unsigned int)buffer_id,
		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
	buffer->msg_len = msg_len;
	buffer->free = 0;
	buffer->owner = VMC_BUF_OWNER_ALPHA;

	if (rc) {
		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
			rc);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* Queue the buffer for delivery to the reader; must be done under
	 * the lock since read operates on the same indexes.
	 */
	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
	hmc->queue_head++;
	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
		hmc->queue_head = 0;

	if (hmc->queue_head == hmc->queue_tail)
		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");

	spin_unlock_irqrestore(&hmc->lock, flags);

	wake_up_interruptible(&ibmvmc_read_wait);

	return 0;
}

/**
 * ibmvmc_process_capabilities - Process Capabilities
 * @adapter:	crq_server_adapter struct
 * @crqp:	ibmvmc_crq_msg struct
 *
 * Takes the minimum of our own limits and the hypervisor's advertised
 * capabilities, and moves the driver to the ready state.
 */
static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
					struct ibmvmc_crq_msg *crqp)
{
	struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;

	if ((be16_to_cpu(crq->version) >> 8) !=
	    (IBMVMC_PROTOCOL_VERSION >> 8)) {
		dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
			be16_to_cpu(crq->version),
			IBMVMC_PROTOCOL_VERSION);
		ibmvmc.state = ibmvmc_state_failed;
		return;
	}

	ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
	ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
					    be16_to_cpu(crq->pool_size));
	ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
	ibmvmc.state = ibmvmc_state_ready;

	dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
		 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
		 ibmvmc.max_hmc_index);
}

/**
 * ibmvmc_validate_hmc_session - Validate the HMC index and session id
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
				       struct ibmvmc_crq_msg *crq)
{
	unsigned char hmc_index;

	hmc_index = crq->hmc_index;

	if (crq->hmc_session == 0)
		return 0;

	if (hmc_index > ibmvmc.max_hmc_index)
		return -1;

	if (hmcs[hmc_index].session != crq->hmc_session) {
		dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
			 hmcs[hmc_index].session, crq->hmc_session);
		return -1;
	}

	return 0;
}

/**
 * ibmvmc_reset - Reset the connection
 * @adapter:	crq_server_adapter struct
 * @xport_event: true if this reset is the result of a transport event
 *
 * Closes all HMC sessions and, if @xport_event is false, schedules a CRQ
 * reset in process context.
 */
static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
{
	int i;

	if (ibmvmc.state != ibmvmc_state_sched_reset) {
		dev_info(adapter->dev, "*** Reset to initial state.\n");
		for (i = 0; i < ibmvmc_max_hmcs; i++)
			ibmvmc_return_hmc(&hmcs[i], xport_event);

		if (xport_event) {
			/* CRQ was closed by the partner. We don't need to do
			 * anything except set ourself to the correct state to
			 * handle init msgs.
			 */
			ibmvmc.state = ibmvmc_state_crqinit;
		} else {
			/* The partner did not close their CRQ - instead, we're
			 * closing the CRQ on our end. Need to schedule this
			 * for process context, because CRQ reset may require a
			 * sleep.
			 *
			 * Setting ibmvmc.state here immediately prevents
			 * ibmvmc_open from completing until the reset
			 * completes in process context.
			 */
			ibmvmc.state = ibmvmc_state_sched_reset;
			dev_dbg(adapter->dev, "Device reset scheduled");
			wake_up_interruptible(&adapter->reset_wait_queue);
		}
	}
}

/**
 * ibmvmc_reset_task - Process a CRQ reset in process context
 * @data:	crq_server_adapter struct
 *
 * Performs the CRQ reset safely in process context, since the reset may
 * require a sleep.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_reset_task(void *data)
{
	struct crq_server_adapter *adapter = data;
	int rc;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		wait_event_interruptible(adapter->reset_wait_queue,
			(ibmvmc.state == ibmvmc_state_sched_reset) ||
			kthread_should_stop());

		if (kthread_should_stop())
			break;

		dev_dbg(adapter->dev, "CRQ resetting in process context");
		tasklet_disable(&adapter->work_task);

		rc = ibmvmc_reset_crq_queue(adapter);

		if (rc != H_SUCCESS && rc != H_RESOURCE) {
			dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
				rc);
			ibmvmc.state = ibmvmc_state_failed;
		} else {
			ibmvmc.state = ibmvmc_state_crqinit;

			if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
			    != 0 && rc != H_RESOURCE)
				dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
		}

		vio_enable_interrupts(to_vio_dev(adapter->dev));
		tasklet_enable(&adapter->work_task);
	}

	return 0;
}

/**
 * ibmvmc_process_open_resp - Process the response to the open request
 * @crq:	ibmvmc_crq_msg struct
 * @adapter:	crq_server_adapter struct
 *
 * This command is sent by the hypervisor in response to the Interface
 * Open message. When this message is received, the indicated buffer is
 * again available for management partition use.
 */
static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
				     struct crq_server_adapter *adapter)
{
	unsigned char hmc_index;
	unsigned short buffer_id;

	hmc_index = crq->hmc_index;
	if (hmc_index > ibmvmc.max_hmc_index) {
		/* The hypervisor sent an index larger than negotiated */
		ibmvmc_reset(adapter, false);
		return;
	}

	if (crq->status) {
		dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
			 crq->status);
		ibmvmc_return_hmc(&hmcs[hmc_index], false);
		return;
	}

	if (hmcs[hmc_index].state == ibmhmc_state_opening) {
		buffer_id = be16_to_cpu(crq->var2.buffer_id);
		if (buffer_id >= ibmvmc.max_buffer_pool_size) {
			dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
				buffer_id);
			hmcs[hmc_index].state = ibmhmc_state_failed;
		} else {
			ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
					       &hmcs[hmc_index].buffer[buffer_id]);
			hmcs[hmc_index].state = ibmhmc_state_ready;
			dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
		}
	} else {
		dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
			 hmcs[hmc_index].state);
	}
}

/**
 * ibmvmc_process_close_resp - Process the response to the close request
 * @crq:	ibmvmc_crq_msg struct
 * @adapter:	crq_server_adapter struct
 *
 * This command is sent by the hypervisor in response to the management
 * application Interface Close message.
 *
 * If the close fails, simply reset the entire driver as the state of the
 * VMC device is in doubt.
 */
static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
				      struct crq_server_adapter *adapter)
{
	unsigned char hmc_index;

	hmc_index = crq->hmc_index;
	if (hmc_index > ibmvmc.max_hmc_index) {
		ibmvmc_reset(adapter, false);
		return;
	}

	if (crq->status) {
		dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
			 crq->status);
		ibmvmc_reset(adapter, false);
		return;
	}

	ibmvmc_return_hmc(&hmcs[hmc_index], false);
}

/**
 * ibmvmc_crq_process - Process CRQ
 * @adapter:	crq_server_adapter struct
 * @crq:	ibmvmc_crq_msg struct
 *
 * Process the CRQ message based upon the type of message received.
 */
static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
			       struct ibmvmc_crq_msg *crq)
{
	switch (crq->type) {
	case VMC_MSG_CAP_RESP:
		dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
			crq->type);
		if (ibmvmc.state == ibmvmc_state_capabilities)
			ibmvmc_process_capabilities(adapter, crq);
		else
			dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
				 ibmvmc.state);
		break;
	case VMC_MSG_OPEN_RESP:
		dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_open_resp(crq, adapter);
		break;
	case VMC_MSG_ADD_BUF:
		dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_add_buffer(adapter, crq);
		break;
	case VMC_MSG_REM_BUF:
		dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_rem_buffer(adapter, crq);
		break;
	case VMC_MSG_SIGNAL:
		dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_recv_msg(adapter, crq);
		break;
	case VMC_MSG_CLOSE_RESP:
		dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_close_resp(crq, adapter);
		break;
	case VMC_MSG_CAP:
	case VMC_MSG_OPEN:
	case VMC_MSG_CLOSE:
	case VMC_MSG_ADD_BUF_RESP:
	case VMC_MSG_REM_BUF_RESP:
		dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
			 crq->type);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
			 crq->type);
		break;
	}
}

/**
 * ibmvmc_handle_crq_init - Handle CRQ Init
 * @crq:	ibmvmc_crq_msg struct
 * @adapter:	crq_server_adapter struct
 *
 * Handle the type 0x01 (initialization request) and type 0x02
 * (initialization response) CRQ messages.
 */
static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
				   struct crq_server_adapter *adapter)
{
	switch (crq->type) {
	case 0x01:	/* Initialization message */
		dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit) {
			/* Send back a response */
			if (ibmvmc_send_crq(adapter, 0xC002000000000000,
					    0) == 0)
				ibmvmc_send_capabilities(adapter);
			else
				dev_err(adapter->dev, " Unable to send init rsp\n");
		} else {
			dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
				ibmvmc.state, ibmvmc.max_mtu);
		}

		break;
	case 0x02:	/* Initialization response */
		dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit)
			ibmvmc_send_capabilities(adapter);
		break;
	default:
		dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
			 (unsigned long)crq->type);
	}
}

/**
 * ibmvmc_handle_crq - Handle CRQ
 * @crq:	ibmvmc_crq_msg struct
 * @adapter:	crq_server_adapter struct
 *
 * Read the command elements from the command queue and dispatch the
 * requests based upon the type of the CRQ message.
 */
static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
			      struct crq_server_adapter *adapter)
{
	switch (crq->valid) {
	case 0xC0:		/* initialization */
		ibmvmc_handle_crq_init(crq, adapter);
		break;
	case 0xFF:		/* transport event */
		dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
		ibmvmc_reset(adapter, true);
		break;
	case 0x80:		/* real payload */
		ibmvmc_crq_process(adapter, crq);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
			 crq->valid);
		break;
	}
}

static void ibmvmc_task(unsigned long data)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)data;
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct ibmvmc_crq_msg *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;
			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		}

		vio_enable_interrupts(vdev);
		crq = crq_queue_next_crq(&adapter->queue);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;
			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		} else {
			done = 1;
		}
	}
}

/**
 * ibmvmc_init_crq_queue - Init CRQ Queue
 * @adapter:	crq_server_adapter struct
 *
 * Allocates and registers the CRQ with the hypervisor, then wires up the
 * tasklet and interrupt handler.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;
	int retrc = 0;

	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;

	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(adapter->dev, queue->msg_token))
		goto map_failed;

	retrc = plpar_hcall_norets(H_REG_CRQ,
				   vdev->unit_address,
				   queue->msg_token, PAGE_SIZE);
	rc = retrc;

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvmc_reset_crq_queue(adapter);

	if (rc == 2) {
		dev_warn(adapter->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_err(adapter->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);

	if (request_irq(vdev->irq,
			ibmvmc_handle_event,
			0, "ibmvmc", (void *)adapter) != 0) {
		dev_err(adapter->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	return retrc;

req_irq_failed:
	/* Cannot have any work since we either never got our IRQ registered,
	 * or never got interrupts enabled
	 */
	tasklet_kill(&adapter->work_task);
	h_free_crq(vdev->unit_address);
reg_crq_failed:
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)queue->msgs);
malloc_failed:
	return -ENOMEM;
}

static int read_dma_window(struct vio_dev *vdev,
			   struct crq_server_adapter *adapter)
{
	const __be32 *dma_window;
	const __be32 *prop;

	/* TODO Using of_parse_dma_window would be better, but it doesn't give
	 * a way to read multiple windows without already knowing the size of
	 * a window or the number of windows
	 */
	dma_window =
		(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
						  NULL);
	if (!dma_window) {
		dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
		return -1;
	}

	adapter->liobn = be32_to_cpu(*dma_window);
	dma_window++;

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	adapter->riobn = be32_to_cpu(*dma_window);

	return 0;
}
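
/*
 * Illustrative layout of the "ibm,my-dma-window" property walked above
 * (a sketch assuming one address cell and one size cell per window, as
 * implied by the pointer arithmetic in read_dma_window()):
 *
 *	< liobn addr size   riobn addr size >
 *	  local window       remote window
 */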

static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct crq_server_adapter *adapter = &ibmvmc_adapter;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = &vdev->dev;

	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);

	rc = read_dma_window(vdev, adapter);
	if (rc != 0) {
		ibmvmc.state = ibmvmc_state_failed;
		return -1;
	}

	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		adapter->liobn, adapter->riobn);

	init_waitqueue_head(&adapter->reset_wait_queue);
	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
	if (IS_ERR(adapter->reset_task)) {
		dev_err(adapter->dev, "Failed to start reset thread\n");
		ibmvmc.state = ibmvmc_state_failed;
		rc = PTR_ERR(adapter->reset_task);
		adapter->reset_task = NULL;
		return rc;
	}

	rc = ibmvmc_init_crq_queue(adapter);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
			rc);
		ibmvmc.state = ibmvmc_state_failed;
		goto crq_failed;
	}

	ibmvmc.state = ibmvmc_state_crqinit;

	/* Try to send an initialization message. Note that this is allowed
	 * to fail if the other end is not active. In that case we just wait
	 * for the other side to initialize.
	 */
	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
	    rc != H_RESOURCE)
		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");

	dev_set_drvdata(&vdev->dev, adapter);

	return 0;

crq_failed:
	kthread_stop(adapter->reset_task);
	adapter->reset_task = NULL;
	return -EPERM;
}

static int ibmvmc_remove(struct vio_dev *vdev)
{
	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);

	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
		 vdev->unit_address);
	ibmvmc_release_crq_queue(adapter);

	return 0;
}

static struct vio_device_id ibmvmc_device_table[] = {
	{ "ibm,vmc", "IBM,vmc" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);

static struct vio_driver ibmvmc_driver = {
	.name = ibmvmc_driver_name,
	.id_table = ibmvmc_device_table,
	.probe = ibmvmc_probe,
	.remove = ibmvmc_remove,
};

static void __init ibmvmc_scrub_module_parms(void)
{
	if (ibmvmc_max_mtu > MAX_MTU) {
		pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
		ibmvmc_max_mtu = MAX_MTU;
	} else if (ibmvmc_max_mtu < MIN_MTU) {
		pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
		ibmvmc_max_mtu = MIN_MTU;
	}

	if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
			MAX_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
	} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
			MIN_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
	}

	if (ibmvmc_max_hmcs > MAX_HMCS) {
		pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
		ibmvmc_max_hmcs = MAX_HMCS;
	} else if (ibmvmc_max_hmcs < MIN_HMCS) {
		pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
		ibmvmc_max_hmcs = MIN_HMCS;
	}
}

static struct miscdevice ibmvmc_miscdev = {
	.name = ibmvmc_driver_name,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &ibmvmc_fops,
};

static int __init ibmvmc_module_init(void)
{
	int rc, i, j;

	ibmvmc.state = ibmvmc_state_initial;
	pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);

	rc = misc_register(&ibmvmc_miscdev);
	if (rc) {
		pr_err("ibmvmc: misc registration failed\n");
		goto misc_register_failed;
	}
	pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
		ibmvmc_miscdev.minor);

	/* Initialize data structures */
	memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
	for (i = 0; i < MAX_HMCS; i++) {
		spin_lock_init(&hmcs[i].lock);
		hmcs[i].state = ibmhmc_state_free;
		for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
			hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
	}

	/* Sanity check module parms */
	ibmvmc_scrub_module_parms();

	/*
	 * Initialize some reasonable values. Might be negotiated smaller
	 * values during the capabilities exchange.
	 */
	ibmvmc.max_mtu = ibmvmc_max_mtu;
	ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
	ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;

	rc = vio_register_driver(&ibmvmc_driver);
	if (rc) {
		pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
		goto vio_reg_failed;
	}

	return 0;

vio_reg_failed:
	misc_deregister(&ibmvmc_miscdev);
misc_register_failed:
	return rc;
}

static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}

module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);

module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
		   int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");
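
/*
 * Example module load (illustrative; assumes the driver is built as a
 * module named ibmvmc.ko on a platform exposing an ibm,vmc vdevice):
 *
 *	modprobe ibmvmc max_hmcs=2 buf_pool_size=32
 *
 * Out-of-range values are clamped to the MIN_*/MAX_* limits by
 * ibmvmc_scrub_module_parms() above.
 */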

MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");