/*
 *	Adaptec AAC series RAID controller driver
 *
 *	Module Name:
 *	  src.c
 *
 *	Abstract: Hardware specific support functions that access the
 *	          SRC (PMC-Sierra) based interface boards.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"
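
/**
 *	aac_src_intr_message	-	interrupt handler (message mode)
 *	@irq: interrupt number
 *	@dev_id: adapter the interrupt belongs to
 *
 *	Walk the host response ring and complete any outstanding FIBs, then
 *	service the AIF and synchronous-command doorbells.
 */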
static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
{
	struct aac_dev *dev = dev_id;
	unsigned long bellbits, bellbits_shifted;
	int our_interrupt = 0;
	int isFastResponse;
	u32 index, handle;

	bellbits = src_readl(dev, MUnit.ODR_R);
	if (bellbits & PmDoorBellResponseSent) {
		bellbits = PmDoorBellResponseSent;
		/* handle async. status */
		src_writel(dev, MUnit.ODR_C, bellbits);
		src_readl(dev, MUnit.ODR_C);
		our_interrupt = 1;
		index = dev->host_rrq_idx;
		for (;;) {
			isFastResponse = 0;
			/* remove toggle bit (31) */
			handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff;
			/* check fast response bit (30) */
			if (handle & 0x40000000)
				isFastResponse = 1;
			handle &= 0x0000ffff;
			if (handle == 0)
				break;

			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);

			dev->host_rrq[index++] = 0;
			/* wrap the response ring index */
			if (index == dev->scsi_host_ptr->can_queue +
						AAC_NUM_MGT_FIB)
				index = 0;
			dev->host_rrq_idx = index;
		}
	} else {
		bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
		if (bellbits_shifted & DoorBellAifPending) {
			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);
			our_interrupt = 1;
			/* handle AIF */
			aac_intr_normal(dev, 0, 2, 0, NULL);
		} else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
			unsigned long sflags;
			struct list_head *entry;
			int send_it = 0;
			extern int aac_sync_mode;

			src_writel(dev, MUnit.ODR_C, bellbits);
			src_readl(dev, MUnit.ODR_C);

			if (!aac_sync_mode) {
				src_writel(dev, MUnit.ODR_C, bellbits);
				src_readl(dev, MUnit.ODR_C);
				our_interrupt = 1;
			}

			if (dev->sync_fib) {
				our_interrupt = 1;
				if (dev->sync_fib->callback)
					dev->sync_fib->callback(dev->sync_fib->callback_data,
						dev->sync_fib);
				spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
				if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
					dev->management_fib_count--;
					up(&dev->sync_fib->event_wait);
				}
				spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
				spin_lock_irqsave(&dev->sync_lock, sflags);
				if (!list_empty(&dev->sync_fib_list)) {
					entry = dev->sync_fib_list.next;
					dev->sync_fib = list_entry(entry, struct fib, fiblink);
					list_del(entry);
					send_it = 1;
				} else {
					dev->sync_fib = NULL;
				}
				spin_unlock_irqrestore(&dev->sync_lock, sflags);
				if (send_it) {
					aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
						(u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
						NULL, NULL, NULL, NULL, NULL);
				}
			}
		}
	}

	if (our_interrupt) {
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
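
/**
 *	aac_src_disable_interrupt	-	Disable interrupts
 *	@dev: Adapter
 */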
static void aac_src_disable_interrupt(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
}
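
/**
 *	aac_src_enable_interrupt_message	-	Enable interrupts
 *	@dev: Adapter
 */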
static void aac_src_enable_interrupt_message(struct aac_dev *dev)
{
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8);
}
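
/**
 *	src_sync_cmd	-	send a command and wait
 *	@dev: Adapter
 *	@command: Command to execute
 *	@p1: first parameter
 *	@ret: adapter status
 *
 *	This routine will send a synchronous command to the adapter and wait
 *	for its completion.
 */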
static int src_sync_cmd(struct aac_dev *dev, u32 command,
	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
{
	unsigned long start;
	int ok;

	/*
	 *	Write the command into Mailbox 0
	 */
	writel(command, &dev->IndexRegs->Mailbox[0]);
	/*
	 *	Write the parameters into Mailboxes 1 - 4
	 */
	writel(p1, &dev->IndexRegs->Mailbox[1]);
	writel(p2, &dev->IndexRegs->Mailbox[2]);
	writel(p3, &dev->IndexRegs->Mailbox[3]);
	writel(p4, &dev->IndexRegs->Mailbox[4]);

	/*
	 *	Clear the synch command doorbell to start on a clean slate.
	 */
	src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);

	/*
	 *	Disable doorbell interrupts
	 */
	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);

	/*
	 *	Force the completion of the mask register write before issuing
	 *	the interrupt.
	 */
	src_readl(dev, MUnit.OIMR);

	/*
	 *	Signal that there is a new synch command
	 */
	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);

	if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
		ok = 0;
		start = jiffies;

		/*
		 *	Wait up to 5 minutes
		 */
		while (time_before(jiffies, start+300*HZ)) {
			udelay(5);	/* Delay 5 microseconds to let the adapter get the command */
			/*
			 *	The adapter sets doorbell 0 when it has completed the command.
			 */
			if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
				/*
				 *	Clear the doorbell.
				 */
				src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
				ok = 1;
				break;
			}
			/*
			 *	Yield the processor in case we are slow
			 */
			msleep(1);
		}
		if (unlikely(ok != 1)) {
			/*
			 *	Restore interrupt mask even though we timed out
			 */
			aac_adapter_enable_int(dev);
			return -ETIMEDOUT;
		}

		/*
		 *	Pull the synch status and return values from the Mailboxes.
		 */
		if (status)
			*status = readl(&dev->IndexRegs->Mailbox[0]);
		if (r1)
			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
		if (r2)
			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
		if (r3)
			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
		if (r4)
			*r4 = readl(&dev->IndexRegs->Mailbox[4]);

		/*
		 *	Clear the synch command doorbell.
		 */
		src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
	}

	/*
	 *	Restore interrupt mask
	 */
	aac_adapter_enable_int(dev);
	return 0;
}
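
/**
 *	aac_src_interrupt_adapter	-	interrupt adapter
 *	@dev: Adapter
 *
 *	Send an interrupt to the adapter and breakpoint it.
 */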
static void aac_src_interrupt_adapter(struct aac_dev *dev)
{
	src_sync_cmd(dev, BREAKPOINT_REQUEST,
		0, 0, 0, 0, 0, 0,
		NULL, NULL, NULL, NULL, NULL);
}
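
/**
 *	aac_src_notify_adapter		-	send an event to the adapter
 *	@dev: Adapter
 *	@event: Event to send
 *
 *	Notify the adapter of an event by ringing the corresponding
 *	doorbell.
 */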
static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
{
	switch (event) {

	case AdapNormCmdQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
		break;
	case HostNormRespNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
		break;
	case AdapNormRespQue:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
		break;
	case HostNormCmdNotFull:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
		break;
	case FastIo:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
		break;
	case AdapPrintfDone:
		src_writel(dev, MUnit.ODR_C,
			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
		break;
	default:
		BUG();
		break;
	}
}
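
/**
 *	aac_src_start_adapter		-	activate adapter
 *	@dev: Adapter
 *
 *	Start up processing on an SRC based AAC adapter
 */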
static void aac_src_start_adapter(struct aac_dev *dev)
{
	struct aac_init *init;

	/* reset host_rrq_idx first */
	dev->host_rrq_idx = 0;

	init = dev->init;
	init->HostElapsedSeconds = cpu_to_le32(get_seconds());

	/* We can only use a 32 bit address here */
	src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
		0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
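
/**
 *	aac_src_check_health
 *	@dev: device to check if healthy
 *
 *	Will attempt to determine if the specified adapter is alive and
 *	capable of handling requests, returning 0 if alive.
 */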
static int aac_src_check_health(struct aac_dev *dev)
{
	u32 status = src_readl(dev, MUnit.OMR);

	/*
	 *	Check to see if the board failed any self tests.
	 */
	if (unlikely(status & SELF_TEST_FAILED))
		return -1;

	/*
	 *	Check to see if the board panic'd.
	 */
	if (unlikely(status & KERNEL_PANIC))
		return (status >> 16) & 0xFF;
	/*
	 *	Wait for the adapter to be up and running.
	 */
	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
		return -3;
	/*
	 *	Everything is OK
	 */
	return 0;
}
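
/**
 *	aac_src_deliver_message
 *	@fib: fib to issue
 *
 *	Will send a fib, returning 0 if successful.
 */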
static int aac_src_deliver_message(struct fib *fib)
{
	struct aac_dev *dev = fib->dev;
	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
	unsigned long qflags;
	u32 fibsize;
	dma_addr_t address;
	struct aac_fib_xporthdr *pFibX;
	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);

	spin_lock_irqsave(q->lock, qflags);
	q->numpending++;
	spin_unlock_irqrestore(q->lock, qflags);

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
		/* Calculate the fibsize bits */
		fibsize = (hdr_size + 127) / 128 - 1;
		if (fibsize > (ALIGN32 - 1))
			return -EMSGSIZE;
		/* New FIB header, 32-bit */
		address = fib->hw_fib_pa;
		fib->hw_fib_va->header.StructType = FIB_MAGIC2;
		fib->hw_fib_va->header.SenderFibAddress = (u32)address;
		fib->hw_fib_va->header.u.TimeStamp = 0;
		BUG_ON(upper_32_bits(address) != 0L);
		address |= fibsize;
	} else {
		/* Calculate the fibsize bits, including the transport header */
		fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
		if (fibsize > (ALIGN32 - 1))
			return -EMSGSIZE;

		/* Fill XPORT header */
		pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
		pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
		pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
		pFibX->Size = cpu_to_le32(hdr_size);

		/*
		 *	The xport header is 32-byte aligned so that fibsize can
		 *	be encoded in the low bits of the address.
		 */
		address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
		if (address & (ALIGN32 - 1))
			return -EINVAL;
		address |= fibsize;
	}

	src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
	src_writel(dev, MUnit.IQ_L, address & 0xffffffff);

	return 0;
}
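
/**
 *	aac_src_ioremap
 *	@dev: device to ioremap
 *	@size: mapping resize request
 *
 */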
static int aac_src_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
		AAC_MIN_SRC_BAR1_SIZE);
	dev->base = NULL;
	if (dev->regs.src.bar1 == NULL)
		return -1;
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL) {
		iounmap(dev->regs.src.bar1);
		dev->regs.src.bar1 = NULL;
		return -1;
	}
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.tupelo.IndexRegs;
	return 0;
}
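
/**
 *	aac_srcv_ioremap
 *	@dev: device to ioremap
 *	@size: mapping resize request
 *
 */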
static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
{
	if (!size) {
		iounmap(dev->regs.src.bar0);
		dev->base = dev->regs.src.bar0 = NULL;
		return 0;
	}
	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
	if (dev->base == NULL)
		return -1;
	dev->IndexRegs = &((struct src_registers __iomem *)
		dev->base)->u.denali.IndexRegs;
	return 0;
}
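
/**
 *	aac_src_restart_adapter	-	restart the adapter
 *	@dev: Adapter
 *	@bled: adapter kernel panic status (negative values skip the IOP reset)
 *
 *	Issue an IOP_RESET_ALWAYS synchronous command and, if the adapter
 *	supports it, ring the reset doorbell.
 */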
static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
{
	u32 var, reset_mask;

	if (bled >= 0) {
		if (bled)
			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
				dev->name, dev->id, bled);
		bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
			0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
		if (bled || (var != 0x00000001))
			return -EINVAL;
		if (dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_DOORBELL_RESET) {
			src_writel(dev, MUnit.IDR, reset_mask);
			msleep(5000); /* Delay 5 seconds */
		}
	}

	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
		return -ENODEV;

	if (startup_timeout < 300)
		startup_timeout = 300;

	return 0;
}
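
/**
 *	aac_src_select_comm	-	Select communications method
 *	@dev: Adapter
 *	@comm: communications method
 */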
int aac_src_select_comm(struct aac_dev *dev, int comm)
{
	switch (comm) {
	case AAC_COMM_MESSAGE:
		dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
		dev->a_ops.adapter_intr = aac_src_intr_message;
		dev->a_ops.adapter_deliver = aac_src_deliver_message;
		break;
	default:
		return 1;
	}
	return 0;
}
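
/**
 *	aac_src_init	-	initialize an SRC based AAC card
 *	@dev: device to configure
 *
 */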
int aac_src_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_src_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	if ((aac_reset_devices || reset_devices) &&
		!aac_src_restart_adapter(dev, 0))
		++restart;
	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
			goto error_iounmap;
		++restart;
	}

	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
			dev->name, instance);
		goto error_iounmap;
	}

	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
			dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running.
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING)) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev,
			    aac_src_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;

	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
		goto error_iounmap;

	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);

	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid", dev) < 0) {

		if (dev->msi)
			pci_disable_msi(dev->pdev);

		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = pci_resource_start(dev->pdev, 2);
	dev->dbg_base_mapped = dev->regs.src.bar1;
	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}
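
/**
 *	aac_srcv_init	-	initialize an SRCv based AAC card
 *	@dev: device to configure
 *
 */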
int aac_srcv_init(struct aac_dev *dev)
{
	unsigned long start;
	unsigned long status;
	int restart = 0;
	int instance = dev->id;
	const char *name = dev->name;

	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
	dev->a_ops.adapter_comm = aac_src_select_comm;

	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
	if (aac_adapter_ioremap(dev, dev->base_size)) {
		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
		goto error_iounmap;
	}

	/* Failure to reset here is an option ... */
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
	if ((aac_reset_devices || reset_devices) &&
		!aac_src_restart_adapter(dev, 0))
		++restart;
	/*
	 *	Check to see if a flash update is in progress and wait for
	 *	it to complete.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & FLASH_UPD_PENDING) {
		start = jiffies;
		do {
			status = src_readl(dev, MUnit.OMR);
			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
					dev->name, instance);
				goto error_iounmap;
			}
		} while (!(status & FLASH_UPD_SUCCESS) &&
			!(status & FLASH_UPD_FAILED));
		/*
		 *	Delay 10 seconds: the firmware is still performing a
		 *	soft reset, so do not touch the registers right away.
		 */
		ssleep(10);
	}

	/*
	 *	Check to see if the board panic'd while booting.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & KERNEL_PANIC) {
		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
			goto error_iounmap;
		++restart;
	}

	/*
	 *	Check to see if the board failed any self tests.
	 */
	status = src_readl(dev, MUnit.OMR);
	if (status & SELF_TEST_FAILED) {
		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
		goto error_iounmap;
	}

	/*
	 *	Check to see if the monitor panic'd while booting.
	 */
	if (status & MONITOR_PANIC) {
		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
		goto error_iounmap;
	}
	start = jiffies;
	/*
	 *	Wait for the adapter to be up and running.
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) &
		KERNEL_UP_AND_RUNNING) ||
		status == 0xffffffff) {
		if ((restart &&
		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
		  time_after(jiffies, start+HZ*startup_timeout)) {
			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
					dev->name, instance, status);
			goto error_iounmap;
		}
		if (!restart &&
		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
		  time_after(jiffies, start + HZ *
		  ((startup_timeout > 60)
		    ? (startup_timeout - 60)
		    : (startup_timeout / 2))))) {
			if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
				start = jiffies;
			++restart;
		}
		msleep(1);
	}
	if (restart && aac_commit)
		aac_commit = 1;

	/*
	 *	Fill in the common function dispatch table.
	 */
	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
	dev->a_ops.adapter_notify = aac_src_notify_adapter;
	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
	dev->a_ops.adapter_check_health = aac_src_check_health;
	dev->a_ops.adapter_restart = aac_src_restart_adapter;

	/*
	 *	First clear out all interrupts.  Then enable the one's that we
	 *	can handle.
	 */
	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
	aac_adapter_disable_int(dev);
	src_writel(dev, MUnit.ODR_C, 0xffffffff);
	aac_adapter_enable_int(dev);

	if (aac_init_adapter(dev) == NULL)
		goto error_iounmap;
	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
		goto error_iounmap;
	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
		IRQF_SHARED, "aacraid", dev) < 0) {
		if (dev->msi)
			pci_disable_msi(dev->pdev);
		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
			name, instance);
		goto error_iounmap;
	}
	dev->dbg_base = dev->base_start;
	dev->dbg_base_mapped = dev->base;
	dev->dbg_size = dev->base_size;

	aac_adapter_enable_int(dev);

	if (!dev->sync_mode) {
		/*
		 * Tell the adapter that all is configured, and it can
		 * start accepting requests
		 */
		aac_src_start_adapter(dev);
	}
	return 0;

error_iounmap:

	return -1;
}