// SPDX-License-Identifier: GPL-2.0
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

#ifdef CONFIG_X86_64
#include <asm/traps.h>
#endif

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.init_name = "",	/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.init_name = "",	/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static struct ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static struct ctl_table xpc_sys_xpc_dir[] = {
	{
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec_minmax,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static struct ctl_table xpc_sys_dir[] = {
	{
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
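/*
 * Registering xpc_sys_dir via register_sysctl_table() (done in xpc_init())
 * exposes the tunables above as:
 *
 *	/proc/sys/xpc/hb/hb_interval
 *	/proc/sys/xpc/hb/hb_check_interval
 *	/proc/sys/xpc/disengage_timelimit
 *
 * with each value clamped to its min/max by proc_dointvec_minmax().
 */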
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage was timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received and not yet processed */
int xpc_activate_IRQ_rcvd;
DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

struct xpc_arch_operations xpc_arch_ops;

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(struct timer_list *t)
{
	struct xpc_partition *part = from_timer(part, t, disengage_timer);

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat.  The timer structure's function pointer
 * is already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(struct timer_list *unused)
{
	xpc_arch_ops.increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}
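
/*
 * Note that xpc_hb_beater() is a self-rearming timer callback: each run
 * increments the local heartbeat, wakes the heartbeat checker thread if a
 * remote check has come due, and re-queues itself xpc_hb_interval seconds
 * out.
 */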
static void
xpc_start_hb_beater(void)
{
	xpc_arch_ops.heartbeat_init();
	timer_setup(&xpc_hb_timer, xpc_hb_beater, 0);
	xpc_hb_beater(NULL);	/* prime the heartbeat and arm the timer */
}

static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_arch_ops.heartbeat_exit();
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active.  If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb(void)
{
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == xp_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_AS_INACTIVE ||
		    part->act_state == XPC_P_AS_DEACTIVATING) {
			continue;
		}

		ret = xpc_arch_ops.get_remote_heartbeat(part);
		if (ret != xpSuccess)
			XPC_DEACTIVATE_PARTITION(part, ret);
	}
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			xpc_activate_IRQ_rcvd);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);

			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * On sn2 we need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed.
			 */
			if (is_shub())
				force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) {
			force_IRQ = 0;
			dev_dbg(xpc_part, "processing activate IRQs "
				"received\n");
			xpc_arch_ops.process_activate_IRQ_rcvd();
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_activate_IRQ_rcvd > 0 ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that
 * kthread returns back to XPC HB. (The return of that kthread will signify
 * to XPC HB that XPC has dismantled all communication infrastructure for
 * the associated partition.) This kthread becomes the channel manager for
 * that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work
 * done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_AS_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one
		 * pass through the loop for each request, since it will be
		 * servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_AS_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
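
/*
 * Callers must hang on to the raw pointer returned through *base and pass
 * that (never the aligned pointer) to kfree(). A typical use, as in
 * xpc_setup_ch_structures() below:
 *
 *	part->remote_openclose_args =
 *	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
 *					  &part->remote_openclose_args_base);
 *	...
 *	kfree(part->remote_openclose_args_base);
 */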

/*
 * Setup the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_ch_structures(struct xpc_partition *part)
{
	enum xp_retval ret;
	int ch_number;
	struct xpc_channel *ch;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kcalloc(XPC_MAX_NCHANNELS,
				 sizeof(struct xpc_channel),
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate the remote open and close args */

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part->
					  remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		ret = xpNoMemory;
		goto out_1;
	}

	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	ret = xpc_arch_ops.setup_ch_structures(part);
	if (ret != xpSuccess)
		goto out_2;

	/*
	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
	 * we're declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SS_SETUP;

	return xpSuccess;

	/* setup of ch structures failed */
out_2:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return ret;
}

/*
 * Teardown the channel structures necessary to support XPartition
 * Communication between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup. Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_arch_ops.teardown_ch_structures(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}
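
/*
 * A partition's setup_state thus moves through XPC_P_SS_UNSET (set in
 * xpc_setup_partitions()) -> XPC_P_SS_SETUP -> XPC_P_SS_WTEARDOWN ->
 * XPC_P_SS_TORNDOWN over its lifetime; xpc_part_ref() only succeeds while
 * the state is XPC_P_SS_SETUP, which is what lets the wait_event() above
 * drain all outstanding references.
 */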

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which setup the XPC
 * infrastructure will remain assigned to the partition becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will teardown the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		part->act_state = XPC_P_AS_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
	part->act_state = XPC_P_AS_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_arch_ops.allow_hb(partid);

	if (xpc_setup_ch_structures(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_ch_structures(part);
	}

	xpc_arch_ops.disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_arch_ops.request_partition_reactivation(part);
	}

	return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

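/*
 * Wake up (or create) enough kthreads to handle 'needed' deliverable
 * payloads on the channel: idle kthreads blocked on ch->idle_wq are woken
 * first, and new kthreads are created only for the remainder, capped by
 * ch->kthreads_assigned_limit.
 */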
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to be delivered.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
	    xpc_arch_ops.n_of_deliverable_payloads;

	do {
		/* deliver messages to their intended recipients */

		while (n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}

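/*
 * Each message-delivery kthread gets its partid/channel pair packed into a
 * single pointer-sized argument by XPC_PACK_ARGS() in xpc_create_kthreads()
 * and recovered here via XPC_UNPACK_ARG1()/XPC_UNPACK_ARG2() (the macros in
 * xpc.h pack the two values into the low and high 32-bit halves of a u64),
 * since kthread_run() passes only a single void * through to the thread
 * function.
 */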
static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
	    xpc_arch_ops.n_of_deliverable_payloads;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver,
			 * as this kthread will deliver one itself.
			 */
			n_needed = n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_arch_ops.indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a
 * given partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;
	void (*indicate_partition_disengaged) (struct xpc_partition *) =
	    xpc_arch_ops.indicate_partition_disengaged;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_arch_ops.indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will
			 * unblock them is the xpDisconnecting callout that
			 * this failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}
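
/*
 * Note the pairing with xpc_kthread_start(): the first kthread assigned to
 * a channel bumps part->nchannels_engaged, and the first engaged channel
 * marks the whole partition engaged; the matching decrements (in
 * xpc_kthread_start(), or in the kthread_run() error path above) mark the
 * partition disengaged again once the last kthread of the last engaged
 * channel is gone.
 */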

void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

static int
xpc_setup_partitions(void)
{
	short partid;
	struct xpc_partition *part;

	xpc_partitions = kcalloc(xp_max_npartitions,
				 sizeof(struct xpc_partition),
				 GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		return -ENOMEM;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS SUCCESSFULLY BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		timer_setup(&part->disengage_timer,
			    xpc_timeout_partition_disengage, 0);

		part->setup_state = XPC_P_SS_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	return xpc_arch_ops.setup_partitions();
}

static void
xpc_teardown_partitions(void)
{
	xpc_arch_ops.teardown_partitions();
	kfree(xpc_partitions);
}

static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* xpc_do_exit() should only be called once */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_AS_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_arch_ops.any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_arch_ops.any_partition_engaged());

	xpc_teardown_rsvd_page();

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();

	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/* used to only allow one cpu to complete the disconnect */
static unsigned int xpc_die_disconnecting;

/*
 * Notify other partitions to deactivate from us by first disengaging from
 * all references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	if (cmpxchg(&xpc_die_disconnecting, 0, 1))
		return;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_arch_ops.disallow_all_hbs();	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_arch_ops.partition_engaged(partid) ||
		    part->act_state != XPC_P_AS_INACTIVE) {
			xpc_arch_ops.request_partition_deactivation(part);
			xpc_arch_ops.indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting
	 * message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
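	/*
	 * The 1000 * 5 factor converts seconds into loop iterations: each
	 * iteration ends in a udelay(200), so one second corresponds to
	 * 1000000 / 200 = 5000 iterations.
	 */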

	while (1) {
		any_engaged = xpc_arch_ops.any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_arch_ops.partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
{
#ifdef CONFIG_IA64
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_arch_ops.offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_arch_ops.online_heartbeat();
		break;
	}
#else
	struct die_args *die_args = _die_args;

	switch (event) {
	case DIE_TRAP:
		if (die_args->trapnr == X86_TRAP_DF)
			xpc_die_deactivate();

		if (((die_args->trapnr == X86_TRAP_MF) ||
		     (die_args->trapnr == X86_TRAP_XF)) &&
		    !user_mode(die_args->regs))
			xpc_die_deactivate();

		break;
	case DIE_INT3:
	case DIE_DEBUG:
		break;
	case DIE_OOPS:
	case DIE_GPF:
	default:
		xpc_die_deactivate();
	}
#endif

	return NOTIFY_DONE;
}

int __init
xpc_init(void)
{
	int ret;
	struct task_struct *kthread;

	dev_set_name(xpc_part, "part");
	dev_set_name(xpc_chan, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64) {
			dev_err(xpc_part, "max #of partitions not set to 64\n");
			ret = -EINVAL;
		} else {
			ret = xpc_init_sn2();
		}

	} else if (is_uv()) {
		ret = xpc_init_uv();

	} else {
		ret = -ENODEV;
	}

	if (ret != 0)
		return ret;

	ret = xpc_setup_partitions();
	if (ret != 0) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		goto out_1;
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	ret = xpc_setup_rsvd_page();
	if (ret != 0) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc. This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	xpc_teardown_rsvd_page();

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	xpc_teardown_partitions();
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else if (is_uv())
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");
1374