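/*
 *	SGI UltraViolet TLB flush / BAU (Broadcast Assist Unit) routines.
 */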
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

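/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */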
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};

static int timeout_us;
static int nobau;
static int nobau_perm;
static cycles_t congested_cycles;

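/* tunables: */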
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int giveup_limit = GIVEUP_LIMIT;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int disabled_period = DISABLED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT},
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&disabled_period, DISABLED_PERIOD},
	{&giveup_limit, GIVEUP_LIMIT}
};

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

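/* these correspond to the statistics printed by ptc_seq_show() */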
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
	"rok: destination timeouts successfully retried",
	"resetp: ipi-style resource resets for plugs",
	"resett: ipi-style resource resets for timeouts",
	"giveup: fall-backs to ipi-style shootdowns",
	"sto: number of source timeouts",
	"bz: number of stay-busy's",
	"throt: number times spun in throttle",
	"swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv: shootdown messages received",
	"rtime: time spent processing messages",
	"all: shootdown all-tlb messages",
	"one: shootdown one-tlb messages",
	"mult: interrupts that found multiple messages",
	"none: interrupts that found no messages",
	"retry: number of retry messages processed",
	"canc: number messages canceled by retries",
	"nocan: number retries that found nothing to cancel",
	"reset: number of ipi-style reset requests processed",
	"rcan: number messages canceled by reset requests",
	"disable: number times use of the BAU was disabled",
	"enable: number times use of the BAU was re-enabled"
};

static int __init
setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);

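/* base pnode in this partition */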
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

static void
set_bau_on(void)
{
	int cpu;
	struct bau_control *bcp;

	if (nobau_perm) {
		pr_info("BAU not initialized; cannot be turned on\n");
		return;
	}
	nobau = 0;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = 0;
	}
	pr_info("BAU turned on\n");
	return;
}

static void
set_bau_off(void)
{
	int cpu;
	struct bau_control *bcp;

	nobau = 1;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = 1;
	}
	pr_info("BAU turned off\n");
	return;
}

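/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */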
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

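/*
 * Determine the apicid of the first cpu on a uvhub.
 */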
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

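/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clearing of Pending will free the resource.
 */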
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled && do_acknowledge) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		write_mmr_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}

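/*
 * Process the receipt of a RETRY message
 */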
static void bau_process_retry_msg(struct msg_desc *mdp,
					struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = read_mmr_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				write_mmr_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

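/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */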
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
						int do_acknowledge)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		*sp = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 * (unless we are in the UV2 workaround)
			 */
			reply_to_message(mdp, bcp, do_acknowledge);
		}
	}

	return;
}

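/*
 * Determine the first cpu on a pnode.
 */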
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
	int cpu;
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
			return cpu;
	}
	return -1;
}

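/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can then be identified by their nonzero swack_vec field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * swack resources.
 */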
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = read_mmr_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				write_mmr_sw_ack(mr);
			}
		}
	}
	return;
}

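/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */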
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
	int pnode;
	int apnode;
	int maskbits;
	int sender = bcp->cpu;
	cpumask_t *mask = bcp->uvhub_master->cpumask;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpus_clear(*mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
		int cpu;
		if (!bau_uvhub_isset(pnode, distribution))
			continue;
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpu_set(cpu, *mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
	return;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;
	int cpu = smp_processor_id();

	ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}

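/*
 * Pause sending on this uvhub while its swack resources are being reset;
 * senders spin on uvhub_quiesce until it returns to zero.
 */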
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

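/*
 * mark this quiesce over; allow blocked senders to proceed
 */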
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}

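/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */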
static int uv1_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_status;
	cycles_t ttm;
	struct ptc_stats *stat = bcp->statp;

	descriptor_status = uv1_read_status(mmr_offset, right_shift);
	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status != DS_IDLE)) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttm = get_cycles();
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = uv1_read_status(mmr_offset, right_shift);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

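/*
 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
 * But not currently used.
 */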
static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
{
	unsigned long descriptor_status;

	descriptor_status =
		((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
	return descriptor_status;
}

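/*
 * Return whether the status of the descriptor that is normally used for this
 * cpu (the one indicated in the statuses mmr) is busy.
 * The status of the original 32 descriptors is always reflected in the 64
 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
 * The bit provided by the activation_status_2 register is irrelevant to
 * the status if it is only being tested for busy or not busy.
 */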
int normal_busy(struct bau_control *bcp)
{
	int cpu = bcp->uvhub_cpu;
	int mmr_offset;
	int right_shift;

	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
	right_shift = cpu * UV_ACT_STATUS_SIZE;
	return (((((read_lmmr(mmr_offset) >> right_shift) &
				UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
}

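/*
 * Entered when a bau descriptor has gone into a permanent busy wait because
 * of a hardware bug.
 * Workaround the bug.
 */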
int handle_uv2_busy(struct bau_control *bcp)
{
	struct ptc_stats *stat = bcp->statp;

	stat->s_uv2_wars++;
	bcp->busy = 1;
	return FLUSH_GIVEUP;
}

static int uv2_wait_completion(struct bau_desc *bau_desc,
				unsigned long mmr_offset, int right_shift,
				struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	int desc = bcp->uvhub_cpu;
	long busy_reps = 0;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
			/*
			 * A h/w bug on the destination side may
			 * have prevented the message being marked
			 * pending, thus it doesn't get replied to
			 * and gets continually nacked until it times
			 * out with a SOURCE_TIMEOUT.
			 */
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			ttm = get_cycles();
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 * Without using the extended status we have to
			 * deduce from the short time that this was a
			 * strong nack.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				stat->s_plugged++;
				/* FLUSH_RETRY_PLUGGED causes hang on boot */
				return FLUSH_GIVEUP;
			}
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			/* FLUSH_RETRY_TIMEOUT causes hang on boot */
			return FLUSH_GIVEUP;
		} else {
			busy_reps++;
			if (busy_reps > 1000000) {
				/* not to hammer on the clock */
				busy_reps = 0;
				ttm = get_cycles();
				if ((ttm - bcp->send_message) >
						bcp->timeout_interval)
					return handle_uv2_busy(bcp);
			}
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_read_status(mmr_offset, right_shift,
									desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

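/*
 * There are 2 status registers; each an array[32] of 2 bits. Set up for
 * which register to read and position in that register based on cpu in
 * current hub.
 */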
static int wait_completion(struct bau_desc *bau_desc,
				struct bau_control *bcp, long try)
{
	int right_shift;
	unsigned long mmr_offset;
	int desc = bcp->uvhub_cpu;

	if (desc < UV_CPUS_PER_AS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = desc * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
	}

	if (bcp->uvhub_version == 1)
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
	else
		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
								bcp, try);
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

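/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */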
static void destination_plugged(struct bau_desc *bau_desc,
			struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void destination_timeout(struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

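/*
 * Stop all cpus on a uvhub from using the BAU for a period of time.
 * This is reversed by check_enable.
 */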
static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;
	cycles_t tm1;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (!bcp->baudisabled) {
		stat->s_bau_disabled++;
		tm1 = get_cycles();
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 1;
				tbcp->set_bau_on_time =
					tm1 + bcp->disabled_period;
			}
		}
	}
	spin_unlock(&hmaster->disable_lock);
}

static void count_max_concurr(int stat, struct bau_control *bcp,
		struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

static void record_send_stats(cycles_t time1, cycles_t time2,
		struct bau_control *bcp, struct ptc_stats *stat,
		int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->cong_reps) &&
			    ((bcp->period_time / bcp->period_requests) >
							congested_cycles)) {
				stat->s_congested++;
				disable_for_period(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		if (get_cycles() > bcp->period_end)
			bcp->period_giveups = 0;
		bcp->period_giveups++;
		if (bcp->period_giveups == 1)
			bcp->period_end = get_cycles() + bcp->disabled_period;
		if (bcp->period_giveups > bcp->giveup_limit) {
			disable_for_period(bcp, stat);
			stat->s_giveuplimit++;
		}
	}
}

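/*
 * Because of a uv1 hardware bug only a limited number of concurrent
 * requests can be made.
 */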
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
	spinlock_t *lock = &hmaster->uvhub_lock;
	atomic_t *v;

	v = &hmaster->active_descriptor_count;
	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
	}
}

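/*
 * Handle the completion status of a message send.
 */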
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			struct bau_control *bcp, struct bau_control *hmaster,
			struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}

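/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */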
int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc)
{
	int seq_number = 0;
	int completion_stat = 0;
	int uv1 = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct uv1_bau_msg_header *uv1_hdr = NULL;
	struct uv2_bau_msg_header *uv2_hdr = NULL;

	if (bcp->uvhub_version == 1) {
		uv1 = 1;
		uv1_throttle(hmaster, stat);
	}

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	if (uv1)
		uv1_hdr = &bau_desc->header.uv1_hdr;
	else
		uv2_hdr = &bau_desc->header.uv2_hdr;

	do {
		if (try == 0) {
			if (uv1)
				uv1_hdr->msg_type = MSG_REGULAR;
			else
				uv2_hdr->msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			if (uv1)
				uv1_hdr->msg_type = MSG_RETRY;
			else
				uv2_hdr->msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		if (uv1)
			uv1_hdr->sequence = seq_number;
		else
			uv2_hdr->sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = wait_completion(bau_desc, bcp, try);

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			stat->s_overipilimit++;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
		return 1;
	return 0;
}

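/*
 * The BAU is disabled for this uvhub. Each cpu checks here whether the
 * disabled period has expired; if so, the BAU is re-enabled for all the
 * cpus on the uvhub. Returns 0 if re-enabled, -1 if still disabled.
 */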
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
		stat->s_bau_reenabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
				tbcp->period_giveups = 0;
			}
		}
		spin_unlock(&hmaster->disable_lock);
		return 0;
	}
	spin_unlock(&hmaster->disable_lock);
	return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}

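/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */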
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
	}
	if (!cnt)
		return 1;
	return 0;
}

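/*
 * globally purge translation cache of a virtual address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @start: start virtual address to be removed from TLB
 * @end: end virtual address to be removed from TLB
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */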
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	unsigned long descriptor_status;
	unsigned long status;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;
	stat->s_enters++;

	if (bcp->nobau)
		return cpumask;

	if (bcp->busy) {
		descriptor_status =
			read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
		status = ((descriptor_status >> (bcp->uvhub_cpu *
			UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
		if (status == UV2H_DESC_BUSY)
			return cpumask;
		bcp->busy = 0;
	}

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat)) {
			stat->s_ipifordisabled++;
			return cpumask;
		}
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	if (!end || (end - start) <= PAGE_SIZE)
		bau_desc->payload.address = start;
	else
		bau_desc->payload.address = TLB_FLUSH_ALL;
	bau_desc->payload.sending_cpu = cpu;
	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
		return NULL;
	else
		return cpumask;
}

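/*
 * Search the message queue for any 'other' unprocessed message with the
 * same software acknowledge resource bit vector as the 'msg' message.
 */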
struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
			struct bau_control *bcp)
{
	struct bau_pq_entry *msg_next = msg + 1;
	unsigned char swack_vec = msg->swack_vec;

	if (msg_next > bcp->queue_last)
		msg_next = bcp->queue_first;
	while (msg_next != msg) {
		if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
				(msg_next->swack_vec == swack_vec))
			return msg_next;
		msg_next++;
		if (msg_next > bcp->queue_last)
			msg_next = bcp->queue_first;
	}
	return NULL;
}

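/*
 * UV2 needs to work around a bug in which an arriving message has not
 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
 * Such a message must be ignored.
 */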
void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long mmr_image;
	unsigned char swack_vec;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *other_msg;

	mmr_image = read_mmr_sw_ack();
	swack_vec = msg->swack_vec;

	if ((swack_vec & mmr_image) == 0) {
		/*
		 * This message was assigned a swack resource, but no
		 * reserved acknowledgement is pending.
		 * The bug has prevented this message from setting the MMR.
		 */
		/*
		 * Some message has set the MMR 'pending' bit; it might have
		 * been another message.  Look for that message.
		 */
		other_msg = find_another_by_swack(msg, bcp);
		if (other_msg) {
			/*
			 * There is another. Process this one but do not
			 * ack it.
			 */
			bau_process_message(mdp, bcp, 0);
			/*
			 * Let the natural processing of that other message
			 * acknowledge it. Don't get the processing of sw_ack's
			 * out of order.
			 */
			return;
		}
	}

	/*
	 * Either the MMR shows this one pending a reply or there is no
	 * other message using this sw_ack, so it is safe to acknowledge it.
	 */
	bau_process_message(mdp, bcp, 1);

	return;
}

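/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noncurrent cpus see it)
 */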
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_pq_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	ack_APIC_irq();
	time_start = get_cycles();

	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

	msg = bcp->bau_msg_head;
	while (msg->swack_vec) {
		count++;

		msgdesc.msg_slot = msg - msgdesc.queue_first;
		msgdesc.msg = msg;
		if (bcp->uvhub_version == 2)
			process_uv2_message(&msgdesc, bcp);
		else
			bau_process_message(&msgdesc, bcp, 1);

		msg++;
		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
}

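/*
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */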
static void __init enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image = read_mmr_misc_control(pnode);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~(1L << SOFTACK_MSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Turn the software ack mode back on, which locks in the
		 * timeout period just programmed.
		 */
		mmr_image |= (1L << SOFTACK_MSHIFT);
		if (is_uv2_hub()) {
			/* hw bug workaround; do not use extended status */
			mmr_image &= ~(1L << UV2_EXT_SHFT);
		}
		write_mmr_misc_control(pnode, mmr_image);
	}
}

static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void ptc_seq_stop(struct seq_file *file, void *data)
{
}

static inline unsigned long long usec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

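/*
 * Display the statistics thru /proc/sgi_uv/ptc_statistics
 * 'data' points to the cpu number
 * Note: see the descriptions in stat_description[].
 */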
static int ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	struct bau_control *bcp;
	int cpu;

	cpu = *(loff_t *)data;
	if (!cpu) {
		seq_printf(file,
		 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
			"numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
		seq_printf(file,
			"rok resetp resett giveup sto bz throt disable ");
		seq_printf(file,
			"enable wars warshw warwaits enters ipidis plugged ");
		seq_printf(file,
			"ipiover glim cong swack recv rtime all one mult ");
		seq_printf(file,
			"none retry canc nocan reset rcan\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		bcp = &per_cpu(bau_control, cpu);
		stat = bcp->statp;
		/* source side statistics */
		seq_printf(file,
			"cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, bcp->nobau, stat->s_requestor,
			   cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout, stat->s_strongnacks);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_bau_disabled, stat->s_bau_reenabled,
			   stat->s_uv2_wars, stat->s_uv2_wars_hw,
			   stat->s_uv2_war_waits, stat->s_enters,
			   stat->s_ipifordisabled, stat->s_plugged,
			   stat->s_overipilimit, stat->s_giveuplimit,
			   stat->s_congested);

		/* destination side statistics */
		seq_printf(file,
			"%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
			   read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
	}
	return 0;
}

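/*
 * Display the tunables thru debugfs
 */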
static ssize_t tunables_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
		"max_concur plugged_delay plugsb4reset timeoutsb4reset",
		"ipi_reset_limit complete_threshold congested_response_us",
		"congested_reps disabled_period giveup_limit",
		max_concurr, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_respns_us, congested_reps, disabled_period,
		giveup_limit);

	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}

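/*
 * handle a write to /proc/sgi_uv/ptc_statistics
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */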
static ssize_t ptc_proc_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int i;
	int elements;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';

	if (!strcmp(optstr, "on")) {
		set_bau_on();
		return count;
	} else if (!strcmp(optstr, "off")) {
		set_bau_off();
		return count;
	}

	if (strict_strtol(optstr, 10, &input_arg) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		elements = ARRAY_SIZE(stat_description);
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG "Sender statistics:\n");
		for (i = 0; i < elements; i++)
			printk(KERN_DEBUG "%s\n", stat_description[i]);
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}

static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}

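/*
 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
 * Zero values reset them to defaults.
 */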
static int parse_tunables_write(struct bau_control *bcp, char *instr,
				int count)
{
	char *p;
	char *q;
	int cnt = 0;
	int val;
	int e = ARRAY_SIZE(tunables);

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != e) {
		printk(KERN_INFO "bau tunable error: should be %d values\n", e);
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_concurr = MAX_BAU_CONCURRENT;
				max_concurr_const = MAX_BAU_CONCURRENT;
				continue;
			}
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_concurr = val;
			max_concurr_const = val;
			continue;
		default:
			if (val == 0)
				*tunables[cnt].tunp = tunables[cnt].deflt;
			else
				*tunables[cnt].tunp = val;
			continue;
		}
		if (q == p)
			break;
	}
	return 0;
}

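/*
 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
 */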
static ssize_t tunables_write(struct file *file, const char __user *user,
				size_t count, loff_t *data)
{
	int cpu;
	int ret;
	char instr[100];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';

	cpu = get_cpu();
	bcp = &per_cpu(bau_control, cpu);
	ret = parse_tunables_write(bcp, instr, count);
	put_cpu();
	if (ret)
		return ret;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->disabled_period = sec_2_cycles(disabled_period);
		bcp->giveup_limit = giveup_limit;
	}
	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start = ptc_seq_start,
	.next = ptc_seq_next,
	.stop = ptc_seq_stop,
	.show = ptc_seq_show
};

static int ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations proc_uv_ptc_operations = {
	.open = ptc_proc_open,
	.read = seq_read,
	.write = ptc_proc_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations tunables_fops = {
	.open = tunables_open,
	.read = tunables_read,
	.write = tunables_write,
	.llseek = default_llseek,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		printk(KERN_ERR "unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
					tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		printk(KERN_ERR "unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}

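/*
 * Initialize the sending side's sending buffers.
 */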
static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	int uv1 = 0;
	unsigned long gpa;
	unsigned long m;
	unsigned long n;
	size_t dsize;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct uv1_bau_msg_header *uv1_hdr;
	struct uv2_bau_msg_header *uv2_hdr;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * per cpu; and one per cpu on the uvhub (ADP_SZ)
	 */
	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	gpa = uv_gpa(bau_desc);
	n = uv_gpa_to_gnode(gpa);
	m = uv_gpa_to_offset(gpa);
	if (is_uv1_hub())
		uv1 = 1;

	/* the 14-bit pnode */
	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		if (uv1) {
			uv1_hdr = &bd2->header.uv1_hdr;
			uv1_hdr->swack_flag = 1;
			/*
			 * The base_dest_nasid set in the message header
			 * is the nasid of the first uvhub in the partition.
			 * The bit map will indicate destination pnode numbers
			 * relative to that base. They may not be consecutive
			 * if nasid striding is being used.
			 */
			uv1_hdr->base_dest_nasid =
						UV_PNODE_TO_NASID(base_pnode);
			uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
			uv1_hdr->command = UV_NET_ENDPOINT_INTD;
			uv1_hdr->int_both = 1;
			/*
			 * all others need to be set to zero:
			 *   fairness chaining multilevel count replied_to
			 */
		} else {
			/*
			 * BIOS uses legacy mode, but UV2 hardware always
			 * uses native mode for selective broadcasts.
			 */
			uv2_hdr = &bd2->header.uv2_hdr;
			uv2_hdr->swack_flag = 1;
			uv2_hdr->base_dest_nasid =
						UV_PNODE_TO_NASID(base_pnode);
			uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
			uv2_hdr->command = UV_NET_ENDPOINT_INTD;
		}
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}

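/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */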
static void pq_init(int node, int pnode)
{
	int cpu;
	size_t plsize;
	char *cp;
	void *vp;
	unsigned long pn;
	unsigned long first;
	unsigned long pn_first;
	unsigned long last;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	pqp = (struct bau_pq_entry *)vp;
	BUG_ON(!pqp);

	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the gnode of where the memory was really allocated
	 */
	pn = uv_gpa_to_gnode(uv_gpa(pqp));
	first = uv_physnodeaddr(pqp);
	pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
	last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
	write_mmr_payload_first(pnode, pn_first);
	write_mmr_payload_tail(pnode, first);
	write_mmr_payload_last(pnode, last);
	write_gmmr_sw_ack(pnode, 0xffffUL);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}

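/*
 * Initialization of each UV hub's structures
 */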
static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);
	/*
	 * The below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
}

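/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */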
static int calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	if (is_uv1_hub()) {
		mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		ts_ns = timeout_base_ns[index];
		ts_ns *= (mult1 * mult2);
		ret = ts_ns / 1000;
	} else {
		/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
			base = 80;
		else
			base = 10;
		mult1 = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;
	}
	return ret;
}

static void __init init_per_cpu_tunables(void)
{
	int cpu;
	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		if (nobau)
			bcp->nobau = 1;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = usec_2_cycles(2*timeout_us);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->disabled_period = sec_2_cycles(disabled_period);
		bcp->giveup_limit = giveup_limit;
		spin_lock_init(&bcp->queue_lock);
		spin_lock_init(&bcp->uvhub_lock);
		spin_lock_init(&bcp->disable_lock);
	}
}

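/*
 * Scan all cpus to collect blade and socket summaries.
 */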
static int __init get_cpu_topology(int base_pnode,
					struct uvhub_desc *uvhub_descs,
					unsigned char *uvhub_mask)
{
	int cpu;
	int pnode;
	int uvhub;
	int socket;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

		memset(bcp, 0, sizeof(struct bau_control));

		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
			printk(KERN_EMERG
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
			return 1;
		}

		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = base_pnode;

		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];

		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;

		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			printk(KERN_EMERG "%d cpus per socket invalid\n",
				sdp->num_cpus);
			return 1;
		}
	}
	return 0;
}

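/*
 * Each socket is to get a local array of pnodes/hubs.
 */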
static void make_per_cpu_thp(struct bau_control *smaster)
{
	int cpu;
	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();

	smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	memset(smaster->thp, 0, hpsz);
	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
	}
}

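/*
 * Each uvhub is to get a local cpumask.
 */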
static void make_per_hub_cpumask(struct bau_control *hmaster)
{
	int sz = sizeof(cpumask_t);

	hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
}

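/*
 * Initialize all the per_cpu information for the cpu's on a given socket,
 * given what has been gathered into the socket_desc struct.
 * And reports the chosen hub and socket masters back to the caller.
 */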
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
			struct bau_control **smasterp,
			struct bau_control **hmasterp)
{
	int i;
	int cpu;
	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);
		bcp->cpu = cpu;
		if (i == 0) {
			*smasterp = bcp;
			if (!(*hmasterp))
				*hmasterp = bcp;
		}
		bcp->cpus_in_uvhub = bdp->num_cpus;
		bcp->cpus_in_socket = sdp->num_cpus;
		bcp->socket_master = *smasterp;
		bcp->uvhub = bdp->uvhub;
		if (is_uv1_hub())
			bcp->uvhub_version = 1;
		else if (is_uv2_hub())
			bcp->uvhub_version = 2;
		else {
			printk(KERN_EMERG "uvhub version not 1 or 2\n");
			return 1;
		}
		bcp->uvhub_master = *hmasterp;
		bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			printk(KERN_EMERG "%d cpus per uvhub invalid\n",
				bcp->uvhub_cpu);
			return 1;
		}
	}
	return 0;
}

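/*
 * Summarize the blade and socket topology into the per_cpu structures.
 */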
static int __init summarize_uvhub_sockets(int nuvhubs,
			struct uvhub_desc *uvhub_descs,
			unsigned char *uvhub_mask)
{
	int socket;
	int uvhub;
	unsigned short socket_mask;

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		struct uvhub_desc *bdp;
		struct bau_control *smaster = NULL;
		struct bau_control *hmaster = NULL;

		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;

		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			struct socket_desc *sdp;
			if ((socket_mask & 1)) {
				sdp = &bdp->socket[socket];
				if (scan_sock(sdp, bdp, &smaster, &hmaster))
					return 1;
				make_per_cpu_thp(smaster);
			}
			socket++;
			socket_mask = (socket_mask >> 1);
		}
		make_per_hub_cpumask(hmaster);
	}
	return 0;
}

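/*
 * initialize the bau_control structure for each cpu
 */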
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
	unsigned char *uvhub_mask;
	void *vp;
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	uvhub_descs = (struct uvhub_desc *)vp;
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
		goto fail;

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
		goto fail;

	kfree(uvhub_descs);
	kfree(uvhub_mask);
	init_per_cpu_tunables();
	return 0;

fail:
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	return 1;
}

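/*
 * Initialization of BAU-related structures
 */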
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int cpus;
	int vector;
	cpumask_var_t *mask;

	if (!is_uv_system())
		return 0;

	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
	}

	nuvhubs = uv_num_possible_blades();
	congested_cycles = usec_2_cycles(congested_respns_us);

	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);
	}

	enable_timeouts();

	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
		set_bau_off();
		nobau_perm = 1;
		return 0;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);

	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			unsigned long val;
			unsigned long mmr;
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			val = 1L << 63;
			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */
			if (!is_uv1_hub())
				write_mmr_data_broadcast(pnode, mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);