// SPDX-License-Identifier: GPL-2.0
/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008-2014 Cliff Wickman <cpw@sgi.com>, SGI.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

static struct bau_operations ops __ro_after_init;

static int timeout_us;
static bool nobau = true;
static int nobau_perm;

/* tunables: */
static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int giveup_limit = GIVEUP_LIMIT;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int disabled_period = DISABLED_PERIOD;

static struct tunables tunables[] = {
	{&max_concurr, MAX_BAU_CONCURRENT},
	{&plugged_delay, PLUGGED_DELAY},
	{&plugsb4reset, PLUGSB4RESET},
	{&timeoutsb4reset, TIMEOUTSB4RESET},
	{&ipi_reset_limit, IPI_RESET_LIMIT},
	{&complete_threshold, COMPLETE_THRESHOLD},
	{&congested_respns_us, CONGESTED_RESPONSE_US},
	{&congested_reps, CONGESTED_REPS},
	{&disabled_period, DISABLED_PERIOD},
	{&giveup_limit, GIVEUP_LIMIT}
};

static struct dentry *tunables_dir;
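
/* these correspond to the statistics printed by ptc_seq_show() */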
static char *stat_description[] = {
	"sent: number of shootdown messages sent",
	"stime: time spent sending messages",
	"numuvhubs: number of hubs targeted with shootdown",
	"numuvhubs16: number times 16 or more hubs targeted",
	"numuvhubs8: number times 8 or more hubs targeted",
	"numuvhubs4: number times 4 or more hubs targeted",
	"numuvhubs2: number times 2 or more hubs targeted",
	"numuvhubs1: number times 1 hub targeted",
	"numcpus: number of cpus targeted with shootdown",
	"dto: number of destination timeouts",
	"retries: destination timeout retries sent",
	"rok: destination timeouts successfully retried",
	"resetp: ipi-style resource resets for plugs",
	"resett: ipi-style resource resets for timeouts",
	"giveup: fall-backs to ipi-style shootdowns",
	"sto: number of source timeouts",
	"bz: number of stay-busy's",
	"throt: number times spun in throttle",
	"swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
	"recv: shootdown messages received",
	"rtime: time spent processing messages",
	"all: shootdown all-tlb messages",
	"one: shootdown one-tlb messages",
	"mult: interrupts that found multiple messages",
	"none: interrupts that found no messages",
	"retry: number of retry messages processed",
	"canc: number messages canceled by retries",
	"nocan: number retries that found nothing to cancel",
	"reset: number of ipi-style reset requests processed",
	"rcan: number messages canceled by reset requests",
	"disable: number times use of the BAU was disabled",
	"enable: number times use of the BAU was re-enabled"
};

static int __init setup_bau(char *arg)
{
	int result;

	if (!arg)
		return -EINVAL;

	result = strtobool(arg, &nobau);
	if (result)
		return result;

	/* we need to flip the logic here, so that bau=y sets nobau to false */
	nobau = !nobau;

	if (!nobau)
		pr_info("UV BAU Enabled\n");
	else
		pr_info("UV BAU Disabled\n");

	return 0;
}
early_param("bau", setup_bau);

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

static void
set_bau_on(void)
{
	int cpu;
	struct bau_control *bcp;

	if (nobau_perm) {
		pr_info("BAU not initialized; cannot be turned on\n");
		return;
	}
	nobau = false;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = false;
	}
	pr_info("BAU turned on\n");
	return;
}

static void
set_bau_off(void)
{
	int cpu;
	struct bau_control *bcp;

	nobau = true;
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->nobau = true;
	}
	pr_info("BAU turned off\n");
	return;
}

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}
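
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clearing of Pending will return a new reply to the sender message and
 * the sender can then free the message.
 */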
static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
			     int do_acknowledge)
{
	unsigned long dw;
	struct bau_pq_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled && do_acknowledge) {
		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
		ops.write_l_sw_ack(dw);
	}
	msg->replied_to = 1;
	msg->swack_vec = 0;
}
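
/*
 * Process the receipt of a RETRY message
 */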
static void bau_process_retry_msg(struct msg_desc *mdp,
				  struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *msg2;
	struct ptc_stats *stat = bcp->statp;

	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->queue_last)
			msg2 = mdp->queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->swack_vec) && ((msg2->swack_vec &
			msg->swack_vec) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			mmr = ops.read_l_sw_ack();
			msg_res = msg2->swack_vec;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				unsigned long mr;
				/*
				 * Is the resource timed out?
				 * Make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
				ops.write_l_sw_ack(mr);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}
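
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */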
static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
				int do_acknowledge)
{
	short socket_ack_count = 0;
	short *sp;
	struct atomic_short *asp;
	struct ptc_stats *stat = bcp->statp;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	if (msg->address == TLB_FLUSH_ALL) {
		flush_tlb_local();
		stat->d_alltlb++;
	} else {
		flush_tlb_one_user(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a swack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
	asp = (struct atomic_short *)sp;
	socket_ack_count = atom_asr(1, asp);
	if (socket_ack_count == bcp->cpus_in_socket) {
		int msg_ack_count;
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		*sp = 0;
		asp = (struct atomic_short *)&msg->acknowledge_count;
		msg_ack_count = atom_asr(socket_ack_count, asp);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 * (unless we are in the UV2 workaround)
			 */
			reply_to_message(mdp, bcp, do_acknowledge);
		}
	}

	return;
}

/*
 * Determine the first cpu on a pnode.
 */
static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
{
	int cpu;
	struct hub_and_pnode *hpp;

	for_each_present_cpu(cpu) {
		hpp = &smaster->thp[cpu];
		if (pnode == hpp->pnode)
			return cpu;
	}
	return -1;
}
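
/*
 * Entered, via IPI, on one cpu of each target uvhub by reset_with_ipi().
 * Cancel any unreplied-to message from the given sending cpu and release
 * the software acknowledge resources it still holds.
 */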
static void do_reset(void *ptr)
{
	int i;
	struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
	struct reset_args *rap = (struct reset_args *)ptr;
	struct bau_pq_entry *msg;
	struct ptc_stats *stat = bcp->statp;

	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its swack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		unsigned long msg_res;
		/* do_reset: same conditions for cancellation as
		   bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->swack_vec) &&
		    (msg->msg_type != MSG_NOOP)) {
			unsigned long mmr;
			unsigned long mr;
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = ops.read_l_sw_ack();
			msg_res = msg->swack_vec;
			mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				ops.write_l_sw_ack(mr);
			}
		}
	}
	return;
}
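
/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */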
static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
{
	int pnode;
	int apnode;
	int maskbits;
	int sender = bcp->cpu;
	cpumask_t *mask = bcp->uvhub_master->cpumask;
	struct bau_control *smaster = bcp->socket_master;
	struct reset_args reset_args;

	reset_args.sender = sender;
	cpumask_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	maskbits = sizeof(struct pnmask) * BITSPERBYTE;
	/* each bit is a pnode relative to the partition base pnode */
	for (pnode = 0; pnode < maskbits; pnode++) {
		int cpu;
		if (!bau_uvhub_isset(pnode, distribution))
			continue;
		apnode = pnode + bcp->partition_base_pnode;
		cpu = pnode_to_first_cpu(apnode, smaster);
		cpumask_set_cpu(cpu, mask);
	}

	/* IPI all cpus; preemption is already disabled */
	smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
	return;
}
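
/*
 * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
 * number, not an absolute. It converts a duration in cycles to a duration
 * in ns.
 */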
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data data;
	unsigned long long ns;

	cyc2ns_read_begin(&data);
	ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
	cyc2ns_read_end();

	return ns;
}

/*
 * The reverse of the above; converts a duration in ns to a duration in
 * cycles.
 */
static inline unsigned long long ns_2_cycles(unsigned long long ns)
{
	struct cyc2ns_data data;
	unsigned long long cyc;

	cyc2ns_read_begin(&data);
	cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
	cyc2ns_read_end();

	return cyc;
}

static inline unsigned long cycles_2_us(unsigned long long cyc)
{
	return cycles_2_ns(cyc) / NSEC_PER_USEC;
}

static inline cycles_t sec_2_cycles(unsigned long sec)
{
	return ns_2_cycles(sec * NSEC_PER_SEC);
}

static inline unsigned long long usec_2_cycles(unsigned long usec)
{
	return ns_2_cycles(usec * NSEC_PER_USEC);
}
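
/*
 * Senders spin while uvhub_quiesce is nonzero, so raising it pauses new
 * broadcasts on this uvhub while IPI-style resets are in progress.
 */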
static inline void quiesce_local_uvhub(struct bau_control *hmaster)
{
	atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * mark this quiesce-request done; broadcasts may resume when the count
 * returns to zero
 */
static inline void end_uvhub_quiesce(struct bau_control *hmaster)
{
	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
}

/*
 * The UV2/3 activation status, left-shifted by one into the 3-bit
 * software status format (the low bit is supplied by
 * ACTIVATION_STATUS_2 on UV4; see read_status() below).
 */
static unsigned long uv2_3_read_status(unsigned long offset, int rshft, int desc)
{
	return ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
}

/*
 * The BAU status for this cpu's descriptor has been BUSY for too long;
 * record the give-up and fall back to IPI-style flushes until the
 * descriptor goes idle again (see uv_flush_tlb_others()).
 */
static int handle_uv2_busy(struct bau_control *bcp)
{
	struct ptc_stats *stat = bcp->statp;

	stat->s_uv2_wars++;
	bcp->busy = 1;
	return FLUSH_GIVEUP;
}

static int uv2_3_wait_completion(struct bau_desc *bau_desc,
				 struct bau_control *bcp, long try)
{
	unsigned long descriptor_stat;
	cycles_t ttm;
	u64 mmr_offset = bcp->status_mmr;
	int right_shift = bcp->status_index;
	int desc = bcp->uvhub_cpu;
	long busy_reps = 0;
	struct ptc_stats *stat = bcp->statp;

	descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		if (descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) {
			/*
			 * A h/w bug on the destination side may
			 * have prevented the message being marked
			 * pending, thus it doesn't get replied to
			 * and gets continually nacked until it times
			 * out with a SOURCE_TIMEOUT.
			 */
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
			ttm = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 * Without using the extended status we have to
			 * deduce from the short time that this was a
			 * strong nack.
			 */
			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
				bcp->conseccompletes = 0;
				stat->s_plugged++;
				/* FLUSH_RETRY_PLUGGED causes hang on boot */
				return FLUSH_GIVEUP;
			}
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			/* FLUSH_RETRY_TIMEOUT causes hang on boot */
			return FLUSH_GIVEUP;
		} else {
			busy_reps++;
			if (busy_reps > 1000000) {
				/* not to hammer on the clock */
				busy_reps = 0;
				ttm = get_cycles();
				if ((ttm - bcp->send_message) > bcp->timeout_interval)
					return handle_uv2_busy(bcp);
			}
			/*
			 * descriptor_stat is still BUSY
			 */
			cpu_relax();
		}
		descriptor_stat = uv2_3_read_status(mmr_offset, right_shift, desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
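
/*
 * Returns the status of current BAU message for cpu desc as a bit field
 * [Error][Busy][Aux]
 */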
static u64 read_status(u64 status_mmr, int index, int desc)
{
	u64 stat;

	stat = ((read_lmmr(status_mmr) >> index) & UV_ACT_STATUS_MASK) << 1;
	stat |= (read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_2) >> desc) & 0x1;

	return stat;
}

static int uv4_wait_completion(struct bau_desc *bau_desc,
			       struct bau_control *bcp, long try)
{
	struct ptc_stats *stat = bcp->statp;
	u64 descriptor_stat;
	u64 mmr = bcp->status_mmr;
	int index = bcp->status_index;
	int desc = bcp->uvhub_cpu;

	descriptor_stat = read_status(mmr, index, desc);

	/* spin on the status MMR, waiting for it to go idle */
	while (descriptor_stat != UV2H_DESC_IDLE) {
		switch (descriptor_stat) {
		case UV2H_DESC_SOURCE_TIMEOUT:
			stat->s_stimeout++;
			return FLUSH_GIVEUP;

		case UV2H_DESC_DEST_TIMEOUT:
			stat->s_dtimeout++;
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;

		case UV2H_DESC_DEST_STRONG_NACK:
			stat->s_plugged++;
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_PLUGGED;

		case UV2H_DESC_DEST_PUT_ERR:
			bcp->conseccompletes = 0;
			return FLUSH_GIVEUP;

		default:
			/* descriptor_stat is still BUSY */
			cpu_relax();
		}
		descriptor_stat = read_status(mmr, index, desc);
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
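
/*
 * Our retries are blocked by all destination sw ack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */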
static void destination_plugged(struct bau_desc *bau_desc,
				struct bau_control *bcp,
				struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;

	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void destination_timeout(struct bau_desc *bau_desc,
				struct bau_control *bcp, struct bau_control *hmaster,
				struct ptc_stats *stat)
{
	hmaster->max_concurr = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;

		quiesce_local_uvhub(hmaster);

		spin_lock(&hmaster->queue_lock);
		reset_with_ipi(&bau_desc->distribution, bcp);
		spin_unlock(&hmaster->queue_lock);

		end_uvhub_quiesce(hmaster);

		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}
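
/*
 * Completions are taking a very long time due to a congested numalink
 * network. Stop all cpus on this uvhub from using the BAU for a period
 * of time; it will be re-enabled by check_enable().
 */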
static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;
	cycles_t tm1;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (!bcp->baudisabled) {
		stat->s_bau_disabled++;
		tm1 = get_cycles();
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 1;
				tbcp->set_bau_on_time =
					tm1 + bcp->disabled_period;
			}
		}
	}
	spin_unlock(&hmaster->disable_lock);
}

/*
 * Count consecutive completions; after enough of them, raise the
 * allowed concurrency on this uvhub (up to the tunable maximum).
 */
static void count_max_concurr(int stat, struct bau_control *bcp,
			      struct bau_control *hmaster)
{
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if (stat != FLUSH_COMPLETE)
		return;
	if (bcp->conseccompletes <= bcp->complete_threshold)
		return;
	if (hmaster->max_concurr >= hmaster->max_concurr_const)
		return;
	hmaster->max_concurr++;
}

/*
 * Record time and completion statistics for a broadcast; disable the
 * BAU for a period if it appears congested or keeps giving up.
 */
static void record_send_stats(cycles_t time1, cycles_t time2,
			      struct bau_control *bcp, struct ptc_stats *stat,
			      int completion_status, int try)
{
	cycles_t elapsed;

	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;

		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			/*
			 * A sustained average response time above the
			 * congestion threshold disables the BAU for a while.
			 */
			if ((elapsed > usec_2_cycles(bcp->cong_response_us)) &&
			    (bcp->period_requests > bcp->cong_reps) &&
			    ((bcp->period_time / bcp->period_requests) >
			     usec_2_cycles(bcp->cong_response_us))) {
				stat->s_congested++;
				disable_for_period(bcp, stat);
			}
		}
	} else
		stat->s_requestor--; /* don't count this one */

	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		if (get_cycles() > bcp->period_end)
			bcp->period_giveups = 0;
		bcp->period_giveups++;
		if (bcp->period_giveups == 1)
			bcp->period_end = get_cycles() + bcp->disabled_period;
		if (bcp->period_giveups > bcp->giveup_limit) {
			disable_for_period(bcp, stat);
			stat->s_giveuplimit++;
		}
	}
}

/*
 * Handle the completion status of a message send.
 */
static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
			 struct bau_control *bcp, struct bau_control *hmaster,
			 struct ptc_stats *stat)
{
	if (completion_status == FLUSH_RETRY_PLUGGED)
		destination_plugged(bau_desc, bcp, hmaster, stat);
	else if (completion_status == FLUSH_RETRY_TIMEOUT)
		destination_timeout(bau_desc, bcp, hmaster, stat);
}
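
/*
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to, including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */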
static int uv_flush_send_and_wait(struct cpumask *flush_mask,
				  struct bau_control *bcp,
				  struct bau_desc *bau_desc)
{
	int seq_number = 0;
	int completion_stat = 0;
	long try = 0;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *hmaster = bcp->uvhub_master;
	struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	time1 = get_cycles();
	uv2_3_hdr = &bau_desc->header.uv2_3_hdr;

	do {
		if (try == 0) {
			uv2_3_hdr->msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			uv2_3_hdr->msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}

		uv2_3_hdr->sequence = seq_number;
		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
		bcp->send_message = get_cycles();

		write_mmr_activation(index);

		try++;
		completion_stat = ops.wait_completion(bau_desc, bcp, try);

		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);

		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			stat->s_overipilimit++;
			completion_stat = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
		 (completion_stat == FLUSH_RETRY_TIMEOUT));

	time2 = get_cycles();

	count_max_concurr(completion_stat, bcp, hmaster);

	while (hmaster->uvhub_quiesce)
		cpu_relax();

	atomic_dec(&hmaster->active_descriptor_count);

	record_send_stats(time1, time2, bcp, stat, completion_stat, try);

	if (completion_stat == FLUSH_GIVEUP)
		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
		return 1;
	return 0;
}
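
/*
 * The BAU is disabled for this uvhub. All cpus on the uvhub check here
 * and re-enable it once the disable period has expired.
 * Return 0 if it was re-enabled, -1 if it is still disabled.
 */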
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;
	struct bau_control *hmaster;

	hmaster = bcp->uvhub_master;
	spin_lock(&hmaster->disable_lock);
	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
		stat->s_bau_reenabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			if (tbcp->uvhub_master == hmaster) {
				tbcp->baudisabled = 0;
				tbcp->period_requests = 0;
				tbcp->period_time = 0;
				tbcp->period_giveups = 0;
			}
		}
		spin_unlock(&hmaster->disable_lock);
		return 0;
	}
	spin_unlock(&hmaster->disable_lock);
	return -1;
}

static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
				   int remotes, struct bau_desc *bau_desc)
{
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;

	stat->s_ntarguvhub += hubs;

	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;
}
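
/*
 * Translate a cpu mask to the uvhub distribution mask in the BAU
 * activation descriptor.
 */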
static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
			    struct bau_desc *bau_desc, int *localsp, int *remotesp)
{
	int cpu;
	int pnode;
	int cnt = 0;
	struct hub_and_pnode *hpp;

	for_each_cpu(cpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using a local memory array.
		 */
		hpp = &bcp->socket_master->thp[cpu];
		pnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(pnode, &bau_desc->distribution);
		cnt++;
		if (hpp->uvhub == bcp->uvhub)
			(*localsp)++;
		else
			(*remotesp)++;
	}
	if (!cnt)
		return 1;
	return 0;
}
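
/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @info: flush info
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */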
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  const struct flush_tlb_info *info)
{
	unsigned int cpu = smp_processor_id();
	int locals = 0, remotes = 0, hubs = 0;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	unsigned long descriptor_status, status, address;

	bcp = &per_cpu(bau_control, cpu);

	if (bcp->nobau)
		return cpumask;

	stat = bcp->statp;
	stat->s_enters++;

	/* a descriptor marked 'busy' by a give-up must go idle first */
	if (bcp->busy) {
		descriptor_status =
			read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
		status = ((descriptor_status >> (bcp->uvhub_cpu *
			UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
		if (status == UV2H_DESC_BUSY)
			return cpumask;
		bcp->busy = 0;
	}

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		if (check_enable(bcp, stat)) {
			stat->s_ipifordisabled++;
			return cpumask;
		}
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpumask_test_cpu(cpu, cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
		return NULL;

	record_send_statistics(stat, locals, hubs, remotes, bau_desc);

	if (!info->end || (info->end - info->start) <= PAGE_SIZE)
		address = info->start;
	else
		address = TLB_FLUSH_ALL;

	switch (bcp->uvhub_version) {
	case UV_BAU_V2:
	case UV_BAU_V3:
		bau_desc->payload.uv2_3.address = address;
		bau_desc->payload.uv2_3.sending_cpu = cpu;
		break;
	case UV_BAU_V4:
		bau_desc->payload.uv4.address = address;
		bau_desc->payload.uv4.sending_cpu = cpu;
		bau_desc->payload.uv4.qualifier = BAU_DESC_QUALIFIER;
		break;
	}

	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
		return NULL;
	else
		return cpumask;
}
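
/*
 * Search the message queue for any 'other' unprocessed message with the
 * same software acknowledge resource bit vector.
 */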
static struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
						  struct bau_control *bcp)
{
	struct bau_pq_entry *msg_next = msg + 1;
	unsigned char swack_vec = msg->swack_vec;

	if (msg_next > bcp->queue_last)
		msg_next = bcp->queue_first;
	while (msg_next != msg) {
		if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
		    (msg_next->swack_vec == swack_vec))
			return msg_next;
		msg_next++;
		if (msg_next > bcp->queue_last)
			msg_next = bcp->queue_first;
	}
	return NULL;
}
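
/*
 * UV2 needs to work around a bug in which an arriving message has not
 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
 * Such a message must not be acknowledged here if another message with
 * the same software acknowledge resource is still queued; acknowledging
 * it would also release the resource that other message still holds.
 */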
static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
{
	unsigned long mmr_image;
	unsigned char swack_vec;
	struct bau_pq_entry *msg = mdp->msg;
	struct bau_pq_entry *other_msg;

	mmr_image = ops.read_l_sw_ack();
	swack_vec = msg->swack_vec;

	if ((swack_vec & mmr_image) == 0) {
		/*
		 * This message was assigned a swack resource, but its
		 * reserved acknowledge register bit was never set by the
		 * hardware (the bug). Check whether another message is
		 * still using the same resource.
		 */
		other_msg = find_another_by_swack(msg, bcp);
		if (other_msg) {
			/*
			 * There is another. Process this one but do not
			 * ack it.
			 */
			bau_process_message(mdp, bcp, 0);
			/*
			 * Let the natural processing of that other message
			 * acknowledge it. Don't get the processing of sw_ack's
			 * out of order.
			 */
			return;
		}
	}

	/*
	 * Either the MMR shows this one pending a reply or there is no
	 * other message using this sw_ack, so it is safe to acknowledge it.
	 */
	bau_process_message(mdp, bcp, 1);

	return;
}
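
/*
 * The BAU message interrupt comes here.
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */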
DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message)
{
	int count = 0;
	cycles_t time_start;
	struct bau_pq_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	ack_APIC_irq();
	kvm_set_cpu_l1tf_flush_l1d();
	time_start = get_cycles();

	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;

	msgdesc.queue_first = bcp->queue_first;
	msgdesc.queue_last = bcp->queue_last;

	msg = bcp->bau_msg_head;
	while (msg->swack_vec) {
		count++;

		msgdesc.msg_slot = msg - msgdesc.queue_first;
		msgdesc.msg = msg;
		if (bcp->uvhub_version == UV_BAU_V2)
			process_uv2_message(&msgdesc, bcp);
		else
			/* no error workaround on uv3 or uv4 */
			bau_process_message(&msgdesc, bcp, 1);

		msg++;
		if (msg > msgdesc.queue_last)
			msg = msgdesc.queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
}
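
/*
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */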
static void __init enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image = read_mmr_misc_control(pnode);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~(1L << SOFTACK_MSHIFT);
		write_mmr_misc_control(pnode, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
		write_mmr_misc_control(pnode, mmr_image);

		mmr_image |= (1L << SOFTACK_MSHIFT);
		if (is_uv2_hub()) {
			/* hw bug workaround; do not use extended status */
			mmr_image &= ~(1L << UV2_EXT_SHFT);
		} else if (is_uv3_hub()) {
			mmr_image &= ~(1L << PREFETCH_HINT_SHFT);
			mmr_image |= (1L << SB_STATUS_SHFT);
		}
		write_mmr_misc_control(pnode, mmr_image);
	}
}

static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void ptc_seq_stop(struct seq_file *file, void *data)
{
}
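
/*
 * Display the statistics thru /proc/sgi_uv/ptc_statistics
 * 'data' points to the cpu number
 * Note: see the descriptions in stat_description[].
 */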
static int ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	struct bau_control *bcp;
	int cpu;

	cpu = *(loff_t *)data;
	if (!cpu) {
		seq_puts(file,
			 "# cpu bauoff sent stime self locals remotes ncpus localhub ");
		seq_puts(file, "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_puts(file,
			 "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
		seq_puts(file,
			 "rok resetp resett giveup sto bz throt disable ");
		seq_puts(file,
			 "enable wars warshw warwaits enters ipidis plugged ");
		seq_puts(file,
			 "ipiover glim cong swack recv rtime all one mult ");
		seq_puts(file, "none retry canc nocan reset rcan\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		bcp = &per_cpu(bau_control, cpu);
		if (bcp->nobau) {
			seq_printf(file, "cpu %d bau disabled\n", cpu);
			return 0;
		}
		stat = bcp->statp;
		/* source side statistics */
		seq_printf(file,
			   "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, bcp->nobau, stat->s_requestor,
			   cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout, stat->s_strongnacks);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_bau_disabled, stat->s_bau_reenabled,
			   stat->s_uv2_wars, stat->s_uv2_wars_hw,
			   stat->s_uv2_war_waits, stat->s_enters,
			   stat->s_ipifordisabled, stat->s_plugged,
			   stat->s_overipilimit, stat->s_giveuplimit,
			   stat->s_congested);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
			   ops.read_g_sw_ack(uv_cpu_to_pnode(cpu)),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
	}
	return 0;
}
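
/*
 * Display the tunables thru debugfs
 */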
static ssize_t tunables_read(struct file *file, char __user *userbuf,
			     size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
			"max_concur plugged_delay plugsb4reset timeoutsb4reset",
			"ipi_reset_limit complete_threshold congested_response_us",
			"congested_reps disabled_period giveup_limit",
			max_concurr, plugged_delay, plugsb4reset,
			timeoutsb4reset, ipi_reset_limit, complete_threshold,
			congested_respns_us, congested_reps, disabled_period,
			giveup_limit);

	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}
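
/*
 * Handle a write to /proc/sgi_uv/ptc_statistics:
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */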
static ssize_t ptc_proc_write(struct file *file, const char __user *user,
			      size_t count, loff_t *data)
{
	int cpu;
	int i;
	int elements;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';

	if (!strcmp(optstr, "on")) {
		set_bau_on();
		return count;
	} else if (!strcmp(optstr, "off")) {
		set_bau_off();
		return count;
	}

	if (kstrtol(optstr, 10, &input_arg) < 0) {
		pr_debug("%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		elements = ARRAY_SIZE(stat_description);
		pr_debug("# cpu: cpu number\n");
		pr_debug("Sender statistics:\n");
		for (i = 0; i < elements; i++)
			pr_debug("%s\n", stat_description[i]);
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}

/*
 * A simple atoi for unsigned decimal strings; stops at the first
 * non-digit.
 */
static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}
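
/*
 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
 * A zero value restores a tunable's default.
 */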
static int parse_tunables_write(struct bau_control *bcp, char *instr,
				int count)
{
	char *p;
	char *q;
	int cnt = 0;
	int val;
	int e = ARRAY_SIZE(tunables);

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	/* count the values supplied; all of the tunables must be written */
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != e) {
		pr_info("bau tunable error: should be %d values\n", e);
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			/* the first value is the maximum concurrent broadcasts */
			if (val == 0) {
				max_concurr = MAX_BAU_CONCURRENT;
				max_concurr_const = MAX_BAU_CONCURRENT;
				continue;
			}
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				pr_debug(
					"Error: BAU max concurrent %d is invalid\n",
					val);
				return -EINVAL;
			}
			max_concurr = val;
			max_concurr_const = val;
			continue;
		default:
			/* a zero restores the tunable's default */
			if (val == 0)
				*tunables[cnt].tunp = tunables[cnt].deflt;
			else
				*tunables[cnt].tunp = val;
			continue;
		}
	}
	return 0;
}
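
/*
 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
 */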
static ssize_t tunables_write(struct file *file, const char __user *user,
			      size_t count, loff_t *data)
{
	int cpu;
	int ret;
	char instr[100];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';

	cpu = get_cpu();
	bcp = &per_cpu(bau_control, cpu);
	ret = parse_tunables_write(bcp, instr, count);
	put_cpu();
	if (ret)
		return ret;

	/* update the per-cpu copies of the (global) tunables */
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->disabled_period = sec_2_cycles(disabled_period);
		bcp->giveup_limit = giveup_limit;
	}
	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start = ptc_seq_start,
	.next = ptc_seq_next,
	.stop = ptc_seq_stop,
	.show = ptc_seq_show
};

static int ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct proc_ops uv_ptc_proc_ops = {
	.proc_open = ptc_proc_open,
	.proc_read = seq_read,
	.proc_write = ptc_proc_write,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release,
};

static const struct file_operations tunables_fops = {
	.open = tunables_open,
	.read = tunables_read,
	.write = tunables_write,
	.llseek = default_llseek,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &uv_ptc_proc_ops);
	if (!proc_uv_ptc) {
		pr_err("unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600, tunables_dir, NULL,
			    &tunables_fops);
	return 0;
}
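
/*
 * Initialize the sending side's sending buffers.
 */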
static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	unsigned long gpa;
	unsigned long m;
	unsigned long n;
	size_t dsize;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct uv2_3_bau_msg_header *uv2_3_hdr;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
	 * per cpu; and one per cpu on the uvhub (ADP_SZ)
	 */
	dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
	bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	gpa = uv_gpa(bau_desc);
	n = uv_gpa_to_gnode(gpa);
	m = ops.bau_gpa_to_offset(gpa);

	/* the 14-bit pnode */
	write_mmr_descriptor_base(pnode,
		(n << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT | m));
	/*
	 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		/*
		 * BIOS uses legacy mode, but uv2 and uv3 hardware actually
		 * uses native mode for selective broadcasts.
		 */
		uv2_3_hdr = &bd2->header.uv2_3_hdr;
		uv2_3_hdr->swack_flag = 1;
		uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
		uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
		uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}
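
/*
 * Initialize the destination side's receiving buffers.
 * Entered for each uvhub in the partition.
 * - node is the first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */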
static void pq_init(int node, int pnode)
{
	int cpu;
	size_t plsize;
	char *cp;
	void *vp;
	unsigned long gnode, first, last, tail;
	struct bau_pq_entry *pqp;
	struct bau_control *bcp;

	plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
	vp = kmalloc_node(plsize, GFP_KERNEL, node);
	BUG_ON(!vp);

	/* align the payload queue on a 32-byte boundary */
	pqp = (struct bau_pq_entry *)vp;
	cp = (char *)pqp + 31;
	pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
	}

	first = ops.bau_gpa_to_offset(uv_gpa(pqp));
	last = ops.bau_gpa_to_offset(uv_gpa(pqp + (DEST_Q_SIZE - 1)));

	/*
	 * Pre UV4, the gnode is required to locate the payload queue
	 * and the payload queue tail must be maintained by the kernel.
	 */
	bcp = &per_cpu(bau_control, smp_processor_id());
	if (bcp->uvhub_version <= UV_BAU_V3) {
		tail = first;
		gnode = uv_gpa_to_gnode(uv_gpa(pqp));
		first = (gnode << UV_PAYLOADQ_GNODE_SHIFT) | tail;
		write_mmr_payload_tail(pnode, tail);
	}

	ops.write_payload_first(pnode, first);
	ops.write_payload_last(pnode, last);

	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
}
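
/*
 * Initialization of each UV hub's structures
 */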
static void __init init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);

	activation_descriptor_init(node, pnode, base_pnode);

	pq_init(node, pnode);
	/*
	 * The below initialization can't be done in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub);
	write_mmr_data_config(pnode, ((apicid << 32) | vector));
}
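
/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */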
static int calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int base;
	int ret;

	/* same destination timeout for uv2 and uv3 */
	/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
	mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
	mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
	if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
		base = 80;
	else
		base = 10;
	mult1 = mmr_image & UV2_ACK_MASK;
	ret = mult1 * base;

	return ret;
}

static void __init init_per_cpu_tunables(void)
{
	int cpu;
	struct bau_control *bcp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		if (nobau)
			bcp->nobau = true;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = usec_2_cycles(2*timeout_us);
		bcp->max_concurr = max_concurr;
		bcp->max_concurr_const = max_concurr;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->cong_response_us = congested_respns_us;
		bcp->cong_reps = congested_reps;
		bcp->disabled_period = sec_2_cycles(disabled_period);
		bcp->giveup_limit = giveup_limit;
		spin_lock_init(&bcp->queue_lock);
		spin_lock_init(&bcp->uvhub_lock);
		spin_lock_init(&bcp->disable_lock);
	}
}
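
/*
 * Scan all cpus to collect blade and socket summaries.
 */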
static int __init get_cpu_topology(int base_pnode,
				   struct uvhub_desc *uvhub_descs,
				   unsigned char *uvhub_mask)
{
	int cpu;
	int pnode;
	int uvhub;
	int socket;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;

	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);

		memset(bcp, 0, sizeof(struct bau_control));

		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
			pr_emerg(
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
			return 1;
		}

		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = base_pnode;

		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];

		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;

		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			pr_emerg("%d cpus per socket invalid\n",
				sdp->num_cpus);
			return 1;
		}
	}
	return 0;
}

/*
 * Each socket is to get a local array of pnodes/hubs.
 */
static void make_per_cpu_thp(struct bau_control *smaster)
{
	int cpu;
	size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();

	smaster->thp = kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
	for_each_present_cpu(cpu) {
		smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
		smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
	}
}

/*
 * Each uvhub is to get a local cpumask.
 */
static void make_per_hub_cpumask(struct bau_control *hmaster)
{
	int sz = sizeof(cpumask_t);

	hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
}
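
/*
 * Initialize all the per_cpu information for the cpu's on a given socket,
 * given what has been gathered into the socket_desc struct.
 * And reports the chosen hub and socket masters back to the caller.
 */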
static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
		     struct bau_control **smasterp,
		     struct bau_control **hmasterp)
{
	int i, cpu, uvhub_cpu;
	struct bau_control *bcp;

	for (i = 0; i < sdp->num_cpus; i++) {
		cpu = sdp->cpu_number[i];
		bcp = &per_cpu(bau_control, cpu);
		bcp->cpu = cpu;
		if (i == 0) {
			*smasterp = bcp;
			if (!(*hmasterp))
				*hmasterp = bcp;
		}
		bcp->cpus_in_uvhub = bdp->num_cpus;
		bcp->cpus_in_socket = sdp->num_cpus;
		bcp->socket_master = *smasterp;
		bcp->uvhub = bdp->uvhub;
		if (is_uv2_hub())
			bcp->uvhub_version = UV_BAU_V2;
		else if (is_uv3_hub())
			bcp->uvhub_version = UV_BAU_V3;
		else if (is_uv4_hub())
			bcp->uvhub_version = UV_BAU_V4;
		else {
			pr_emerg("uvhub version not 2, 3, or 4\n");
			return 1;
		}
		bcp->uvhub_master = *hmasterp;
		uvhub_cpu = uv_cpu_blade_processor_id(cpu);
		bcp->uvhub_cpu = uvhub_cpu;

		/*
		 * The first UV_CPUS_PER_AS cpus of a uvhub have their
		 * activation status in ACTIVATION_STATUS_0; the rest
		 * in ACTIVATION_STATUS_1.
		 */
		if (uvhub_cpu < UV_CPUS_PER_AS) {
			bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
			bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE;
		} else {
			bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
			bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS)
					* UV_ACT_STATUS_SIZE;
		}

		if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
			pr_emerg("%d cpus per uvhub invalid\n",
				bcp->uvhub_cpu);
			return 1;
		}
	}
	return 0;
}
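
/*
 * Summarize the blade and socket topology into the per_cpu structures.
 */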
static int __init summarize_uvhub_sockets(int nuvhubs,
					  struct uvhub_desc *uvhub_descs,
					  unsigned char *uvhub_mask)
{
	int socket;
	int uvhub;
	unsigned short socket_mask;

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		struct uvhub_desc *bdp;
		struct bau_control *smaster = NULL;
		struct bau_control *hmaster = NULL;

		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;

		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			struct socket_desc *sdp;
			if ((socket_mask & 1)) {
				sdp = &bdp->socket[socket];
				if (scan_sock(sdp, bdp, &smaster, &hmaster))
					return 1;
				make_per_cpu_thp(smaster);
			}
			socket++;
			socket_mask = (socket_mask >> 1);
		}
		make_per_hub_cpumask(hmaster);
	}
	return 0;
}
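
/*
 * Initialize the bau_control structure for each cpu.
 */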
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
	struct uvhub_desc *uvhub_descs;
	unsigned char *uvhub_mask = NULL;

	if (is_uv3_hub() || is_uv2_hub())
		timeout_us = calculate_destination_timeout();

	uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
	if (!uvhub_descs)
		goto fail;

	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
	if (!uvhub_mask)
		goto fail;

	if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
		goto fail;

	if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
		goto fail;

	kfree(uvhub_descs);
	kfree(uvhub_mask);
	init_per_cpu_tunables();
	return 0;

fail:
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	return 1;
}

static const struct bau_operations uv2_3_bau_ops __initconst = {
	.bau_gpa_to_offset = uv_gpa_to_offset,
	.read_l_sw_ack = read_mmr_sw_ack,
	.read_g_sw_ack = read_gmmr_sw_ack,
	.write_l_sw_ack = write_mmr_sw_ack,
	.write_g_sw_ack = write_gmmr_sw_ack,
	.write_payload_first = write_mmr_payload_first,
	.write_payload_last = write_mmr_payload_last,
	.wait_completion = uv2_3_wait_completion,
};

static const struct bau_operations uv4_bau_ops __initconst = {
	.bau_gpa_to_offset = uv_gpa_to_soc_phys_ram,
	.read_l_sw_ack = read_mmr_proc_sw_ack,
	.read_g_sw_ack = read_gmmr_proc_sw_ack,
	.write_l_sw_ack = write_mmr_proc_sw_ack,
	.write_g_sw_ack = write_gmmr_proc_sw_ack,
	.write_payload_first = write_mmr_proc_payload_first,
	.write_payload_last = write_mmr_proc_payload_last,
	.wait_completion = uv4_wait_completion,
};
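
/*
 * Initialization of BAU
 */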
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int cpus;
	int vector;
	cpumask_var_t *mask;

	if (!is_uv_system())
		return 0;

	if (is_uv4_hub())
		ops = uv4_bau_ops;
	else if (is_uv3_hub())
		ops = uv2_3_bau_ops;
	else if (is_uv2_hub())
		ops = uv2_3_bau_ops;

	nuvhubs = uv_num_possible_blades();
	if (nuvhubs < 2) {
		pr_crit("UV: BAU disabled - insufficient hub count\n");
		goto err_bau_disable;
	}

	for_each_possible_cpu(cur_cpu) {
		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
		zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
	}

	uv_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		cpus = uv_blade_nr_possible_cpus(uvhub);
		if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
			uv_base_pnode = uv_blade_to_pnode(uvhub);
	}

	/* software timeouts are not supported on UV4 */
	if (is_uv3_hub() || is_uv2_hub())
		enable_timeouts();

	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
		pr_crit("UV: BAU disabled - per CPU init failed\n");
		goto err_bau_disable;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub))
			init_uvhub(uvhub, vector, uv_base_pnode);
	}

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			unsigned long val;
			unsigned long mmr;
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			val = 1L << 63;
			write_gmmr_activation(pnode, val);
			mmr = 1; /* should be 1 to broadcast to both sockets */
			write_mmr_data_broadcast(pnode, mmr);
		}
	}

	return 0;

err_bau_disable:

	for_each_possible_cpu(cur_cpu)
		free_cpumask_var(per_cpu(uv_flush_tlb_mask, cur_cpu));

	set_bau_off();
	nobau_perm = 1;

	return -EINVAL;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);