/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

static u32 lpi_id_bits;
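
/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */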
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)

#define LPI_PROP_DEFAULT_PRIO	0xa0
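
/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per collection (aka, per core).
 */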
struct its_collection {
	u64			target_address;
	u16			col_id;
};
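
/*
 * The ITS_BASER structure - contains memory information and a cached
 * read-out of the BASER register.
 */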
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;
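
/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */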
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	bool			is_v4;
	int			vlpi_redist_offset;
};

#define ITS_ITT_ALIGN		SZ_256
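
/* The maximum number of VPEID bits supported by VLPI commands */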
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))
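
/* Convert page order to size in bytes */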
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};
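
/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of LPIs. If some of those LPIs are
 * forwarded to a guest as VLPIs (GICv4), event_map.vm points to the
 * VM they belong to.
 */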
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}
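
/*
 * ITS command descriptor - one per command issued to the ITS. The
 * union covers the parameters of every supported command.
 */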
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};
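
/*
 * The ITS command block, which is what the ITS actually parses.
 */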
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return desc->its_vinvall_cmd.vpe;
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return desc->its_vmapp_cmd.vpe;
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;	/* 1023 means "no doorbell" */

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return desc->its_vmapti_cmd.vpe;
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;	/* 1023 means "no doorbell" */

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return desc->its_vmovi_cmd.vpe;
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return desc->its_vmovp_cmd.vpe;
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* The queue is full if advancing the write pointer would hit the read pointer */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 struct its_cmd_block *from,
					 struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/* Direct case */
		if (from_idx < to_idx && rd_idx >= to_idx)
			break;

		/* Wrapped case */
		if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
					   from_idx, to_idx, rd_idx);
			return -1;
		}
		cpu_relax();
		udelay(1);
	}

	return 0;
}
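
/* Warning, macro hell follows */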
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screewed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, cmd, next_cmd))		\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc;
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;
	desc.its_vmovp_cmd.its_list = (u16)its_list_map;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.seq_num = 0;
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
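
/*
 * irqchip functions - assumes MSI, mostly.
 */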
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	irq_hw_number_t hwirq;
	struct page *prop_page;
	u8 *cfg;

	if (irqd_is_forwarded_to_vcpu(d)) {
		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
		u32 event = its_get_event_id(d);
		struct its_vlpi_map *map;

		prop_page = its_dev->event_map.vm->vprop_page;
		map = &its_dev->event_map.vlpi_maps[event];
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		prop_page = gic_rdists->prop_page;
		hwirq = d->hwirq;
	}

	/* LPI INTIDs start at 8192, and the property table is indexed from 0 */
	cfg = page_address(prop_page) + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 *
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
		return;

	its_dev->event_map.vlpi_maps[event].db_enabled = enable;

	/*
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, issue a VMOVI to the
	 * /same/ vPE, using this opportunity to adjust the doorbell.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is same as current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_map_msi_msg(d->irq, msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state)
		its_send_int(its_dev, event);
	else
		its_send_clear(its_dev, event);

	return 0;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis,
			       GFP_KERNEL);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm ||
	    !its_dev->event_map.vlpi_maps[event].vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = its_dev->event_map.vlpi_maps[event];

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!its_dev->its->is_v4)
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};
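
/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs in chunks of 32, we can shift the whole thing by 5
 * bits to the right.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) chunks.
 */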
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1UL << IRQS_PER_CHUNK_SHIFT)
#define ITS_MAX_LPI_NRBITS	16

static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

static int __init its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		/* Cannot fit the request; retry with fewer chunks */
		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
{
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);

		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(bitmap);
}

static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
	struct page *prop_page;

	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
	if (!prop_page)
		return NULL;

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);

	return prop_page;
}

static void its_free_prop_table(struct page *prop_page)
{
	free_pages((unsigned long)page_address(prop_page),
		   get_order(LPI_PROPBASE_SZ));
}

static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
	gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	return its_lpi_init(lpi_id_bits);
}

static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_RESERVED3]	= "Reserved (3)",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u64 baser_phys, tmp;
	u32 alloc_pages;
	void *base;

retry_alloc_baser:
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!base)
		return -ENOMEM;

	baser_phys = virt_to_phys(base);

	/* Check if the physical address of the memory is above 48bits */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {

		/* 52bit PA is supported only when PageSize=64K */
		if (psz != SZ_64K) {
			pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
			free_pages((unsigned long)base, order);
			return -ENXIO;
		}

		/* Convert 52bit PA to 48bit field */
		baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
	}

retry_baser:
	val = (baser_phys					 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |= indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
		/*
		 * Page size didn't stick. Let's try a smaller
		 * size and retry. If we reach 4K, then
		 * something is horribly wrong...
		 */
		free_pages((unsigned long)base, order);
		baser->base = NULL;

		switch (psz) {
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		}
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
		       &its->phys_base, its_base_type_string[type],
		       val, tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}

static bool its_parse_indirect_baser(struct its_node *its,
				     struct its_baser *baser,
				     u32 psz, u32 *order, u32 ids)
{
	u64 tmp = its_read_baser(its, baser);
	u64 type = GITS_BASER_TYPE(tmp);
	u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
	u32 new_order = *order;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level table
		 * by reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of the lvl2 table is equal to ITS page size
			 * which is 'psz'. For computing lvl1 table size,
			 * subtract ID bits that sparse lvl2 table from 'ids'
			 * which is reported by ITS hardware times lvl1 table
			 * entry size.
			 */
			ids -= ilog2(psz / (int)esz);
			esz = GITS_LVL1_ENTRY_SIZE;
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory.
	 *
	 * For other tables, only allocate a single page.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order >= MAX_ORDER) {
		new_order = MAX_ORDER - 1;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
		pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			its->device_ids, ids);
	}

	*order = new_order;

	return indirect;
}

static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}

static int its_alloc_tables(struct its_node *its)
{
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_RaWaWb;
	u32 psz = SZ_64K;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
		/* erratum 24313: ignore memory access type */
		cache = GITS_BASER_nCnB;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
		u32 order = get_order(psz);
		bool indirect = false;

		switch (type) {
		case GITS_BASER_TYPE_NONE:
			continue;

		case GITS_BASER_TYPE_DEVICE:
			indirect = its_parse_indirect_baser(its, baser,
							    psz, &order,
							    its->device_ids);
			break;

		case GITS_BASER_TYPE_VCPU:
			indirect = its_parse_indirect_baser(its, baser,
							    psz, &order,
							    ITS_MAX_VPEID_BITS);
			break;
		}

		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
		psz = baser->psz;
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}

	return 0;
}

static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}

static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
	struct page *pend_page;

	/*
	 * The pending pages have to be at least 64kB aligned,
	 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
	 */
	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
				get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
	if (!pend_page)
		return NULL;

	/* Make sure the GIC will observe the zero-ed page */
	gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);

	return pend_page;
}

static void its_free_pending_table(struct page *pt)
{
	free_pages((unsigned long)page_address(pt),
		   get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
}

static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;

		pend_page = its_allocate_pending_table(GFP_NOWAIT);
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_RaWaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			gicr_write_propbaser(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_RaWaWb);

	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}

static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/* avoid cross node collections and its mapping */
		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
			struct device_node *cpu_node;

			cpu_node = of_get_cpu_node(cpu, NULL);
			if (its->numa_node != NUMA_NO_NODE &&
			    its->numa_node != of_node_to_nid(cpu_node))
				continue;
		}

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/* This ITS wants a linear CPU number. */
			target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}

static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];
	}

	return NULL;
}

static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
{
	struct page *page;
	u32 esz, idx;
	__le64 *table;

	/* Don't allow entry id that exceeds single, flat table limit */
	esz = GITS_BASER_ENTRY_SIZE(baser->val);
	if (!(baser->val & GITS_BASER_INDIRECT))
		return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

	/* Compute 2nd level table index & check if that exceeds table limit */
	idx = id >> ilog2(baser->psz / esz);
	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
		return false;

	table = baser->base;

	/* Allocate memory for 2nd level table */
	if (!table[idx]) {
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
		if (!page)
			return false;

		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(page_address(page), baser->psz);

		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);

		/* Ensure updated table contents are visible to ITS hardware */
		dsb(sy);
	}

	return true;
}

static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
	struct its_baser *baser;

	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

	/* Don't allow device id that exceeds ITS hardware limit */
	if (!baser)
		return (ilog2(dev_id) < its->device_ids);

	return its_alloc_table_entry(baser, dev_id);
}

static bool its_alloc_vpe_table(u32 vpe_id)
{
	struct its_node *its;

	/*
	 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
	 * could try and only do it on ITSs corresponding to devices
	 * that have interrupts targeted at this VPE, but the
	 * complexity becomes crazy (and you have tons of memory
	 * anyway, right?).
	 */
	list_for_each_entry(its, &its_nodes, entry) {
		struct its_baser *baser;

		if (!its->is_v4)
			continue;

		baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
		if (!baser)
			return false;

		if (!its_alloc_table_entry(baser, vpe_id))
			return false;
	}

	return true;
}

static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs, bool alloc_lpis)
{
	struct its_device *dev;
	unsigned long *lpi_map = NULL;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	if (!its_alloc_device_table(its, dev_id))
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * We allocate at least one chunk worth of LPIs bent on getting
	 * a multiple of this. The ITT must be 256 byte aligned, so
	 * over-allocate and align the address in its_build_mapd_cmd().
	 */
	nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc(sz, GFP_KERNEL);
	if (alloc_lpis) {
		lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
		if (lpi_map)
			col_map = kzalloc(sizeof(*col_map) * nr_lpis,
					  GFP_KERNEL);
	} else {
		col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL);
		nr_lpis = 0;
		lpi_base = 0;
	}

	if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	gic_flush_dcache_to_poc(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	mutex_init(&dev->event_map.vlpi_lock);
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}

static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}

static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
	int idx;

	idx = find_first_zero_bit(dev->event_map.lpi_map,
				  dev->event_map.nr_lpis);
	if (idx == dev->event_map.nr_lpis)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}

static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	if (!gic_rdists->has_direct_lpi &&
	    vpe_proxy.dev &&
	    vpe_proxy.dev->its == its &&
	    dev_id == vpe_proxy.dev->device_id) {
		/* Bad luck. Get your identity sorted. */
		WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
			  dev_id);
		return -EINVAL;
	}

	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec, true);
	if (!its_dev)
		return -ENOMEM;

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	info->scratchpad[0].ptr = its_dev;
	return 0;
}

static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};

static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;

	if (irq_domain_get_of_node(domain->parent)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
		fwspec.param[1] = hwirq;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}

static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		err = its_alloc_device_irq(its_dev, &hwirq);
		if (err)
			return err;

		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq, &its_irq_chip, its_dev);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq - its_dev->event_map.lpi_base),
			 (int)hwirq, virq + i);
	}

	return 0;
}

static int its_irq_domain_activate(struct irq_domain *domain,
				   struct irq_data *d, bool reserve)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	const struct cpumask *cpu_mask = cpu_online_mask;
	int cpu;

	/* get the cpu_mask of local node */
	if (its_dev->its->numa_node >= 0)
		cpu_mask = cpumask_of_node(its_dev->its->numa_node);

	/* Bind the LPI to the first possible CPU */
	cpu = cpumask_first(cpu_mask);
	its_dev->event_map.col_map[event] = cpu;
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/* Map the GIC IRQ and event to the device */
	its_send_mapti(its_dev, d->hwirq, event);
	return 0;
}

static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}

static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	/* If all interrupts have been freed, start mopping the floor */
	if (bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free_chunks(its_dev->event_map.lpi_map,
				    its_dev->event_map.lpi_base,
				    its_dev->event_map.nr_lpis);
		kfree(its_dev->event_map.col_map);

		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};

/*
 * This is insane.
 *
 * If a GICv4 doesn't implement Direct LPIs (which is extremely
 * likely), the only way to perform an invalidate is to use a fake
 * device to issue an INV command, implying that the LPI has first
 * been mapped to some event on that device. Since this is not exactly
 * cheap, we try to keep that mapping around as long as possible, and
 * only issue an UNMAP if we're short on available slots.
 *
 * Broken by design(tm).
 */
static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
{
	/* Already unmapped? */
	if (vpe->vpe_proxy_event == -1)
		return;

	its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
	vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;

	/*
	 * We don't track empty slots at all, so let's move the
	 * next_victim pointer if we can quickly reuse that slot
	 * instead of nuking an existing entry. Not clear that this is
	 * always a win though, and this might just generate a ripple
	 * effect... Let's just hope VPEs don't migrate too often.
	 */
	if (vpe_proxy.vpes[vpe_proxy.next_victim])
		vpe_proxy.next_victim = vpe->vpe_proxy_event;

	vpe->vpe_proxy_event = -1;
}

static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
{
	if (!gic_rdists->has_direct_lpi) {
		unsigned long flags;

		raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
		its_vpe_db_proxy_unmap_locked(vpe);
		raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
	}
}

static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
	/* Already mapped? */
	if (vpe->vpe_proxy_event != -1)
		return;

	/* This slot was already allocated. Kick the other VPE out. */
	if (vpe_proxy.vpes[vpe_proxy.next_victim])
		its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);

	/* Map the new VPE instead */
	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
	vpe->vpe_proxy_event = vpe_proxy.next_victim;
	vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;

	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
	its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
}

static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
{
	unsigned long flags;
	struct its_collection *target_col;

	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
		while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
			cpu_relax();

		return;
	}

	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);

	its_vpe_db_proxy_map_locked(vpe);

	target_col = &vpe_proxy.dev->its->collections[to];
	its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;

	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}

static int its_vpe_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val,
				bool force)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	int cpu = cpumask_first(mask_val);

	/*
	 * Changing affinity is mega expensive, so let's be as lazy as
	 * we can and only do it if we really have to. Also, if mapped
	 * into the proxy device, we need to move the doorbell
	 * interrupt to its new location.
	 */
	if (vpe->col_idx != cpu) {
		int from = vpe->col_idx;

		vpe->col_idx = cpu;
		its_send_vmovp(vpe);
		its_vpe_db_proxy_move(vpe, from, cpu);
	}

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}

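/*
 * Make this VPE resident: point GICR_VPROPBASER at the VM's property
 * table (address bits [51:12], plus ID-bit count and cacheability
 * attributes) and GICR_VPENDBASER at this VPE's private pending table
 * (address bits [51:16]), then set the Valid bit to start delivery.
 */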
static void its_vpe_schedule(struct its_vpe *vpe)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val;

	/* Schedule the VPE */
	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
		GENMASK_ULL(51, 12);
	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
	val |= GICR_VPROPBASER_RaWb;
	val |= GICR_VPROPBASER_InnerShareable;
	gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

	val  = virt_to_phys(page_address(vpe->vpt_page)) &
		GENMASK_ULL(51, 16);
	val |= GICR_VPENDBASER_RaWaWb;
	val |= GICR_VPENDBASER_NonShareable;

	/*
	 * There is no good way of finding out if the pending table is
	 * empty as we can race against the doorbell interrupt very
	 * easily. So in the end, vpe->pending_last is only an
	 * indication that the vcpu has something pending, not one
	 * that the pending table is empty. A good implementation
	 * would be able to read its coarse map pretty quickly anyway,
	 * making this a tolerable issue.
	 */
	val |= GICR_VPENDBASER_PendingLast;
	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
	val |= GICR_VPENDBASER_Valid;
	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}

static void its_vpe_deschedule(struct its_vpe *vpe)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u32 count = 1000000;	/* 1s! */
	bool clean;
	u64 val;

	/* We're being scheduled out */
	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
	val &= ~GICR_VPENDBASER_Valid;
	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);

	do {
		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
		clean = !(val & GICR_VPENDBASER_Dirty);
		if (!clean) {
			count--;
			cpu_relax();
			udelay(1);
		}
	} while (!clean && count);

	if (unlikely(!clean && !count)) {
		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
		vpe->idai = false;
		vpe->pending_last = true;
	} else {
		vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
		vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
	}
}

static void its_vpe_invall(struct its_vpe *vpe)
{
	struct its_node *its;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		/*
		 * Sending a VINVALL to a single ITS is enough, as all
		 * we need is to reach the redistributors.
		 */
		its_send_vinvall(its, vpe);
		return;
	}
}

static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	switch (info->cmd_type) {
	case SCHEDULE_VPE:
		its_vpe_schedule(vpe);
		return 0;

	case DESCHEDULE_VPE:
		its_vpe_deschedule(vpe);
		return 0;

	case INVALL_VPE:
		its_vpe_invall(vpe);
		return 0;

	default:
		return -EINVAL;
	}
}

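/*
 * Helper to drive a VPE doorbell when DirectLPI is absent: map the
 * doorbell onto a proxy device event if necessary, then run the given
 * command (its_send_int/clear/inv) against that event, all under
 * vpe_proxy.lock.
 */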
static void its_vpe_send_cmd(struct its_vpe *vpe,
			     void (*cmd)(struct its_device *, u32))
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);

	its_vpe_db_proxy_map_locked(vpe);
	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);

	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}

static void its_vpe_send_inv(struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
		while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
			cpu_relax();
	} else {
		its_vpe_send_cmd(vpe, its_send_inv);
	}
}

static void its_vpe_mask_irq(struct irq_data *d)
{
	/*
	 * The doorbell LPI is described by the parent irq_data, not
	 * by ours. Instead of calling into the parent (which won't
	 * exactly do the right thing), simply poke the configuration
	 * through the parent_data pointer.
	 */
	lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
	its_vpe_send_inv(d);
}

static void its_vpe_unmask_irq(struct irq_data *d)
{
	/* Same trick as above, in the other direction */
	lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
	its_vpe_send_inv(d);
}

static int its_vpe_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (gic_rdists->has_direct_lpi) {
		void __iomem *rdbase;

		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
		if (state) {
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
		} else {
			gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
			while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
				cpu_relax();
		}
	} else {
		if (state)
			its_vpe_send_cmd(vpe, its_send_int);
		else
			its_vpe_send_cmd(vpe, its_send_clear);
	}

	return 0;
}

static struct irq_chip its_vpe_irq_chip = {
	.name			= "GICv4-vpe",
	.irq_mask		= its_vpe_mask_irq,
	.irq_unmask		= its_vpe_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_vpe_set_affinity,
	.irq_set_irqchip_state	= its_vpe_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_vpe_set_vcpu_affinity,
};

static int its_vpe_id_alloc(void)
{
	return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
}

static void its_vpe_id_free(u16 id)
{
	ida_simple_remove(&its_vpeid_ida, id);
}

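/*
 * Per-VPE setup: a unique vpe_id from the IDA, a virtual pending
 * table page, and (via its_alloc_vpe_table()) backing in the ITS vPE
 * tables for that ID. vpe_proxy_event starts out unmapped (-1).
 */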
static int its_vpe_init(struct its_vpe *vpe)
{
	struct page *vpt_page;
	int vpe_id;

	/* Allocate vpe_id */
	vpe_id = its_vpe_id_alloc();
	if (vpe_id < 0)
		return vpe_id;

	/* Allocate VPT */
	vpt_page = its_allocate_pending_table(GFP_KERNEL);
	if (!vpt_page) {
		its_vpe_id_free(vpe_id);
		return -ENOMEM;
	}

	if (!its_alloc_vpe_table(vpe_id)) {
		its_vpe_id_free(vpe_id);
		/* Free the local page: vpe->vpt_page hasn't been set yet */
		its_free_pending_table(vpt_page);
		return -ENOMEM;
	}

	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	vpe->vpe_proxy_event = -1;

	return 0;
}

static void its_vpe_teardown(struct its_vpe *vpe)
{
	its_vpe_db_proxy_unmap(vpe);
	its_vpe_id_free(vpe->vpe_id);
	its_free_pending_table(vpe->vpt_page);
}

static void its_vpe_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq,
				    unsigned int nr_irqs)
{
	struct its_vm *vm = domain->host_data;
	int i;

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		struct its_vpe *vpe = irq_data_get_irq_chip_data(data);

		BUG_ON(vm != vpe->its_vm);

		clear_bit(data->hwirq, vm->db_bitmap);
		its_vpe_teardown(vpe);
		irq_domain_reset_irq_data(data);
	}

	if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
		its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
		its_free_prop_table(vm->vprop_page);
	}
}

static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				    unsigned int nr_irqs, void *args)
{
	struct its_vm *vm = args;
	unsigned long *bitmap;
	struct page *vprop_page;
	int base, nr_ids, i, err = 0;

	BUG_ON(!vm);

	bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
	if (!bitmap)
		return -ENOMEM;

	if (nr_ids < nr_irqs) {
		its_lpi_free_chunks(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vprop_page = its_allocate_prop_table(GFP_KERNEL);
	if (!vprop_page) {
		its_lpi_free_chunks(bitmap, base, nr_ids);
		return -ENOMEM;
	}

	vm->db_bitmap = bitmap;
	vm->db_lpi_base = base;
	vm->nr_db_lpis = nr_ids;
	vm->vprop_page = vprop_page;

	for (i = 0; i < nr_irqs; i++) {
		vm->vpes[i]->vpe_db_lpi = base + i;
		err = its_vpe_init(vm->vpes[i]);
		if (err)
			break;
		err = its_irq_gic_domain_alloc(domain, virq + i,
					       vm->vpes[i]->vpe_db_lpi);
		if (err)
			break;
		irq_domain_set_hwirq_and_chip(domain, virq + i, i,
					      &its_vpe_irq_chip, vm->vpes[i]);
		set_bit(i, bitmap);
	}

	if (err) {
		if (i > 0) {
			/*
			 * Free all i interrupts that were successfully
			 * set up (freeing only "i - 1" would leak the
			 * last one). Once their doorbell bits are
			 * cleared, the free path releases the bitmap
			 * and the property table itself.
			 */
			its_vpe_irq_domain_free(domain, virq, i);
		} else {
			its_lpi_free_chunks(bitmap, base, nr_ids);
			its_free_prop_table(vprop_page);
		}
	}

	return err;
}

static int its_vpe_irq_domain_activate(struct irq_domain *domain,
				       struct irq_data *d, bool reserve)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/* If we use the list map, we issue VMAPP on demand... */
	if (its_list_map)
		return 0;

	/* Map the VPE to the first possible CPU */
	vpe->col_idx = cpumask_first(cpu_online_mask);

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		its_send_vmapp(its, vpe, true);
		its_send_vinvall(its, vpe);
	}

	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));

	return 0;
}

static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
					  struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_node *its;

	/*
	 * If we use the list map, we unmap the VPE once no VLPIs are
	 * associated with the VM.
	 */
	if (its_list_map)
		return;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!its->is_v4)
			continue;

		its_send_vmapp(its, vpe, false);
	}
}

static const struct irq_domain_ops its_vpe_domain_ops = {
	.alloc			= its_vpe_irq_domain_alloc,
	.free			= its_vpe_irq_domain_free,
	.activate		= its_vpe_irq_domain_activate,
	.deactivate		= its_vpe_irq_domain_deactivate,
};

static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);

	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}

static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	/* erratum 22375: only alloc 8MB table size */
	its->device_ids = 0x14;		/* 20 bits, 8MB */
	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;

	return true;
}

static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;

	return true;
}

static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
	struct its_node *its = data;

	/* On QDF2400, the size of the ITE is 16Bytes */
	its->ite_size = 16;

	return true;
}

static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	/*
	 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
	 * which maps 32-bit writes targeted at a separate window of
	 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
	 * with device ID taken from bits [device_id_bits + 1:2] of
	 * the window offset.
	 */
	return its->pre_its_base + (its_dev->device_id << 2);
}

static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
{
	struct its_node *its = data;
	u32 pre_its_window[2];
	u32 ids;

	if (!fwnode_property_read_u32_array(its->fwnode_handle,
					    "socionext,synquacer-pre-its",
					    pre_its_window,
					    ARRAY_SIZE(pre_its_window))) {

		its->pre_its_base = pre_its_window[0];
		its->get_msi_base = its_irq_get_msi_base_pre_its;

		ids = ilog2(pre_its_window[1]) - 2;
		if (its->device_ids > ids)
			its->device_ids = ids;

		/* the pre-ITS breaks isolation, so disable MSI remapping */
		its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
		return true;
	}
	return false;
}

static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
{
	struct its_node *its = data;

	/*
	 * Hip07 insists on using the wrong address for the VLPI
	 * page. Trick it into doing the right thing...
	 */
	its->vlpi_redist_offset = SZ_128K;
	return true;
}

static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
	{
		.desc	= "ITS: QDF2400 erratum 0065",
		.iidr	= 0x00001070,	/* QDF2400 ITS rev 1.x */
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_qdf2400_e0065,
	},
#endif
#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
	{
		/*
		 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
		 * implementation, but with a 'pre-ITS' added that requires
		 * special handling in software.
		 */
		.desc	= "ITS: Socionext Synquacer pre-ITS",
		.iidr	= 0x0001143b,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_socionext_synquacer,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161600802
	{
		.desc	= "ITS: Hip07 erratum 161600802",
		.iidr	= 0x00000004,
		.mask	= 0xffffffff,
		.init	= its_enable_quirk_hip07_161600802,
	},
#endif
	{
	}
};

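/*
 * Apply the quirk table to this ITS: gic_enable_quirks() (in
 * irq-gic-common.c) runs every entry's init hook whose masked
 * GITS_IIDR value matches, i.e. when (iidr & mask) == entry->iidr.
 */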
static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}

static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
	struct irq_domain *inner_domain;
	struct msi_domain_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
	if (!inner_domain) {
		kfree(info);
		return -ENOMEM;
	}

	inner_domain->parent = its_parent;
	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->flags |= its->msi_domain_flags;
	info->ops = &its_msi_domain_ops;
	info->data = its;
	inner_domain->host_data = info;

	return 0;
}

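/*
 * Without DirectLPI, VPE doorbells are controlled by mapping them as
 * events on a fake "proxy" ITS device, with a slot count derived from
 * nr_cpu_ids. With DirectLPI the redistributors handle doorbells
 * natively and no proxy device is needed.
 */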
static int its_init_vpe_domain(void)
{
	struct its_node *its;
	u32 devid;
	int entries;

	if (gic_rdists->has_direct_lpi) {
		pr_info("ITS: Using DirectLPI for VPE invalidation\n");
		return 0;
	}

	/* Any ITS will do, even if not v4 */
	its = list_first_entry(&its_nodes, struct its_node, entry);

	entries = roundup_pow_of_two(nr_cpu_ids);
	vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
				 GFP_KERNEL);
	if (!vpe_proxy.vpes) {
		pr_err("ITS: Can't allocate GICv4 proxy device array\n");
		return -ENOMEM;
	}

	/* Use the last possible DevID */
	devid = GENMASK(its->device_ids - 1, 0);
	vpe_proxy.dev = its_create_device(its, devid, entries, false);
	if (!vpe_proxy.dev) {
		kfree(vpe_proxy.vpes);
		pr_err("ITS: Can't allocate GICv4 proxy device\n");
		return -ENOMEM;
	}

	BUG_ON(entries > vpe_proxy.dev->nr_ites);

	raw_spin_lock_init(&vpe_proxy.lock);
	vpe_proxy.next_victim = 0;
	pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
		devid, vpe_proxy.dev->nr_ites);

	return 0;
}

static int __init its_compute_its_list_map(struct resource *res,
					   void __iomem *its_base)
{
	int its_number;
	u32 ctlr;

	/*
	 * This is assumed to be done early enough that we're
	 * guaranteed to be single-threaded, hence no
	 * locking. Should this change, we should address
	 * all of this.
	 */
	its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
	if (its_number >= GICv4_ITS_LIST_MAX) {
		pr_err("ITS@%pa: No ITSList entry available!\n",
		       &res->start);
		return -EINVAL;
	}

	ctlr = readl_relaxed(its_base + GITS_CTLR);
	ctlr &= ~GITS_CTLR_ITS_NUMBER;
	ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
	writel_relaxed(ctlr, its_base + GITS_CTLR);
	ctlr = readl_relaxed(its_base + GITS_CTLR);
	if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
		its_number = ctlr & GITS_CTLR_ITS_NUMBER;
		its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
	}

	if (test_and_set_bit(its_number, &its_list_map)) {
		pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
		       &res->start, its_number);
		return -EINVAL;
	}

	return its_number;
}

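/*
 * Probe a single ITS instance: map its registers, check the
 * architecture revision in GITS_PIDR2 (0x30 for GICv3, 0x40 for
 * GICv4), force the ITS quiescent, then set up the command queue,
 * the per-ITS tables and the MSI domain before enabling it.
 */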
static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u32 val, ctlr;
	u64 baser, tmp, typer;
	int err;

	its_base = ioremap(res->start, resource_size(res));
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->base = its_base;
	its->phys_base = res->start;
	its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
	its->device_ids = GITS_TYPER_DEVBITS(typer);
	its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
	if (its->is_v4) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}
	}

	its->numa_node = numa_node;

	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 get_order(ITS_CMD_QUEUE_SZ));
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (its->is_v4)
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		if (!gic_rdists_supports_plpis()) {
			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
			return -ENXIO;
		}
		its_cpu_init_lpis();
		its_cpu_init_collection();
	}

	return 0;
}

static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}

#ifdef CONFIG_ACPI

#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	u32	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

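/*
 * Dummy matcher: acpi_table_parse_entries() returns the number of
 * entries it matched, so this first pass only counts the ITS
 * affinity records in order to size its_srat_maps.
 */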
static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif

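/*
 * Each MADT "generic translator" entry describes one ITS: carve out
 * its 128K register window, allocate a fwnode token, register that
 * token with IORT so devices can find their MSI domain, and probe
 * the ITS proper.
 */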
static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif

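/*
 * Top-level entry point: discover every ITS (DT or ACPI), allocate
 * the LPI tables, and turn on GICv4 vLPI handling when at least one
 * v4-capable ITS and VLPI-capable redistributors are both present.
 */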
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	int err;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	err = its_alloc_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry)
		has_v4 |= its->is_v4;

	if (has_v4 && rdists->has_vlpis) {
		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	return 0;
}
