1/*
2 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/acpi.h>
19#include <linux/acpi_iort.h>
20#include <linux/bitmap.h>
21#include <linux/cpu.h>
22#include <linux/crash_dump.h>
23#include <linux/delay.h>
24#include <linux/dma-iommu.h>
25#include <linux/efi.h>
26#include <linux/interrupt.h>
27#include <linux/irqdomain.h>
28#include <linux/list.h>
29#include <linux/list_sort.h>
30#include <linux/log2.h>
31#include <linux/memblock.h>
32#include <linux/mm.h>
33#include <linux/msi.h>
34#include <linux/of.h>
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
37#include <linux/of_pci.h>
38#include <linux/of_platform.h>
39#include <linux/percpu.h>
40#include <linux/slab.h>
41#include <linux/syscore_ops.h>
42
43#include <linux/irqchip.h>
44#include <linux/irqchip/arm-gic-v3.h>
45#include <linux/irqchip/arm-gic-v4.h>
46
47#include <asm/cputype.h>
48#include <asm/exception.h>
49
50#include "irq-gic-common.h"
51
52#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
53#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
54#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
55#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
56
57#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
58#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
59
60static u32 lpi_id_bits;
61
62/*
63 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs
64 * (one configuration byte per interrupt). PENDBASE has to
65 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
66 */
67#define LPI_NRBITS lpi_id_bits
68#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
69#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
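/*
 * Illustrative sizing, assuming lpi_id_bits = 16 (the driver maximum):
 * LPI_PROPBASE_SZ = ALIGN(BIT(16), SZ_64K) = 64KB, which comfortably
 * covers the 2^16 - 8192 one-byte LPI configuration entries, while
 * LPI_PENDBASE_SZ = ALIGN(BIT(16) / 8, SZ_64K) = 64KB, as the 8KB of
 * pending bits must still be rounded up to the required 64KB alignment.
 */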
70
71#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
72
73/*
74 * Collection structure - just an ID, and a redistributor address to
75 * ping. We use one per CPU as a bag of interrupts assigned to this
76 * CPU.
77 */
78struct its_collection {
79 u64 target_address;
80 u16 col_id;
81};
82
83/*
84 * The ITS_BASER structure - contains memory information, cached
85 * value of BASER register configuration and ITS page size.
86 */
87struct its_baser {
88 void *base;
89 u64 val;
90 u32 order;
91 u32 psz;
92};
93
94struct its_device;
95
96/*
97 * The ITS structure - contains most of the infrastructure, with the
98 * top-level MSI domain, the command queue, the collections, and the
99 * list of devices writing to it.
100 *
101 * dev_alloc_lock has to be taken for device allocations, while the
102 * spinlock must be taken to parse data structures such as the device
103 * list.
104 */
105struct its_node {
106 raw_spinlock_t lock;
107 struct mutex dev_alloc_lock;
108 struct list_head entry;
109 void __iomem *base;
110 phys_addr_t phys_base;
111 struct its_cmd_block *cmd_base;
112 struct its_cmd_block *cmd_write;
113 struct its_baser tables[GITS_BASER_NR_REGS];
114 struct its_collection *collections;
115 struct fwnode_handle *fwnode_handle;
116 u64 (*get_msi_base)(struct its_device *its_dev);
117 u64 cbaser_save;
118 u32 ctlr_save;
119 struct list_head its_device_list;
120 u64 flags;
121 unsigned long list_nr;
122 u32 ite_size;
123 u32 device_ids;
124 int numa_node;
125 unsigned int msi_domain_flags;
126 u32 pre_its_base; /* for Socionext Synquacer */
127 bool is_v4;
128 int vlpi_redist_offset;
129};
130
131#define ITS_ITT_ALIGN SZ_256
132
133/* The maximum number of VPEID bits supported by VLPI commands */
134#define ITS_MAX_VPEID_BITS (16)
135#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
136
137/* Convert page order to size in bytes */
138#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
139
140struct event_lpi_map {
141 unsigned long *lpi_map;
142 u16 *col_map;
143 irq_hw_number_t lpi_base;
144 int nr_lpis;
145 struct mutex vlpi_lock;
146 struct its_vm *vm;
147 struct its_vlpi_map *vlpi_maps;
148 int nr_vlpis;
149};
150
151/*
152 * The ITS view of a device - belongs to an ITS, owns an interrupt
153 * translation table, and a list of interrupts. If some of its
154 * LPIs are injected into a guest (GICv4), the event_map.vm field
155 * indicates which one.
156 */
157struct its_device {
158 struct list_head entry;
159 struct its_node *its;
160 struct event_lpi_map event_map;
161 void *itt;
162 u32 nr_ites;
163 u32 device_id;
164 bool shared;
165};
166
167static struct {
168 raw_spinlock_t lock;
169 struct its_device *dev;
170 struct its_vpe **vpes;
171 int next_victim;
172} vpe_proxy;
173
174static LIST_HEAD(its_nodes);
175static DEFINE_RAW_SPINLOCK(its_lock);
176static struct rdists *gic_rdists;
177static struct irq_domain *its_parent;
178
179static unsigned long its_list_map;
180static u16 vmovp_seq_num;
181static DEFINE_RAW_SPINLOCK(vmovp_lock);
182
183static DEFINE_IDA(its_vpeid_ida);
184
185#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
186#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
187#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
188#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
189
190static struct its_collection *dev_event_to_col(struct its_device *its_dev,
191 u32 event)
192{
193 struct its_node *its = its_dev->its;
194
195 return its->collections + its_dev->event_map.col_map[event];
196}
197
198static struct its_collection *valid_col(struct its_collection *col)
199{
200 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
201 return NULL;
202
203 return col;
204}
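/*
 * The check above exists because the target field of ITS commands only
 * holds bits [51:16] of the redistributor address (see its_encode_target()
 * below), so a collection whose target has any of its low 16 bits set
 * could never be programmed faithfully.
 */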
205
206static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
207{
208 if (valid_col(its->collections + vpe->col_idx))
209 return vpe;
210
211 return NULL;
212}
213
214/*
215 * ITS command descriptors - parameters to be encoded in a command
216 * block.
217 */
218struct its_cmd_desc {
219 union {
220 struct {
221 struct its_device *dev;
222 u32 event_id;
223 } its_inv_cmd;
224
225 struct {
226 struct its_device *dev;
227 u32 event_id;
228 } its_clear_cmd;
229
230 struct {
231 struct its_device *dev;
232 u32 event_id;
233 } its_int_cmd;
234
235 struct {
236 struct its_device *dev;
237 int valid;
238 } its_mapd_cmd;
239
240 struct {
241 struct its_collection *col;
242 int valid;
243 } its_mapc_cmd;
244
245 struct {
246 struct its_device *dev;
247 u32 phys_id;
248 u32 event_id;
249 } its_mapti_cmd;
250
251 struct {
252 struct its_device *dev;
253 struct its_collection *col;
254 u32 event_id;
255 } its_movi_cmd;
256
257 struct {
258 struct its_device *dev;
259 u32 event_id;
260 } its_discard_cmd;
261
262 struct {
263 struct its_collection *col;
264 } its_invall_cmd;
265
266 struct {
267 struct its_vpe *vpe;
268 } its_vinvall_cmd;
269
270 struct {
271 struct its_vpe *vpe;
272 struct its_collection *col;
273 bool valid;
274 } its_vmapp_cmd;
275
276 struct {
277 struct its_vpe *vpe;
278 struct its_device *dev;
279 u32 virt_id;
280 u32 event_id;
281 bool db_enabled;
282 } its_vmapti_cmd;
283
284 struct {
285 struct its_vpe *vpe;
286 struct its_device *dev;
287 u32 event_id;
288 bool db_enabled;
289 } its_vmovi_cmd;
290
291 struct {
292 struct its_vpe *vpe;
293 struct its_collection *col;
294 u16 seq_num;
295 u16 its_list;
296 } its_vmovp_cmd;
297 };
298};
299
300/*
301 * The ITS command block, which is what the ITS actually parses.
302 */
303struct its_cmd_block {
304 u64 raw_cmd[4];
305};
306
307#define ITS_CMD_QUEUE_SZ SZ_64K
308#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
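/*
 * For reference: each command is four 64-bit words (32 bytes), so the
 * 64KB queue holds ITS_CMD_QUEUE_NR_ENTRIES = SZ_64K / 32 = 2048 commands.
 */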
309
310typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
311 struct its_cmd_block *,
312 struct its_cmd_desc *);
313
314typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
315 struct its_cmd_block *,
316 struct its_cmd_desc *);
317
318static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
319{
320 u64 mask = GENMASK_ULL(h, l);
321 *raw_cmd &= ~mask;
322 *raw_cmd |= (val << l) & mask;
323}
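/*
 * Example of the field encoding above, with hypothetical values: encoding
 * a device ID of 0x1234 into bits [63:32] of raw_cmd[0] first clears that
 * field and then ORs in (0x1234ULL << 32), leaving the low word untouched
 * and setting the high word to 0x1234.
 */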
324
325static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
326{
327 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
328}
329
330static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
331{
332 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
333}
334
335static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
336{
337 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
338}
339
340static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
341{
342 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
343}
344
345static void its_encode_size(struct its_cmd_block *cmd, u8 size)
346{
347 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
348}
349
350static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
351{
352 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
353}
354
355static void its_encode_valid(struct its_cmd_block *cmd, int valid)
356{
357 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
358}
359
360static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
361{
362 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
363}
364
365static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
366{
367 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
368}
369
370static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
371{
372 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
373}
374
375static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
376{
377 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
378}
379
380static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
381{
382 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
383}
384
385static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
386{
387 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
388}
389
390static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
391{
392 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
393}
394
395static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
396{
397 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
398}
399
400static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
401{
402 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
403}
404
405static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
406{
407 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
408}
409
410static inline void its_fixup_cmd(struct its_cmd_block *cmd)
411{
412 /* Let's fixup BE commands */
413 cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
414 cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
415 cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
416 cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
417}
418
419static struct its_collection *its_build_mapd_cmd(struct its_node *its,
420 struct its_cmd_block *cmd,
421 struct its_cmd_desc *desc)
422{
423 unsigned long itt_addr;
424 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
425
426 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
427 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
428
429 its_encode_cmd(cmd, GITS_CMD_MAPD);
430 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
431 its_encode_size(cmd, size - 1);
432 its_encode_itt(cmd, itt_addr);
433 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
434
435 its_fixup_cmd(cmd);
436
437 return NULL;
438}
439
440static struct its_collection *its_build_mapc_cmd(struct its_node *its,
441 struct its_cmd_block *cmd,
442 struct its_cmd_desc *desc)
443{
444 its_encode_cmd(cmd, GITS_CMD_MAPC);
445 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
446 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
447 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
448
449 its_fixup_cmd(cmd);
450
451 return desc->its_mapc_cmd.col;
452}
453
454static struct its_collection *its_build_mapti_cmd(struct its_node *its,
455 struct its_cmd_block *cmd,
456 struct its_cmd_desc *desc)
457{
458 struct its_collection *col;
459
460 col = dev_event_to_col(desc->its_mapti_cmd.dev,
461 desc->its_mapti_cmd.event_id);
462
463 its_encode_cmd(cmd, GITS_CMD_MAPTI);
464 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
465 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
466 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
467 its_encode_collection(cmd, col->col_id);
468
469 its_fixup_cmd(cmd);
470
471 return valid_col(col);
472}
473
474static struct its_collection *its_build_movi_cmd(struct its_node *its,
475 struct its_cmd_block *cmd,
476 struct its_cmd_desc *desc)
477{
478 struct its_collection *col;
479
480 col = dev_event_to_col(desc->its_movi_cmd.dev,
481 desc->its_movi_cmd.event_id);
482
483 its_encode_cmd(cmd, GITS_CMD_MOVI);
484 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
485 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
486 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
487
488 its_fixup_cmd(cmd);
489
490 return valid_col(col);
491}
492
493static struct its_collection *its_build_discard_cmd(struct its_node *its,
494 struct its_cmd_block *cmd,
495 struct its_cmd_desc *desc)
496{
497 struct its_collection *col;
498
499 col = dev_event_to_col(desc->its_discard_cmd.dev,
500 desc->its_discard_cmd.event_id);
501
502 its_encode_cmd(cmd, GITS_CMD_DISCARD);
503 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
504 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
505
506 its_fixup_cmd(cmd);
507
508 return valid_col(col);
509}
510
511static struct its_collection *its_build_inv_cmd(struct its_node *its,
512 struct its_cmd_block *cmd,
513 struct its_cmd_desc *desc)
514{
515 struct its_collection *col;
516
517 col = dev_event_to_col(desc->its_inv_cmd.dev,
518 desc->its_inv_cmd.event_id);
519
520 its_encode_cmd(cmd, GITS_CMD_INV);
521 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
522 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
523
524 its_fixup_cmd(cmd);
525
526 return valid_col(col);
527}
528
529static struct its_collection *its_build_int_cmd(struct its_node *its,
530 struct its_cmd_block *cmd,
531 struct its_cmd_desc *desc)
532{
533 struct its_collection *col;
534
535 col = dev_event_to_col(desc->its_int_cmd.dev,
536 desc->its_int_cmd.event_id);
537
538 its_encode_cmd(cmd, GITS_CMD_INT);
539 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
540 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
541
542 its_fixup_cmd(cmd);
543
544 return valid_col(col);
545}
546
547static struct its_collection *its_build_clear_cmd(struct its_node *its,
548 struct its_cmd_block *cmd,
549 struct its_cmd_desc *desc)
550{
551 struct its_collection *col;
552
553 col = dev_event_to_col(desc->its_clear_cmd.dev,
554 desc->its_clear_cmd.event_id);
555
556 its_encode_cmd(cmd, GITS_CMD_CLEAR);
557 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
558 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
559
560 its_fixup_cmd(cmd);
561
562 return valid_col(col);
563}
564
565static struct its_collection *its_build_invall_cmd(struct its_node *its,
566 struct its_cmd_block *cmd,
567 struct its_cmd_desc *desc)
568{
569 its_encode_cmd(cmd, GITS_CMD_INVALL);
570 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
571
572 its_fixup_cmd(cmd);
573
574 return NULL;
575}
576
577static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
578 struct its_cmd_block *cmd,
579 struct its_cmd_desc *desc)
580{
581 its_encode_cmd(cmd, GITS_CMD_VINVALL);
582 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
583
584 its_fixup_cmd(cmd);
585
586 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
587}
588
589static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
590 struct its_cmd_block *cmd,
591 struct its_cmd_desc *desc)
592{
593 unsigned long vpt_addr;
594 u64 target;
595
596 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
597 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
598
599 its_encode_cmd(cmd, GITS_CMD_VMAPP);
600 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
601 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
602 its_encode_target(cmd, target);
603 its_encode_vpt_addr(cmd, vpt_addr);
604 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
605
606 its_fixup_cmd(cmd);
607
608 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
609}
610
611static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
612 struct its_cmd_block *cmd,
613 struct its_cmd_desc *desc)
614{
615 u32 db;
616
617 if (desc->its_vmapti_cmd.db_enabled)
618 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
619 else
620 db = 1023;
621
622 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
623 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
624 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
625 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
626 its_encode_db_phys_id(cmd, db);
627 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
628
629 its_fixup_cmd(cmd);
630
631 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
632}
633
634static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
635 struct its_cmd_block *cmd,
636 struct its_cmd_desc *desc)
637{
638 u32 db;
639
640 if (desc->its_vmovi_cmd.db_enabled)
641 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
642 else
643 db = 1023;
644
645 its_encode_cmd(cmd, GITS_CMD_VMOVI);
646 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
647 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
648 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
649 its_encode_db_phys_id(cmd, db);
650 its_encode_db_valid(cmd, true);
651
652 its_fixup_cmd(cmd);
653
654 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
655}
656
657static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
658 struct its_cmd_block *cmd,
659 struct its_cmd_desc *desc)
660{
661 u64 target;
662
663 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
664 its_encode_cmd(cmd, GITS_CMD_VMOVP);
665 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
666 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
667 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
668 its_encode_target(cmd, target);
669
670 its_fixup_cmd(cmd);
671
672 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
673}
674
675static u64 its_cmd_ptr_to_offset(struct its_node *its,
676 struct its_cmd_block *ptr)
677{
678 return (ptr - its->cmd_base) * sizeof(*ptr);
679}
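/*
 * GITS_CREADR and GITS_CWRITER hold byte offsets into the command queue,
 * hence the multiplication by the command size above. For example, the
 * fourth queue entry (index 3) corresponds to a byte offset of 3 * 32 = 96.
 */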
680
681static int its_queue_full(struct its_node *its)
682{
683 int widx;
684 int ridx;
685
686 widx = its->cmd_write - its->cmd_base;
687 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
688
689 /* This is incredibly unlikely to happen, unless the ITS locks up. */
690 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
691 return 1;
692
693 return 0;
694}
695
696static struct its_cmd_block *its_allocate_entry(struct its_node *its)
697{
698 struct its_cmd_block *cmd;
699 u32 count = 1000000; /* 1s! */
700
701 while (its_queue_full(its)) {
702 count--;
703 if (!count) {
704 pr_err_ratelimited("ITS queue not draining\n");
705 return NULL;
706 }
707 cpu_relax();
708 udelay(1);
709 }
710
711 cmd = its->cmd_write++;
712
713 /* Handle queue wrapping */
714 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
715 its->cmd_write = its->cmd_base;
716
717 /* Clear command */
718 cmd->raw_cmd[0] = 0;
719 cmd->raw_cmd[1] = 0;
720 cmd->raw_cmd[2] = 0;
721 cmd->raw_cmd[3] = 0;
722
723 return cmd;
724}
725
726static struct its_cmd_block *its_post_commands(struct its_node *its)
727{
728 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
729
730 writel_relaxed(wr, its->base + GITS_CWRITER);
731
732 return its->cmd_write;
733}
734
735static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
736{
737 /*
738 * Make sure the commands written to memory are observable by
739 * the ITS.
740 */
741 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
742 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
743 else
744 dsb(ishst);
745}
746
747static int its_wait_for_range_completion(struct its_node *its,
748 struct its_cmd_block *from,
749 struct its_cmd_block *to)
750{
751 u64 rd_idx, from_idx, to_idx;
752 u32 count = 1000000; /* 1s! */
753
754 from_idx = its_cmd_ptr_to_offset(its, from);
755 to_idx = its_cmd_ptr_to_offset(its, to);
756
757 while (1) {
758 rd_idx = readl_relaxed(its->base + GITS_CREADR);
759
760 /* Direct case */
761 if (from_idx < to_idx && rd_idx >= to_idx)
762 break;
763
764 /* Wrapped case */
765 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
766 break;
767
768 count--;
769 if (!count) {
770 pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
771 from_idx, to_idx, rd_idx);
772 return -1;
773 }
774 cpu_relax();
775 udelay(1);
776 }
777
778 return 0;
779}
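/*
 * Sketch of the completion check above, with hypothetical offsets: in the
 * direct case (say from_idx = 96, to_idx = 160) the commands are done once
 * GITS_CREADR reaches 160 or beyond. In the wrapped case the write pointer
 * has wrapped past the end of the queue, so to_idx < from_idx and
 * completion means the read pointer has wrapped too: rd_idx >= to_idx
 * while still being below from_idx.
 */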
780
781/* Warning, macro hell follows */
782#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
783void name(struct its_node *its, \
784 buildtype builder, \
785 struct its_cmd_desc *desc) \
786{ \
787 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
788 synctype *sync_obj; \
789 unsigned long flags; \
790 \
791 raw_spin_lock_irqsave(&its->lock, flags); \
792 \
793 cmd = its_allocate_entry(its); \
794 if (!cmd) { /* We're soooooo screewed... */ \
795 raw_spin_unlock_irqrestore(&its->lock, flags); \
796 return; \
797 } \
798 sync_obj = builder(its, cmd, desc); \
799 its_flush_cmd(its, cmd); \
800 \
801 if (sync_obj) { \
802 sync_cmd = its_allocate_entry(its); \
803 if (!sync_cmd) \
804 goto post; \
805 \
806 buildfn(its, sync_cmd, sync_obj); \
807 its_flush_cmd(its, sync_cmd); \
808 } \
809 \
810post: \
811 next_cmd = its_post_commands(its); \
812 raw_spin_unlock_irqrestore(&its->lock, flags); \
813 \
814 if (its_wait_for_range_completion(its, cmd, next_cmd)) \
815 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
816}
817
818static void its_build_sync_cmd(struct its_node *its,
819 struct its_cmd_block *sync_cmd,
820 struct its_collection *sync_col)
821{
822 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
823 its_encode_target(sync_cmd, sync_col->target_address);
824
825 its_fixup_cmd(sync_cmd);
826}
827
828static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
829 struct its_collection, its_build_sync_cmd)
830
831static void its_build_vsync_cmd(struct its_node *its,
832 struct its_cmd_block *sync_cmd,
833 struct its_vpe *sync_vpe)
834{
835 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
836 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
837
838 its_fixup_cmd(sync_cmd);
839}
840
841static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
842 struct its_vpe, its_build_vsync_cmd)
843
844static void its_send_int(struct its_device *dev, u32 event_id)
845{
846 struct its_cmd_desc desc;
847
848 desc.its_int_cmd.dev = dev;
849 desc.its_int_cmd.event_id = event_id;
850
851 its_send_single_command(dev->its, its_build_int_cmd, &desc);
852}
853
854static void its_send_clear(struct its_device *dev, u32 event_id)
855{
856 struct its_cmd_desc desc;
857
858 desc.its_clear_cmd.dev = dev;
859 desc.its_clear_cmd.event_id = event_id;
860
861 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
862}
863
864static void its_send_inv(struct its_device *dev, u32 event_id)
865{
866 struct its_cmd_desc desc;
867
868 desc.its_inv_cmd.dev = dev;
869 desc.its_inv_cmd.event_id = event_id;
870
871 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
872}
873
874static void its_send_mapd(struct its_device *dev, int valid)
875{
876 struct its_cmd_desc desc;
877
878 desc.its_mapd_cmd.dev = dev;
879 desc.its_mapd_cmd.valid = !!valid;
880
881 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
882}
883
884static void its_send_mapc(struct its_node *its, struct its_collection *col,
885 int valid)
886{
887 struct its_cmd_desc desc;
888
889 desc.its_mapc_cmd.col = col;
890 desc.its_mapc_cmd.valid = !!valid;
891
892 its_send_single_command(its, its_build_mapc_cmd, &desc);
893}
894
895static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
896{
897 struct its_cmd_desc desc;
898
899 desc.its_mapti_cmd.dev = dev;
900 desc.its_mapti_cmd.phys_id = irq_id;
901 desc.its_mapti_cmd.event_id = id;
902
903 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
904}
905
906static void its_send_movi(struct its_device *dev,
907 struct its_collection *col, u32 id)
908{
909 struct its_cmd_desc desc;
910
911 desc.its_movi_cmd.dev = dev;
912 desc.its_movi_cmd.col = col;
913 desc.its_movi_cmd.event_id = id;
914
915 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
916}
917
918static void its_send_discard(struct its_device *dev, u32 id)
919{
920 struct its_cmd_desc desc;
921
922 desc.its_discard_cmd.dev = dev;
923 desc.its_discard_cmd.event_id = id;
924
925 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
926}
927
928static void its_send_invall(struct its_node *its, struct its_collection *col)
929{
930 struct its_cmd_desc desc;
931
932 desc.its_invall_cmd.col = col;
933
934 its_send_single_command(its, its_build_invall_cmd, &desc);
935}
936
937static void its_send_vmapti(struct its_device *dev, u32 id)
938{
939 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
940 struct its_cmd_desc desc;
941
942 desc.its_vmapti_cmd.vpe = map->vpe;
943 desc.its_vmapti_cmd.dev = dev;
944 desc.its_vmapti_cmd.virt_id = map->vintid;
945 desc.its_vmapti_cmd.event_id = id;
946 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
947
948 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
949}
950
951static void its_send_vmovi(struct its_device *dev, u32 id)
952{
953 struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
954 struct its_cmd_desc desc;
955
956 desc.its_vmovi_cmd.vpe = map->vpe;
957 desc.its_vmovi_cmd.dev = dev;
958 desc.its_vmovi_cmd.event_id = id;
959 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
960
961 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
962}
963
964static void its_send_vmapp(struct its_node *its,
965 struct its_vpe *vpe, bool valid)
966{
967 struct its_cmd_desc desc;
968
969 desc.its_vmapp_cmd.vpe = vpe;
970 desc.its_vmapp_cmd.valid = valid;
971 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
972
973 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
974}
975
976static void its_send_vmovp(struct its_vpe *vpe)
977{
978 struct its_cmd_desc desc;
979 struct its_node *its;
980 unsigned long flags;
981 int col_id = vpe->col_idx;
982
983 desc.its_vmovp_cmd.vpe = vpe;
984 desc.its_vmovp_cmd.its_list = (u16)its_list_map;
985
986 if (!its_list_map) {
987 its = list_first_entry(&its_nodes, struct its_node, entry);
988 desc.its_vmovp_cmd.seq_num = 0;
989 desc.its_vmovp_cmd.col = &its->collections[col_id];
990 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
991 return;
992 }
993
994 /*
995 * Yet another marvel of the architecture. If using the
996 * its_list "feature", we need to make sure that all ITSs
997 * receive all VMOVP commands in the same order. The only way
998 * to guarantee this is to make vmovp a serialization point.
999 *
1000 * Wall <-- Head.
1001 */
1002 raw_spin_lock_irqsave(&vmovp_lock, flags);
1003
1004 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1005
1006 /* Emit VMOVPs */
1007 list_for_each_entry(its, &its_nodes, entry) {
1008 if (!its->is_v4)
1009 continue;
1010
1011 if (!vpe->its_vm->vlpi_count[its->list_nr])
1012 continue;
1013
1014 desc.its_vmovp_cmd.col = &its->collections[col_id];
1015 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1016 }
1017
1018 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1019}
1020
1021static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1022{
1023 struct its_cmd_desc desc;
1024
1025 desc.its_vinvall_cmd.vpe = vpe;
1026 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1027}
1028
1029/*
1030 * irqchip functions - assumes MSI, mostly.
1031 */
1032
1033static inline u32 its_get_event_id(struct irq_data *d)
1034{
1035 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1036 return d->hwirq - its_dev->event_map.lpi_base;
1037}
1038
1039static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1040{
1041 irq_hw_number_t hwirq;
1042 void *va;
1043 u8 *cfg;
1044
1045 if (irqd_is_forwarded_to_vcpu(d)) {
1046 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1047 u32 event = its_get_event_id(d);
1048 struct its_vlpi_map *map;
1049
1050 va = page_address(its_dev->event_map.vm->vprop_page);
1051 map = &its_dev->event_map.vlpi_maps[event];
1052 hwirq = map->vintid;
1053
1054 /* Remember the updated property */
1055 map->properties &= ~clr;
1056 map->properties |= set | LPI_PROP_GROUP1;
1057 } else {
1058 va = gic_rdists->prop_table_va;
1059 hwirq = d->hwirq;
1060 }
1061
1062 cfg = va + hwirq - 8192;
1063 *cfg &= ~clr;
1064 *cfg |= set | LPI_PROP_GROUP1;
1065
1066 /*
1067 * Make the above write visible to the redistributors.
1068 * And yes, we're flushing exactly: One. Single. Byte.
1069 * Humpf...
1070 */
1071 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1072 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1073 else
1074 dsb(ishst);
1075}
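/*
 * The "va + hwirq - 8192" arithmetic above works because LPI INTIDs start
 * at 8192 while the property table is indexed from zero: the very first
 * LPI (hwirq 8192), for example, maps to the first configuration byte of
 * the table.
 */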
1076
1077static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1078{
1079 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1080
1081 lpi_write_config(d, clr, set);
1082 its_send_inv(its_dev, its_get_event_id(d));
1083}
1084
1085static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1086{
1087 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1088 u32 event = its_get_event_id(d);
1089
1090 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1091 return;
1092
1093 its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1094
1095 /*
1096 * More fun with the architecture:
1097 *
1098 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1099 * value or to 1023, depending on the enable bit. But that
1100 * would be issuing a mapping for an /existing/ DevID+EventID
1101 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1102 * to the /same/ vPE, using this opportunity to adjust the
1103 * doorbell. Mouahahahaha. We loves it, Precious.
1104 */
1105 its_send_vmovi(its_dev, event);
1106}
1107
1108static void its_mask_irq(struct irq_data *d)
1109{
1110 if (irqd_is_forwarded_to_vcpu(d))
1111 its_vlpi_set_doorbell(d, false);
1112
1113 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1114}
1115
1116static void its_unmask_irq(struct irq_data *d)
1117{
1118 if (irqd_is_forwarded_to_vcpu(d))
1119 its_vlpi_set_doorbell(d, true);
1120
1121 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1122}
1123
1124static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1125 bool force)
1126{
1127 unsigned int cpu;
1128 const struct cpumask *cpu_mask = cpu_online_mask;
1129 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1130 struct its_collection *target_col;
1131 u32 id = its_get_event_id(d);
1132
1133 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1134 if (irqd_is_forwarded_to_vcpu(d))
1135 return -EINVAL;
1136
1137 /* lpi cannot be routed to a redistributor that is on a foreign node */
1138 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1139 if (its_dev->its->numa_node >= 0) {
1140 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1141 if (!cpumask_intersects(mask_val, cpu_mask))
1142 return -EINVAL;
1143 }
1144 }
1145
1146 cpu = cpumask_any_and(mask_val, cpu_mask);
1147
1148 if (cpu >= nr_cpu_ids)
1149 return -EINVAL;
1150
1151 /* don't set the affinity when the target cpu is the same as the current one */
1152 if (cpu != its_dev->event_map.col_map[id]) {
1153 target_col = &its_dev->its->collections[cpu];
1154 its_send_movi(its_dev, target_col, id);
1155 its_dev->event_map.col_map[id] = cpu;
1156 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1157 }
1158
1159 return IRQ_SET_MASK_OK_DONE;
1160}
1161
1162static u64 its_irq_get_msi_base(struct its_device *its_dev)
1163{
1164 struct its_node *its = its_dev->its;
1165
1166 return its->phys_base + GITS_TRANSLATER;
1167}
1168
1169static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1170{
1171 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1172 struct its_node *its;
1173 u64 addr;
1174
1175 its = its_dev->its;
1176 addr = its->get_msi_base(its_dev);
1177
1178 msg->address_lo = lower_32_bits(addr);
1179 msg->address_hi = upper_32_bits(addr);
1180 msg->data = its_get_event_id(d);
1181
1182 iommu_dma_map_msi_msg(d->irq, msg);
1183}
1184
1185static int its_irq_set_irqchip_state(struct irq_data *d,
1186 enum irqchip_irq_state which,
1187 bool state)
1188{
1189 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1190 u32 event = its_get_event_id(d);
1191
1192 if (which != IRQCHIP_STATE_PENDING)
1193 return -EINVAL;
1194
1195 if (state)
1196 its_send_int(its_dev, event);
1197 else
1198 its_send_clear(its_dev, event);
1199
1200 return 0;
1201}
1202
1203static void its_map_vm(struct its_node *its, struct its_vm *vm)
1204{
1205 unsigned long flags;
1206
1207 /* Not using the ITS list? Everything is always mapped. */
1208 if (!its_list_map)
1209 return;
1210
1211 raw_spin_lock_irqsave(&vmovp_lock, flags);
1212
1213 /*
1214 * If the VM wasn't mapped yet, iterate over the vpes and get
1215 * them mapped now.
1216 */
1217 vm->vlpi_count[its->list_nr]++;
1218
1219 if (vm->vlpi_count[its->list_nr] == 1) {
1220 int i;
1221
1222 for (i = 0; i < vm->nr_vpes; i++) {
1223 struct its_vpe *vpe = vm->vpes[i];
1224 struct irq_data *d = irq_get_irq_data(vpe->irq);
1225
1226 /* Map the VPE to the first possible CPU */
1227 vpe->col_idx = cpumask_first(cpu_online_mask);
1228 its_send_vmapp(its, vpe, true);
1229 its_send_vinvall(its, vpe);
1230 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1231 }
1232 }
1233
1234 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1235}
1236
1237static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1238{
1239 unsigned long flags;
1240
1241 /* Not using the ITS list? Everything is always mapped. */
1242 if (!its_list_map)
1243 return;
1244
1245 raw_spin_lock_irqsave(&vmovp_lock, flags);
1246
1247 if (!--vm->vlpi_count[its->list_nr]) {
1248 int i;
1249
1250 for (i = 0; i < vm->nr_vpes; i++)
1251 its_send_vmapp(its, vm->vpes[i], false);
1252 }
1253
1254 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1255}
1256
1257static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1258{
1259 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1260 u32 event = its_get_event_id(d);
1261 int ret = 0;
1262
1263 if (!info->map)
1264 return -EINVAL;
1265
1266 mutex_lock(&its_dev->event_map.vlpi_lock);
1267
1268 if (!its_dev->event_map.vm) {
1269 struct its_vlpi_map *maps;
1270
1271 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1272 GFP_KERNEL);
1273 if (!maps) {
1274 ret = -ENOMEM;
1275 goto out;
1276 }
1277
1278 its_dev->event_map.vm = info->map->vm;
1279 its_dev->event_map.vlpi_maps = maps;
1280 } else if (its_dev->event_map.vm != info->map->vm) {
1281 ret = -EINVAL;
1282 goto out;
1283 }
1284
1285 /* Get our private copy of the mapping information */
1286 its_dev->event_map.vlpi_maps[event] = *info->map;
1287
1288 if (irqd_is_forwarded_to_vcpu(d)) {
1289 /* Already mapped, move it around */
1290 its_send_vmovi(its_dev, event);
1291 } else {
1292 /* Ensure all the VPEs are mapped on this ITS */
1293 its_map_vm(its_dev->its, info->map->vm);
1294
1295 /*
1296 * Flag the interrupt as forwarded so that we can
1297 * start poking the virtual property table.
1298 */
1299 irqd_set_forwarded_to_vcpu(d);
1300
1301 /* Write out the property to the prop table */
1302 lpi_write_config(d, 0xff, info->map->properties);
1303
1304 /* Drop the physical mapping */
1305 its_send_discard(its_dev, event);
1306
1307 /* and install the virtual one */
1308 its_send_vmapti(its_dev, event);
1309
1310 /* Increment the number of VLPIs */
1311 its_dev->event_map.nr_vlpis++;
1312 }
1313
1314out:
1315 mutex_unlock(&its_dev->event_map.vlpi_lock);
1316 return ret;
1317}
1318
1319static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1320{
1321 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1322 u32 event = its_get_event_id(d);
1323 int ret = 0;
1324
1325 mutex_lock(&its_dev->event_map.vlpi_lock);
1326
1327 if (!its_dev->event_map.vm ||
1328 !its_dev->event_map.vlpi_maps[event].vm) {
1329 ret = -EINVAL;
1330 goto out;
1331 }
1332
1333 /* Copy our mapping information to the incoming request */
1334 *info->map = its_dev->event_map.vlpi_maps[event];
1335
1336out:
1337 mutex_unlock(&its_dev->event_map.vlpi_lock);
1338 return ret;
1339}
1340
1341static int its_vlpi_unmap(struct irq_data *d)
1342{
1343 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1344 u32 event = its_get_event_id(d);
1345 int ret = 0;
1346
1347 mutex_lock(&its_dev->event_map.vlpi_lock);
1348
1349 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1350 ret = -EINVAL;
1351 goto out;
1352 }
1353
1354 /* Drop the virtual mapping */
1355 its_send_discard(its_dev, event);
1356
1357 /* and restore the physical one */
1358 irqd_clr_forwarded_to_vcpu(d);
1359 its_send_mapti(its_dev, d->hwirq, event);
1360 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1361 LPI_PROP_ENABLED |
1362 LPI_PROP_GROUP1));
1363
1364 /* Potentially unmap the VM from this ITS */
1365 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1366
1367 /*
1368 * Drop the refcount and make the device available again if
1369 * this was the last VLPI.
1370 */
1371 if (!--its_dev->event_map.nr_vlpis) {
1372 its_dev->event_map.vm = NULL;
1373 kfree(its_dev->event_map.vlpi_maps);
1374 }
1375
1376out:
1377 mutex_unlock(&its_dev->event_map.vlpi_lock);
1378 return ret;
1379}
1380
1381static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1382{
1383 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1384
1385 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1386 return -EINVAL;
1387
1388 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1389 lpi_update_config(d, 0xff, info->config);
1390 else
1391 lpi_write_config(d, 0xff, info->config);
1392 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1393
1394 return 0;
1395}
1396
1397static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1398{
1399 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1400 struct its_cmd_info *info = vcpu_info;
1401
1402 /* Need a v4 ITS */
1403 if (!its_dev->its->is_v4)
1404 return -EINVAL;
1405
1406 /* Unmap request? */
1407 if (!info)
1408 return its_vlpi_unmap(d);
1409
1410 switch (info->cmd_type) {
1411 case MAP_VLPI:
1412 return its_vlpi_map(d, info);
1413
1414 case GET_VLPI:
1415 return its_vlpi_get(d, info);
1416
1417 case PROP_UPDATE_VLPI:
1418 case PROP_UPDATE_AND_INV_VLPI:
1419 return its_vlpi_prop_update(d, info);
1420
1421 default:
1422 return -EINVAL;
1423 }
1424}
1425
1426static struct irq_chip its_irq_chip = {
1427 .name = "ITS",
1428 .irq_mask = its_mask_irq,
1429 .irq_unmask = its_unmask_irq,
1430 .irq_eoi = irq_chip_eoi_parent,
1431 .irq_set_affinity = its_set_affinity,
1432 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1433 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1434 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1435};
1436
1437
1438/*
1439 * How we allocate LPIs:
1440 *
1441 * lpi_range_list contains ranges of LPIs that are available to
1442 * allocate from. To allocate LPIs, just pick the first range that
1443 * fits the required allocation, and reduce it by the required
1444 * amount. Once empty, remove the range from the list.
1445 *
1446 * To free a range of LPIs, add a free range to the list, sort it and
1447 * merge the result if the new range happens to be adjacent to an
1448 * already free block.
1449 *
1450 * The consequence of the above is that allocation cost is low, but
1451 * freeing is expensive. We assume that freeing rarely occurs.
1452 */
1453#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1454
1455static DEFINE_MUTEX(lpi_range_lock);
1456static LIST_HEAD(lpi_range_list);
1457
1458struct lpi_range {
1459 struct list_head entry;
1460 u32 base_id;
1461 u32 span;
1462};
1463
1464static struct lpi_range *mk_lpi_range(u32 base, u32 span)
1465{
1466 struct lpi_range *range;
1467
1468 range = kzalloc(sizeof(*range), GFP_KERNEL);
1469 if (range) {
1470 INIT_LIST_HEAD(&range->entry);
1471 range->base_id = base;
1472 range->span = span;
1473 }
1474
1475 return range;
1476}
1477
1478static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
1479{
1480 struct lpi_range *ra, *rb;
1481
1482 ra = container_of(a, struct lpi_range, entry);
1483 rb = container_of(b, struct lpi_range, entry);
1484
1485 return ra->base_id - rb->base_id;
1486}
1487
1488static void merge_lpi_ranges(void)
1489{
1490 struct lpi_range *range, *tmp;
1491
1492 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1493 if (!list_is_last(&range->entry, &lpi_range_list) &&
1494 (tmp->base_id == (range->base_id + range->span))) {
1495 tmp->base_id = range->base_id;
1496 tmp->span += range->span;
1497 list_del(&range->entry);
1498 kfree(range);
1499 }
1500 }
1501}
1502
1503static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1504{
1505 struct lpi_range *range, *tmp;
1506 int err = -ENOSPC;
1507
1508 mutex_lock(&lpi_range_lock);
1509
1510 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1511 if (range->span >= nr_lpis) {
1512 *base = range->base_id;
1513 range->base_id += nr_lpis;
1514 range->span -= nr_lpis;
1515
1516 if (range->span == 0) {
1517 list_del(&range->entry);
1518 kfree(range);
1519 }
1520
1521 err = 0;
1522 break;
1523 }
1524 }
1525
1526 mutex_unlock(&lpi_range_lock);
1527
1528 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1529 return err;
1530}
1531
1532static int free_lpi_range(u32 base, u32 nr_lpis)
1533{
1534 struct lpi_range *new;
1535 int err = 0;
1536
1537 mutex_lock(&lpi_range_lock);
1538
1539 new = mk_lpi_range(base, nr_lpis);
1540 if (!new) {
1541 err = -ENOMEM;
1542 goto out;
1543 }
1544
1545 list_add(&new->entry, &lpi_range_list);
1546 list_sort(NULL, &lpi_range_list, lpi_range_cmp);
1547 merge_lpi_ranges();
1548out:
1549 mutex_unlock(&lpi_range_lock);
1550 return err;
1551}
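/*
 * Allocator walk-through, with hypothetical numbers: initialization frees
 * the full LPI range, seeding the list with a single entry such as
 * [8192, 8192 + N). A subsequent alloc_lpi_range(32, &base) returns
 * base = 8192 and shrinks that entry to start at 8224; when those 32 LPIs
 * are freed again, free_lpi_range(8192, 32) re-adds them, list_sort()
 * restores the ordering and merge_lpi_ranges() coalesces the two adjacent
 * entries back into one.
 */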
1552
1553static int __init its_lpi_init(u32 id_bits)
1554{
1555 u32 lpis = (1UL << id_bits) - 8192;
1556 u32 numlpis;
1557 int err;
1558
1559 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1560
1561 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1562 lpis = numlpis;
1563 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1564 lpis);
1565 }
1566
1567 /*
1568 * Initializing the allocator is just the same as freeing the
1569 * full range of LPIs.
1570 */
1571 err = free_lpi_range(8192, lpis);
1572 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1573 return err;
1574}
1575
1576static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1577{
1578 unsigned long *bitmap = NULL;
1579 int err = 0;
1580
1581 do {
1582 err = alloc_lpi_range(nr_irqs, base);
1583 if (!err)
1584 break;
1585
1586 nr_irqs /= 2;
1587 } while (nr_irqs > 0);
1588
1589 if (!nr_irqs)
1590 err = -ENOSPC;
1591
1592 if (err)
1593 goto out;
1594
1595 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
1596 if (!bitmap)
1597 goto out;
1598
1599 *nr_ids = nr_irqs;
1600
1601out:
1602 if (!bitmap)
1603 *base = *nr_ids = 0;
1604
1605 return bitmap;
1606}
1607
1608static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
1609{
1610 WARN_ON(free_lpi_range(base, nr_ids));
1611 kfree(bitmap);
1612}
1613
1614static void gic_reset_prop_table(void *va)
1615{
1616 /* Priority 0xa0, Group-1, disabled */
1617 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1618
1619 /* Make sure the GIC will observe the written configuration */
1620 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1621}
1622
1623static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1624{
1625 struct page *prop_page;
1626
1627 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1628 if (!prop_page)
1629 return NULL;
1630
1631 gic_reset_prop_table(page_address(prop_page));
1632
1633 return prop_page;
1634}
1635
1636static void its_free_prop_table(struct page *prop_page)
1637{
1638 free_pages((unsigned long)page_address(prop_page),
1639 get_order(LPI_PROPBASE_SZ));
1640}
1641
1642static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
1643{
1644 phys_addr_t start, end, addr_end;
1645 u64 i;
1646
1647 /*
1648 * We don't bother checking for a kdump kernel as by
1649 * construction, the LPI tables are out of this kernel's
1650 * memory map.
1651 */
1652 if (is_kdump_kernel())
1653 return true;
1654
1655 addr_end = addr + size - 1;
1656
1657 for_each_reserved_mem_region(i, &start, &end) {
1658 if (addr >= start && addr_end <= end)
1659 return true;
1660 }
1661
1662 /* Not found, not a good sign... */
1663 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
1664 &addr, &addr_end);
1665 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
1666 return false;
1667}
1668
1669static int gic_reserve_range(phys_addr_t addr, unsigned long size)
1670{
1671 if (efi_enabled(EFI_CONFIG_TABLES))
1672 return efi_mem_reserve_persistent(addr, size);
1673
1674 return 0;
1675}
1676
1677static int __init its_setup_lpi_prop_table(void)
1678{
1679 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1680 u64 val;
1681
1682 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1683 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1684
1685 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1686 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1687 LPI_PROPBASE_SZ,
1688 MEMREMAP_WB);
1689 gic_reset_prop_table(gic_rdists->prop_table_va);
1690 } else {
1691 struct page *page;
1692
1693 lpi_id_bits = min_t(u32,
1694 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1695 ITS_MAX_LPI_NRBITS);
1696 page = its_allocate_prop_table(GFP_NOWAIT);
1697 if (!page) {
1698 pr_err("Failed to allocate PROPBASE\n");
1699 return -ENOMEM;
1700 }
1701
1702 gic_rdists->prop_table_pa = page_to_phys(page);
1703 gic_rdists->prop_table_va = page_address(page);
1704 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
1705 LPI_PROPBASE_SZ));
1706 }
1707
1708 pr_info("GICv3: using LPI property table @%pa\n",
1709 &gic_rdists->prop_table_pa);
1710
1711 return its_lpi_init(lpi_id_bits);
1712}
1713
1714static const char *its_base_type_string[] = {
1715 [GITS_BASER_TYPE_DEVICE] = "Devices",
1716 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
1717 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
1718 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1719 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1720 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1721 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1722};
1723
1724static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1725{
1726 u32 idx = baser - its->tables;
1727
1728 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
1729}
1730
1731static void its_write_baser(struct its_node *its, struct its_baser *baser,
1732 u64 val)
1733{
1734 u32 idx = baser - its->tables;
1735
1736 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
1737 baser->val = its_read_baser(its, baser);
1738}
1739
1740static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1741 u64 cache, u64 shr, u32 psz, u32 order,
1742 bool indirect)
1743{
1744 u64 val = its_read_baser(its, baser);
1745 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1746 u64 type = GITS_BASER_TYPE(val);
1747 u64 baser_phys, tmp;
1748 u32 alloc_pages;
1749 struct page *page;
1750 void *base;
1751
1752retry_alloc_baser:
1753 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1754 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1755 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1756 &its->phys_base, its_base_type_string[type],
1757 alloc_pages, GITS_BASER_PAGES_MAX);
1758 alloc_pages = GITS_BASER_PAGES_MAX;
1759 order = get_order(GITS_BASER_PAGES_MAX * psz);
1760 }
1761
1762 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
1763 if (!page)
1764 return -ENOMEM;
1765
1766 base = (void *)page_address(page);
1767 baser_phys = virt_to_phys(base);
1768
1769 /* Check if the physical address of the memory is above 48bits */
1770 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1771
1772 /* 52bit PA is supported only when PageSize=64K */
1773 if (psz != SZ_64K) {
1774 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1775 free_pages((unsigned long)base, order);
1776 return -ENXIO;
1777 }
1778
1779 /* Convert 52bit PA to 48bit field */
1780 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1781 }
1782
1783retry_baser:
1784 val = (baser_phys |
1785 (type << GITS_BASER_TYPE_SHIFT) |
1786 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1787 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1788 cache |
1789 shr |
1790 GITS_BASER_VALID);
1791
1792 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1793
1794 switch (psz) {
1795 case SZ_4K:
1796 val |= GITS_BASER_PAGE_SIZE_4K;
1797 break;
1798 case SZ_16K:
1799 val |= GITS_BASER_PAGE_SIZE_16K;
1800 break;
1801 case SZ_64K:
1802 val |= GITS_BASER_PAGE_SIZE_64K;
1803 break;
1804 }
1805
1806 its_write_baser(its, baser, val);
1807 tmp = baser->val;
1808
1809 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1810 /*
1811 * Shareability didn't stick. Just use
1812 * whatever the read reported, which is likely
1813 * to be the only thing this ITS
1814 * supports. If that's zero, make it
1815 * non-cacheable as well.
1816 */
1817 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1818 if (!shr) {
1819 cache = GITS_BASER_nC;
1820 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
1821 }
1822 goto retry_baser;
1823 }
1824
1825 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1826 /*
1827 * Page size didn't stick. Let's try a smaller
1828 * size and retry. If we reach 4K, then
1829 * something is horribly wrong...
1830 */
1831 free_pages((unsigned long)base, order);
1832 baser->base = NULL;
1833
1834 switch (psz) {
1835 case SZ_16K:
1836 psz = SZ_4K;
1837 goto retry_alloc_baser;
1838 case SZ_64K:
1839 psz = SZ_16K;
1840 goto retry_alloc_baser;
1841 }
1842 }
1843
1844 if (val != tmp) {
1845 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
1846 &its->phys_base, its_base_type_string[type],
1847 val, tmp);
1848 free_pages((unsigned long)base, order);
1849 return -ENXIO;
1850 }
1851
1852 baser->order = order;
1853 baser->base = base;
1854 baser->psz = psz;
1855 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
1856
1857 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
1858 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
1859 its_base_type_string[type],
1860 (unsigned long)virt_to_phys(base),
1861 indirect ? "indirect" : "flat", (int)esz,
1862 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1863
1864 return 0;
1865}
1866
1867static bool its_parse_indirect_baser(struct its_node *its,
1868 struct its_baser *baser,
1869 u32 psz, u32 *order, u32 ids)
1870{
1871 u64 tmp = its_read_baser(its, baser);
1872 u64 type = GITS_BASER_TYPE(tmp);
1873 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
1874 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
1875 u32 new_order = *order;
1876 bool indirect = false;
1877
1878 /* No need to enable Indirection if memory requirement < (psz*2) bytes */
1879 if ((esz << ids) > (psz * 2)) {
1880 /*
1881 * Find out whether hw supports a single or two-level table by
1882 * reading bit at offset '62' after writing '1' to it.
1883 */
1884 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1885 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1886
1887 if (indirect) {
1888 /*
1889 * The size of the lvl2 table is equal to the ITS page size
1890 * ('psz'). To compute the lvl1 table size, subtract the ID
1891 * bits covered by a single lvl2 table from 'ids' (as reported
1892 * by the ITS hardware), then multiply the resulting number of
1893 * entries by the lvl1 table entry size.
1894 */
1895 ids -= ilog2(psz / (int)esz);
1896 esz = GITS_LVL1_ENTRY_SIZE;
1897 }
1898 }
1899
1900 /*
1901 * Allocate as many entries as required to fit the
1902 * range of device IDs that the ITS can grok... The ID
1903 * space being incredibly sparse, this results in a
1904 * massive waste of memory if the two-level device table
1905 * feature is not supported by the hardware.
1906 */
1907 new_order = max_t(u32, get_order(esz << ids), new_order);
1908 if (new_order >= MAX_ORDER) {
1909 new_order = MAX_ORDER - 1;
1910 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
1911 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1912 &its->phys_base, its_base_type_string[type],
1913 its->device_ids, ids);
1914 }
1915
1916 *order = new_order;
1917
1918 return indirect;
1919}
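/*
 * Sizing sketch for the two-level case, with illustrative numbers only:
 * assuming a device table entry size of 8 bytes, 20 device ID bits and
 * 64KB ITS pages, a flat table would need 8MB. Each 64KB lvl2 page covers
 * 8192 entries (13 ID bits), so when indirection is supported the lvl1
 * table only needs 2^(20 - 13) = 128 entries of GITS_LVL1_ENTRY_SIZE
 * bytes, i.e. 1KB.
 */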
1920
1921static void its_free_tables(struct its_node *its)
1922{
1923 int i;
1924
1925 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1926 if (its->tables[i].base) {
1927 free_pages((unsigned long)its->tables[i].base,
1928 its->tables[i].order);
1929 its->tables[i].base = NULL;
1930 }
1931 }
1932}
1933
1934static int its_alloc_tables(struct its_node *its)
1935{
1936 u64 shr = GITS_BASER_InnerShareable;
1937 u64 cache = GITS_BASER_RaWaWb;
1938 u32 psz = SZ_64K;
1939 int err, i;
1940
1941 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1942 /* erratum 24313: ignore memory access type */
1943 cache = GITS_BASER_nCnB;
1944
1945 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1946 struct its_baser *baser = its->tables + i;
1947 u64 val = its_read_baser(its, baser);
1948 u64 type = GITS_BASER_TYPE(val);
1949 u32 order = get_order(psz);
1950 bool indirect = false;
1951
1952 switch (type) {
1953 case GITS_BASER_TYPE_NONE:
1954 continue;
1955
1956 case GITS_BASER_TYPE_DEVICE:
1957 indirect = its_parse_indirect_baser(its, baser,
1958 psz, &order,
1959 its->device_ids);
1960 break;
1961
1962 case GITS_BASER_TYPE_VCPU:
1963 indirect = its_parse_indirect_baser(its, baser,
1964 psz, &order,
1965 ITS_MAX_VPEID_BITS);
1966 break;
1967 }
1968
1969 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
1970 if (err < 0) {
1971 its_free_tables(its);
1972 return err;
1973 }
1974
1975 /* Update settings which will be used for next BASERn */
1976 psz = baser->psz;
1977 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1978 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
1979 }
1980
1981 return 0;
1982}
1983
1984static int its_alloc_collections(struct its_node *its)
1985{
1986 int i;
1987
1988 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
1989 GFP_KERNEL);
1990 if (!its->collections)
1991 return -ENOMEM;
1992
1993 for (i = 0; i < nr_cpu_ids; i++)
1994 its->collections[i].target_address = ~0ULL;
1995
1996 return 0;
1997}
1998
1999static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2000{
2001 struct page *pend_page;
2002
2003 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2004 get_order(LPI_PENDBASE_SZ));
2005 if (!pend_page)
2006 return NULL;
2007
2008 /* Make sure the GIC will observe the zero-ed page */
2009 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2010
2011 return pend_page;
2012}
2013
2014static void its_free_pending_table(struct page *pt)
2015{
2016 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2017}
2018
2019/*
2020 * Booting with kdump and LPIs enabled is generally fine. Any other
2021 * case is wrong in the absence of firmware/EFI support.
2022 */
2023static bool enabled_lpis_allowed(void)
2024{
2025 phys_addr_t addr;
2026 u64 val;
2027
2028 /* Check whether the property table is in a reserved region */
2029 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2030 addr = val & GENMASK_ULL(51, 12);
2031
2032 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2033}
2034
2035static int __init allocate_lpi_tables(void)
2036{
2037 u64 val;
2038 int err, cpu;
2039
2040 /*
2041 * If LPIs are enabled while we run this from the boot CPU,
2042 * flag the RD tables as pre-allocated if the stars do align.
2043 */
2044 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2045 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2046 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2047 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2048 pr_info("GICv3: Using preallocated redistributor tables\n");
2049 }
2050
2051 err = its_setup_lpi_prop_table();
2052 if (err)
2053 return err;
2054
2055 /*
2056 * We allocate all the pending tables anyway, as we may have a
 2057 * mix of RDs that have had LPIs enabled, and some that
 2058 * haven't. We'll free the unused ones as each CPU comes online.
2059 */
2060 for_each_possible_cpu(cpu) {
2061 struct page *pend_page;
2062
2063 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2064 if (!pend_page) {
2065 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2066 return -ENOMEM;
2067 }
2068
2069 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2070 }
2071
2072 return 0;
2073}
2074
2075static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2076{
2077 u32 count = 1000000; /* 1s! */
2078 bool clean;
2079 u64 val;
2080
2081 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2082 val &= ~GICR_VPENDBASER_Valid;
2083 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2084
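 /* Poll until the redistributor has finished flushing the virtual pending table (Dirty clear), for up to ~1s */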
2085 do {
2086 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2087 clean = !(val & GICR_VPENDBASER_Dirty);
2088 if (!clean) {
2089 count--;
2090 cpu_relax();
2091 udelay(1);
2092 }
2093 } while (!clean && count);
2094
2095 return val;
2096}
2097
2098static void its_cpu_init_lpis(void)
2099{
2100 void __iomem *rbase = gic_data_rdist_rd_base();
2101 struct page *pend_page;
2102 phys_addr_t paddr;
2103 u64 val, tmp;
2104
2105 if (gic_data_rdist()->lpi_enabled)
2106 return;
2107
2108 val = readl_relaxed(rbase + GICR_CTLR);
2109 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2110 (val & GICR_CTLR_ENABLE_LPIS)) {
2111 /*
2112 * Check that we get the same property table on all
2113 * RDs. If we don't, this is hopeless.
2114 */
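 /* GICR_PROPBASER: the physical address field occupies bits [51:12] */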
2115 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2116 paddr &= GENMASK_ULL(51, 12);
2117 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2118 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2119
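 /* GICR_PENDBASER: the physical address field occupies bits [51:16] */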
2120 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2121 paddr &= GENMASK_ULL(51, 16);
2122
2123 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
2124 its_free_pending_table(gic_data_rdist()->pend_page);
2125 gic_data_rdist()->pend_page = NULL;
2126
2127 goto out;
2128 }
2129
2130 pend_page = gic_data_rdist()->pend_page;
2131 paddr = page_to_phys(pend_page);
2132 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
2133
2134 /* set PROPBASE */
2135 val = (gic_rdists->prop_table_pa |
2136 GICR_PROPBASER_InnerShareable |
2137 GICR_PROPBASER_RaWaWb |
2138 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2139
2140 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2141 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
2142
2143 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2144 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2145 /*
 2146 * The HW reports non-shareable, so we must
 2147 * remove the cacheability attributes as
 2148 * well.
2149 */
2150 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2151 GICR_PROPBASER_CACHEABILITY_MASK);
2152 val |= GICR_PROPBASER_nC;
2153 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2154 }
2155 pr_info_once("GIC: using cache flushing for LPI property table\n");
2156 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2157 }
2158
2159 /* set PENDBASE */
2160 val = (page_to_phys(pend_page) |
2161 GICR_PENDBASER_InnerShareable |
2162 GICR_PENDBASER_RaWaWb);
2163
2164 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2165 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2166
2167 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2168 /*
 2169 * The HW reports non-shareable, so we must remove the
 2170 * cacheability attributes as well.
2171 */
2172 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2173 GICR_PENDBASER_CACHEABILITY_MASK);
2174 val |= GICR_PENDBASER_nC;
2175 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2176 }
2177
2178 /* Enable LPIs */
2179 val = readl_relaxed(rbase + GICR_CTLR);
2180 val |= GICR_CTLR_ENABLE_LPIS;
2181 writel_relaxed(val, rbase + GICR_CTLR);
2182
2183 if (gic_rdists->has_vlpis) {
2184 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2185
2186 /*
 2187 * It's possible for a CPU to receive VLPIs before it is
 2188 * scheduled as a vPE, especially for the first CPU, and a
 2189 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
 2190 * out of range and dropped by the GIC.
 2191 * So we initialize IDbits to a known value to avoid dropping VLPIs.
2192 */
2193 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2194 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2195 smp_processor_id(), val);
2196 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2197
2198 /*
 2199 * Also clear the Valid bit of GICR_VPENDBASER, in case some
 2200 * stale programming was left behind and could end up
 2201 * corrupting memory.
2202 */
2203 val = its_clear_vpend_valid(vlpi_base);
2204 WARN_ON(val & GICR_VPENDBASER_Dirty);
2205 }
2206
2207 /* Make sure the GIC has seen the above */
2208 dsb(sy);
2209out:
2210 gic_data_rdist()->lpi_enabled = true;
2211 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
2212 smp_processor_id(),
2213 gic_data_rdist()->pend_page ? "allocated" : "reserved",
2214 &paddr);
2215}
2216
2217static void its_cpu_init_collection(struct its_node *its)
2218{
2219 int cpu = smp_processor_id();
2220 u64 target;
2221
 2222 /* avoid cross-node collections and their mapping */
2223 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2224 struct device_node *cpu_node;
2225
2226 cpu_node = of_get_cpu_node(cpu, NULL);
2227 if (its->numa_node != NUMA_NO_NODE &&
2228 its->numa_node != of_node_to_nid(cpu_node))
2229 return;
2230 }
2231
2232 /*
2233 * We now have to bind each collection to its target
2234 * redistributor.
2235 */
2236 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
2237 /*
2238 * This ITS wants the physical address of the
2239 * redistributor.
2240 */
2241 target = gic_data_rdist()->phys_base;
2242 } else {
2243 /* This ITS wants a linear CPU number. */
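 /* Shift the Processor Number from GICR_TYPER up by 16 bits, where the RDbase field of the collection mapping expects it */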
2244 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2245 target = GICR_TYPER_CPU_NUMBER(target) << 16;
2246 }
2247
2248 /* Perform collection mapping */
2249 its->collections[cpu].target_address = target;
2250 its->collections[cpu].col_id = cpu;
2251
2252 its_send_mapc(its, &its->collections[cpu], 1);
2253 its_send_invall(its, &its->collections[cpu]);
2254}
2255
2256static void its_cpu_init_collections(void)
2257{
2258 struct its_node *its;
2259
2260 raw_spin_lock(&its_lock);
2261
2262 list_for_each_entry(its, &its_nodes, entry)
2263 its_cpu_init_collection(its);
2264
2265 raw_spin_unlock(&its_lock);
2266}
2267
2268static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2269{
2270 struct its_device *its_dev = NULL, *tmp;
2271 unsigned long flags;
2272
2273 raw_spin_lock_irqsave(&its->lock, flags);
2274
2275 list_for_each_entry(tmp, &its->its_device_list, entry) {
2276 if (tmp->device_id == dev_id) {
2277 its_dev = tmp;
2278 break;
2279 }
2280 }
2281
2282 raw_spin_unlock_irqrestore(&its->lock, flags);
2283
2284 return its_dev;
2285}
2286
2287static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2288{
2289 int i;
2290
2291 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2292 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2293 return &its->tables[i];
2294 }
2295
2296 return NULL;
2297}
2298
2299static bool its_alloc_table_entry(struct its_node *its,
2300 struct its_baser *baser, u32 id)
2301{
2302 struct page *page;
2303 u32 esz, idx;
2304 __le64 *table;
2305
 2306 /* Don't allow a device ID that exceeds the single, flat table limit */
2307 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2308 if (!(baser->val & GITS_BASER_INDIRECT))
2309 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
2310
2311 /* Compute 1st level table index & check if that exceeds table limit */
2312 idx = id >> ilog2(baser->psz / esz);
2313 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2314 return false;
2315
2316 table = baser->base;
2317
2318 /* Allocate memory for 2nd level table */
2319 if (!table[idx]) {
2320 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2321 get_order(baser->psz));
2322 if (!page)
2323 return false;
2324
2325 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2326 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2327 gic_flush_dcache_to_poc(page_address(page), baser->psz);
2328
2329 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2330
2331 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2332 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2333 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2334
2335 /* Ensure updated table contents are visible to ITS hardware */
2336 dsb(sy);
2337 }
2338
2339 return true;
2340}
2341
2342static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2343{
2344 struct its_baser *baser;
2345
2346 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2347
 2348 /* Don't allow a device ID that exceeds the ITS hardware limit */
2349 if (!baser)
2350 return (ilog2(dev_id) < its->device_ids);
2351
2352 return its_alloc_table_entry(its, baser, dev_id);
2353}
2354
2355static bool its_alloc_vpe_table(u32 vpe_id)
2356{
2357 struct its_node *its;
2358
2359 /*
2360 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2361 * could try and only do it on ITSs corresponding to devices
2362 * that have interrupts targeted at this VPE, but the
2363 * complexity becomes crazy (and you have tons of memory
2364 * anyway, right?).
2365 */
2366 list_for_each_entry(its, &its_nodes, entry) {
2367 struct its_baser *baser;
2368
2369 if (!its->is_v4)
2370 continue;
2371
2372 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2373 if (!baser)
2374 return false;
2375
2376 if (!its_alloc_table_entry(its, baser, vpe_id))
2377 return false;
2378 }
2379
2380 return true;
2381}
2382
2383static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2384 int nvecs, bool alloc_lpis)
2385{
2386 struct its_device *dev;
2387 unsigned long *lpi_map = NULL;
2388 unsigned long flags;
2389 u16 *col_map = NULL;
2390 void *itt;
2391 int lpi_base;
2392 int nr_lpis;
2393 int nr_ites;
2394 int sz;
2395
2396 if (!its_alloc_device_table(its, dev_id))
2397 return NULL;
2398
2399 if (WARN_ON(!is_power_of_2(nvecs)))
2400 nvecs = roundup_pow_of_two(nvecs);
2401
2402 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2403 /*
2404 * Even if the device wants a single LPI, the ITT must be
2405 * sized as a power of two (and you need at least one bit...).
2406 */
2407 nr_ites = max(2, nvecs);
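 /* Size the ITT in bytes and pad it so an ITS_ITT_ALIGN-aligned table fits in the allocation */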
2408 sz = nr_ites * its->ite_size;
2409 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2410 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
2411 if (alloc_lpis) {
2412 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
2413 if (lpi_map)
2414 col_map = kcalloc(nr_lpis, sizeof(*col_map),
2415 GFP_KERNEL);
2416 } else {
2417 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
2418 nr_lpis = 0;
2419 lpi_base = 0;
2420 }
2421
2422 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
2423 kfree(dev);
2424 kfree(itt);
2425 kfree(lpi_map);
2426 kfree(col_map);
2427 return NULL;
2428 }
2429
2430 gic_flush_dcache_to_poc(itt, sz);
2431
2432 dev->its = its;
2433 dev->itt = itt;
2434 dev->nr_ites = nr_ites;
2435 dev->event_map.lpi_map = lpi_map;
2436 dev->event_map.col_map = col_map;
2437 dev->event_map.lpi_base = lpi_base;
2438 dev->event_map.nr_lpis = nr_lpis;
2439 mutex_init(&dev->event_map.vlpi_lock);
2440 dev->device_id = dev_id;
2441 INIT_LIST_HEAD(&dev->entry);
2442
2443 raw_spin_lock_irqsave(&its->lock, flags);
2444 list_add(&dev->entry, &its->its_device_list);
2445 raw_spin_unlock_irqrestore(&its->lock, flags);
2446
2447 /* Map device to its ITT */
2448 its_send_mapd(dev, 1);
2449
2450 return dev;
2451}
2452
2453static void its_free_device(struct its_device *its_dev)
2454{
2455 unsigned long flags;
2456
2457 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
2458 list_del(&its_dev->entry);
2459 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
2460 kfree(its_dev->itt);
2461 kfree(its_dev);
2462}
2463
2464static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
2465{
2466 int idx;
2467
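 /* Find and claim a naturally aligned block of 2^order consecutive free events in the LPI map */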
2468 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2469 dev->event_map.nr_lpis,
2470 get_count_order(nvecs));
2471 if (idx < 0)
2472 return -ENOSPC;
2473
2474 *hwirq = dev->event_map.lpi_base + idx;
2475 set_bit(idx, dev->event_map.lpi_map);
2476
2477 return 0;
2478}
2479
2480static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2481 int nvec, msi_alloc_info_t *info)
2482{
2483 struct its_node *its;
2484 struct its_device *its_dev;
2485 struct msi_domain_info *msi_info;
2486 u32 dev_id;
2487 int err = 0;
2488
2489 /*
 2490 * We ignore "dev" entirely, and rely on the dev_id that has
2491 * been passed via the scratchpad. This limits this domain's
2492 * usefulness to upper layers that definitely know that they
2493 * are built on top of the ITS.
2494 */
2495 dev_id = info->scratchpad[0].ul;
2496
2497 msi_info = msi_get_domain_info(domain);
2498 its = msi_info->data;
2499
2500 if (!gic_rdists->has_direct_lpi &&
2501 vpe_proxy.dev &&
2502 vpe_proxy.dev->its == its &&
2503 dev_id == vpe_proxy.dev->device_id) {
2504 /* Bad luck. Get yourself a better implementation */
2505 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2506 dev_id);
2507 return -EINVAL;
2508 }
2509
2510 mutex_lock(&its->dev_alloc_lock);
2511 its_dev = its_find_device(its, dev_id);
2512 if (its_dev) {
2513 /*
 2514 * We have already seen this ID, probably through
2515 * another alias (PCI bridge of some sort). No need to
2516 * create the device.
2517 */
2518 its_dev->shared = true;
2519 pr_debug("Reusing ITT for devID %x\n", dev_id);
2520 goto out;
2521 }
2522
2523 its_dev = its_create_device(its, dev_id, nvec, true);
2524 if (!its_dev) {
2525 err = -ENOMEM;
2526 goto out;
2527 }
2528
2529 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2530out:
2531 mutex_unlock(&its->dev_alloc_lock);
2532 info->scratchpad[0].ptr = its_dev;
2533 return err;
2534}
2535
2536static struct msi_domain_ops its_msi_domain_ops = {
2537 .msi_prepare = its_msi_prepare,
2538};
2539
2540static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2541 unsigned int virq,
2542 irq_hw_number_t hwirq)
2543{
2544 struct irq_fwspec fwspec;
2545
2546 if (irq_domain_get_of_node(domain->parent)) {
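 /* DT parent: build a 3-cell GICv3 specifier (interrupt type, hwirq, trigger) */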
2547 fwspec.fwnode = domain->parent->fwnode;
2548 fwspec.param_count = 3;
2549 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2550 fwspec.param[1] = hwirq;
2551 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
2552 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
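 /* Firmware-node (e.g. ACPI) parent: 2-cell specifier (hwirq, trigger) */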
2553 fwspec.fwnode = domain->parent->fwnode;
2554 fwspec.param_count = 2;
2555 fwspec.param[0] = hwirq;
2556 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2557 } else {
2558 return -EINVAL;
2559 }
2560
2561 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
2562}
2563
2564static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2565 unsigned int nr_irqs, void *args)
2566{
2567 msi_alloc_info_t *info = args;
2568 struct its_device *its_dev = info->scratchpad[0].ptr;
2569 irq_hw_number_t hwirq;
2570 int err;
2571 int i;
2572
2573 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2574 if (err)
2575 return err;
2576
2577 for (i = 0; i < nr_irqs; i++) {
2578 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
2579 if (err)
2580 return err;
2581
2582 irq_domain_set_hwirq_and_chip(domain, virq + i,
2583 hwirq + i, &its_irq_chip, its_dev);
2584 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
2585 pr_debug("ID:%d pID:%d vID:%d\n",
2586 (int)(hwirq + i - its_dev->event_map.lpi_base),
2587 (int)(hwirq + i), virq + i);
2588 }
2589
2590 return 0;
2591}
2592
2593static int its_irq_domain_activate(struct irq_domain *domain,
2594 struct irq_data *d, bool reserve)
2595{
2596 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2597 u32 event = its_get_event_id(d);
2598 const struct cpumask *cpu_mask = cpu_online_mask;
2599 int cpu;
2600
2601 /* get the cpu_mask of local node */
2602 if (its_dev->its->numa_node >= 0)
2603 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2604
2605 /* Bind the LPI to the first possible CPU */
2606 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2607 if (cpu >= nr_cpu_ids) {
2608 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2609 return -EINVAL;
2610
2611 cpu = cpumask_first(cpu_online_mask);
2612 }
2613
2614 its_dev->event_map.col_map[event] = cpu;
2615 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2616
2617 /* Map the GIC IRQ and event to the device */
2618 its_send_mapti(its_dev, d->hwirq, event);
2619 return 0;
2620}
2621
2622static void its_irq_domain_deactivate(struct irq_domain *domain,
2623 struct irq_data *d)
2624{
2625 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2626 u32 event = its_get_event_id(d);
2627
2628 /* Stop the delivery of interrupts */
2629 its_send_discard(its_dev, event);
2630}
2631
2632static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2633 unsigned int nr_irqs)
2634{
2635 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2636 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2637 struct its_node *its = its_dev->its;
2638 int i;
2639
2640 for (i = 0; i < nr_irqs; i++) {
2641 struct irq_data *data = irq_domain_get_irq_data(domain,
2642 virq + i);
2643 u32 event = its_get_event_id(data);
2644
2645 /* Mark interrupt index as unused */
2646 clear_bit(event, its_dev->event_map.lpi_map);
2647
2648 /* Nuke the entry in the domain */
2649 irq_domain_reset_irq_data(data);
2650 }
2651
2652 mutex_lock(&its->dev_alloc_lock);
2653
2654 /*
2655 * If all interrupts have been freed, start mopping the
 2656 * floor. This is conditioned on the device not being shared.
2657 */
2658 if (!its_dev->shared &&
2659 bitmap_empty(its_dev->event_map.lpi_map,
2660 its_dev->event_map.nr_lpis)) {
2661 its_lpi_free(its_dev->event_map.lpi_map,
2662 its_dev->event_map.lpi_base,
2663 its_dev->event_map.nr_lpis);
2664 kfree(its_dev->event_map.col_map);
2665
2666 /* Unmap device/itt */
2667 its_send_mapd(its_dev, 0);
2668 its_free_device(its_dev);
2669 }
2670
2671 mutex_unlock(&its->dev_alloc_lock);
2672
2673 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2674}
2675
2676static const struct irq_domain_ops its_domain_ops = {
2677 .alloc = its_irq_domain_alloc,
2678 .free = its_irq_domain_free,
2679 .activate = its_irq_domain_activate,
2680 .deactivate = its_irq_domain_deactivate,
2681};
2682
2683/*
2684 * This is insane.
2685 *
2686 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2687 * likely), the only way to perform an invalidate is to use a fake
2688 * device to issue an INV command, implying that the LPI has first
2689 * been mapped to some event on that device. Since this is not exactly
2690 * cheap, we try to keep that mapping around as long as possible, and
2691 * only issue an UNMAP if we're short on available slots.
2692 *
2693 * Broken by design(tm).
2694 */
2695static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2696{
2697 /* Already unmapped? */
2698 if (vpe->vpe_proxy_event == -1)
2699 return;
2700
2701 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2702 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2703
2704 /*
2705 * We don't track empty slots at all, so let's move the
2706 * next_victim pointer if we can quickly reuse that slot
2707 * instead of nuking an existing entry. Not clear that this is
2708 * always a win though, and this might just generate a ripple
2709 * effect... Let's just hope VPEs don't migrate too often.
2710 */
2711 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2712 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2713
2714 vpe->vpe_proxy_event = -1;
2715}
2716
2717static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2718{
2719 if (!gic_rdists->has_direct_lpi) {
2720 unsigned long flags;
2721
2722 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2723 its_vpe_db_proxy_unmap_locked(vpe);
2724 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2725 }
2726}
2727
2728static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2729{
2730 /* Already mapped? */
2731 if (vpe->vpe_proxy_event != -1)
2732 return;
2733
2734 /* This slot was already allocated. Kick the other VPE out. */
2735 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2736 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2737
2738 /* Map the new VPE instead */
2739 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2740 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2741 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2742
2743 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2744 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2745}
2746
2747static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2748{
2749 unsigned long flags;
2750 struct its_collection *target_col;
2751
2752 if (gic_rdists->has_direct_lpi) {
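 /* Direct LPI: simply clear any doorbell pending on the old redistributor and wait for the write to be observed */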
2753 void __iomem *rdbase;
2754
2755 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2756 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2757 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2758 cpu_relax();
2759
2760 return;
2761 }
2762
2763 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2764
2765 its_vpe_db_proxy_map_locked(vpe);
2766
2767 target_col = &vpe_proxy.dev->its->collections[to];
2768 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2769 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2770
2771 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2772}
2773
2774static int its_vpe_set_affinity(struct irq_data *d,
2775 const struct cpumask *mask_val,
2776 bool force)
2777{
2778 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2779 int cpu = cpumask_first(mask_val);
2780
2781 /*
2782 * Changing affinity is mega expensive, so let's be as lazy as
2783 * we can and only do it if we really have to. Also, if mapped
2784 * into the proxy device, we need to move the doorbell
2785 * interrupt to its new location.
2786 */
2787 if (vpe->col_idx != cpu) {
2788 int from = vpe->col_idx;
2789
2790 vpe->col_idx = cpu;
2791 its_send_vmovp(vpe);
2792 its_vpe_db_proxy_move(vpe, from, cpu);
2793 }
2794
2795 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2796
2797 return IRQ_SET_MASK_OK_DONE;
2798}
2799
2800static void its_vpe_schedule(struct its_vpe *vpe)
2801{
2802 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2803 u64 val;
2804
2805 /* Schedule the VPE */
2806 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2807 GENMASK_ULL(51, 12);
2808 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2809 val |= GICR_VPROPBASER_RaWb;
2810 val |= GICR_VPROPBASER_InnerShareable;
2811 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2812
2813 val = virt_to_phys(page_address(vpe->vpt_page)) &
2814 GENMASK_ULL(51, 16);
2815 val |= GICR_VPENDBASER_RaWaWb;
2816 val |= GICR_VPENDBASER_NonShareable;
2817 /*
2818 * There is no good way of finding out if the pending table is
2819 * empty as we can race against the doorbell interrupt very
2820 * easily. So in the end, vpe->pending_last is only an
2821 * indication that the vcpu has something pending, not one
2822 * that the pending table is empty. A good implementation
2823 * would be able to read its coarse map pretty quickly anyway,
2824 * making this a tolerable issue.
2825 */
2826 val |= GICR_VPENDBASER_PendingLast;
2827 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2828 val |= GICR_VPENDBASER_Valid;
2829 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2830}
2831
2832static void its_vpe_deschedule(struct its_vpe *vpe)
2833{
2834 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2835 u64 val;
2836
2837 val = its_clear_vpend_valid(vlpi_base);
2838
2839 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
2840 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2841 vpe->idai = false;
2842 vpe->pending_last = true;
2843 } else {
2844 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2845 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2846 }
2847}
2848
2849static void its_vpe_invall(struct its_vpe *vpe)
2850{
2851 struct its_node *its;
2852
2853 list_for_each_entry(its, &its_nodes, entry) {
2854 if (!its->is_v4)
2855 continue;
2856
2857 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2858 continue;
2859
2860 /*
2861 * Sending a VINVALL to a single ITS is enough, as all
2862 * we need is to reach the redistributors.
2863 */
2864 its_send_vinvall(its, vpe);
2865 return;
2866 }
2867}
2868
2869static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2870{
2871 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2872 struct its_cmd_info *info = vcpu_info;
2873
2874 switch (info->cmd_type) {
2875 case SCHEDULE_VPE:
2876 its_vpe_schedule(vpe);
2877 return 0;
2878
2879 case DESCHEDULE_VPE:
2880 its_vpe_deschedule(vpe);
2881 return 0;
2882
2883 case INVALL_VPE:
2884 its_vpe_invall(vpe);
2885 return 0;
2886
2887 default:
2888 return -EINVAL;
2889 }
2890}
2891
2892static void its_vpe_send_cmd(struct its_vpe *vpe,
2893 void (*cmd)(struct its_device *, u32))
2894{
2895 unsigned long flags;
2896
2897 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2898
2899 its_vpe_db_proxy_map_locked(vpe);
2900 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2901
2902 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2903}
2904
2905static void its_vpe_send_inv(struct irq_data *d)
2906{
2907 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2908
2909 if (gic_rdists->has_direct_lpi) {
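 /* Direct LPI: invalidate the doorbell LPI through the redistributor and wait for completion */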
2910 void __iomem *rdbase;
2911
2912 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2913 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2914 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2915 cpu_relax();
2916 } else {
2917 its_vpe_send_cmd(vpe, its_send_inv);
2918 }
2919}
2920
2921static void its_vpe_mask_irq(struct irq_data *d)
2922{
2923 /*
 2924 * We need to mask the LPI, which is described by the parent
 2925 * irq_data. Instead of calling into the parent (which won't
 2926 * exactly do the right thing), let's simply use the
 2927 * parent_data pointer. Yes, I'm naughty.
2928 */
2929 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2930 its_vpe_send_inv(d);
2931}
2932
2933static void its_vpe_unmask_irq(struct irq_data *d)
2934{
2935 /* Same hack as above... */
2936 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2937 its_vpe_send_inv(d);
2938}
2939
2940static int its_vpe_set_irqchip_state(struct irq_data *d,
2941 enum irqchip_irq_state which,
2942 bool state)
2943{
2944 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2945
2946 if (which != IRQCHIP_STATE_PENDING)
2947 return -EINVAL;
2948
2949 if (gic_rdists->has_direct_lpi) {
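 /* Direct LPI: set or clear the doorbell pending state through GICR_SETLPIR/GICR_CLRLPIR */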
2950 void __iomem *rdbase;
2951
2952 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2953 if (state) {
2954 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2955 } else {
2956 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2957 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2958 cpu_relax();
2959 }
2960 } else {
2961 if (state)
2962 its_vpe_send_cmd(vpe, its_send_int);
2963 else
2964 its_vpe_send_cmd(vpe, its_send_clear);
2965 }
2966
2967 return 0;
2968}
2969
2970static struct irq_chip its_vpe_irq_chip = {
2971 .name = "GICv4-vpe",
2972 .irq_mask = its_vpe_mask_irq,
2973 .irq_unmask = its_vpe_unmask_irq,
2974 .irq_eoi = irq_chip_eoi_parent,
2975 .irq_set_affinity = its_vpe_set_affinity,
2976 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
2977 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
2978};
2979
2980static int its_vpe_id_alloc(void)
2981{
2982 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
2983}
2984
2985static void its_vpe_id_free(u16 id)
2986{
2987 ida_simple_remove(&its_vpeid_ida, id);
2988}
2989
2990static int its_vpe_init(struct its_vpe *vpe)
2991{
2992 struct page *vpt_page;
2993 int vpe_id;
2994
2995 /* Allocate vpe_id */
2996 vpe_id = its_vpe_id_alloc();
2997 if (vpe_id < 0)
2998 return vpe_id;
2999
3000 /* Allocate VPT */
3001 vpt_page = its_allocate_pending_table(GFP_KERNEL);
3002 if (!vpt_page) {
3003 its_vpe_id_free(vpe_id);
3004 return -ENOMEM;
3005 }
3006
3007 if (!its_alloc_vpe_table(vpe_id)) {
3008 its_vpe_id_free(vpe_id);
 3009 its_free_pending_table(vpt_page);
3010 return -ENOMEM;
3011 }
3012
3013 vpe->vpe_id = vpe_id;
3014 vpe->vpt_page = vpt_page;
3015 vpe->vpe_proxy_event = -1;
3016
3017 return 0;
3018}
3019
3020static void its_vpe_teardown(struct its_vpe *vpe)
3021{
3022 its_vpe_db_proxy_unmap(vpe);
3023 its_vpe_id_free(vpe->vpe_id);
3024 its_free_pending_table(vpe->vpt_page);
3025}
3026
3027static void its_vpe_irq_domain_free(struct