
Lines Matching full:its

81  * value of BASER register configuration and ITS page size.
93 * The ITS structure - contains most of the infrastructure, with the
148 * The ITS view of a device - belongs to an ITS, owns an interrupt
149  * translation table, and a list of interrupts. If some of its
155 struct its_node *its; member
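The comment above describes the per-device state. A reduced, illustrative layout covering only the fields that the matches in this listing actually reference (the driver's real struct its_device carries more):

/* Illustrative subset of the per-device state referenced elsewhere in this
 * listing; the real struct its_device in the driver has additional fields. */
struct sketch_its_device {
	struct list_head	entry;		/* linked on its->its_device_list */
	struct its_node		*its;		/* owning ITS */
	struct event_lpi_map	event_map;	/* event -> LPI/collection mapping */
	u32			device_id;	/* DeviceID used in MAPD/MAPTI */
};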
187 struct its_node *its; in get_its_list() local
190 list_for_each_entry(its, &its_nodes, entry) { in get_its_list()
191 if (!its->is_v4) in get_its_list()
194 if (vm->vlpi_count[its->list_nr]) in get_its_list()
195 __set_bit(its->list_nr, &its_list); in get_its_list()
204 struct its_node *its = its_dev->its; in dev_event_to_col() local
206 return its->collections + its_dev->event_map.col_map[event]; in dev_event_to_col()
217 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) in valid_vpe() argument
219 if (valid_col(its->collections + vpe->col_idx)) in valid_vpe()
226 * ITS command descriptors - parameters to be encoded in a command
312 * The ITS command block, which is what the ITS actually parses.
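The two comments above distinguish the driver-side command descriptor (the parameters to encode) from the command block the hardware parses. The block format is fixed by the GICv3 architecture: every command is 32 bytes, i.e. four 64-bit doublewords consumed in order from the command queue. A minimal sketch of that unit (name is illustrative):

/* Each ITS command occupies exactly four 64-bit doublewords (32 bytes) in
 * the command queue; the its_build_*_cmd() builders below encode a
 * descriptor into this form. */
struct sketch_cmd_block {
	u64	raw_cmd[4];	/* DW0..DW3, per the GICv3 ITS command format */
};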
430 static struct its_collection *its_build_mapd_cmd(struct its_node *its, in its_build_mapd_cmd() argument
451 static struct its_collection *its_build_mapc_cmd(struct its_node *its, in its_build_mapc_cmd() argument
465 static struct its_collection *its_build_mapti_cmd(struct its_node *its, in its_build_mapti_cmd() argument
485 static struct its_collection *its_build_movi_cmd(struct its_node *its, in its_build_movi_cmd() argument
504 static struct its_collection *its_build_discard_cmd(struct its_node *its, in its_build_discard_cmd() argument
522 static struct its_collection *its_build_inv_cmd(struct its_node *its, in its_build_inv_cmd() argument
540 static struct its_collection *its_build_int_cmd(struct its_node *its, in its_build_int_cmd() argument
558 static struct its_collection *its_build_clear_cmd(struct its_node *its, in its_build_clear_cmd() argument
576 static struct its_collection *its_build_invall_cmd(struct its_node *its, in its_build_invall_cmd() argument
588 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, in its_build_vinvall_cmd() argument
597 return valid_vpe(its, desc->its_vinvall_cmd.vpe); in its_build_vinvall_cmd()
600 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, in its_build_vmapp_cmd() argument
608 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; in its_build_vmapp_cmd()
619 return valid_vpe(its, desc->its_vmapp_cmd.vpe); in its_build_vmapp_cmd()
622 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, in its_build_vmapti_cmd() argument
642 return valid_vpe(its, desc->its_vmapti_cmd.vpe); in its_build_vmapti_cmd()
645 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, in its_build_vmovi_cmd() argument
665 return valid_vpe(its, desc->its_vmovi_cmd.vpe); in its_build_vmovi_cmd()
668 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, in its_build_vmovp_cmd() argument
674 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; in its_build_vmovp_cmd()
683 return valid_vpe(its, desc->its_vmovp_cmd.vpe); in its_build_vmovp_cmd()
686 static u64 its_cmd_ptr_to_offset(struct its_node *its, in its_cmd_ptr_to_offset() argument
689 return (ptr - its->cmd_base) * sizeof(*ptr); in its_cmd_ptr_to_offset()
692 static int its_queue_full(struct its_node *its) in its_queue_full() argument
697 widx = its->cmd_write - its->cmd_base; in its_queue_full()
698 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); in its_queue_full()
700 /* This is incredibly unlikely to happen, unless the ITS locks up. */ in its_queue_full()
707 static struct its_cmd_block *its_allocate_entry(struct its_node *its) in its_allocate_entry() argument
712 while (its_queue_full(its)) { in its_allocate_entry()
715 pr_err_ratelimited("ITS queue not draining\n"); in its_allocate_entry()
722 cmd = its->cmd_write++; in its_allocate_entry()
725 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) in its_allocate_entry()
726 its->cmd_write = its->cmd_base; in its_allocate_entry()
737 static struct its_cmd_block *its_post_commands(struct its_node *its) in its_post_commands() argument
739 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); in its_post_commands()
741 writel_relaxed(wr, its->base + GITS_CWRITER); in its_post_commands()
743 return its->cmd_write; in its_post_commands()
746 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) in its_flush_cmd() argument
750 * the ITS. in its_flush_cmd()
752 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) in its_flush_cmd()
758 static int its_wait_for_range_completion(struct its_node *its, in its_wait_for_range_completion() argument
766 to_idx = its_cmd_ptr_to_offset(its, to); in its_wait_for_range_completion()
775 rd_idx = readl_relaxed(its->base + GITS_CREADR); in its_wait_for_range_completion()
791 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", in its_wait_for_range_completion()
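The queue helpers matched above implement a simple single-producer ring: cmd_write is a software pointer into a linear array of command blocks, GITS_CREADR is the hardware read pointer (a byte offset), the ring is declared full one slot early so that "read == write" always means empty, and posting a batch is just publishing the new byte offset through GITS_CWRITER. A condensed sketch of that arithmetic, with illustrative wrappers around the helpers shown above:

/* Sketch of the command-queue bookkeeping, assuming a ring of
 * ITS_CMD_QUEUE_NR_ENTRIES fixed-size blocks starting at its->cmd_base. */
static u64 sketch_ptr_to_offset(struct its_node *its, struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);	/* byte offset into the ring */
}

static bool sketch_queue_full(struct its_node *its)
{
	unsigned int widx = its->cmd_write - its->cmd_base;
	unsigned int ridx = readl_relaxed(its->base + GITS_CREADR) /
			    sizeof(struct its_cmd_block);

	/* Keep one slot unused so read == write unambiguously means "empty". */
	return ((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx;
}

static void sketch_post(struct its_node *its)
{
	u64 wr = sketch_ptr_to_offset(its, its->cmd_write);

	/* Publish everything written so far; the ITS consumes up to CWRITER. */
	writel_relaxed(wr, its->base + GITS_CWRITER);
}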
805 void name(struct its_node *its, \
814 raw_spin_lock_irqsave(&its->lock, flags); \
816 cmd = its_allocate_entry(its); \
818 raw_spin_unlock_irqrestore(&its->lock, flags); \
821 sync_obj = builder(its, cmd, desc); \
822 its_flush_cmd(its, cmd); \
825 sync_cmd = its_allocate_entry(its); \
829 buildfn(its, sync_cmd, sync_obj); \
830 its_flush_cmd(its, sync_cmd); \
834 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
835 next_cmd = its_post_commands(its); \
836 raw_spin_unlock_irqrestore(&its->lock, flags); \
838 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
839 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
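The macro body matched above is the generic "issue one command" path. Expanded for the physical-command case it boils down to the sequence below; the builder function-pointer type is assumed here, and the SYNC command that the real macro appends for the returned collection is abbreviated to a comment:

/* Sketch of what BUILD_SINGLE_CMD_FUNC() expands to for a physical command. */
static void sketch_send_single_command(struct its_node *its,
				       its_cmd_builder_t builder,
				       struct its_cmd_desc *desc)
{
	struct its_cmd_block *cmd, *next_cmd;
	unsigned long flags;
	u64 rd_idx;

	raw_spin_lock_irqsave(&its->lock, flags);

	cmd = its_allocate_entry(its);	/* spins, and may give up, if the ring never drains */
	if (!cmd) {
		raw_spin_unlock_irqrestore(&its->lock, flags);
		return;
	}

	builder(its, cmd, desc);	/* encode the four doublewords */
	its_flush_cmd(its, cmd);	/* clean to PoC if the ITS isn't coherent */

	/* ... the real macro appends a SYNC/VSYNC targeting the builder's result here ... */

	rd_idx = readl_relaxed(its->base + GITS_CREADR);
	next_cmd = its_post_commands(its);	/* bump GITS_CWRITER */

	raw_spin_unlock_irqrestore(&its->lock, flags);

	if (its_wait_for_range_completion(its, rd_idx, next_cmd))
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);
}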
842 static void its_build_sync_cmd(struct its_node *its, in its_build_sync_cmd() argument
855 static void its_build_vsync_cmd(struct its_node *its, in BUILD_SINGLE_CMD_FUNC()
875 its_send_single_command(dev->its, its_build_int_cmd, &desc); in BUILD_SINGLE_CMD_FUNC()
885 its_send_single_command(dev->its, its_build_clear_cmd, &desc); in its_send_clear()
895 its_send_single_command(dev->its, its_build_inv_cmd, &desc); in its_send_inv()
905 its_send_single_command(dev->its, its_build_mapd_cmd, &desc); in its_send_mapd()
908 static void its_send_mapc(struct its_node *its, struct its_collection *col, in its_send_mapc() argument
916 its_send_single_command(its, its_build_mapc_cmd, &desc); in its_send_mapc()
927 its_send_single_command(dev->its, its_build_mapti_cmd, &desc); in its_send_mapti()
939 its_send_single_command(dev->its, its_build_movi_cmd, &desc); in its_send_movi()
949 its_send_single_command(dev->its, its_build_discard_cmd, &desc); in its_send_discard()
952 static void its_send_invall(struct its_node *its, struct its_collection *col) in its_send_invall() argument
958 its_send_single_command(its, its_build_invall_cmd, &desc); in its_send_invall()
972 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); in its_send_vmapti()
985 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); in its_send_vmovi()
988 static void its_send_vmapp(struct its_node *its, in its_send_vmapp() argument
995 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; in its_send_vmapp()
997 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); in its_send_vmapp()
1003 struct its_node *its; in its_send_vmovp() local
1010 its = list_first_entry(&its_nodes, struct its_node, entry); in its_send_vmovp()
1011 desc.its_vmovp_cmd.col = &its->collections[col_id]; in its_send_vmovp()
1012 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); in its_send_vmovp()
1030 list_for_each_entry(its, &its_nodes, entry) { in its_send_vmovp()
1031 if (!its->is_v4) in its_send_vmovp()
1034 if (!vpe->its_vm->vlpi_count[its->list_nr]) in its_send_vmovp()
1037 desc.its_vmovp_cmd.col = &its->collections[col_id]; in its_send_vmovp()
1038 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); in its_send_vmovp()
1044 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) in its_send_vinvall() argument
1049 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); in its_send_vinvall()
1121 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI in its_vlpi_set_doorbell()
1161 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { in its_set_affinity()
1162 if (its_dev->its->numa_node >= 0) { in its_set_affinity()
1163 cpu_mask = cpumask_of_node(its_dev->its->numa_node); in its_set_affinity()
1176 target_col = &its_dev->its->collections[cpu]; in its_set_affinity()
1187 struct its_node *its = its_dev->its; in its_irq_get_msi_base() local
1189 return its->phys_base + GITS_TRANSLATER; in its_irq_get_msi_base()
1195 struct its_node *its; in its_irq_compose_msi_msg() local
1198 its = its_dev->its; in its_irq_compose_msi_msg()
1199 addr = its->get_msi_base(its_dev); in its_irq_compose_msi_msg()
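The two matches above give away how an LPI is actually signalled: the device performs a 32-bit write of its EventID to the ITS's GITS_TRANSLATER register (or, with the Socionext quirk further down, to a per-device pre-ITS doorbell). A hedged sketch of the message composition; the driver also maps the doorbell through the IOMMU, which is omitted here:

/* Sketch: an MSI targeting the ITS is "write event_id to the doorbell";
 * addr comes from its->get_msi_base(its_dev), as matched above. */
static void sketch_compose_msi(u64 addr, u32 event_id, struct msi_msg *msg)
{
	msg->address_lo = lower_32_bits(addr);
	msg->address_hi = upper_32_bits(addr);
	msg->data       = event_id;	/* the ITS interprets the data as the EventID */
}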
1226 static void its_map_vm(struct its_node *its, struct its_vm *vm) in its_map_vm() argument
1230 /* Not using the ITS list? Everything is always mapped. */ in its_map_vm()
1240 vm->vlpi_count[its->list_nr]++; in its_map_vm()
1242 if (vm->vlpi_count[its->list_nr] == 1) { in its_map_vm()
1251 its_send_vmapp(its, vpe, true); in its_map_vm()
1252 its_send_vinvall(its, vpe); in its_map_vm()
1260 static void its_unmap_vm(struct its_node *its, struct its_vm *vm) in its_unmap_vm() argument
1264 /* Not using the ITS list? Everything is always mapped. */ in its_unmap_vm()
1270 if (!--vm->vlpi_count[its->list_nr]) { in its_unmap_vm()
1274 its_send_vmapp(its, vm->vpes[i], false); in its_unmap_vm()
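its_map_vm()/its_unmap_vm() matched above keep a per-ITS count of VLPIs routed through each ITS, so a guest's VPEs are only VMAPPed on an ITS while at least one of its VLPIs actually needs it. A stripped-down sketch of that pattern; the real code also takes the VM's lock and skips all of this when no ITS list is in use:

/* Sketch: map the VM's VPEs on the first VLPI using this ITS, unmap them
 * when the last one goes away. Locking and the "no ITS list" early return
 * are omitted; see the matches above for the real guards. */
static void sketch_map_vm(struct its_node *its, struct its_vm *vm)
{
	if (vm->vlpi_count[its->list_nr]++ == 0) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			its_send_vmapp(its, vm->vpes[i], true);
			its_send_vinvall(its, vm->vpes[i]);
		}
	}
}

static void sketch_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	if (--vm->vlpi_count[its->list_nr] == 0) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}
}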
1315 /* Ensure all the VPEs are mapped on this ITS */ in its_vlpi_map()
1316 its_map_vm(its_dev->its, info->map->vm); in its_vlpi_map()
1387 /* Potentially unmap the VM from this ITS */ in its_vlpi_unmap()
1388 its_unmap_vm(its_dev->its, its_dev->event_map.vm); in its_vlpi_unmap()
1425 /* Need a v4 ITS */ in its_irq_set_vcpu_affinity()
1426 if (!its_dev->its->is_v4) in its_irq_set_vcpu_affinity()
1450 .name = "ITS",
1551 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); in alloc_lpi_range()
1586 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", in its_lpi_init()
1595 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); in its_lpi_init()
1690 static u64 its_read_baser(struct its_node *its, struct its_baser *baser) in its_read_baser() argument
1692 u32 idx = baser - its->tables; in its_read_baser()
1694 return gits_read_baser(its->base + GITS_BASER + (idx << 3)); in its_read_baser()
1697 static void its_write_baser(struct its_node *its, struct its_baser *baser, in its_write_baser() argument
1700 u32 idx = baser - its->tables; in its_write_baser()
1702 gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); in its_write_baser()
1703 baser->val = its_read_baser(its, baser); in its_write_baser()
1706 static int its_setup_baser(struct its_node *its, struct its_baser *baser, in its_setup_baser() argument
1710 u64 val = its_read_baser(its, baser); in its_setup_baser()
1720 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", in its_setup_baser()
1721 &its->phys_base, its_base_type_string[type], in its_setup_baser()
1738 pr_err("ITS: no 52bit PA support when psz=%d\n", psz); in its_setup_baser()
1770 its_write_baser(its, baser, val); in its_setup_baser()
1809 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", in its_setup_baser()
1810 &its->phys_base, its_base_type_string[type], in its_setup_baser()
1821 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", in its_setup_baser()
1822 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), in its_setup_baser()
1831 static bool its_parse_indirect_baser(struct its_node *its, in its_parse_indirect_baser() argument
1835 u64 tmp = its_read_baser(its, baser); in its_parse_indirect_baser()
1848 its_write_baser(its, baser, val | GITS_BASER_INDIRECT); in its_parse_indirect_baser()
1853 * The size of the lvl2 table is equal to ITS page size in its_parse_indirect_baser()
1856 * which is reported by ITS hardware times lvl1 table in its_parse_indirect_baser()
1866 * range of device IDs that the ITS can grok... The ID in its_parse_indirect_baser()
1875 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", in its_parse_indirect_baser()
1876 &its->phys_base, its_base_type_string[type], in its_parse_indirect_baser()
1877 its->device_ids, ids); in its_parse_indirect_baser()
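The indirect-table comments and the "Table too large" warning above come down to a sizing rule: when a BASER is two-level, each level-2 table is one ITS page of psz bytes holding psz/esz entries, so those ID bits drop out of the level-1 sizing, and each remaining ID costs one 8-byte level-1 entry. A sketch of that arithmetic (helper name illustrative):

/* Sketch: level-1 table size for an indirect BASER. psz = ITS page size,
 * esz = entry size, ids = ID bits reported by the ITS. Each level-2 page
 * covers psz/esz IDs; level-1 entries are 8 bytes (GITS_LVL1_ENTRY_SIZE). */
static unsigned long sketch_lvl1_table_bytes(unsigned int ids,
					     unsigned int psz,
					     unsigned int esz)
{
	unsigned int lvl1_ids = ids - ilog2(psz / esz);

	return 8UL << lvl1_ids;
}

With 64KB ITS pages and 8-byte device-table entries, for example, each level-2 page covers 8192 DeviceIDs, so 13 ID bits drop out of the level-1 allocation.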
1885 static void its_free_tables(struct its_node *its) in its_free_tables() argument
1890 if (its->tables[i].base) { in its_free_tables()
1891 free_pages((unsigned long)its->tables[i].base, in its_free_tables()
1892 its->tables[i].order); in its_free_tables()
1893 its->tables[i].base = NULL; in its_free_tables()
1898 static int its_alloc_tables(struct its_node *its) in its_alloc_tables() argument
1905 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) in its_alloc_tables()
1910 struct its_baser *baser = its->tables + i; in its_alloc_tables()
1911 u64 val = its_read_baser(its, baser); in its_alloc_tables()
1921 indirect = its_parse_indirect_baser(its, baser, in its_alloc_tables()
1923 its->device_ids); in its_alloc_tables()
1927 indirect = its_parse_indirect_baser(its, baser, in its_alloc_tables()
1933 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); in its_alloc_tables()
1935 its_free_tables(its); in its_alloc_tables()
1948 static int its_alloc_collections(struct its_node *its) in its_alloc_collections() argument
1952 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), in its_alloc_collections()
1954 if (!its->collections) in its_alloc_collections()
1958 its->collections[i].target_address = ~0ULL; in its_alloc_collections()
2111 static void its_cpu_init_collection(struct its_node *its) in its_cpu_init_collection() argument
2116 /* avoid cross node collections and its mapping */ in its_cpu_init_collection()
2117 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { in its_cpu_init_collection()
2121 if (its->numa_node != NUMA_NO_NODE && in its_cpu_init_collection()
2122 its->numa_node != of_node_to_nid(cpu_node)) in its_cpu_init_collection()
2127 * We now have to bind each collection to its target in its_cpu_init_collection()
2130 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { in its_cpu_init_collection()
2132 * This ITS wants the physical address of the in its_cpu_init_collection()
2137 /* This ITS wants a linear CPU number. */ in its_cpu_init_collection()
2143 its->collections[cpu].target_address = target; in its_cpu_init_collection()
2144 its->collections[cpu].col_id = cpu; in its_cpu_init_collection()
2146 its_send_mapc(its, &its->collections[cpu], 1); in its_cpu_init_collection()
2147 its_send_invall(its, &its->collections[cpu]); in its_cpu_init_collection()
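its_cpu_init_collection() above binds the current CPU's collection to a redistributor "target" whose encoding depends on GITS_TYPER.PTA: either the physical address of the redistributor, or the redistributor's processor number shifted up into the MAPC RDbase field. A hedged sketch of that choice; the redistributor base/physical-address arguments stand in for the driver's per-CPU rdist data:

/* Sketch: compute the MAPC target for this CPU's collection. */
static u64 sketch_collection_target(struct its_node *its,
				    void __iomem *rdist_base,
				    phys_addr_t rdist_phys)
{
	if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA)
		return rdist_phys;	/* ITS wants the redistributor's physical address */

	/* Otherwise it wants GICR_TYPER.Processor_Number, shifted into RDbase. */
	return GICR_TYPER_CPU_NUMBER(gic_read_typer(rdist_base + GICR_TYPER)) << 16;
}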
2152 struct its_node *its; in its_cpu_init_collections() local
2156 list_for_each_entry(its, &its_nodes, entry) in its_cpu_init_collections()
2157 its_cpu_init_collection(its); in its_cpu_init_collections()
2162 static struct its_device *its_find_device(struct its_node *its, u32 dev_id) in its_find_device() argument
2167 raw_spin_lock_irqsave(&its->lock, flags); in its_find_device()
2169 list_for_each_entry(tmp, &its->its_device_list, entry) { in its_find_device()
2176 raw_spin_unlock_irqrestore(&its->lock, flags); in its_find_device()
2181 static struct its_baser *its_get_baser(struct its_node *its, u32 type) in its_get_baser() argument
2186 if (GITS_BASER_TYPE(its->tables[i].val) == type) in its_get_baser()
2187 return &its->tables[i]; in its_get_baser()
2227 /* Ensure updated table contents are visible to ITS hardware */ in its_alloc_table_entry()
2234 static bool its_alloc_device_table(struct its_node *its, u32 dev_id) in its_alloc_device_table() argument
2238 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); in its_alloc_device_table()
2240 /* Don't allow device id that exceeds ITS hardware limit */ in its_alloc_device_table()
2242 return (ilog2(dev_id) < its->device_ids); in its_alloc_device_table()
2249 struct its_node *its; in its_alloc_vpe_table() local
2258 list_for_each_entry(its, &its_nodes, entry) { in its_alloc_vpe_table()
2261 if (!its->is_v4) in its_alloc_vpe_table()
2264 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); in its_alloc_vpe_table()
2275 static struct its_device *its_create_device(struct its_node *its, u32 dev_id, in its_create_device() argument
2288 if (!its_alloc_device_table(its, dev_id)) in its_create_device()
2300 sz = nr_ites * its->ite_size; in its_create_device()
2324 dev->its = its; in its_create_device()
2335 raw_spin_lock_irqsave(&its->lock, flags); in its_create_device()
2336 list_add(&dev->entry, &its->its_device_list); in its_create_device()
2337 raw_spin_unlock_irqrestore(&its->lock, flags); in its_create_device()
2339 /* Map device to its ITT */ in its_create_device()
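The sz computation matched above sizes the per-device ITT: the event count is rounded so the table stays a power of two, multiplied by the per-entry ITE size reported by GITS_TYPER (the QDF2400 erratum below forces 16 bytes), and padded to the 256-byte ITT alignment. A sketch of that sizing, hedged because the exact rounding varies between driver versions:

/* Sketch: bytes to allocate for an ITT serving nvecs events, assuming
 * power-of-two rounding of the event count and 256-byte ITT alignment. */
static unsigned long sketch_itt_bytes(unsigned int nvecs, unsigned int ite_size)
{
	unsigned long nr_ites = max_t(unsigned long, 2, roundup_pow_of_two(nvecs));

	return ALIGN(nr_ites * ite_size, 256);
}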
2349 raw_spin_lock_irqsave(&its_dev->its->lock, flags); in its_free_device()
2351 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); in its_free_device()
2375 struct its_node *its; in its_msi_prepare() local
2385 * are built on top of the ITS. in its_msi_prepare()
2390 its = msi_info->data; in its_msi_prepare()
2394 vpe_proxy.dev->its == its && in its_msi_prepare()
2402 mutex_lock(&its->dev_alloc_lock); in its_msi_prepare()
2403 its_dev = its_find_device(its, dev_id); in its_msi_prepare()
2415 its_dev = its_create_device(its, dev_id, nvec, true); in its_msi_prepare()
2423 mutex_unlock(&its->dev_alloc_lock); in its_msi_prepare()
2497 if (its_dev->its->numa_node >= 0) in its_irq_domain_activate()
2498 cpu_mask = cpumask_of_node(its_dev->its->numa_node); in its_irq_domain_activate()
2503 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) in its_irq_domain_activate()
2532 struct its_node *its = its_dev->its; in its_irq_domain_free() local
2546 mutex_lock(&its->dev_alloc_lock); in its_irq_domain_free()
2565 mutex_unlock(&its->dev_alloc_lock); in its_irq_domain_free()
2661 target_col = &vpe_proxy.dev->its->collections[to]; in its_vpe_db_proxy_move()
2679 * interrupt to its new location. in its_vpe_set_affinity()
2717 * would be able to read its coarse map pretty quickly anyway, in its_vpe_schedule()
2734 pr_err_ratelimited("ITS virtual pending table not cleaning\n"); in its_vpe_deschedule()
2745 struct its_node *its; in its_vpe_invall() local
2747 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_invall()
2748 if (!its->is_v4) in its_vpe_invall()
2751 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) in its_vpe_invall()
2755 * Sending a VINVALL to a single ITS is enough, as all in its_vpe_invall()
2758 its_send_vinvall(its, vpe); in its_vpe_invall()
3013 struct its_node *its; in its_vpe_irq_domain_activate() local
3022 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_irq_domain_activate()
3023 if (!its->is_v4) in its_vpe_irq_domain_activate()
3026 its_send_vmapp(its, vpe, true); in its_vpe_irq_domain_activate()
3027 its_send_vinvall(its, vpe); in its_vpe_irq_domain_activate()
3039 struct its_node *its; in its_vpe_irq_domain_deactivate() local
3048 list_for_each_entry(its, &its_nodes, entry) { in its_vpe_irq_domain_deactivate()
3049 if (!its->is_v4) in its_vpe_irq_domain_deactivate()
3052 its_send_vmapp(its, vpe, false); in its_vpe_irq_domain_deactivate()
3070 * GIC architecture specification requires the ITS to be both in its_force_quiescent()
3077 /* Disable the generation of all interrupts to this ITS */ in its_force_quiescent()
3081 /* Poll GITS_CTLR and wait until ITS becomes quiescent */ in its_force_quiescent()
3098 struct its_node *its = data; in its_enable_quirk_cavium_22375() local
3101 its->device_ids = 0x14; /* 20 bits, 8MB */ in its_enable_quirk_cavium_22375()
3102 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; in its_enable_quirk_cavium_22375()
3109 struct its_node *its = data; in its_enable_quirk_cavium_23144() local
3111 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; in its_enable_quirk_cavium_23144()
3118 struct its_node *its = data; in its_enable_quirk_qdf2400_e0065() local
3121 its->ite_size = 16; in its_enable_quirk_qdf2400_e0065()
3128 struct its_node *its = its_dev->its; in its_irq_get_msi_base_pre_its() local
3131 * The Socionext Synquacer SoC has a so-called 'pre-ITS', in its_irq_get_msi_base_pre_its()
3137 return its->pre_its_base + (its_dev->device_id << 2); in its_irq_get_msi_base_pre_its()
3142 struct its_node *its = data; in its_enable_quirk_socionext_synquacer() local
3146 if (!fwnode_property_read_u32_array(its->fwnode_handle, in its_enable_quirk_socionext_synquacer()
3147 "socionext,synquacer-pre-its", in its_enable_quirk_socionext_synquacer()
3151 its->pre_its_base = pre_its_window[0]; in its_enable_quirk_socionext_synquacer()
3152 its->get_msi_base = its_irq_get_msi_base_pre_its; in its_enable_quirk_socionext_synquacer()
3155 if (its->device_ids > ids) in its_enable_quirk_socionext_synquacer()
3156 its->device_ids = ids; in its_enable_quirk_socionext_synquacer()
3158 /* the pre-ITS breaks isolation, so disable MSI remapping */ in its_enable_quirk_socionext_synquacer()
3159 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; in its_enable_quirk_socionext_synquacer()
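The Socionext quirk matched above swaps the shared GITS_TRANSLATER doorbell for a per-device one inside the "pre-ITS" window, so the DeviceID is conveyed by the doorbell address rather than by the bus; it also clears IRQ_DOMAIN_FLAG_MSI_REMAP because that scheme defeats interrupt isolation. A tiny illustration of the address computation (example values made up):

/* Sketch: per-device doorbells inside the pre-ITS window, 4 bytes apart,
 * matching the pre_its_base + (device_id << 2) expression above. */
static u64 sketch_pre_its_doorbell(u64 pre_its_base, u32 device_id)
{
	return pre_its_base + ((u64)device_id << 2);	/* e.g. DeviceID 5 -> base + 0x14 */
}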
3167 struct its_node *its = data; in its_enable_quirk_hip07_161600802() local
3173 its->vlpi_redist_offset = SZ_128K; in its_enable_quirk_hip07_161600802()
3180 .desc = "ITS: Cavium errata 22375, 24313",
3188 .desc = "ITS: Cavium erratum 23144",
3196 .desc = "ITS: QDF2400 erratum 0065",
3197 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3206 * implementation, but with a 'pre-ITS' added that requires
3209 .desc = "ITS: Socionext Synquacer pre-ITS",
3217 .desc = "ITS: Hip07 erratum 161600802",
3227 static void its_enable_quirks(struct its_node *its) in its_enable_quirks() argument
3229 u32 iidr = readl_relaxed(its->base + GITS_IIDR); in its_enable_quirks()
3231 gic_enable_quirks(iidr, its_quirks, its); in its_enable_quirks()
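its_enable_quirks() matched above selects errata workarounds by the implementation ID: GITS_IIDR is read once at probe time and compared against each table entry's (iidr, mask) pair, calling the entry's init hook on a hit. A hedged, table-free sketch of that idea using only names visible in the matches:

/* Sketch only: the real driver walks the its_quirks[] table through
 * gic_enable_quirks(); this shows the IIDR-matching idea for one entry. */
static void sketch_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	if (iidr == 0x00001070)			/* QDF2400 ITS rev 1.x, per the table above */
		its_enable_quirk_qdf2400_e0065(its);
}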
3236 struct its_node *its; in its_save_disable() local
3240 list_for_each_entry(its, &its_nodes, entry) { in its_save_disable()
3243 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) in its_save_disable()
3246 base = its->base; in its_save_disable()
3247 its->ctlr_save = readl_relaxed(base + GITS_CTLR); in its_save_disable()
3250 pr_err("ITS@%pa: failed to quiesce: %d\n", in its_save_disable()
3251 &its->phys_base, err); in its_save_disable()
3252 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_save_disable()
3256 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); in its_save_disable()
3261 list_for_each_entry_continue_reverse(its, &its_nodes, entry) { in its_save_disable()
3264 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) in its_save_disable()
3267 base = its->base; in its_save_disable()
3268 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_save_disable()
3278 struct its_node *its; in its_restore_enable() local
3282 list_for_each_entry(its, &its_nodes, entry) { in its_restore_enable()
3286 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE)) in its_restore_enable()
3289 base = its->base; in its_restore_enable()
3292 * Make sure that the ITS is disabled. If it fails to quiesce, in its_restore_enable()
3294 * registers is undefined according to the GIC v3 ITS in its_restore_enable()
3299 pr_err("ITS@%pa: failed to quiesce on resume: %d\n", in its_restore_enable()
3300 &its->phys_base, ret); in its_restore_enable()
3304 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); in its_restore_enable()
3310 its->cmd_write = its->cmd_base; in its_restore_enable()
3315 struct its_baser *baser = &its->tables[i]; in its_restore_enable()
3320 its_write_baser(its, baser, baser->val); in its_restore_enable()
3322 writel_relaxed(its->ctlr_save, base + GITS_CTLR); in its_restore_enable()
3325 * Reinit the collection if it's stored in the ITS. This is in its_restore_enable()
3329 if (its->collections[smp_processor_id()].col_id < in its_restore_enable()
3331 its_cpu_init_collection(its); in its_restore_enable()
3341 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) in its_init_domain() argument
3350 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its); in its_init_domain()
3358 inner_domain->flags |= its->msi_domain_flags; in its_init_domain()
3360 info->data = its; in its_init_domain()
3368 struct its_node *its; in its_init_vpe_domain() local
3373 pr_info("ITS: Using DirectLPI for VPE invalidation\n"); in its_init_vpe_domain()
3377 /* Any ITS will do, even if not v4 */ in its_init_vpe_domain()
3378 its = list_first_entry(&its_nodes, struct its_node, entry); in its_init_vpe_domain()
3384 pr_err("ITS: Can't allocate GICv4 proxy device array\n"); in its_init_vpe_domain()
3389 devid = GENMASK(its->device_ids - 1, 0); in its_init_vpe_domain()
3390 vpe_proxy.dev = its_create_device(its, devid, entries, false); in its_init_vpe_domain()
3393 pr_err("ITS: Can't allocate GICv4 proxy device\n"); in its_init_vpe_domain()
3401 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", in its_init_vpe_domain()
3421 pr_err("ITS@%pa: No ITSList entry available!\n", in its_compute_its_list_map()
3437 pr_err("ITS@%pa: Duplicate ITSList entry %d\n", in its_compute_its_list_map()
3448 struct its_node *its; in its_probe_one() local
3456 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); in its_probe_one()
3462 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); in its_probe_one()
3469 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); in its_probe_one()
3473 pr_info("ITS %pR\n", res); in its_probe_one()
3475 its = kzalloc(sizeof(*its), GFP_KERNEL); in its_probe_one()
3476 if (!its) { in its_probe_one()
3481 raw_spin_lock_init(&its->lock); in its_probe_one()
3482 mutex_init(&its->dev_alloc_lock); in its_probe_one()
3483 INIT_LIST_HEAD(&its->entry); in its_probe_one()
3484 INIT_LIST_HEAD(&its->its_device_list); in its_probe_one()
3486 its->base = its_base; in its_probe_one()
3487 its->phys_base = res->start; in its_probe_one()
3488 its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); in its_probe_one()
3489 its->device_ids = GITS_TYPER_DEVBITS(typer); in its_probe_one()
3490 its->is_v4 = !!(typer & GITS_TYPER_VLPIS); in its_probe_one()
3491 if (its->is_v4) { in its_probe_one()
3497 its->list_nr = err; in its_probe_one()
3499 pr_info("ITS@%pa: Using ITS number %d\n", in its_probe_one()
3502 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); in its_probe_one()
3506 its->numa_node = numa_node; in its_probe_one()
3508 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in its_probe_one()
3510 if (!its->cmd_base) { in its_probe_one()
3514 its->cmd_write = its->cmd_base; in its_probe_one()
3515 its->fwnode_handle = handle; in its_probe_one()
3516 its->get_msi_base = its_irq_get_msi_base; in its_probe_one()
3517 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; in its_probe_one()
3519 its_enable_quirks(its); in its_probe_one()
3521 err = its_alloc_tables(its); in its_probe_one()
3525 err = its_alloc_collections(its); in its_probe_one()
3529 baser = (virt_to_phys(its->cmd_base) | in its_probe_one()
3535 gits_write_cbaser(baser, its->base + GITS_CBASER); in its_probe_one()
3536 tmp = gits_read_cbaser(its->base + GITS_CBASER); in its_probe_one()
3548 gits_write_cbaser(baser, its->base + GITS_CBASER); in its_probe_one()
3550 pr_info("ITS: using cache flushing for cmd queue\n"); in its_probe_one()
3551 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; in its_probe_one()
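The CBASER matches above are the usual probe-time pattern: program the command queue base with the preferred cacheability and shareability attributes, read the register back, and if the ITS downgraded the shareability (which it is allowed to do), fall back and remember that commands must be cleaned to the point of coherency before GITS_CWRITER is advanced. That remembered state is exactly what its_flush_cmd() checks further up. A condensed sketch of the detection step:

/* Sketch: did the ITS keep the shareability we asked for in GITS_CBASER?
 * If not, the command queue is treated as non-coherent and every command
 * gets a cache clean in its_flush_cmd() (ITS_FLAGS_CMDQ_NEEDS_FLUSHING). */
static bool sketch_cmdq_needs_flushing(u64 programmed, u64 readback)
{
	return (programmed ^ readback) & GITS_CBASER_SHAREABILITY_MASK;
}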
3554 gits_write_cwriter(0, its->base + GITS_CWRITER); in its_probe_one()
3555 ctlr = readl_relaxed(its->base + GITS_CTLR); in its_probe_one()
3557 if (its->is_v4) in its_probe_one()
3559 writel_relaxed(ctlr, its->base + GITS_CTLR); in its_probe_one()
3562 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE; in its_probe_one()
3564 err = its_init_domain(handle, its); in its_probe_one()
3569 list_add(&its->entry, &its_nodes); in its_probe_one()
3575 its_free_tables(its); in its_probe_one()
3577 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); in its_probe_one()
3579 kfree(its); in its_probe_one()
3582 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err); in its_probe_one()
3672 { .compatible = "arm,gic-v3-its", },
3686 pr_warn("%pOF: no msi-controller property, ITS ignored\n", in its_of_probe()
3709 /* GIC ITS ID */
3744 pr_err("SRAT: Invalid header length %d in ITS affinity\n", in gic_acpi_parse_srat_its()
3752 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); in gic_acpi_parse_srat_its()
3759 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", in gic_acpi_parse_srat_its()
3789 /* free the its_srat_maps after ITS probing */
3816 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", in gic_acpi_parse_madt_its()
3824 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n", in gic_acpi_parse_madt_its()
3855 struct its_node *its; in its_init() local
3867 pr_warn("ITS: No ITS available, not enabling LPIs\n"); in its_init()
3876 list_for_each_entry(its, &its_nodes, entry) in its_init()
3877 has_v4 |= its->is_v4; in its_init()
3883 pr_err("ITS: Disabling GICv4 support\n"); in its_init()