Lines Matching defs: its

122 #define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
123 #define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
124 #define device_ids(its)	(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
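
A hedged sketch of the decode pattern these macros rely on: GITS_TYPER fields come out via FIELD_GET() from <linux/bitfield.h>, and DEVbits is encoded as (number of bits - 1), hence the +1 in device_ids(). The demo_ names and the standalone mask are illustrative, not the driver's:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_TYPER_DEVBITS	GENMASK_ULL(17, 13)	/* mirrors GITS_TYPER_DEVBITS */

    static unsigned int demo_device_ids(u64 typer)
    {
    	/* DEVbits encodes (bits - 1), so add 1 to get the usable width */
    	return FIELD_GET(DEMO_TYPER_DEVBITS, typer) + 1;
    }
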
157 * translation table, and a list of interrupts. If some of its
163 struct its_node *its;
205 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
207 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
217 struct its_node *its;
220 list_for_each_entry(its, &its_nodes, entry) {
221 if (!is_v4(its))
224 if (require_its_list_vmovp(vm, its))
225 __set_bit(its->list_nr, &its_list);
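
The loop above builds a bitmap with one bit per ITS whose list number must see a VMOVP for this VM. A minimal sketch of the same __set_bit() accumulation, with illustrative names:

    #include <linux/bitops.h>

    static unsigned long demo_get_its_list(int nr_its, bool (*needs_vmovp)(int list_nr))
    {
    	unsigned long its_list = 0;
    	int i;

    	for (i = 0; i < nr_its; i++)
    		if (needs_vmovp(i))
    			__set_bit(i, &its_list);

    	return its_list;
    }
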
240 struct its_node *its = its_dev->its;
242 return its->collections + its_dev->event_map.col_map[event];
329 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
331 if (valid_col(its->collections + vpe->col_idx))
615 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
636 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
650 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
670 static struct its_collection *its_build_movi_cmd(struct its_node *its,
689 static struct its_collection *its_build_discard_cmd(struct its_node *its,
707 static struct its_collection *its_build_inv_cmd(struct its_node *its,
725 static struct its_collection *its_build_int_cmd(struct its_node *its,
743 static struct its_collection *its_build_clear_cmd(struct its_node *its,
761 static struct its_collection *its_build_invall_cmd(struct its_node *its,
773 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
782 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
785 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
789 struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
799 if (is_v4_1(its)) {
813 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
819 if (!is_v4_1(its))
844 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
850 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
864 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
867 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
873 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
887 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
890 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
896 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
903 if (is_v4_1(its)) {
910 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
913 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
928 return valid_vpe(its, map->vpe);
931 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
946 return valid_vpe(its, map->vpe);
949 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
964 return valid_vpe(its, map->vpe);
967 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
971 if (WARN_ON(!is_v4_1(its)))
979 return valid_vpe(its, desc->its_invdb_cmd.vpe);
982 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
986 if (WARN_ON(!is_v4_1(its)))
999 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1002 static u64 its_cmd_ptr_to_offset(struct its_node *its,
1005 return (ptr - its->cmd_base) * sizeof(*ptr);
1008 static int its_queue_full(struct its_node *its)
1013 widx = its->cmd_write - its->cmd_base;
1014 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
1023 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1028 while (its_queue_full(its)) {
1038 cmd = its->cmd_write++;
1041 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1042 its->cmd_write = its->cmd_base;
1053 static struct its_cmd_block *its_post_commands(struct its_node *its)
1055 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1057 writel_relaxed(wr, its->base + GITS_CWRITER);
1059 return its->cmd_write;
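
The command queue is a flat array of 32-byte its_cmd_block entries; GITS_CREADR and GITS_CWRITER hold byte offsets into it, and the write pointer wraps at the end of the queue. A hedged sketch of the pointer arithmetic above (demo_ names are illustrative):

    #include <linux/types.h>

    struct demo_cmd_block { u64 raw_cmd[4]; };	/* 32 bytes, as in the ITS */

    static u64 demo_ptr_to_offset(struct demo_cmd_block *base, struct demo_cmd_block *ptr)
    {
    	return (ptr - base) * sizeof(*ptr);	/* pointer diff scaled to bytes */
    }

    static struct demo_cmd_block *demo_advance(struct demo_cmd_block *base,
    					       struct demo_cmd_block *write,
    					       unsigned int nr_entries)
    {
    	if (++write == base + nr_entries)	/* wrap back to the start */
    		write = base;
    	return write;
    }
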
1062 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1068 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1074 static int its_wait_for_range_completion(struct its_node *its,
1082 to_idx = its_cmd_ptr_to_offset(its, to);
1091 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1121 void name(struct its_node *its, \
1130 raw_spin_lock_irqsave(&its->lock, flags); \
1132 cmd = its_allocate_entry(its); \
1134 raw_spin_unlock_irqrestore(&its->lock, flags); \
1137 sync_obj = builder(its, cmd, desc); \
1138 its_flush_cmd(its, cmd); \
1141 sync_cmd = its_allocate_entry(its); \
1145 buildfn(its, sync_cmd, sync_obj); \
1146 its_flush_cmd(its, sync_cmd); \
1150 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1151 next_cmd = its_post_commands(its); \
1152 raw_spin_unlock_irqrestore(&its->lock, flags); \
1154 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
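
BUILD_SINGLE_CMD_FUNC() expands to the same sequence every time: lock, allocate a queue slot, build the command, flush it, optionally queue a SYNC/VSYNC, publish the new write offset, then poll GITS_CREADR until the range completes. A hedged outline of that shape (the demo_ types and helpers are hypothetical stand-ins, declared here only so the sketch is self-contained):

    #include <linux/spinlock.h>

    struct demo_cmd_block;
    struct demo_its { raw_spinlock_t lock; /* queue pointers elided */ };

    /* hypothetical helpers mirroring the macro's steps */
    struct demo_cmd_block *demo_allocate_entry(struct demo_its *its);
    void demo_flush_cmd(struct demo_its *its, struct demo_cmd_block *cmd);
    void demo_post_commands(struct demo_its *its);	/* writes GITS_CWRITER */
    int demo_wait_completion(struct demo_its *its);	/* polls GITS_CREADR */

    static int demo_send_one(struct demo_its *its,
    			 void (*build)(struct demo_cmd_block *cmd))
    {
    	struct demo_cmd_block *cmd;
    	unsigned long flags;

    	raw_spin_lock_irqsave(&its->lock, flags);
    	cmd = demo_allocate_entry(its);
    	build(cmd);
    	demo_flush_cmd(its, cmd);
    	demo_post_commands(its);
    	raw_spin_unlock_irqrestore(&its->lock, flags);

    	return demo_wait_completion(its);
    }
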
1158 static void its_build_sync_cmd(struct its_node *its,
1171 static void its_build_vsync_cmd(struct its_node *its,
1191 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1201 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1211 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1221 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1224 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1232 its_send_single_command(its, its_build_mapc_cmd, &desc);
1243 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1255 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1265 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1268 static void its_send_invall(struct its_node *its, struct its_collection *col)
1274 its_send_single_command(its, its_build_invall_cmd, &desc);
1288 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1301 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1304 static void its_send_vmapp(struct its_node *its,
1311 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1313 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1319 struct its_node *its;
1326 its = list_first_entry(&its_nodes, struct its_node, entry);
1327 desc.its_vmovp_cmd.col = &its->collections[col_id];
1328 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1346 list_for_each_entry(its, &its_nodes, entry) {
1347 if (!is_v4(its))
1350 if (!require_its_list_vmovp(vpe->its_vm, its))
1353 desc.its_vmovp_cmd.col = &its->collections[col_id];
1354 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1360 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1365 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1379 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1393 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1407 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1410 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1415 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1487 WARN_ON(!is_v4_1(its_dev->its));
1505 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1523 if (is_v4_1(its_dev->its))
1536 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1616 node = its_dev->its->numa_node;
1648 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1666 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1704 target_col = &its_dev->its->collections[cpu];
1721 struct its_node *its = its_dev->its;
1723 return its->phys_base + GITS_TRANSLATER;
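
The MSI doorbell a device writes to is the ITS's GITS_TRANSLATER register, at a fixed 0x10040 offset from the ITS physical base (the 64K translation frame plus 0x40). Illustrative sketch:

    #include <linux/types.h>

    #define DEMO_GITS_TRANSLATER	0x10040

    static phys_addr_t demo_msi_doorbell(phys_addr_t its_phys_base)
    {
    	return its_phys_base + DEMO_GITS_TRANSLATER;
    }
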
1729 struct its_node *its;
1732 its = its_dev->its;
1733 addr = its->get_msi_base(its_dev);
1792 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1805 vm->vlpi_count[its->list_nr]++;
1807 if (vm->vlpi_count[its->list_nr] == 1) {
1816 its_send_vmapp(its, vpe, true);
1817 its_send_vinvall(its, vpe);
1825 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1835 if (!--vm->vlpi_count[its->list_nr]) {
1839 its_send_vmapp(its, vm->vpes[i], false);
1881 its_map_vm(its_dev->its, info->map->vm);
1954 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1992 if (!is_v4(its_dev->its))
2312 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2314 u32 idx = baser - its->tables;
2316 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2319 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2322 u32 idx = baser - its->tables;
2324 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2325 baser->val = its_read_baser(its, baser);
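
GITS_BASER<n> are 64-bit registers at consecutive 8-byte offsets from GITS_BASER (0x0100), which is why the baser index is scaled with (idx << 3) above. Hedged sketch with illustrative names:

    #include <linux/io.h>

    #define DEMO_GITS_BASER	0x0100

    static void __iomem *demo_baser_reg(void __iomem *its_base, u32 idx)
    {
    	return its_base + DEMO_GITS_BASER + (idx << 3);	/* 8 bytes per BASER */
    }
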
2328 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2331 u64 val = its_read_baser(its, baser);
2343 &its->phys_base, its_base_type_string[type],
2349 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2396 its_write_baser(its, baser, val);
2416 &its->phys_base, its_base_type_string[type],
2428 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2437 static bool its_parse_indirect_baser(struct its_node *its,
2441 u64 tmp = its_read_baser(its, baser);
2455 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2483 &its->phys_base, its_base_type_string[type],
2484 device_ids(its), ids);
2502 static u32 compute_its_aff(struct its_node *its)
2512 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2514 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2520 struct its_node *its;
2528 list_for_each_entry(its, &its_nodes, entry) {
2531 if (!is_v4_1(its) || its == cur_its)
2534 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2537 if (aff != compute_its_aff(its))
2541 baser = its->tables[2].val;
2545 return its;
2551 static void its_free_tables(struct its_node *its)
2556 if (its->tables[i].base) {
2557 free_pages((unsigned long)its->tables[i].base,
2558 its->tables[i].order);
2559 its->tables[i].base = NULL;
2564 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2571 val = its_read_baser(its, baser);
2590 its_write_baser(its, baser, val);
2612 static int its_alloc_tables(struct its_node *its)
2618 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2622 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
2628 struct its_baser *baser = its->tables + i;
2629 u64 val = its_read_baser(its, baser);
2637 if (its_probe_baser_psz(its, baser)) {
2638 its_free_tables(its);
2646 indirect = its_parse_indirect_baser(its, baser, &order,
2647 device_ids(its));
2651 if (is_v4_1(its)) {
2655 if ((sibling = find_sibling_its(its))) {
2657 its_write_baser(its, baser, baser->val);
2662 indirect = its_parse_indirect_baser(its, baser, &order,
2667 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2669 its_free_tables(its);
2683 struct its_node *its;
2690 list_for_each_entry(its, &its_nodes, entry) {
2693 if (!is_v4_1(its))
2696 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2699 if (aff != compute_its_aff(its))
2703 baser = its->tables[2].val;
2708 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2760 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2970 static int its_alloc_collections(struct its_node *its)
2974 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2976 if (!its->collections)
2980 its->collections[i].target_address = ~0ULL;
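
Collections start out unmapped: ~0ULL acts as a sentinel target_address until its_cpu_init_collection() binds each collection to a redistributor. Illustrative check, assuming that sentinel convention:

    #include <linux/types.h>

    static bool demo_col_is_mapped(u64 target_address)
    {
    	return target_address != ~0ULL;	/* still the init-time sentinel? */
    }
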
3234 static void its_cpu_init_collection(struct its_node *its)
3239 /* avoid cross node collections and its mapping */
3240 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3244 if (its->numa_node != NUMA_NO_NODE &&
3245 its->numa_node != of_node_to_nid(cpu_node))
3250 * We now have to bind each collection to its target
3253 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3266 its->collections[cpu].target_address = target;
3267 its->collections[cpu].col_id = cpu;
3269 its_send_mapc(its, &its->collections[cpu], 1);
3270 its_send_invall(its, &its->collections[cpu]);
3275 struct its_node *its;
3279 list_for_each_entry(its, &its_nodes, entry)
3280 its_cpu_init_collection(its);
3285 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3290 raw_spin_lock_irqsave(&its->lock, flags);
3292 list_for_each_entry(tmp, &its->its_device_list, entry) {
3299 raw_spin_unlock_irqrestore(&its->lock, flags);
3304 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3309 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3310 return &its->tables[i];
3316 static bool its_alloc_table_entry(struct its_node *its,
3337 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3359 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3363 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3367 return (ilog2(dev_id) < device_ids(its));
3369 return its_alloc_table_entry(its, baser, dev_id);
3374 struct its_node *its;
3384 list_for_each_entry(its, &its_nodes, entry) {
3387 if (!is_v4(its))
3390 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3394 if (!its_alloc_table_entry(its, baser, vpe_id))
3414 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3427 if (!its_alloc_device_table(its, dev_id))
3439 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3441 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3463 dev->its = its;
3474 raw_spin_lock_irqsave(&its->lock, flags);
3475 list_add(&dev->entry, &its->its_device_list);
3476 raw_spin_unlock_irqrestore(&its->lock, flags);
3478 /* Map device to its ITT */
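
The ITT sizing on line 3439 decodes GITS_TYPER.ITT_entry_size (bits [7:4]), which encodes the per-entry size as (bytes - 1). A hedged sketch of that computation, leaving out the extra alignment the driver applies afterwards:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    static size_t demo_itt_size(unsigned int nr_ites, u64 typer)
    {
    	/* a field value of 7 means 8-byte ITT entries */
    	return nr_ites * (FIELD_GET(GENMASK_ULL(7, 4), typer) + 1);
    }
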
3488 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3490 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3515 struct its_node *its;
3530 its = msi_info->data;
3534 vpe_proxy.dev->its == its &&
3542 mutex_lock(&its->dev_alloc_lock);
3543 its_dev = its_find_device(its, dev_id);
3555 its_dev = its_create_device(its, dev_id, nvec, true);
3566 mutex_unlock(&its->dev_alloc_lock);
3604 struct its_node *its = its_dev->its;
3614 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3673 struct its_node *its = its_dev->its;
3687 mutex_lock(&its->dev_alloc_lock);
3705 mutex_unlock(&its->dev_alloc_lock);
3823 target_col = &vpe_proxy.dev->its->collections[to];
3843 * interrupt to its new location.
3920 * would be able to read its coarse map pretty quickly anyway,
3942 struct its_node *its;
3944 list_for_each_entry(its, &its_nodes, entry) {
3945 if (!is_v4(its))
3948 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3955 its_send_vinvall(its, vpe);
4076 static struct its_node *its = NULL;
4078 if (!its) {
4079 list_for_each_entry(its, &its_nodes, entry) {
4080 if (is_v4_1(its))
4081 return its;
4085 its = NULL;
4088 return its;
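
find_4_1_its() memoizes its result in a function-local static so the list walk only happens once, and it resets to NULL when no GICv4.1 ITS exists so a later call retries. Sketch of the same caching pattern around a hypothetical slow lookup:

    struct demo_node;

    static struct demo_node *demo_find_cached(struct demo_node *(*slow_lookup)(void))
    {
    	static struct demo_node *cached;

    	if (!cached)
    		cached = slow_lookup();	/* may legitimately stay NULL */
    	return cached;
    }
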
4094 struct its_node *its;
4099 * it to the first valid ITS, and let the HW do its magic.
4101 its = find_4_1_its();
4102 if (its)
4103 its_send_invdb(its, vpe);
4285 struct its_node *its = find_4_1_its();
4290 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4580 struct its_node *its;
4593 list_for_each_entry(its, &its_nodes, entry) {
4594 if (!is_v4(its))
4597 its_send_vmapp(its, vpe, true);
4598 its_send_vinvall(its, vpe);
4610 struct its_node *its;
4619 list_for_each_entry(its, &its_nodes, entry) {
4620 if (!is_v4(its))
4623 its_send_vmapp(its, vpe, false);
4678 struct its_node *its = data;
4681 its->typer &= ~GITS_TYPER_DEVBITS;
4682 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4683 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4690 struct its_node *its = data;
4692 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4699 struct its_node *its = data;
4702 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4703 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
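
Both quirks above rewrite a field of the cached typer value the same way: clear the field's mask, then FIELD_PREP() the new (value - 1) encoding. Hedged sketch for the DEVBITS case (the mask mirrors GITS_TYPER_DEVBITS):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    static u64 demo_clamp_devbits(u64 typer, unsigned int nr_bits)
    {
    	typer &= ~GENMASK_ULL(17, 13);
    	return typer | FIELD_PREP(GENMASK_ULL(17, 13), nr_bits - 1);
    }
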
4710 struct its_node *its = its_dev->its;
4719 return its->pre_its_base + (its_dev->device_id << 2);
4724 struct its_node *its = data;
4728 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4729 "socionext,synquacer-pre-its",
4733 its->pre_its_base = pre_its_window[0];
4734 its->get_msi_base = its_irq_get_msi_base_pre_its;
4737 if (device_ids(its) > ids) {
4738 its->typer &= ~GITS_TYPER_DEVBITS;
4739 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4743 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
4751 struct its_node *its = data;
4757 its->vlpi_redist_offset = SZ_128K;
4763 struct its_node *its = data;
4769 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4777 struct its_node *its = data;
4779 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4846 static void its_enable_quirks(struct its_node *its)
4848 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4850 gic_enable_quirks(iidr, its_quirks, its);
4852 if (is_of_node(its->fwnode_handle))
4853 gic_enable_of_quirks(to_of_node(its->fwnode_handle),
4854 its_quirks, its);
4859 struct its_node *its;
4863 list_for_each_entry(its, &its_nodes, entry) {
4866 base = its->base;
4867 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4871 &its->phys_base, err);
4872 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4876 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4881 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4884 base = its->base;
4885 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4895 struct its_node *its;
4899 list_for_each_entry(its, &its_nodes, entry) {
4903 base = its->base;
4917 &its->phys_base, ret);
4921 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4927 its->cmd_write = its->cmd_base;
4932 struct its_baser *baser = &its->tables[i];
4937 its_write_baser(its, baser, baser->val);
4939 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4946 if (its->collections[smp_processor_id()].col_id <
4948 its_cpu_init_collection(its);
4990 static int its_init_domain(struct its_node *its)
5000 info->data = its;
5003 its->msi_domain_flags, 0,
5004 its->fwnode_handle, &its_domain_ops,
5018 struct its_node *its;
5028 its = list_first_entry(&its_nodes, struct its_node, entry);
5037 devid = GENMASK(device_ids(its) - 1, 0);
5038 vpe_proxy.dev = its_create_device(its, devid, entries, false);
5055 static int __init its_compute_its_list_map(struct its_node *its)
5069 &its->phys_base);
5073 ctlr = readl_relaxed(its->base + GITS_CTLR);
5076 writel_relaxed(ctlr, its->base + GITS_CTLR);
5077 ctlr = readl_relaxed(its->base + GITS_CTLR);
5085 &its->phys_base, its_number);
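
its_compute_its_list_map() claims an ITSList slot by writing a candidate number into GITS_CTLR.ItsNumber and reading it back; if the field sticks, the slot is usable. A hedged sketch of that write/read-back probe (the register offset and field position mirror GITS_CTLR; demo_ names are illustrative):

    #include <linux/io.h>

    static bool demo_claim_its_number(void __iomem *base, u32 nr)
    {
    	u32 ctlr = readl_relaxed(base + 0x0000);	/* GITS_CTLR */

    	ctlr &= ~(0xfU << 4);				/* ItsNumber[7:4] */
    	ctlr |= nr << 4;
    	writel_relaxed(ctlr, base + 0x0000);

    	return ((readl_relaxed(base + 0x0000) >> 4) & 0xf) == nr;
    }
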
5092 static int __init its_probe_one(struct its_node *its)
5099 its_enable_quirks(its);
5101 if (is_v4(its)) {
5102 if (!(its->typer & GITS_TYPER_VMOVP)) {
5103 err = its_compute_its_list_map(its);
5107 its->list_nr = err;
5110 &its->phys_base, err);
5112 pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
5115 if (is_v4_1(its)) {
5116 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
5118 its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
5119 if (!its->sgir_base) {
5124 its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
5127 &its->phys_base, its->mpidr, svpet);
5131 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
5137 its->cmd_base = (void *)page_address(page);
5138 its->cmd_write = its->cmd_base;
5140 err = its_alloc_tables(its);
5144 err = its_alloc_collections(its);
5148 baser = (virt_to_phys(its->cmd_base) |
5154 gits_write_cbaser(baser, its->base + GITS_CBASER);
5155 tmp = gits_read_cbaser(its->base + GITS_CBASER);
5157 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
5170 gits_write_cbaser(baser, its->base + GITS_CBASER);
5173 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5176 gits_write_cwriter(0, its->base + GITS_CWRITER);
5177 ctlr = readl_relaxed(its->base + GITS_CTLR);
5179 if (is_v4(its))
5181 writel_relaxed(ctlr, its->base + GITS_CTLR);
5183 err = its_init_domain(its);
5188 list_add(&its->entry, &its_nodes);
5194 its_free_tables(its);
5196 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5198 if (its->sgir_base)
5199 iounmap(its->sgir_base);
5201 pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
5358 { .compatible = "arm,gic-v3-its", },
5366 struct its_node *its;
5375 its = kzalloc(sizeof(*its), GFP_KERNEL);
5376 if (!its)
5379 raw_spin_lock_init(&its->lock);
5380 mutex_init(&its->dev_alloc_lock);
5381 INIT_LIST_HEAD(&its->entry);
5382 INIT_LIST_HEAD(&its->its_device_list);
5384 its->typer = gic_read_typer(its_base + GITS_TYPER);
5385 its->base = its_base;
5386 its->phys_base = res->start;
5387 its->get_msi_base = its_irq_get_msi_base;
5388 its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
5390 its->numa_node = numa_node;
5391 its->fwnode_handle = handle;
5393 return its;
5400 static void its_node_destroy(struct its_node *its)
5402 iounmap(its->base);
5403 kfree(its);
5432 struct its_node *its;
5448 its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
5449 if (!its)
5452 err = its_probe_one(its);
5454 its_node_destroy(its);
5568 struct its_node *its;
5593 its = its_node_init(&res, dom_handle,
5595 if (!its) {
5600 err = its_probe_one(its);
5673 struct its_node *its;
5696 list_for_each_entry(its, &its_nodes, entry) {
5697 has_v4 |= is_v4(its);
5698 has_v4_1 |= is_v4_1(its);