• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4  * Copyright (C) 2022 Ventana Micro Systems Inc.
5  */
6 
7 #define pr_fmt(fmt) "riscv-imsic: " fmt
8 #include <linux/acpi.h>
9 #include <linux/bitmap.h>
10 #include <linux/cpu.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/irq.h>
14 #include <linux/irqchip.h>
15 #include <linux/irqdomain.h>
16 #include <linux/module.h>
17 #include <linux/msi.h>
18 #include <linux/pci.h>
19 #include <linux/platform_device.h>
20 #include <linux/spinlock.h>
21 #include <linux/smp.h>
22 
23 #include "irq-riscv-imsic-state.h"
24 
imsic_cpu_page_phys(unsigned int cpu,unsigned int guest_index,phys_addr_t * out_msi_pa)25 static bool imsic_cpu_page_phys(unsigned int cpu, unsigned int guest_index,
26 				phys_addr_t *out_msi_pa)
27 {
28 	struct imsic_global_config *global;
29 	struct imsic_local_config *local;
30 
31 	global = &imsic->global;
32 	local = per_cpu_ptr(global->local, cpu);
33 
34 	if (BIT(global->guest_index_bits) <= guest_index)
35 		return false;
36 
37 	if (out_msi_pa)
38 		*out_msi_pa = local->msi_pa + (guest_index * IMSIC_MMIO_PAGE_SZ);
39 
40 	return true;
41 }
42 
/* irq_chip callback: mask the IMSIC vector backing this interrupt. */
static void imsic_irq_mask(struct irq_data *d)
{
	struct imsic_vector *vec = irq_data_get_irq_chip_data(d);

	imsic_vector_mask(vec);
}
47 
/* irq_chip callback: unmask the IMSIC vector backing this interrupt. */
static void imsic_irq_unmask(struct irq_data *d)
{
	struct imsic_vector *vec = irq_data_get_irq_chip_data(d);

	imsic_vector_unmask(vec);
}
52 
imsic_irq_retrigger(struct irq_data * d)53 static int imsic_irq_retrigger(struct irq_data *d)
54 {
55 	struct imsic_vector *vec = irq_data_get_irq_chip_data(d);
56 	struct imsic_local_config *local;
57 
58 	if (WARN_ON(!vec))
59 		return -ENOENT;
60 
61 	local = per_cpu_ptr(imsic->global.local, vec->cpu);
62 	writel_relaxed(vec->local_id, local->msi_va);
63 	return 0;
64 }
65 
imsic_irq_compose_vector_msg(struct imsic_vector * vec,struct msi_msg * msg)66 static void imsic_irq_compose_vector_msg(struct imsic_vector *vec, struct msi_msg *msg)
67 {
68 	phys_addr_t msi_addr;
69 
70 	if (WARN_ON(!vec))
71 		return;
72 
73 	if (WARN_ON(!imsic_cpu_page_phys(vec->cpu, 0, &msi_addr)))
74 		return;
75 
76 	msg->address_hi = upper_32_bits(msi_addr);
77 	msg->address_lo = lower_32_bits(msi_addr);
78 	msg->data = vec->local_id;
79 }
80 
/* irq_chip callback: compose the MSI message for this irq's vector. */
static void imsic_irq_compose_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct imsic_vector *vec = irq_data_get_irq_chip_data(d);

	imsic_irq_compose_vector_msg(vec, msg);
}
85 
86 #ifdef CONFIG_SMP
imsic_msi_update_msg(struct irq_data * d,struct imsic_vector * vec)87 static void imsic_msi_update_msg(struct irq_data *d, struct imsic_vector *vec)
88 {
89 	struct msi_msg msg = { };
90 
91 	imsic_irq_compose_vector_msg(vec, &msg);
92 	irq_data_get_irq_chip(d)->irq_write_msi_msg(d, &msg);
93 }
94 
/*
 * irq_chip callback: migrate the interrupt to a CPU in @mask_val.
 *
 * A new vector is allocated on the requested CPU set, the device is
 * re-pointed at it, and only then is the old vector's state handed
 * over via imsic_vector_move(). Returns IRQ_SET_MASK_OK_DONE on
 * success or when no migration is needed, -ENOENT/-EBUSY/-ENOSPC on
 * failure. NOTE(review): statement order below looks deliberate
 * (device retarget before chip_data/state update) — do not reorder.
 */
static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
				  bool force)
{
	struct imsic_vector *old_vec, *new_vec;

	old_vec = irq_data_get_irq_chip_data(d);
	if (WARN_ON(!old_vec))
		return -ENOENT;

	/* If old vector cpu belongs to the target cpumask then do nothing */
	if (cpumask_test_cpu(old_vec->cpu, mask_val))
		return IRQ_SET_MASK_OK_DONE;

	/* If move is already in-flight then return failure */
	if (imsic_vector_get_move(old_vec))
		return -EBUSY;

	/* Get a new vector on the desired set of CPUs */
	new_vec = imsic_vector_alloc(old_vec->hwirq, mask_val);
	if (!new_vec)
		return -ENOSPC;

	/* Point device to the new vector */
	imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);

	/* Update irq descriptors with the new vector */
	d->chip_data = new_vec;

	/* Update effective affinity */
	irq_data_update_effective_affinity(d, cpumask_of(new_vec->cpu));

	/* Move state of the old vector to the new vector */
	imsic_vector_move(old_vec, new_vec);

	return IRQ_SET_MASK_OK_DONE;
}
131 #endif
132 
/* irq_chip for interrupts allocated directly from the IMSIC base domain. */
static struct irq_chip imsic_irq_base_chip = {
	.name			= "IMSIC",
	.irq_mask		= imsic_irq_mask,
	.irq_unmask		= imsic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= imsic_irq_set_affinity,
#endif
	.irq_retrigger		= imsic_irq_retrigger,
	.irq_compose_msi_msg	= imsic_irq_compose_msg,
	.flags			= IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
145 
imsic_irq_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)146 static int imsic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
147 				  unsigned int nr_irqs, void *args)
148 {
149 	struct imsic_vector *vec;
150 
151 	/* Multi-MSI is not supported yet. */
152 	if (nr_irqs > 1)
153 		return -EOPNOTSUPP;
154 
155 	vec = imsic_vector_alloc(virq, cpu_online_mask);
156 	if (!vec)
157 		return -ENOSPC;
158 
159 	irq_domain_set_info(domain, virq, virq, &imsic_irq_base_chip, vec,
160 			    handle_simple_irq, NULL, NULL);
161 	irq_set_noprobe(virq);
162 	irq_set_affinity(virq, cpu_online_mask);
163 	irq_data_update_effective_affinity(irq_get_irq_data(virq), cpumask_of(vec->cpu));
164 
165 	return 0;
166 }
167 
/* irq_domain callback: release the vector and tear down the mapping. */
static void imsic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);

	imsic_vector_free(irq_data_get_irq_chip_data(irqd));
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
176 
imsic_irq_domain_select(struct irq_domain * domain,struct irq_fwspec * fwspec,enum irq_domain_bus_token bus_token)177 static int imsic_irq_domain_select(struct irq_domain *domain, struct irq_fwspec *fwspec,
178 				   enum irq_domain_bus_token bus_token)
179 {
180 	const struct msi_parent_ops *ops = domain->msi_parent_ops;
181 	u32 busmask = BIT(bus_token);
182 
183 	if (fwspec->fwnode != domain->fwnode || fwspec->param_count != 0)
184 		return 0;
185 
186 	/* Handle pure domain searches */
187 	if (bus_token == ops->bus_select_token)
188 		return 1;
189 
190 	return !!(ops->bus_select_mask & busmask);
191 }
192 
193 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/*
 * debugfs callback: dump per-vector details for @irqd, or an
 * overall vector summary when called without an irq_data.
 */
static void imsic_irq_debug_show(struct seq_file *m, struct irq_domain *d,
				 struct irq_data *irqd, int ind)
{
	if (irqd)
		imsic_vector_debug_show(m, irq_data_get_irq_chip_data(irqd), ind);
	else
		imsic_vector_debug_show_summary(m, ind);
}
204 #endif
205 
/* Operations of the IMSIC base (MSI parent) irq domain. */
static const struct irq_domain_ops imsic_base_domain_ops = {
	.alloc		= imsic_irq_domain_alloc,
	.free		= imsic_irq_domain_free,
	.select		= imsic_irq_domain_select,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= imsic_irq_debug_show,
#endif
};
214 
215 #ifdef CONFIG_RISCV_IMSIC_PCI
216 
/* Mask at the PCI/MSI level first, then in the parent (IMSIC) chip. */
static void imsic_pci_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
222 
/* Unmask in the parent (IMSIC) chip first, then at the PCI/MSI level. */
static void imsic_pci_unmask_irq(struct irq_data *d)
{
	irq_chip_unmask_parent(d);
	pci_msi_unmask_irq(d);
}
228 
229 #define MATCH_PCI_MSI		BIT(DOMAIN_BUS_PCI_MSI)
230 
231 #else
232 
233 #define MATCH_PCI_MSI		0
234 
235 #endif
236 
/*
 * msi_parent_ops callback: initialize the per-device MSI domain info
 * (@info) for a child domain stacked on the IMSIC base (nexus) domain.
 *
 * Returns true when @info was set up for the requested bus token,
 * false for any unsupported parent or target bus configuration.
 */
static bool imsic_init_dev_msi_info(struct device *dev,
				    struct irq_domain *domain,
				    struct irq_domain *real_parent,
				    struct msi_domain_info *info)
{
	const struct msi_parent_ops *pops = real_parent->msi_parent_ops;

	/* MSI parent domain specific settings */
	switch (real_parent->bus_token) {
	case DOMAIN_BUS_NEXUS:
		if (WARN_ON_ONCE(domain != real_parent))
			return false;
#ifdef CONFIG_SMP
		info->chip->irq_set_affinity = irq_chip_set_affinity_parent;
#endif
		break;
	default:
		WARN_ON_ONCE(1);
		return false;
	}

	/* Is the target supported? */
	switch (info->bus_token) {
#ifdef CONFIG_RISCV_IMSIC_PCI
	case DOMAIN_BUS_PCI_DEVICE_MSI:
	case DOMAIN_BUS_PCI_DEVICE_MSIX:
		info->chip->irq_mask = imsic_pci_mask_irq;
		info->chip->irq_unmask = imsic_pci_unmask_irq;
		break;
#endif
	case DOMAIN_BUS_DEVICE_MSI:
		/*
		 * Per-device MSI should never have any MSI feature bits
		 * set. Its sole purpose is to create a dumb interrupt
		 * chip which has a device specific irq_write_msi_msg()
		 * callback.
		 */
		if (WARN_ON_ONCE(info->flags))
			return false;

		/* Core managed MSI descriptors */
		info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
			       MSI_FLAG_FREE_MSI_DESCS;
		break;
	case DOMAIN_BUS_WIRED_TO_MSI:
		break;
	default:
		WARN_ON_ONCE(1);
		return false;
	}

	/* Use hierarchical chip operations for re-trigger */
	info->chip->irq_retrigger = irq_chip_retrigger_hierarchy;

	/*
	 * Mask out the domain specific MSI feature flags which are not
	 * supported by the real parent.
	 */
	info->flags &= pops->supported_flags;

	/* Enforce the required flags */
	info->flags |= pops->required_flags;

	return true;
}
302 
303 #define MATCH_PLATFORM_MSI		BIT(DOMAIN_BUS_PLATFORM_MSI)
304 
/* MSI parent capabilities advertised by the IMSIC base domain. */
static const struct msi_parent_ops imsic_msi_parent_ops = {
	.supported_flags	= MSI_GENERIC_FLAGS_MASK |
				  MSI_FLAG_PCI_MSIX,
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.init_dev_msi_info	= imsic_init_dev_msi_info,
};
314 
imsic_irqdomain_init(void)315 int imsic_irqdomain_init(void)
316 {
317 	struct imsic_global_config *global;
318 
319 	if (!imsic || !imsic->fwnode) {
320 		pr_err("early driver not probed\n");
321 		return -ENODEV;
322 	}
323 
324 	if (imsic->base_domain) {
325 		pr_err("%pfwP: irq domain already created\n", imsic->fwnode);
326 		return -ENODEV;
327 	}
328 
329 	/* Create Base IRQ domain */
330 	imsic->base_domain = irq_domain_create_tree(imsic->fwnode,
331 						    &imsic_base_domain_ops, imsic);
332 	if (!imsic->base_domain) {
333 		pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
334 		return -ENOMEM;
335 	}
336 	imsic->base_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
337 	imsic->base_domain->msi_parent_ops = &imsic_msi_parent_ops;
338 
339 	irq_domain_update_bus_token(imsic->base_domain, DOMAIN_BUS_NEXUS);
340 
341 	global = &imsic->global;
342 	pr_info("%pfwP:  hart-index-bits: %d,  guest-index-bits: %d\n",
343 		imsic->fwnode, global->hart_index_bits, global->guest_index_bits);
344 	pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n",
345 		imsic->fwnode, global->group_index_bits, global->group_index_shift);
346 	pr_info("%pfwP: per-CPU IDs %d at base address %pa\n",
347 		imsic->fwnode, global->nr_ids, &global->base_addr);
348 	pr_info("%pfwP: total %d interrupts available\n",
349 		imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1));
350 
351 	return 0;
352 }
353 
imsic_platform_probe_common(struct fwnode_handle * fwnode)354 static int imsic_platform_probe_common(struct fwnode_handle *fwnode)
355 {
356 	if (imsic && imsic->fwnode != fwnode) {
357 		pr_err("%pfwP: fwnode mismatch\n", fwnode);
358 		return -ENODEV;
359 	}
360 
361 	return imsic_irqdomain_init();
362 }
363 
imsic_platform_dt_probe(struct platform_device * pdev)364 static int imsic_platform_dt_probe(struct platform_device *pdev)
365 {
366 	return imsic_platform_probe_common(pdev->dev.fwnode);
367 }
368 
369 #ifdef CONFIG_ACPI
370 
371 /*
372  *  On ACPI based systems, PCI enumeration happens early during boot in
373  *  acpi_scan_init(). PCI enumeration expects MSI domain setup before
374  *  it calls pci_set_msi_domain(). Hence, unlike in DT where
375  *  imsic-platform drive probe happens late during boot, ACPI based
376  *  systems need to setup the MSI domain early.
377  */
/* ACPI early-probe entry point; see the rationale comment above. */
int imsic_platform_acpi_probe(struct fwnode_handle *fwnode)
{
	return imsic_platform_probe_common(fwnode);
}
382 
383 #endif
384 
/* DT compatible strings handled by this driver. */
static const struct of_device_id imsic_platform_match[] = {
	{ .compatible = "riscv,imsics" },
	{}
};
389 
/* Built-in platform driver (no module unload path). */
static struct platform_driver imsic_platform_driver = {
	.driver = {
		.name		= "riscv-imsic",
		.of_match_table	= imsic_platform_match,
	},
	.probe = imsic_platform_dt_probe,
};
builtin_platform_driver(imsic_platform_driver);
398