/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>

struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};
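
/*
 * Illustrative sketch (not part of this header): an irq_chip's
 * .irq_compose_msi_msg() callback fills in a struct msi_msg with the
 * doorbell address and payload for one interrupt. The doorbell address
 * and data encoding below are hypothetical.
 *
 *	static void my_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 *	{
 *		msg->address_lo	= lower_32_bits(MY_DOORBELL_ADDR);
 *		msg->address_hi	= upper_32_bits(MY_DOORBELL_ADDR);
 *		msg->data	= irqd_to_hwirq(d);
 *	}
 */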

extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};

/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};

/**
 * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index:	TISCI device index
 */
struct ti_sci_inta_msi_desc {
	u16	dev_index;
};

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr.
 *		    Only used if iommu_msi_shift != 0
 * @iommu_msi_shift: Indicates how many bits of the original address should be
 *		     preserved when using iommu_msi_iova.
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI/X] True if the entry is a virtual MSI-X vector beyond
 *		the hardware table size
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI]   Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform]  Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc]    FSL MC device specific msi descriptor data
 * @inta:	[INTA]	    TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	u64				iommu_msi_iova : 58;
	u64				iommu_msi_shift : 6;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	maskbit		: 1;
				u8	is_64		: 1;
				u8	is_virtual	: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev)	\
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#define for_each_msi_vector(desc, __irq, dev)				\
	for_each_msi_entry((desc), (dev))				\
		if ((desc)->irq)					\
			for (__irq = (desc)->irq;			\
			     __irq < ((desc)->irq + (desc)->nvec_used);	\
			     __irq++)
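
/*
 * Illustrative sketch (not part of this header): walking all MSI vectors
 * of a device with the iterator macros above, e.g. to quiesce them. The
 * surrounding driver context is hypothetical.
 *
 *	struct msi_desc *desc;
 *	unsigned int irq;
 *
 *	for_each_msi_vector(desc, irq, dev)
 *		disable_irq(irq);
 */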

static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova,
					       unsigned int msi_shift)
{
#ifdef CONFIG_IRQ_MSI_IOMMU
	desc->iommu_msi_iova = msi_iova >> msi_shift;
	desc->iommu_msi_shift = msi_shift;
#endif
}
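
/*
 * Illustrative sketch (not part of this header): an IOMMU DMA layer that
 * has mapped the MSI doorbell page at 'iova' could record it as follows,
 * so the low PAGE_SHIFT bits of the programmed address are preserved:
 *
 *	msi_desc_set_iommu_msi_iova(desc, iova, PAGE_SHIFT);
 */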

#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */

struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

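/*
 * Illustrative sketch (not part of this header): pci_msi_mask_irq() and
 * pci_msi_unmask_irq() are commonly used as the mask/unmask callbacks of a
 * PCI/MSI irq_chip. The chip name is hypothetical.
 *
 *	static struct irq_chip my_pci_msi_irq_chip = {
 *		.name		= "MY-PCI-MSI",
 *		.irq_mask	= pci_msi_mask_irq,
 *		.irq_unmask	= pci_msi_unmask_irq,
 *	};
 */
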
/*
 * The arch hooks to set up msi irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks must be enabled by the architecture or by
 * drivers which depend on them via msi_controller based MSI handling.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void default_teardown_msi_irqs(struct pci_dev *dev);
#else
static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	WARN_ON_ONCE(1);
	return -ENODEV;
}

static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	WARN_ON_ONCE(1);
}
#endif

/*
 * The restore hooks are still available as they are useful even
 * for fully irq domain based setups. Courtesy to XEN/X86.
 */
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>
#include <asm/msi.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 *
 * @domain_alloc_irqs and @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * is initially for a wrapper around XEN's separate MSI universe which can't
 * be wrapped into the regular irq domain concepts by mere mortals. This
 * allows msi_domain_alloc/free_irqs() to be used universally without having
 * to special case XEN all over the place.
 *
 * Contrary to other operations, @domain_alloc_irqs and @domain_free_irqs
 * are set to the default implementation if NULL, even when
 * MSI_FLAG_USE_DEF_DOM_OPS is not set, to avoid breaking existing users and
 * because these callbacks are obviously mandatory.
 *
 * This is NOT meant to be abused, but it can be useful to build wrappers
 * for specialized MSI irq domains which need extra work before and after
 * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					     struct device *dev);
};
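
/*
 * Illustrative sketch (not part of this header): a minimal set_desc()
 * callback, assuming the asm-generic msi_alloc_info_t layout which carries
 * a 'desc' pointer and a 'hwirq' field. The base hwirq computation is
 * hypothetical.
 *
 *	static void my_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
 *	{
 *		arg->desc = desc;
 *		arg->hwirq = MY_HWIRQ_BASE + desc->platform.msi_index;
 *	}
 */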

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

/* Flags for msi_domain_info */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};
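
/*
 * Illustrative sketch (not part of this header): a typical PCI capable
 * MSI domain combines the default ops/chip callbacks with MSI-X and
 * multi-MSI support. The irq chip, fwnode and parent domain below are
 * hypothetical; pci_msi_create_irq_domain() is declared further down under
 * CONFIG_PCI_MSI_IRQ_DOMAIN.
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 *			  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
 *		.chip	= &my_msi_irq_chip,
 *	};
 *
 *	domain = pci_msi_create_irq_domain(fwnode, &my_msi_domain_info, parent);
 */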

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);

struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);
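
/*
 * Illustrative sketch (not part of this header): a platform driver
 * requesting four platform MSIs and programming the message into its own
 * registers. The device structure, register offsets and vector count are
 * hypothetical.
 *
 *	static void my_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct my_dev *md = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel_relaxed(msg->address_lo, md->base + MY_MSI_ADDR_LO);
 *		writel_relaxed(msg->address_hi, md->base + MY_MSI_ADDR_HI);
 *		writel_relaxed(msg->data, md->base + MY_MSI_DATA);
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(&pdev->dev, 4, my_write_msi_msg);
 */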

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */

#endif /* LINUX_MSI_H */