// SPDX-License-Identifier: GPL-2.0
/*
 * MSI framework for platform devices
 *
 * Copyright (C) 2015 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

#define DEV_ID_SHIFT	21
#define MAX_DEV_MSIS	(1 << (32 - DEV_ID_SHIFT))

/*
 * Internal data structure containing a (made up, but unique) devid
 * and the callback to write the MSI message.
 */
struct platform_msi_priv_data {
	struct device			*dev;
	void				*host_data;
	const struct attribute_group	**msi_irq_groups;
	msi_alloc_info_t		arg;
	irq_write_msi_msg_t		write_msg;
	int				devid;
};

/* The devid allocator */
static DEFINE_IDA(platform_msi_devid_ida);

#ifdef GENERIC_MSI_DOMAIN_OPS
/*
 * Convert an msi_desc to a globally unique identifier (per-device
 * devid + msi_desc position in the msi_list).
 */
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
	u32 devid;

	devid = desc->platform.msi_priv_data->devid;

	return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
}
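
/*
 * Worked example of the encoding above (editorial sketch, not part of the
 * original source): with DEV_ID_SHIFT == 21, the hwirq carries the devid in
 * bits [31:11] and the per-device index in bits [10:0]. For devid 3 and
 * msi_index 5 this yields (3 << 11) | 5 == 0x1805, and it is also why
 * MAX_DEV_MSIS works out to 1 << 11 == 2048 vectors per device.
 */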

static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
	arg->hwirq = platform_msi_calc_hwirq(desc);
}

static int platform_msi_init(struct irq_domain *domain,
			     struct msi_domain_info *info,
			     unsigned int virq, irq_hw_number_t hwirq,
			     msi_alloc_info_t *arg)
{
	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     info->chip, info->chip_data);
}

static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
{
	arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
}
#else
#define platform_msi_set_desc		NULL
#define platform_msi_init		NULL
#define platform_msi_set_proxy_dev(x)	do {} while(0)
#endif

static void platform_msi_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	BUG_ON(!ops);

	if (ops->msi_init == NULL)
		ops->msi_init = platform_msi_init;
	if (ops->set_desc == NULL)
		ops->set_desc = platform_msi_set_desc;
}

static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);
	struct platform_msi_priv_data *priv_data;

	priv_data = desc->platform.msi_priv_data;

	priv_data->write_msg(desc, msg);
}

static void platform_msi_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip);
	if (!chip->irq_mask)
		chip->irq_mask = irq_chip_mask_parent;
	if (!chip->irq_unmask)
		chip->irq_unmask = irq_chip_unmask_parent;
	if (!chip->irq_eoi)
		chip->irq_eoi = irq_chip_eoi_parent;
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
	if (!chip->irq_write_msi_msg)
		chip->irq_write_msi_msg = platform_msi_write_msg;
	if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		    !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}

static void platform_msi_free_descs(struct device *dev, int base, int nvec)
{
	struct msi_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
		if (desc->platform.msi_index >= base &&
		    desc->platform.msi_index < (base + nvec)) {
			list_del(&desc->list);
			free_msi_entry(desc);
		}
	}
}

static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
					     int nvec,
					     struct platform_msi_priv_data *data)

{
	struct msi_desc *desc;
	int i, base = 0;

	if (!list_empty(dev_to_msi_list(dev))) {
		desc = list_last_entry(dev_to_msi_list(dev),
				       struct msi_desc, list);
		base = desc->platform.msi_index + 1;
	}

	for (i = 0; i < nvec; i++) {
		desc = alloc_msi_entry(dev, 1, NULL);
		if (!desc)
			break;

		desc->platform.msi_priv_data = data;
		desc->platform.msi_index = base + i;
		desc->irq = virq ? virq + i : 0;

		list_add_tail(&desc->list, dev_to_msi_list(dev));
	}

	if (i != nvec) {
		/* Clean up the mess */
		platform_msi_free_descs(dev, base, nvec);

		return -ENOMEM;
	}

	return 0;
}

static int platform_msi_alloc_descs(struct device *dev, int nvec,
				    struct platform_msi_priv_data *data)

{
	return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
}

/**
 * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Updates the domain and chip ops and creates a platform MSI
 * interrupt domain.
 *
 * Returns:
 * A domain pointer or NULL in case of failure.
 */
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		platform_msi_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		platform_msi_update_chip_ops(info);

	domain = msi_create_irq_domain(fwnode, info, parent);
	if (domain)
		irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);

	return domain;
}
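
/*
 * Editorial usage sketch (not part of the original file): an interrupt
 * controller driver would typically pair a minimal irq_chip and empty
 * msi_domain_ops with the MSI_FLAG_USE_DEF_*_OPS flags, so that the
 * update helpers above fill in the defaults. All foo_* names are
 * hypothetical.
 */
#if 0	/* illustration only */
static struct irq_chip foo_pmsi_irq_chip = {
	.name	= "foo-pMSI",
	/*
	 * irq_mask/irq_unmask/irq_write_msi_msg defaults are filled in
	 * by platform_msi_update_chip_ops().
	 */
};

static struct msi_domain_ops foo_pmsi_domain_ops = {
	/* defaults are filled in by platform_msi_update_dom_ops() */
};

static struct msi_domain_info foo_pmsi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.ops	= &foo_pmsi_domain_ops,
	.chip	= &foo_pmsi_irq_chip,
};

static struct irq_domain *foo_init_pmsi(struct fwnode_handle *fwnode,
					struct irq_domain *parent)
{
	return platform_msi_create_irq_domain(fwnode, &foo_pmsi_domain_info,
					      parent);
}
#endif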

static struct platform_msi_priv_data *
platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
			     irq_write_msi_msg_t write_msi_msg)
{
	struct platform_msi_priv_data *datap;
	/*
	 * Limit the number of interrupts to 2048 per device. Should we
	 * need to bump this up, DEV_ID_SHIFT should be adjusted
	 * accordingly (which would impact the max number of MSI
	 * capable devices).
	 */
	if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
		return ERR_PTR(-EINVAL);

	if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
		dev_err(dev, "Incompatible msi_domain, giving up\n");
		return ERR_PTR(-EINVAL);
	}

	/* Already had a helping of MSI? Greed... */
	if (!list_empty(dev_to_msi_list(dev)))
		return ERR_PTR(-EBUSY);

	datap = kzalloc(sizeof(*datap), GFP_KERNEL);
	if (!datap)
		return ERR_PTR(-ENOMEM);

	datap->devid = ida_simple_get(&platform_msi_devid_ida,
				      0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
	if (datap->devid < 0) {
		int err = datap->devid;
		kfree(datap);
		return ERR_PTR(err);
	}

	datap->write_msg = write_msi_msg;
	datap->dev = dev;

	return datap;
}

static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
{
	ida_simple_remove(&platform_msi_devid_ida, data->devid);
	kfree(data);
}

/**
 * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
 * @dev:		The device for which to allocate interrupts
 * @nvec:		The number of interrupts to allocate
 * @write_msi_msg:	Callback to write an interrupt message for @dev
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg)
{
	struct platform_msi_priv_data *priv_data;
	int err;

	priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(priv_data))
		return PTR_ERR(priv_data);

	err = platform_msi_alloc_descs(dev, nvec, priv_data);
	if (err)
		goto out_free_priv_data;

	err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
	if (err)
		goto out_free_desc;

	priv_data->msi_irq_groups = msi_populate_sysfs(dev);
	if (IS_ERR(priv_data->msi_irq_groups)) {
		err = PTR_ERR(priv_data->msi_irq_groups);
		goto out_free_irqs;
	}

	return 0;

out_free_irqs:
	msi_domain_free_irqs(dev->msi_domain, dev);
out_free_desc:
	platform_msi_free_descs(dev, 0, nvec);
out_free_priv_data:
	platform_msi_free_priv_data(priv_data);

	return err;
}
EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
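
/*
 * Editorial usage sketch (not part of the original file): a client device
 * driver supplies the write_msi_msg callback and then picks up the Linux
 * interrupt numbers from its MSI descriptors. The bar_* names are
 * hypothetical.
 */
#if 0	/* illustration only */
static void bar_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* Program msg->address_hi, msg->address_lo and msg->data into HW. */
}

static int bar_setup_msis(struct device *dev)
{
	struct msi_desc *desc;
	int err;

	err = platform_msi_domain_alloc_irqs(dev, 4, bar_write_msi_msg);
	if (err)
		return err;

	for_each_msi_entry(desc, dev) {
		/* desc->irq can now be handed to request_irq() */
	}

	/* Teardown path: platform_msi_domain_free_irqs(dev); */
	return 0;
}
#endif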

/**
 * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
 * @dev:	The device for which to free interrupts
 */
void platform_msi_domain_free_irqs(struct device *dev)
{
	if (!list_empty(dev_to_msi_list(dev))) {
		struct msi_desc *desc;

		desc = first_msi_entry(dev);
		msi_destroy_sysfs(dev, desc->platform.msi_priv_data->msi_irq_groups);
		platform_msi_free_priv_data(desc->platform.msi_priv_data);
	}

	msi_domain_free_irqs(dev->msi_domain, dev);
	platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);

/**
 * platform_msi_get_host_data - Query the private data associated with
 *                              a platform-msi domain
 * @domain:	The platform-msi domain
 *
 * Returns the private data provided when calling
 * platform_msi_create_device_domain.
 */
void *platform_msi_get_host_data(struct irq_domain *domain)
{
	struct platform_msi_priv_data *data = domain->host_data;

	return data->host_data;
}

/**
 * __platform_msi_create_device_domain - Create a platform-msi device domain
 *
 * @dev:		The device generating the MSIs
 * @nvec:		The number of MSIs that need to be allocated
 * @is_tree:		flag to indicate tree hierarchy
 * @write_msi_msg:	Callback to write an interrupt message for @dev
 * @ops:		The hierarchy domain operations to use
 * @host_data:		Private data associated with this domain
 *
 * Returns an irqdomain for @nvec interrupts, or NULL in case of failure.
 */
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				     unsigned int nvec,
				     bool is_tree,
				     irq_write_msi_msg_t write_msi_msg,
				     const struct irq_domain_ops *ops,
				     void *host_data)
{
	struct platform_msi_priv_data *data;
	struct irq_domain *domain;
	int err;

	data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(data))
		return NULL;

	data->host_data = host_data;
	domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
					     is_tree ? 0 : nvec,
					     dev->fwnode, ops, data);
	if (!domain)
		goto free_priv;

	platform_msi_set_proxy_dev(&data->arg);
	err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
	if (err)
		goto free_domain;

	return domain;

free_domain:
	irq_domain_remove(domain);
free_priv:
	platform_msi_free_priv_data(data);
	return NULL;
}
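
/*
 * Editorial usage sketch (not part of the original file): wire-to-MSI
 * bridge drivers normally reach this function through the
 * platform_msi_create_device_domain() wrapper declared in <linux/msi.h>,
 * handing over their own irq_domain_ops and private state. The baz_*
 * names and the surrounding probe context are hypothetical.
 */
#if 0	/* illustration only */
	struct irq_domain *d;

	d = platform_msi_create_device_domain(&pdev->dev, 64,
					      baz_write_msi_msg,
					      &baz_domain_ops,
					      baz_priv);
	if (!d)
		return -ENOMEM;
#endif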

/**
 * platform_msi_domain_free - Free interrupts associated with a platform-msi
 *			      domain
 *
 * @domain:	The platform-msi domain
 * @virq:	The base irq from which to perform the free operation
 * @nvec:	How many interrupts to free from @virq
 */
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec)
{
	struct platform_msi_priv_data *data = domain->host_data;
	struct msi_desc *desc, *tmp;

	for_each_msi_entry_safe(desc, tmp, data->dev) {
		if (WARN_ON(!desc->irq || desc->nvec_used != 1))
			return;
		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		irq_domain_free_irqs_common(domain, desc->irq, 1);
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}

/**
 * platform_msi_domain_alloc - Allocate interrupts associated with
 *			       a platform-msi domain
 *
 * @domain:	The platform-msi domain
 * @virq:	The base irq from which to perform the allocate operation
 * @nr_irqs:	How many interrupts to allocate from @virq
 *
 * Return 0 on success, or an error code on failure. Must be called
 * with irq_domain_mutex held (which can only be done as part of a
 * top-level interrupt allocation).
 */
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	struct platform_msi_priv_data *data = domain->host_data;
	int err;

	err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
	if (err)
		return err;

	err = msi_domain_populate_irqs(domain->parent, data->dev,
				       virq, nr_irqs, &data->arg);
	if (err)
		platform_msi_domain_free(domain, virq, nr_irqs);

	return err;
}
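
/*
 * Editorial usage sketch (not part of the original file): a driver that
 * owns a device domain created above typically plugs the two helpers into
 * its irq_domain_ops and uses platform_msi_get_host_data() to recover its
 * private state. The mydev_* names are hypothetical.
 */
#if 0	/* illustration only */
static int mydev_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct mydev *m = platform_msi_get_host_data(domain);
	int err;

	err = platform_msi_domain_alloc(domain, virq, nr_irqs);
	if (err)
		return err;

	/* Per-vector device programming based on 'm' would go here. */
	return 0;
}

static const struct irq_domain_ops mydev_domain_ops = {
	.alloc	= mydev_irq_domain_alloc,
	.free	= platform_msi_domain_free,
};
#endif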