/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * Some parts of this driver are derived from the original dw_dmac.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>

#include "internal.h"

#define DRV_NAME "dw_dmac"

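/*
 * Translate a three-cell DT dma-spec (request line, memory-side master,
 * peripheral-side master) into a channel by matching against dw_dma_filter().
 */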
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count != 3)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

#ifdef CONFIG_ACPI
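/*
 * ACPI filter callback: wrap the acpi_dma_spec into a dw_dma_slave and let
 * dw_dma_filter() decide whether this channel matches the request.
 */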
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
	struct acpi_dma_spec *dma_spec = param;
	struct dw_dma_slave slave = {
		.dma_dev = dma_spec->dev,
		.src_id = dma_spec->slave_id,
		.dst_id = dma_spec->slave_id,
		.m_master = 0,
		.p_master = 1,
	};

	return dw_dma_filter(chan, &slave);
}

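/*
 * Register the controller with the ACPI DMA helpers, using the generic
 * acpi_dma_simple_xlate translator so that slave channels can be requested
 * via the ACPI-described DMA resources.
 */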
static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	struct acpi_dma_filter_info *info;
	int ret;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	dma_cap_zero(info->dma_cap);
	dma_cap_set(DMA_SLAVE, info->dma_cap);
	info->filter_fn = dw_dma_acpi_filter;

	ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
	if (ret)
		dev_err(dev, "could not register acpi_dma_controller\n");
}

static void dw_dma_acpi_controller_free(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;

	acpi_dma_controller_free(dev);
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */

#ifdef CONFIG_OF
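/*
 * Build a dw_dma_platform_data structure from the device tree node; returns
 * NULL when the node or a mandatory property ("dma-masters", "dma-channels")
 * is missing or out of range.
 */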
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (of_property_read_bool(np, "is_private"))
		pdata->is_private = true;

	/*
	 * All known devices that use DT for configuration support
	 * memory-to-memory transfers, so enable it by default.
	 */
	pdata->is_memcpy = true;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
		if (tmp > CHAN_PROTCTL_MASK)
			return NULL;
		pdata->protctl = tmp;
	}

	return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
#endif

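/*
 * Map the controller registers, take the "hclk" clock, hand the chip over to
 * the shared dw_dma_probe() core and register the OF/ACPI translation hooks
 * so that clients can request channels.
 */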
static int dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	struct resource *mem;
	const struct dw_dma_platform_data *pdata;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	pdata = dev_get_platdata(dev);
	if (!pdata)
		pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->id = pdev->id;
	chip->pdata = pdata;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	err = dw_dma_probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, chip);

	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}

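/* Undo dw_probe() in reverse order. */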
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_free(chip->dw);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	dw_dma_remove(chip);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip *chip = platform_get_drvdata(pdev);

	/*
	 * We have to call dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that because the DMA device may already
	 * be powered off, and we have no way to check whether the platform is
	 * affected. That's why we call pm_runtime_get_sync() /
	 * pm_runtime_put_sync_suspend() unconditionally. On the other hand we
	 * can't use pm_runtime_suspended() because the runtime PM framework
	 * is not fully used by the driver.
	 */
	pm_runtime_get_sync(chip->dev);
	dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	{ "INTL9C60", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
#endif

#ifdef CONFIG_PM_SLEEP

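/*
 * System sleep uses the late/early phase: DMA client drivers suspend first
 * and stop their transfers before the controller is disabled and its clock
 * is gated here.
 */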
static int dw_suspend_late(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);

	dw_dma_disable(chip);
	clk_disable_unprepare(chip->clk);

	return 0;
}

static int dw_resume_early(struct device *dev)
{
	struct dw_dma_chip *chip = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(chip->clk);
	if (ret)
		return ret;

	return dw_dma_enable(chip);
}

#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};

static struct platform_driver dw_driver = {
	.probe = dw_probe,
	.remove = dw_remove,
	.shutdown = dw_shutdown,
	.driver = {
		.name = DRV_NAME,
		.pm = &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};

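/*
 * Registered at subsys_initcall time so the DMA controller is available
 * before its client drivers start probing.
 */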
static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
MODULE_ALIAS("platform:" DRV_NAME);