// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/errno.h>
#include "of_private.h"

/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of of_device_id match structures to search in
 * @dev: the OF device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
					   const struct device *dev)
{
	if (!matches || !dev->of_node || dev->of_node_reused)
		return NULL;
	return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);

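/*
 * Illustrative sketch (not part of the original code; "vendor,foo" and the
 * foo_* identifiers are hypothetical): a driver typically consults its match
 * table from probe() like this:
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct of_device_id *match;
 *
 *		match = of_match_device(foo_of_match, &pdev->dev);
 *		if (!match)
 *			return -ENODEV;
 *		return 0;
 *	}
 */
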
int of_device_add(struct platform_device *ofdev)
{
	BUG_ON(ofdev->dev.of_node == NULL);

	/* name and id have to be set so that the platform bus doesn't get
	 * confused on matching */
	ofdev->name = dev_name(&ofdev->dev);
	ofdev->id = PLATFORM_DEVID_NONE;

	/*
	 * If this device does not have a NUMA node binding in the devicetree
	 * (i.e. of_node_to_nid() returns NUMA_NO_NODE), device_add() will
	 * assume the device is on the same node as its parent.
	 */
	set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node));

	return device_add(&ofdev->dev);
}

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
	struct device_node *node, *of_node = dev->of_node;
	int count, i;

	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
		return;

	count = of_property_count_elems_of_size(of_node, "memory-region",
						sizeof(u32));
	/*
	 * If dev->of_node doesn't exist or doesn't contain memory-region, try
	 * the OF node having DMA configuration.
	 */
	if (count <= 0) {
		of_node = np;
		count = of_property_count_elems_of_size(
			of_node, "memory-region", sizeof(u32));
	}

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(of_node, "memory-region", i);
		/*
		 * There might be multiple memory regions, but only one
		 * restricted-dma-pool region is allowed.
		 */
		if (of_device_is_compatible(node, "restricted-dma-pool") &&
		    of_device_is_available(node)) {
			of_node_put(node);
			break;
		}
		of_node_put(node);
	}

	/*
	 * Attempt to initialize a restricted-dma-pool region if one was found.
	 * Note that count can hold a negative error code.
	 */
	if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
		dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}

/**
 * of_dma_configure_id - Set up DMA configuration
 * @dev: Device to apply DMA configuration to
 * @np: Pointer to OF node having DMA configuration
 * @force_dma: Whether device is to be set up by of_dma_configure() even if
 *		DMA capability is not explicitly described by firmware.
 * @id: Optional const pointer to an input id
 *
 * Try to get the device's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up DMA configuration.
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
			bool force_dma, const u32 *id)
{
	const struct iommu_ops *iommu;
	const struct bus_dma_region *map = NULL;
	struct device_node *bus_np;
	u64 dma_start = 0;
	u64 mask, end, size = 0;
	bool coherent;
	int ret;

	if (np == dev->of_node)
		bus_np = __of_get_dma_parent(np);
	else
		bus_np = of_node_get(np);

	ret = of_dma_get_range(bus_np, &map);
	of_node_put(bus_np);
	if (ret < 0) {
		/*
		 * For legacy reasons, we have to assume some devices need
		 * DMA configuration regardless of whether "dma-ranges" is
		 * correctly specified or not.
		 */
		if (!force_dma)
			return ret == -ENODEV ? 0 : ret;
	} else {
		const struct bus_dma_region *r = map;
		u64 dma_end = 0;

		/* Determine the overall bounds of all DMA regions */
		for (dma_start = ~0; r->size; r++) {
			/* Take lower and upper limits */
			if (r->dma_start < dma_start)
				dma_start = r->dma_start;
			if (r->dma_start + r->size > dma_end)
				dma_end = r->dma_start + r->size;
		}
		size = dma_end - dma_start;

		/*
		 * Add a workaround to treat the size as mask + 1 in case
		 * it is defined in DT as a mask.
		 */
		if (size & 1) {
			dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
				 size);
			size = size + 1;
		}

		if (!size) {
			dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
			kfree(map);
			return -EINVAL;
		}
	}

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so quietly.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (!size && dev->coherent_dma_mask)
		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
	else if (!size)
		size = 1ULL << 32;

	/*
	 * Limit coherent and dma mask based on size and default mask
	 * set by the driver.
	 */
	end = dma_start + size - 1;
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->coherent_dma_mask &= mask;
	*dev->dma_mask &= mask;
	/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
	if (!ret) {
		dev->bus_dma_limit = end;
		dev->dma_range_map = map;
	}

	coherent = of_dma_is_coherent(np);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	iommu = of_iommu_configure(dev, np, id);
	if (PTR_ERR(iommu) == -EPROBE_DEFER) {
		/* Don't touch range map if it wasn't set from a valid dma-ranges */
		if (!ret)
			dev->dma_range_map = NULL;
		kfree(map);
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "device is%sbehind an iommu\n",
		iommu ? " " : " not ");

	arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);

	if (!iommu)
		of_dma_set_restricted_buffer(dev, np);

	return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);

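/*
 * Illustrative sketch (assumed caller, hypothetical foo_* names): bus code
 * normally invokes this from its DMA-configure path, either directly or via
 * the of_dma_configure() wrapper, roughly like:
 *
 *	static int foo_bus_dma_configure(struct device *dev)
 *	{
 *		if (dev->of_node)
 *			return of_dma_configure_id(dev, dev->of_node,
 *						   true, NULL);
 *		return 0;
 *	}
 *
 * Passing force_dma as true keeps the legacy assumption that the device is
 * DMA-capable even when no valid "dma-ranges" description is found.
 */
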
int of_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	return of_device_add(pdev);
}
EXPORT_SYMBOL(of_device_register);

void of_device_unregister(struct platform_device *ofdev)
{
	device_unregister(&ofdev->dev);
}
EXPORT_SYMBOL(of_device_unregister);

const void *of_device_get_match_data(const struct device *dev)
{
	const struct of_device_id *match;

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match)
		return NULL;

	return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);

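/*
 * Illustrative sketch (hypothetical foo_* names and compatible strings):
 * drivers usually attach per-variant data to their match table and fetch it
 * from probe():
 *
 *	static const struct foo_cfg foo_v1_cfg = { .num_ports = 2 };
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo-v1", .data = &foo_v1_cfg },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct foo_cfg *cfg = of_device_get_match_data(&pdev->dev);
 *
 *		if (!cfg)
 *			return -EINVAL;
 *		return 0;
 *	}
 */
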
/*
 * Build the "of:N<name>T<type>C<compat>..." modalias string for @dev into
 * @str (when non-NULL), writing at most @len bytes. Spaces inside compatible
 * values are replaced with underscores. Returns the full length the string
 * would need, like snprintf().
 */
static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
{
	const char *compat;
	char *c;
	struct property *p;
	ssize_t csize;
	ssize_t tsize;

	if ((!dev) || (!dev->of_node))
		return -ENODEV;

	/* Name & Type */
	/* %p eats all alphanum characters, so %c must be used here */
	csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
			 of_node_get_device_type(dev->of_node));
	tsize = csize;
	len -= csize;
	if (str)
		str += csize;

	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		csize = strlen(compat) + 1;
		tsize += csize;
		if (csize > len)
			continue;

		csize = snprintf(str, len, "C%s", compat);
		for (c = str; c; ) {
			c = strchr(c, ' ');
			if (c)
				*c++ = '_';
		}
		len -= csize;
		str += csize;
	}

	return tsize;
}

int of_device_request_module(struct device *dev)
{
	char *str;
	ssize_t size;
	int ret;

	size = of_device_get_modalias(dev, NULL, 0);
	if (size < 0)
		return size;

	/* Reserve an additional byte for the trailing '\0' */
	size++;

	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	of_device_get_modalias(dev, str, size);
	str[size - 1] = '\0';
	ret = request_module(str);
	kfree(str);

	return ret;
}
EXPORT_SYMBOL_GPL(of_device_request_module);

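/*
 * Illustrative note (not from the original code): the string passed to
 * request_module() above is the same "of:N...T...C..." alias built by
 * of_device_get_modalias(), so a module declaring a matching compatible via
 * MODULE_DEVICE_TABLE(of, ...) can be autoloaded. A hypothetical caller that
 * discovers a device late might simply do:
 *
 *	if (of_device_request_module(&pdev->dev))
 *		dev_dbg(&pdev->dev, "no module available\n");
 */
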
/**
 * of_device_modalias - Fill buffer with newline terminated modalias string
 * @dev: Calling device
 * @str: Modalias string
 * @len: Size of @str
 */
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
	ssize_t sl = of_device_get_modalias(dev, str, len - 2);
	if (sl < 0)
		return sl;
	if (sl > len - 2)
		return -ENOMEM;

	str[sl++] = '\n';
	str[sl] = 0;
	return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);

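/*
 * Illustrative sketch (assumed caller, hypothetical attribute): a bus can
 * back its sysfs "modalias" attribute with this helper, along the lines of:
 *
 *	static ssize_t modalias_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		ssize_t len = of_device_modalias(dev, buf, PAGE_SIZE);
 *
 *		return len < 0 ? 0 : len;
 *	}
 *
 * The emitted string has the form "of:N<name>T<type>C<compat0>C<compat1>...",
 * with spaces in compatible values replaced by underscores and a trailing
 * newline appended.
 */
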
/**
 * of_device_uevent - Display OF related uevent information
 * @dev: Device whose OF node properties are added to the uevent
 * @env: Kernel object's userspace event reference
 */
void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const char *compat, *type;
	struct alias_prop *app;
	struct property *p;
	int seen = 0;

	if ((!dev) || (!dev->of_node))
		return;

	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
	type = of_node_get_device_type(dev->of_node);
	if (type)
		add_uevent_var(env, "OF_TYPE=%s", type);

	/* Since the compatible field can contain pretty much anything
	 * it's not really legal to split it out with commas. We split it
	 * up using a number of environment variables instead. */
	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
		seen++;
	}
	add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

	seen = 0;
	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (dev->of_node == app->np) {
			add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
				       app->alias);
			seen++;
		}
	}
	mutex_unlock(&of_mutex);
}

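/*
 * Illustrative example (hypothetical node and path): for a node "foo" at
 * /soc/foo with compatible = "vendor,foo-v2", "vendor,foo", the uevent
 * built above would carry roughly:
 *
 *	OF_NAME=foo
 *	OF_FULLNAME=/soc/foo
 *	OF_COMPATIBLE_0=vendor,foo-v2
 *	OF_COMPATIBLE_1=vendor,foo
 *	OF_COMPATIBLE_N=2
 */
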
int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
{
	int sl;

	if ((!dev) || (!dev->of_node))
		return -ENODEV;

	/* Devicetree modalias is tricky, we add it in 2 steps */
	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	sl = of_device_get_modalias(dev, &env->buf[env->buflen-1],
				    sizeof(env->buf) - env->buflen);
	if (sl >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += sl;

	return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);