#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_reserved_mem.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_device.h>

#include <asm/errno.h>
#include "of_private.h"

/**
 * of_match_device - Tell if a struct device matches an of_device_id list
 * @matches: array of of device match structures to search in
 * @dev: the of device structure to match against
 *
 * Used by a driver to check whether a platform_device present in the
 * system is in its list of supported devices.
 */
const struct of_device_id *of_match_device(const struct of_device_id *matches,
					   const struct device *dev)
{
	if (!matches || !dev->of_node || dev->of_node_reused)
		return NULL;
	return of_match_node(matches, dev->of_node);
}
EXPORT_SYMBOL(of_match_device);
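
/*
 * Illustrative sketch (not part of this file; all foo_* names are
 * hypothetical): a driver checks at probe time that the device really
 * matches one of its supported compatibles.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }	// sentinel
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		if (!of_match_device(foo_of_match, &pdev->dev))
 *			return -ENODEV;
 *		return 0;
 *	}
 */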

int of_device_add(struct platform_device *ofdev)
{
	BUG_ON(ofdev->dev.of_node == NULL);

	/* name and id have to be set so that the platform bus doesn't get
	 * confused on matching */
	ofdev->name = dev_name(&ofdev->dev);
	ofdev->id = PLATFORM_DEVID_NONE;

	/*
	 * If this device does not have a NUMA node binding in the devicetree
	 * (i.e. of_node_to_nid() returns NUMA_NO_NODE), device_add() will
	 * assume it is on the same node as its parent.
	 */
	set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node));

	return device_add(&ofdev->dev);
}
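
/*
 * Sketch of a typical caller (an assumption for illustration; see
 * of_device_alloc()/of_platform_device_create() in drivers/of/platform.c
 * for the real flow): the platform_device must have dev.of_node set and
 * its struct device initialised before of_device_add() is called.
 *
 *	pdev = of_device_alloc(np, NULL, parent); // sets dev.of_node
 *	if (!pdev)
 *		return -ENOMEM;
 *	if (of_device_add(pdev)) {
 *		platform_device_put(pdev);
 *		return -ENODEV;
 *	}
 */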

static void
of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
{
	struct device_node *node, *of_node = dev->of_node;
	int count, i;

	if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL))
		return;

	count = of_property_count_elems_of_size(of_node, "memory-region",
						sizeof(u32));
	/*
	 * If dev->of_node doesn't exist or doesn't contain memory-region,
	 * try the OF node having the DMA configuration.
	 */
	if (count <= 0) {
		of_node = np;
		count = of_property_count_elems_of_size(
			of_node, "memory-region", sizeof(u32));
	}

	for (i = 0; i < count; i++) {
		node = of_parse_phandle(of_node, "memory-region", i);
		/*
		 * There might be multiple memory regions, but only one
		 * restricted-dma-pool region is allowed.
		 */
		if (of_device_is_compatible(node, "restricted-dma-pool") &&
		    of_device_is_available(node)) {
			of_node_put(node);
			break;
		}
		of_node_put(node);
	}

	/*
	 * Attempt to initialize a restricted-dma-pool region if one was
	 * found. Note that count can hold a negative error code.
	 */
	if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i))
		dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n");
}
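
/*
 * Illustrative devicetree fragment (hypothetical addresses and names,
 * following the reserved-memory "restricted-dma-pool" binding) that the
 * function above would act on:
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x0 0x50000000 0x0 0x400000>;
 *		};
 *	};
 *
 *	dma-dev@10000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 */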

/**
 * of_dma_configure_id - Setup DMA configuration
 * @dev:	Device to apply DMA configuration
 * @np:		Pointer to OF node having DMA configuration
 * @force_dma:	Whether the device is to be set up even if DMA capability
 *		is not explicitly described by firmware
 * @id:		Optional const pointer value input id
 *
 * Try to get the device's DMA configuration from DT and update it
 * accordingly.
 *
 * If platform code needs to use its own special DMA configuration, it
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up the DMA configuration.
 */
int of_dma_configure_id(struct device *dev, struct device_node *np,
			bool force_dma, const u32 *id)
{
	const struct iommu_ops *iommu;
	const struct bus_dma_region *map = NULL;
	u64 dma_start = 0;
	u64 mask, end, size = 0;
	bool coherent;
	int ret;

	ret = of_dma_get_range(np, &map);
	if (ret < 0) {
		/*
		 * For legacy reasons, we have to assume some devices need
		 * DMA configuration regardless of whether "dma-ranges" is
		 * correctly specified or not.
		 */
		if (!force_dma)
			return ret == -ENODEV ? 0 : ret;
	} else {
		const struct bus_dma_region *r = map;
		u64 dma_end = 0;

		/* Determine the overall bounds of all DMA regions */
		for (dma_start = ~0; r->size; r++) {
			/* Take lower and upper limits */
			if (r->dma_start < dma_start)
				dma_start = r->dma_start;
			if (r->dma_start + r->size > dma_end)
				dma_end = r->dma_start + r->size;
		}
		size = dma_end - dma_start;

		/*
		 * Add a work around to treat the size as mask + 1 in case
		 * it is defined in DT as a mask.
		 */
		if (size & 1) {
			dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
				 size);
			size = size + 1;
		}

		if (!size) {
			dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
			kfree(map);
			return -EINVAL;
		}
	}

	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
	 * it should have initialised its dma_mask pointer by this point. For
	 * now, we'll continue the legacy behaviour of coercing it to the
	 * coherent mask if not, but we'll no longer do so silently.
	 */
	if (!dev->dma_mask) {
		dev_warn(dev, "DMA mask not set\n");
		dev->dma_mask = &dev->coherent_dma_mask;
	}

	if (!size && dev->coherent_dma_mask)
		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
	else if (!size)
		size = 1ULL << 32;

	/*
	 * Limit coherent and dma mask based on size and default mask
	 * set by the driver.
	 */
	end = dma_start + size - 1;
	mask = DMA_BIT_MASK(ilog2(end) + 1);
	dev->coherent_dma_mask &= mask;
	*dev->dma_mask &= mask;
	/* ...but only set bus limit and range map if we found valid dma-ranges earlier */
	if (!ret) {
		dev->bus_dma_limit = end;
		dev->dma_range_map = map;
	}

	coherent = of_dma_is_coherent(np);
	dev_dbg(dev, "device is%sdma coherent\n",
		coherent ? " " : " not ");

	iommu = of_iommu_configure(dev, np, id);
	if (PTR_ERR(iommu) == -EPROBE_DEFER) {
		/* Don't touch range map if it wasn't set from a valid dma-ranges */
		if (!ret)
			dev->dma_range_map = NULL;
		kfree(map);
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "device is%sbehind an iommu\n",
		iommu ? " " : " not ");

	arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);

	if (!iommu)
		of_dma_set_restricted_buffer(dev, np);

	return 0;
}
EXPORT_SYMBOL_GPL(of_dma_configure_id);
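
/*
 * Worked example for the mask computation above (made-up numbers): a
 * single dma-ranges entry mapping bus address 0x80000000 with size
 * 0x40000000 gives dma_start = 0x80000000 and size = 0x40000000, hence
 * end = 0xbfffffff and mask = DMA_BIT_MASK(ilog2(0xbfffffff) + 1) =
 * DMA_BIT_MASK(32). Both DMA masks are clamped to 32 bits and
 * bus_dma_limit is set to 0xbfffffff.
 */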

int of_device_register(struct platform_device *pdev)
{
	device_initialize(&pdev->dev);
	return of_device_add(pdev);
}
EXPORT_SYMBOL(of_device_register);

void of_device_unregister(struct platform_device *ofdev)
{
	device_unregister(&ofdev->dev);
}
EXPORT_SYMBOL(of_device_unregister);

const void *of_device_get_match_data(const struct device *dev)
{
	const struct of_device_id *match;

	match = of_match_device(dev->driver->of_match_table, dev);
	if (!match)
		return NULL;

	return match->data;
}
EXPORT_SYMBOL(of_device_get_match_data);
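
/*
 * Illustrative sketch (hypothetical foo_* names): drivers commonly hang a
 * per-variant config struct off of_device_id.data and retrieve it here.
 *
 *	struct foo_cfg { unsigned int num_channels; };
 *	static const struct foo_cfg foo_v1_cfg = { .num_channels = 4 };
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo-v1", .data = &foo_v1_cfg },
 *		{ }	// sentinel
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct foo_cfg *cfg = of_device_get_match_data(&pdev->dev);
 *
 *		if (!cfg)
 *			return -EINVAL;
 *		// cfg->num_channels now selects the variant-specific setup
 *		return 0;
 *	}
 */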

static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
{
	const char *compat;
	char *c;
	struct property *p;
	ssize_t csize;
	ssize_t tsize;

	if ((!dev) || (!dev->of_node))
		return -ENODEV;

	/* Name & Type */
	/* %p eats all alphanum characters, so %c must be used here */
	csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
			 of_node_get_device_type(dev->of_node));
	tsize = csize;
	len -= csize;
	if (str)
		str += csize;

	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		csize = strlen(compat) + 1;
		tsize += csize;
		if (csize > len)
			continue;

		csize = snprintf(str, len, "C%s", compat);
		/* Replace any embedded spaces so the alias stays one token */
		for (c = str; c; ) {
			c = strchr(c, ' ');
			if (c)
				*c++ = '_';
		}
		len -= csize;
		str += csize;
	}

	return tsize;
}
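
/*
 * For illustration (a made-up node): a node named "ethernet" with
 * device_type "network" and compatibles "vendor,fast-eth" and
 * "vendor,eth" yields the modalias
 *
 *	of:NethernetTnetworkCvendor,fast-ethCvendor,eth
 *
 * which userspace matches against wildcard aliases such as
 * "of:N*T*Cvendor,fast-eth*".
 */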

int of_device_request_module(struct device *dev)
{
	char *str;
	ssize_t size;
	int ret;

	size = of_device_get_modalias(dev, NULL, 0);
	if (size < 0)
		return size;

	/* Reserve an additional byte for the trailing '\0' */
	str = kmalloc(size + 1, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	of_device_get_modalias(dev, str, size);
	str[size] = '\0';
	ret = request_module(str);
	kfree(str);

	return ret;
}
EXPORT_SYMBOL_GPL(of_device_request_module);

/**
 * of_device_modalias - Fill buffer with newline terminated modalias string
 * @dev:	Calling device
 * @str:	Modalias string
 * @len:	Size of @str
 */
ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len)
{
	ssize_t sl = of_device_get_modalias(dev, str, len - 2);
	if (sl < 0)
		return sl;
	if (sl > len - 2)
		return -ENOMEM;

	str[sl++] = '\n';
	str[sl] = 0;
	return sl;
}
EXPORT_SYMBOL_GPL(of_device_modalias);
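
/*
 * Sketch (an assumption, mirroring how bus code typically exposes this):
 * a sysfs "modalias" attribute can be backed directly by
 * of_device_modalias(), which appends the trailing newline.
 *
 *	static ssize_t modalias_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		return of_device_modalias(dev, buf, PAGE_SIZE);
 *	}
 *	static DEVICE_ATTR_RO(modalias);
 */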

/**
 * of_device_uevent - Display OF related uevent information
 * @dev:	Device to display the uevent information for
 * @env:	Kernel object's userspace event reference to fill up
 */
void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const char *compat, *type;
	struct alias_prop *app;
	struct property *p;
	int seen = 0;

	if ((!dev) || (!dev->of_node))
		return;

	add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
	add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
	type = of_node_get_device_type(dev->of_node);
	if (type)
		add_uevent_var(env, "OF_TYPE=%s", type);

	/*
	 * Since the compatible field can contain pretty much anything, it's
	 * not really legal to split it out with commas. We split it up using
	 * a number of environment variables instead.
	 */
	of_property_for_each_string(dev->of_node, "compatible", p, compat) {
		add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat);
		seen++;
	}
	add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);

	seen = 0;
	mutex_lock(&of_mutex);
	list_for_each_entry(app, &aliases_lookup, link) {
		if (dev->of_node == app->np) {
			add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
				       app->alias);
			seen++;
		}
	}
	mutex_unlock(&of_mutex);
}
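
/*
 * Example environment (hypothetical values) produced by the function
 * above for a node /soc/ethernet@ff000000 with two compatible strings
 * and one alias:
 *
 *	OF_NAME=ethernet
 *	OF_FULLNAME=/soc/ethernet@ff000000
 *	OF_COMPATIBLE_0=vendor,fast-eth
 *	OF_COMPATIBLE_1=vendor,eth
 *	OF_COMPATIBLE_N=2
 *	OF_ALIAS_0=ethernet0
 */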

int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
{
	int sl;

	if ((!dev) || (!dev->of_node))
		return -ENODEV;

	/* The modalias is written directly after "MODALIAS=" in env->buf */
	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	sl = of_device_get_modalias(dev, &env->buf[env->buflen-1],
				    sizeof(env->buf) - env->buflen);
	if (sl < 0)
		return sl;
	if (sl >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;
	env->buflen += sl;

	return 0;
}
EXPORT_SYMBOL_GPL(of_device_uevent_modalias);