1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include "../include/mc-sys.h"
36#include "../include/mc-cmd.h"
37#include "../include/mc.h"
38#include <linux/delay.h>
39#include <linux/slab.h>
40#include <linux/ioport.h>
41#include <linux/device.h>
42#include "dpmcp.h"
43
44
45
46
47#define MC_CMD_COMPLETION_TIMEOUT_MS 500
48
49
50
51
52
53#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10
54#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
55
56static enum mc_cmd_status mc_cmd_hdr_read_status(struct mc_command *cmd)
57{
58 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
59
60 return (enum mc_cmd_status)hdr->status;
61}
62
63static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
64{
65 struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
66 u16 cmd_id = le16_to_cpu(hdr->cmd_id);
67
68 return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
69}
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84int __must_check fsl_create_mc_io(struct device *dev,
85 phys_addr_t mc_portal_phys_addr,
86 u32 mc_portal_size,
87 struct fsl_mc_device *dpmcp_dev,
88 u32 flags, struct fsl_mc_io **new_mc_io)
89{
90 int error;
91 struct fsl_mc_io *mc_io;
92 void __iomem *mc_portal_virt_addr;
93 struct resource *res;
94
95 mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL);
96 if (!mc_io)
97 return -ENOMEM;
98
99 mc_io->dev = dev;
100 mc_io->flags = flags;
101 mc_io->portal_phys_addr = mc_portal_phys_addr;
102 mc_io->portal_size = mc_portal_size;
103 if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
104 spin_lock_init(&mc_io->spinlock);
105 else
106 mutex_init(&mc_io->mutex);
107
108 res = devm_request_mem_region(dev,
109 mc_portal_phys_addr,
110 mc_portal_size,
111 "mc_portal");
112 if (!res) {
113 dev_err(dev,
114 "devm_request_mem_region failed for MC portal %#llx\n",
115 mc_portal_phys_addr);
116 return -EBUSY;
117 }
118
119 mc_portal_virt_addr = devm_ioremap_nocache(dev,
120 mc_portal_phys_addr,
121 mc_portal_size);
122 if (!mc_portal_virt_addr) {
123 dev_err(dev,
124 "devm_ioremap_nocache failed for MC portal %#llx\n",
125 mc_portal_phys_addr);
126 return -ENXIO;
127 }
128
129 mc_io->portal_virt_addr = mc_portal_virt_addr;
130 if (dpmcp_dev) {
131 error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev);
132 if (error < 0)
133 goto error_destroy_mc_io;
134 }
135
136 *new_mc_io = mc_io;
137 return 0;
138
139error_destroy_mc_io:
140 fsl_destroy_mc_io(mc_io);
141 return error;
142}
143EXPORT_SYMBOL_GPL(fsl_create_mc_io);
144
145
146
147
148
149
150void fsl_destroy_mc_io(struct fsl_mc_io *mc_io)
151{
152 struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
153
154 if (dpmcp_dev)
155 fsl_mc_io_unset_dpmcp(mc_io);
156
157 devm_iounmap(mc_io->dev, mc_io->portal_virt_addr);
158 devm_release_mem_region(mc_io->dev,
159 mc_io->portal_phys_addr,
160 mc_io->portal_size);
161
162 mc_io->portal_virt_addr = NULL;
163 devm_kfree(mc_io->dev, mc_io);
164}
165EXPORT_SYMBOL_GPL(fsl_destroy_mc_io);
166
167int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
168 struct fsl_mc_device *dpmcp_dev)
169{
170 int error;
171
172 if (WARN_ON(!dpmcp_dev))
173 return -EINVAL;
174
175 if (WARN_ON(mc_io->dpmcp_dev))
176 return -EINVAL;
177
178 if (WARN_ON(dpmcp_dev->mc_io))
179 return -EINVAL;
180
181 error = dpmcp_open(mc_io,
182 0,
183 dpmcp_dev->obj_desc.id,
184 &dpmcp_dev->mc_handle);
185 if (error < 0)
186 return error;
187
188 mc_io->dpmcp_dev = dpmcp_dev;
189 dpmcp_dev->mc_io = mc_io;
190 return 0;
191}
192EXPORT_SYMBOL_GPL(fsl_mc_io_set_dpmcp);
193
194void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io)
195{
196 int error;
197 struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev;
198
199 if (WARN_ON(!dpmcp_dev))
200 return;
201
202 if (WARN_ON(dpmcp_dev->mc_io != mc_io))
203 return;
204
205 error = dpmcp_close(mc_io,
206 0,
207 dpmcp_dev->mc_handle);
208 if (error < 0) {
209 dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n",
210 error);
211 }
212
213 mc_io->dpmcp_dev = NULL;
214 dpmcp_dev->mc_io = NULL;
215}
216EXPORT_SYMBOL_GPL(fsl_mc_io_unset_dpmcp);
217
/**
 * mc_status_to_error() - Map an MC command status to a Linux errno value.
 * @status: status field read from a completed MC command header
 *
 * Return: 0 for MC_CMD_STATUS_OK, a negative errno for each mapped failure
 * status, or -EINVAL (with a WARN) for a status beyond the table.
 *
 * NOTE(review): the designated-initializer table is sparse; any in-range
 * status without an explicit entry (e.g. MC_CMD_STATUS_READY) maps to 0 —
 * presumably callers only pass final statuses here; confirm against
 * mc_send_command().
 */
static int mc_status_to_error(enum mc_cmd_status status)
{
	static const int mc_status_to_error_map[] = {
		[MC_CMD_STATUS_OK] = 0,
		[MC_CMD_STATUS_AUTH_ERR] = -EACCES,
		[MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM,
		[MC_CMD_STATUS_DMA_ERR] = -EIO,
		[MC_CMD_STATUS_CONFIG_ERR] = -ENXIO,
		[MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT,
		[MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL,
		[MC_CMD_STATUS_NO_MEMORY] = -ENOMEM,
		[MC_CMD_STATUS_BUSY] = -EBUSY,
		[MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP,
		[MC_CMD_STATUS_INVALID_STATE] = -ENODEV,
	};

	/* Unknown/out-of-range statuses indicate a firmware/driver mismatch */
	if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map)))
		return -EINVAL;

	return mc_status_to_error_map[status];
}
239
240static const char *mc_status_to_string(enum mc_cmd_status status)
241{
242 static const char *const status_strings[] = {
243 [MC_CMD_STATUS_OK] = "Command completed successfully",
244 [MC_CMD_STATUS_READY] = "Command ready to be processed",
245 [MC_CMD_STATUS_AUTH_ERR] = "Authentication error",
246 [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege",
247 [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error",
248 [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error",
249 [MC_CMD_STATUS_TIMEOUT] = "Operation timed out",
250 [MC_CMD_STATUS_NO_RESOURCE] = "No resources",
251 [MC_CMD_STATUS_NO_MEMORY] = "No memory available",
252 [MC_CMD_STATUS_BUSY] = "Device is busy",
253 [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation",
254 [MC_CMD_STATUS_INVALID_STATE] = "Invalid state"
255 };
256
257 if ((unsigned int)status >= ARRAY_SIZE(status_strings))
258 return "Unknown MC error";
259
260 return status_strings[status];
261}
262
263
264
265
266
267
268
/**
 * mc_write_command - writes a command to a Management Complex (MC) portal
 *
 * @portal: pointer to an MC portal
 * @cmd: pointer to a filled command
 *
 * The parameter registers are written first, then a write barrier, then the
 * header; writing the header last is what submits the command, so the
 * barrier ensures the MC never sees a header with stale parameters.
 */
static inline void mc_write_command(struct mc_command __iomem *portal,
				    struct mc_command *cmd)
{
	int i;

	/* copy command parameters into the portal */
	for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
		__raw_writeq(cmd->params[i], &portal->params[i]);
	/* order: all parameter writes must reach the portal before the header */
	__iowmb();

	/* submit the command by writing the header */
	__raw_writeq(cmd->header, &portal->header);
}
282
283
284
285
286
287
288
289
290
291
/**
 * mc_read_response - reads the response for the last MC command from a
 * Management Complex (MC) portal
 *
 * @portal: pointer to an MC portal
 * @resp: pointer to command response buffer
 *
 * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
 *
 * The header is read first (behind a read barrier); only when its status
 * indicates completion are the response parameters read, so a still-pending
 * command never yields half-written parameters.
 */
static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
						  portal,
						  struct mc_command *resp)
{
	int i;
	enum mc_cmd_status status;

	/* Copy command response header from MC portal: */
	__iormb();
	resp->header = __raw_readq(&portal->header);
	__iormb();
	status = mc_cmd_hdr_read_status(resp);
	if (status != MC_CMD_STATUS_OK)
		return status;

	/* Copy command response data from MC portal: */
	for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
		resp->params[i] = __raw_readq(&portal->params[i]);
	__iormb();

	return status;
}
314
315
316
317
318
319
320
321
322
323static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
324 struct mc_command *cmd,
325 enum mc_cmd_status *mc_status)
326{
327 enum mc_cmd_status status;
328 unsigned long jiffies_until_timeout =
329 jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS);
330
331
332
333
334 for (;;) {
335 status = mc_read_response(mc_io->portal_virt_addr, cmd);
336 if (status != MC_CMD_STATUS_READY)
337 break;
338
339
340
341
342
343 usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS,
344 MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
345
346 if (time_after_eq(jiffies, jiffies_until_timeout)) {
347 dev_dbg(mc_io->dev,
348 "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
349 mc_io->portal_phys_addr,
350 (unsigned int)mc_cmd_hdr_read_token(cmd),
351 (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
352
353 return -ETIMEDOUT;
354 }
355 }
356
357 *mc_status = status;
358 return 0;
359}
360
361
362
363
364
365
366
367
368
369static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
370 struct mc_command *cmd,
371 enum mc_cmd_status *mc_status)
372{
373 enum mc_cmd_status status;
374 unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
375
376 BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) %
377 MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0);
378
379 for (;;) {
380 status = mc_read_response(mc_io->portal_virt_addr, cmd);
381 if (status != MC_CMD_STATUS_READY)
382 break;
383
384 udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
385 timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
386 if (timeout_usecs == 0) {
387 dev_dbg(mc_io->dev,
388 "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
389 mc_io->portal_phys_addr,
390 (unsigned int)mc_cmd_hdr_read_token(cmd),
391 (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
392
393 return -ETIMEDOUT;
394 }
395 }
396
397 *mc_status = status;
398 return 0;
399}
400
401
402
403
404
405
406
407
408
409int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
410{
411 int error;
412 enum mc_cmd_status status;
413 unsigned long irq_flags = 0;
414
415 if (WARN_ON(in_irq() &&
416 !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)))
417 return -EINVAL;
418
419 if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
420 spin_lock_irqsave(&mc_io->spinlock, irq_flags);
421 else
422 mutex_lock(&mc_io->mutex);
423
424
425
426
427 mc_write_command(mc_io->portal_virt_addr, cmd);
428
429
430
431
432 if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
433 error = mc_polling_wait_preemptible(mc_io, cmd, &status);
434 else
435 error = mc_polling_wait_atomic(mc_io, cmd, &status);
436
437 if (error < 0)
438 goto common_exit;
439
440 if (status != MC_CMD_STATUS_OK) {
441 dev_dbg(mc_io->dev,
442 "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
443 mc_io->portal_phys_addr,
444 (unsigned int)mc_cmd_hdr_read_token(cmd),
445 (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
446 mc_status_to_string(status),
447 (unsigned int)status);
448
449 error = mc_status_to_error(status);
450 goto common_exit;
451 }
452
453 error = 0;
454common_exit:
455 if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
456 spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
457 else
458 mutex_unlock(&mc_io->mutex);
459
460 return error;
461}
462EXPORT_SYMBOL(mc_send_command);
463