1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#include <linux/delay.h>
32#include <linux/firmware.h>
33#include <linux/io.h>
34#include <linux/module.h>
35#include <linux/of_address.h>
36#include <linux/of_device.h>
37#include <linux/platform_device.h>
38
#define DRVNAME "brcmstb-dpfe"

/* DCPU register offsets (relative to the "dpfe-cpu" register block) */
#define REG_DCPU_RESET 0x0
#define REG_TO_DCPU_MBOX 0x10
#define REG_TO_HOST_MBOX 0x14

/* Macros to decode offsets returned by the DCPU (see get_msg_ptr()) */
#define DRAM_MSG_ADDR_OFFSET 0x0
#define DRAM_MSG_TYPE_OFFSET 0x1c
#define DRAM_MSG_ADDR_MASK ((1UL << DRAM_MSG_TYPE_OFFSET) - 1)
#define DRAM_MSG_TYPE_MASK ((1UL << \
 (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1)

/* Message RAM: window in the register space used to exchange messages */
#define DCPU_MSG_RAM_START 0x100
#define DCPU_MSG_RAM(x) (DCPU_MSG_RAM_START + (x) * sizeof(u32))

/* DRAM info field offsets & masks (API v2) */
#define DRAM_INFO_INTERVAL 0x0
#define DRAM_INFO_MR4 0x4
#define DRAM_INFO_ERROR 0x8
#define DRAM_INFO_MR4_MASK 0xff
#define DRAM_INFO_MR4_SHIFT 24	/* MR4 value lives in byte 3 of the word */

/* Bit offsets of the individual fields within the MR4 value */
#define DRAM_MR4_REFRESH 0x0	/* refresh rate */
#define DRAM_MR4_SR_ABORT 0x3	/* self-refresh abort */
#define DRAM_MR4_PPRE 0x4	/* post-package repair entry/exit */
#define DRAM_MR4_TH_OFFS 0x5	/* thermal offset */
#define DRAM_MR4_TUF 0x7	/* temperature update flag */

/* Field widths (as masks) matching the MR4 offsets above */
#define DRAM_MR4_REFRESH_MASK 0x7
#define DRAM_MR4_SR_ABORT_MASK 0x1
#define DRAM_MR4_PPRE_MASK 0x1
#define DRAM_MR4_TH_OFFS_MASK 0x3
#define DRAM_MR4_TUF_MASK 0x1

/* DRAM vendor information field offsets & masks (API v2) */
#define DRAM_VENDOR_MR5 0x0
#define DRAM_VENDOR_MR6 0x4
#define DRAM_VENDOR_MR7 0x8
#define DRAM_VENDOR_MR8 0xc
#define DRAM_VENDOR_ERROR 0x10
#define DRAM_VENDOR_MASK 0xff
#define DRAM_VENDOR_SHIFT 24	/* vendor value lives in byte 3 of the word */

/* DRAM information field offsets & masks (API v3) */
#define DRAM_DDR_INFO_MR4 0x0
#define DRAM_DDR_INFO_MR5 0x4
#define DRAM_DDR_INFO_MR6 0x8
#define DRAM_DDR_INFO_MR7 0xc
#define DRAM_DDR_INFO_MR8 0x10
#define DRAM_DDR_INFO_ERROR 0x14
#define DRAM_DDR_INFO_MASK 0xff

/* Bits in REG_DCPU_RESET */
#define DCPU_RESET_SHIFT 0x0
#define DCPU_RESET_MASK 0x1
#define DCPU_CLK_DISABLE_SHIFT 0x2

/* Return codes delivered by the DCPU through REG_TO_HOST_MBOX */
#define DCPU_RET_ERROR_BIT BIT(31)
#define DCPU_RET_SUCCESS 0x1
#define DCPU_RET_ERR_HEADER (DCPU_RET_ERROR_BIT | BIT(0))
#define DCPU_RET_ERR_INVAL (DCPU_RET_ERROR_BIT | BIT(1))
#define DCPU_RET_ERR_CHKSUM (DCPU_RET_ERROR_BIT | BIT(2))
#define DCPU_RET_ERR_COMMAND (DCPU_RET_ERROR_BIT | BIT(3))
/* Driver-internal code used to flag a mailbox timeout (not from the DCPU) */
#define DCPU_RET_ERR_TIMEDOUT (DCPU_RET_ERROR_BIT | BIT(4))

/* Firmware magic words; they double as a byte-order marker for the image */
#define DPFE_BE_MAGIC 0xfe1010fe
#define DPFE_LE_MAGIC 0xfe0101fe

/* Firmware image validation errors (returned by __verify_firmware()) */
#define ERR_INVALID_MAGIC -1
#define ERR_INVALID_SIZE -2
#define ERR_INVALID_CHKSUM -3

/* Message types placed in the MSG_HEADER field */
#define DPFE_MSG_TYPE_COMMAND 1
#define DPFE_MSG_TYPE_RESPONSE 2

/* Maximum number of 1 ms sleeps while polling a mailbox register */
#define DELAY_LOOP_MAX 1000
124
/* Layout of a DCPU message in message RAM; one u32 word per field. */
enum dpfe_msg_fields {
	MSG_HEADER,	/* message type (DPFE_MSG_TYPE_*) */
	MSG_COMMAND,	/* command code */
	MSG_ARG_COUNT,	/* number of argument words that follow */
	MSG_ARG0,	/* first argument word */
	MSG_FIELD_MAX = 16 /* total message size, in words */
};
132
/* Commands supported by the driver; index into dpfe_api.command[]. */
enum dpfe_commands {
	DPFE_CMD_GET_INFO,
	DPFE_CMD_GET_REFRESH,
	DPFE_CMD_GET_VENDOR,
	DPFE_CMD_MAX		/* number of commands / size of the table */
};
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
/*
 * Header at the start of a DPFE firmware image. The byte order of all
 * fields is indicated by 'magic' (DPFE_BE_MAGIC or DPFE_LE_MAGIC); the
 * IMEM and DMEM sections follow the header, trailed by a checksum word.
 */
struct dpfe_firmware_header {
	u32 magic;	/* DPFE_BE_MAGIC or DPFE_LE_MAGIC */
	u32 sequence;
	u32 version;
	u32 imem_size;	/* size of the IMEM section, in bytes */
	u32 dmem_size;	/* size of the DMEM section, in bytes */
};
164
165
/* Parameters gathered while verifying a firmware image, used for download. */
struct init_data {
	unsigned int dmem_len;	/* length of the DMEM section, in bytes */
	unsigned int imem_len;	/* length of the IMEM section, in bytes */
	unsigned int chksum;	/* checksum word read from the image */
	bool is_big_endian;	/* true if the image data is big endian */
};
172
173
/* Per-firmware-generation API description, selected via the OF match table. */
struct dpfe_api {
	int version;		/* driver API version implemented */
	const char *fw_name;	/* firmware file to download; NULL if none */
	const struct attribute_group **sysfs_attrs; /* sysfs groups to expose */
	u32 command[DPFE_CMD_MAX][MSG_FIELD_MAX];   /* message template per command */
};
180
181
/* Private driver state; one instance per probed device. */
struct brcmstb_dpfe_priv {
	void __iomem *regs;	/* DCPU control registers ("dpfe-cpu") */
	void __iomem *dmem;	/* DCPU data memory ("dpfe-dmem") */
	void __iomem *imem;	/* DCPU instruction memory ("dpfe-imem") */
	struct device *dev;
	const struct dpfe_api *dpfe_api; /* API description for this chip */
	struct mutex lock;	/* serializes register/mailbox access */
};
190
191
192
193
194
/*
 * Forward declarations of the sysfs show/store handlers; needed because
 * the DEVICE_ATTR() definitions below reference them.
 */
static ssize_t show_info(struct device *, struct device_attribute *, char *);
static ssize_t show_refresh(struct device *, struct device_attribute *, char *);
static ssize_t store_refresh(struct device *, struct device_attribute *,
 const char *, size_t);
static ssize_t show_vendor(struct device *, struct device_attribute *, char *);
static ssize_t show_dram(struct device *, struct device_attribute *, char *);

/*
 * Sysfs attributes: dpfe_refresh is read-write (0644); the others are
 * read-only (0444).
 */
static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
static DEVICE_ATTR(dpfe_dram, 0444, show_dram, NULL);
211
212
/* Attributes exposed when running API v2 firmware */
static struct attribute *dpfe_v2_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_refresh.attr,
	&dev_attr_dpfe_vendor.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe_v2);
220
221
/* Attributes exposed when running API v3 firmware */
static struct attribute *dpfe_v3_attrs[] = {
	&dev_attr_dpfe_info.attr,
	&dev_attr_dpfe_dram.attr,
	NULL
};
ATTRIBUTE_GROUPS(dpfe_v3);
228
229
230
231
232
233
/*
 * Command set for older v2-style firmware (driver API version 1). These
 * messages carry the command code in MSG_COMMAND plus one argument.
 */
static const struct dpfe_api dpfe_api_old_v2 = {
	.version = 1,
	.fw_name = "dpfe.bin",
	.sysfs_attrs = dpfe_v2_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 1,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 2,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_VENDOR] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 2,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 2,
		},
	}
};
259
260
261
262
263
/*
 * Command set for newer v2-style firmware (driver API version 2). Commands
 * are encoded entirely in MSG_COMMAND and take no arguments. fw_name is
 * NULL, so the driver has no image to download itself (see
 * brcmstb_dpfe_download_firmware()).
 */
static const struct dpfe_api dpfe_api_new_v2 = {
	.version = 2,
	.fw_name = NULL,
	.sysfs_attrs = dpfe_v2_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x101,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x201,
		},
		[DPFE_CMD_GET_VENDOR] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x202,
		},
	}
};
283
284
/* Command set for API v3 firmware; no firmware file to download. */
static const struct dpfe_api dpfe_api_v3 = {
	.version = 3,
	.fw_name = NULL,
	.sysfs_attrs = dpfe_v3_groups,
	.command = {
		[DPFE_CMD_GET_INFO] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x0101,
			[MSG_ARG_COUNT] = 1,
			[MSG_ARG0] = 1,
		},
		[DPFE_CMD_GET_REFRESH] = {
			[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
			[MSG_COMMAND] = 0x0202,
			[MSG_ARG_COUNT] = 0,
		},
		/* There is no GET_VENDOR command in API v3. */
	},
};
304
/* Map a (positive) error code to a human-readable string. */
static const char *get_error_text(unsigned int i)
{
	static const char * const error_text[] = {
		"Success", "Header code incorrect",
		"Unknown command or argument", "Incorrect checksum",
		"Malformed command", "Timed out", "Unknown error",
	};
	const unsigned int last = ARRAY_SIZE(error_text) - 1;

	/* Out-of-range codes map to the final "Unknown error" entry. */
	return error_text[unlikely(i > last) ? last : i];
}
318
319static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
320{
321 u32 val;
322
323 mutex_lock(&priv->lock);
324 val = readl_relaxed(priv->regs + REG_DCPU_RESET);
325 mutex_unlock(&priv->lock);
326
327 return !(val & DCPU_RESET_MASK);
328}
329
330static void __disable_dcpu(struct brcmstb_dpfe_priv *priv)
331{
332 u32 val;
333
334 if (!is_dcpu_enabled(priv))
335 return;
336
337 mutex_lock(&priv->lock);
338
339
340 val = readl_relaxed(priv->regs + REG_DCPU_RESET);
341 val |= (1 << DCPU_RESET_SHIFT);
342 writel_relaxed(val, priv->regs + REG_DCPU_RESET);
343
344 mutex_unlock(&priv->lock);
345}
346
/* Release the DCPU from reset: clear mailboxes, enable clock, deassert reset. */
static void __enable_dcpu(struct brcmstb_dpfe_priv *priv)
{
	void __iomem *regs = priv->regs;
	u32 val;

	mutex_lock(&priv->lock);

	/* Clear mailbox registers so no stale command/response is pending. */
	writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
	writel_relaxed(0, regs + REG_TO_HOST_MBOX);

	/* Disable DCPU clock gating. */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_CLK_DISABLE_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	/* Take the DCPU out of reset. */
	val = readl_relaxed(regs + REG_DCPU_RESET);
	val &= ~(1 << DCPU_RESET_SHIFT);
	writel_relaxed(val, regs + REG_DCPU_RESET);

	mutex_unlock(&priv->lock);
}
370
371static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
372{
373 unsigned int sum = 0;
374 unsigned int i;
375
376
377 for (i = 0; i < max; i++)
378 sum += msg[i];
379
380 return sum;
381}
382
/*
 * Decode a response word from the DCPU into a pointer to the memory that
 * holds the message payload. On a malformed response, returns NULL and,
 * if 'buf'/'size' are provided, writes an error string for sysfs output.
 */
static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
 char *buf, ssize_t *size)
{
	unsigned int msg_type;
	unsigned int offset;
	void __iomem *ptr = NULL;

	/* API v3+ returns its data in the response message itself. */
	if (unlikely(priv->dpfe_api->version >= 3))
		return NULL;

	msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
	offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;

	/*
	 * msg_type == 1: offset is relative to the message RAM
	 * msg_type == 0: offset is relative to the data RAM
	 * anything else: fatal communication error
	 */
	switch (msg_type) {
	case 1:
		ptr = priv->regs + DCPU_MSG_RAM_START + offset;
		break;
	case 0:
		ptr = priv->dmem + offset;
		break;
	default:
		dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n",
			response);
		if (buf && size)
			*size = sprintf(buf,
				"FATAL: communication error with DCPU\n");
	}

	return ptr;
}
420
/* Tell the DCPU we are done with the current command/response exchange. */
static void __finalize_command(struct brcmstb_dpfe_priv *priv)
{
	unsigned int release_mbox;

	/*
	 * Which mailbox register must be cleared to release the exchange
	 * depends on the API version.
	 */
	release_mbox = (priv->dpfe_api->version < 2)
		? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
	writel_relaxed(0, priv->regs + release_mbox);
}
433
434static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
435 u32 result[])
436{
437 const u32 *msg = priv->dpfe_api->command[cmd];
438 void __iomem *regs = priv->regs;
439 unsigned int i, chksum, chksum_idx;
440 int ret = 0;
441 u32 resp;
442
443 if (cmd >= DPFE_CMD_MAX)
444 return -1;
445
446 mutex_lock(&priv->lock);
447
448
449 for (i = 0; i < DELAY_LOOP_MAX; i++) {
450 resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
451 if (resp == 0)
452 break;
453 msleep(1);
454 }
455 if (resp != 0) {
456 mutex_unlock(&priv->lock);
457 return -ffs(DCPU_RET_ERR_TIMEDOUT);
458 }
459
460
461 chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
462 chksum = get_msg_chksum(msg, chksum_idx);
463
464
465 for (i = 0; i < MSG_FIELD_MAX; i++) {
466 if (i == chksum_idx)
467 writel_relaxed(chksum, regs + DCPU_MSG_RAM(i));
468 else
469 writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
470 }
471
472
473 writel_relaxed(1, regs + REG_TO_DCPU_MBOX);
474
475
476 for (i = 0; i < DELAY_LOOP_MAX; i++) {
477
478 resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
479 if (resp > 0)
480 break;
481 msleep(1);
482 }
483
484 if (i == DELAY_LOOP_MAX) {
485 resp = (DCPU_RET_ERR_TIMEDOUT & ~DCPU_RET_ERROR_BIT);
486 ret = -ffs(resp);
487 } else {
488
489 for (i = 0; i < MSG_FIELD_MAX; i++)
490 result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
491 chksum_idx = result[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
492 }
493
494
495 __finalize_command(priv);
496
497 mutex_unlock(&priv->lock);
498
499 if (ret)
500 return ret;
501
502
503 chksum = get_msg_chksum(result, chksum_idx);
504 if (chksum != result[chksum_idx])
505 resp = DCPU_RET_ERR_CHKSUM;
506
507 if (resp != DCPU_RET_SUCCESS) {
508 resp &= ~DCPU_RET_ERROR_BIT;
509 ret = -ffs(resp);
510 }
511
512 return ret;
513}
514
515
516static int __verify_firmware(struct init_data *init,
517 const struct firmware *fw)
518{
519 const struct dpfe_firmware_header *header = (void *)fw->data;
520 unsigned int dmem_size, imem_size, total_size;
521 bool is_big_endian = false;
522 const u32 *chksum_ptr;
523
524 if (header->magic == DPFE_BE_MAGIC)
525 is_big_endian = true;
526 else if (header->magic != DPFE_LE_MAGIC)
527 return ERR_INVALID_MAGIC;
528
529 if (is_big_endian) {
530 dmem_size = be32_to_cpu(header->dmem_size);
531 imem_size = be32_to_cpu(header->imem_size);
532 } else {
533 dmem_size = le32_to_cpu(header->dmem_size);
534 imem_size = le32_to_cpu(header->imem_size);
535 }
536
537
538 if ((dmem_size % sizeof(u32)) != 0 || (imem_size % sizeof(u32)) != 0)
539 return ERR_INVALID_SIZE;
540
541
542
543
544
545 total_size = dmem_size + imem_size + sizeof(*header) +
546 sizeof(*chksum_ptr);
547 if (total_size != fw->size)
548 return ERR_INVALID_SIZE;
549
550
551 chksum_ptr = (void *)fw->data + sizeof(*header) + dmem_size + imem_size;
552
553 init->is_big_endian = is_big_endian;
554 init->dmem_len = dmem_size;
555 init->imem_len = imem_size;
556 init->chksum = (is_big_endian)
557 ? be32_to_cpu(*chksum_ptr) : le32_to_cpu(*chksum_ptr);
558
559 return 0;
560}
561
562
/*
 * Verify the firmware that was written to the DCPU's memory by summing the
 * header fields and the DMEM/IMEM contents (read back via MMIO) and
 * comparing against the image's checksum. Returns 0 on match, -1 otherwise.
 */
static int __verify_fw_checksum(struct init_data *init,
 struct brcmstb_dpfe_priv *priv,
 const struct dpfe_firmware_header *header,
 u32 checksum)
{
	u32 magic, sequence, version, sum;
	u32 __iomem *dmem = priv->dmem;
	u32 __iomem *imem = priv->imem;
	unsigned int i;

	/* Header fields are stored in the image's own byte order. */
	if (init->is_big_endian) {
		magic = be32_to_cpu(header->magic);
		sequence = be32_to_cpu(header->sequence);
		version = be32_to_cpu(header->version);
	} else {
		magic = le32_to_cpu(header->magic);
		sequence = le32_to_cpu(header->sequence);
		version = le32_to_cpu(header->version);
	}

	sum = magic + sequence + version + init->dmem_len + init->imem_len;

	/* Add every word of both memory sections, as seen by the DCPU. */
	for (i = 0; i < init->dmem_len / sizeof(u32); i++)
		sum += readl_relaxed(dmem + i);

	for (i = 0; i < init->imem_len / sizeof(u32); i++)
		sum += readl_relaxed(imem + i);

	return (sum == checksum) ? 0 : -1;
}
593
594static int __write_firmware(u32 __iomem *mem, const u32 *fw,
595 unsigned int size, bool is_big_endian)
596{
597 unsigned int i;
598
599
600 size /= sizeof(u32);
601
602
603 for (i = 0; i < size; i++)
604 writel_relaxed(0, mem + i);
605
606
607 if (is_big_endian) {
608 for (i = 0; i < size; i++)
609 writel_relaxed(be32_to_cpu(fw[i]), mem + i);
610 } else {
611 for (i = 0; i < size; i++)
612 writel_relaxed(le32_to_cpu(fw[i]), mem + i);
613 }
614
615 return 0;
616}
617
/*
 * Download and start the DPFE firmware, unless the DCPU is already up and
 * responding to commands. Returns 0 on success or a negative error code
 * (-EPROBE_DEFER if the firmware file isn't available yet).
 */
static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
{
	const struct dpfe_firmware_header *header;
	unsigned int dmem_size, imem_size;
	struct device *dev = priv->dev;
	bool is_big_endian = false;
	const struct firmware *fw;
	const u32 *dmem, *imem;
	struct init_data init;
	const void *fw_blob;
	int ret;

	/*
	 * Skip the download if the DCPU is already running and answers a
	 * GET_INFO command.
	 */
	if (is_dcpu_enabled(priv)) {
		u32 response[MSG_FIELD_MAX];

		ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
		if (!ret)
			return 0;
	}

	/*
	 * A NULL firmware name means there is nothing we can download
	 * ourselves; the DCPU was expected to be running already
	 * (presumably set up by earlier boot stages -- TODO confirm).
	 */
	if (!priv->dpfe_api->fw_name)
		return -ENODEV;

	ret = firmware_request_nowarn(&fw, priv->dpfe_api->fw_name, dev);
	/*
	 * Defer probing if the firmware file couldn't be found; the root
	 * file system may not be available yet.
	 */
	if (ret)
		return (ret == -ENOENT) ? -EPROBE_DEFER : ret;

	ret = __verify_firmware(&init, fw);
	if (ret) {
		ret = -EFAULT;
		goto release_fw;
	}

	/* Hold the DCPU in reset while we replace its memories. */
	__disable_dcpu(priv);

	is_big_endian = init.is_big_endian;
	dmem_size = init.dmem_len;
	imem_size = init.imem_len;

	/* The image starts with a header... */
	header = (struct dpfe_firmware_header *)fw->data;
	/* ...followed by the actual firmware data. */
	fw_blob = fw->data + sizeof(*header);
	/* IMEM comes first... */
	imem = fw_blob;
	/* ...and DMEM follows the IMEM section. */
	dmem = fw_blob + imem_size;

	ret = __write_firmware(priv->dmem, dmem, dmem_size, is_big_endian);
	if (ret)
		goto release_fw;
	ret = __write_firmware(priv->imem, imem, imem_size, is_big_endian);
	if (ret)
		goto release_fw;

	/* Verify what actually landed in the DCPU's memory. */
	ret = __verify_fw_checksum(&init, priv, header, init.chksum);
	if (ret)
		goto release_fw;

	__enable_dcpu(priv);

release_fw:
	release_firmware(fw);
	return ret;
}
696
697static ssize_t generic_show(unsigned int command, u32 response[],
698 struct brcmstb_dpfe_priv *priv, char *buf)
699{
700 int ret;
701
702 if (!priv)
703 return sprintf(buf, "ERROR: driver private data not set\n");
704
705 ret = __send_command(priv, command, response);
706 if (ret < 0)
707 return sprintf(buf, "ERROR: %s\n", get_error_text(-ret));
708
709 return 0;
710}
711
712static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
713 char *buf)
714{
715 u32 response[MSG_FIELD_MAX];
716 struct brcmstb_dpfe_priv *priv;
717 unsigned int info;
718 ssize_t ret;
719
720 priv = dev_get_drvdata(dev);
721 ret = generic_show(DPFE_CMD_GET_INFO, response, priv, buf);
722 if (ret)
723 return ret;
724
725 info = response[MSG_ARG0];
726
727 return sprintf(buf, "%u.%u.%u.%u\n",
728 (info >> 24) & 0xff,
729 (info >> 16) & 0xff,
730 (info >> 8) & 0xff,
731 info & 0xff);
732}
733
/* sysfs 'dpfe_refresh' (API v2): print interval, MR4 fields and error. */
static ssize_t show_refresh(struct device *dev,
 struct device_attribute *devattr, char *buf)
{
	u32 response[MSG_FIELD_MAX];
	void __iomem *info;
	struct brcmstb_dpfe_priv *priv;
	u8 refresh, sr_abort, ppre, thermal_offs, tuf;
	u32 mr4;
	ssize_t ret;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
	if (ret)
		return ret;

	/* The response points at the data; on failure 'ret' holds the
	 * length of the error string get_msg_ptr() wrote into 'buf'. */
	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	/* The MR4 value lives in byte 3 of the info word. */
	mr4 = (readl_relaxed(info + DRAM_INFO_MR4) >> DRAM_INFO_MR4_SHIFT) &
	      DRAM_INFO_MR4_MASK;

	/* Break MR4 into its individual fields. */
	refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
	sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
	ppre = (mr4 >> DRAM_MR4_PPRE) & DRAM_MR4_PPRE_MASK;
	thermal_offs = (mr4 >> DRAM_MR4_TH_OFFS) & DRAM_MR4_TH_OFFS_MASK;
	tuf = (mr4 >> DRAM_MR4_TUF) & DRAM_MR4_TUF_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x %#x %#x\n",
		       readl_relaxed(info + DRAM_INFO_INTERVAL),
		       refresh, sr_abort, ppre, thermal_offs, tuf,
		       readl_relaxed(info + DRAM_INFO_ERROR));
}
767
/*
 * sysfs 'dpfe_refresh' store handler (API v2): write a new value into the
 * DRAM_INFO_INTERVAL field of the refresh info structure.
 */
static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
 const char *buf, size_t count)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	void __iomem *info;
	unsigned long val;
	int ret;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;

	priv = dev_get_drvdata(dev);
	/* Fetch the location of the refresh info first. */
	ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
	if (ret)
		return ret;

	info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL);
	if (!info)
		return -EIO;

	writel_relaxed(val, info + DRAM_INFO_INTERVAL);

	return count;
}
793
/* sysfs 'dpfe_vendor' (API v2): print MR5-MR8 and the error word. */
static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
 char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	void __iomem *info;
	ssize_t ret;
	u32 mr5, mr6, mr7, mr8, err;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_VENDOR, response, priv, buf);
	if (ret)
		return ret;

	/* The response points at the data; on failure 'ret' holds the
	 * length of the error string get_msg_ptr() wrote into 'buf'. */
	info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
	if (!info)
		return ret;

	/* Each MR value lives in byte 3 of its respective word. */
	mr5 = (readl_relaxed(info + DRAM_VENDOR_MR5) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr6 = (readl_relaxed(info + DRAM_VENDOR_MR6) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr7 = (readl_relaxed(info + DRAM_VENDOR_MR7) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	mr8 = (readl_relaxed(info + DRAM_VENDOR_MR8) >> DRAM_VENDOR_SHIFT) &
		DRAM_VENDOR_MASK;
	err = readl_relaxed(info + DRAM_VENDOR_ERROR) & DRAM_VENDOR_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x\n", mr5, mr6, mr7, mr8, err);
}
824
/*
 * sysfs 'dpfe_dram' (API v3): print MR4-MR8 and the error word. Unlike
 * API v2, the values are returned directly in the response message.
 */
static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
 char *buf)
{
	u32 response[MSG_FIELD_MAX];
	struct brcmstb_dpfe_priv *priv;
	ssize_t ret;
	u32 mr4, mr5, mr6, mr7, mr8, err;

	priv = dev_get_drvdata(dev);
	ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
	if (ret)
		return ret;

	/* Values are packed one per argument word, starting at MSG_ARG0. */
	mr4 = response[MSG_ARG0 + 0] & DRAM_INFO_MR4_MASK;
	mr5 = response[MSG_ARG0 + 1] & DRAM_DDR_INFO_MASK;
	mr6 = response[MSG_ARG0 + 2] & DRAM_DDR_INFO_MASK;
	mr7 = response[MSG_ARG0 + 3] & DRAM_DDR_INFO_MASK;
	mr8 = response[MSG_ARG0 + 4] & DRAM_DDR_INFO_MASK;
	err = response[MSG_ARG0 + 5] & DRAM_DDR_INFO_MASK;

	return sprintf(buf, "%#x %#x %#x %#x %#x %#x\n", mr4, mr5, mr6, mr7,
		       mr8, err);
}
848
/* Resume hook: re-run the firmware download (a no-op if the DCPU is up). */
static int brcmstb_dpfe_resume(struct platform_device *pdev)
{
	return brcmstb_dpfe_download_firmware(platform_get_drvdata(pdev));
}
855
856static int brcmstb_dpfe_probe(struct platform_device *pdev)
857{
858 struct device *dev = &pdev->dev;
859 struct brcmstb_dpfe_priv *priv;
860 struct resource *res;
861 int ret;
862
863 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
864 if (!priv)
865 return -ENOMEM;
866
867 priv->dev = dev;
868
869 mutex_init(&priv->lock);
870 platform_set_drvdata(pdev, priv);
871
872 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu");
873 priv->regs = devm_ioremap_resource(dev, res);
874 if (IS_ERR(priv->regs)) {
875 dev_err(dev, "couldn't map DCPU registers\n");
876 return -ENODEV;
877 }
878
879 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem");
880 priv->dmem = devm_ioremap_resource(dev, res);
881 if (IS_ERR(priv->dmem)) {
882 dev_err(dev, "Couldn't map DCPU data memory\n");
883 return -ENOENT;
884 }
885
886 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem");
887 priv->imem = devm_ioremap_resource(dev, res);
888 if (IS_ERR(priv->imem)) {
889 dev_err(dev, "Couldn't map DCPU instruction memory\n");
890 return -ENOENT;
891 }
892
893 priv->dpfe_api = of_device_get_match_data(dev);
894 if (unlikely(!priv->dpfe_api)) {
895
896
897
898
899 dev_err(dev, "Couldn't determine API\n");
900 return -ENOENT;
901 }
902
903 ret = brcmstb_dpfe_download_firmware(priv);
904 if (ret)
905 return dev_err_probe(dev, ret, "Couldn't download firmware\n");
906
907 ret = sysfs_create_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
908 if (!ret)
909 dev_info(dev, "registered with API v%d.\n",
910 priv->dpfe_api->version);
911
912 return ret;
913}
914
915static int brcmstb_dpfe_remove(struct platform_device *pdev)
916{
917 struct brcmstb_dpfe_priv *priv = dev_get_drvdata(&pdev->dev);
918
919 sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
920
921 return 0;
922}
923
static const struct of_device_id brcmstb_dpfe_of_match[] = {
	/* Use the legacy v2 command set for this select group of chips. */
	{ .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_old_v2 },
	{ .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_new_v2 },
	/* The generic compatible maps to API v3. */
	{ .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
	{}
};
MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);
935
/* Platform driver glue; resume re-downloads the firmware if needed. */
static struct platform_driver brcmstb_dpfe_driver = {
	.driver = {
		.name = DRVNAME,
		.of_match_table = brcmstb_dpfe_of_match,
	},
	.probe = brcmstb_dpfe_probe,
	.remove = brcmstb_dpfe_remove,
	.resume = brcmstb_dpfe_resume,
};

module_platform_driver(brcmstb_dpfe_driver);

MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
MODULE_DESCRIPTION("BRCMSTB DDR PHY Front End Driver");
MODULE_LICENSE("GPL");
951