#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

#define CREG_TIMEOUT_MSEC 10000

typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
                            struct creg_cmd *cmd,
                            int st);

struct creg_cmd {
        struct list_head list;
        creg_cmd_cb cb;
        void *cb_private;
        unsigned int op;
        unsigned int addr;
        int cnt8;
        void *buf;
        unsigned int stream;
        unsigned int status;
};

static struct kmem_cache *creg_cmd_pool;


#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianness! Aborting...
#endif

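/*
 * Copy a command's data buffer to or from the CREG_DATA register window.
 * Byte-stream transfers are performed in big-endian word order, so
 * little-endian hosts swap each word on the way through. Both helpers
 * bail out with -EIO if the card has entered the EEH error state.
 */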
static int copy_to_creg_data(struct rsxx_cardinfo *card,
                             int cnt8,
                             void *buf,
                             unsigned int stream)
{
        int i = 0;
        u32 *data = buf;

        if (unlikely(card->eeh_state))
                return -EIO;

        for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
                /*
                 * The hardware expects byte-stream data in big-endian
                 * order, so little-endian hosts must byte swap.
                 */
                if (LITTLE_ENDIAN && stream)
                        iowrite32be(data[i], card->regmap + CREG_DATA(i));
                else
                        iowrite32(data[i], card->regmap + CREG_DATA(i));
        }

        return 0;
}


static int copy_from_creg_data(struct rsxx_cardinfo *card,
                               int cnt8,
                               void *buf,
                               unsigned int stream)
{
        int i = 0;
        u32 *data = buf;

        if (unlikely(card->eeh_state))
                return -EIO;

        for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
                /*
                 * Byte-stream data arrives in big-endian order, so
                 * little-endian hosts byte swap each word.
                 */
                if (LITTLE_ENDIAN && stream)
                        data[i] = ioread32be(card->regmap + CREG_DATA(i));
                else
                        data[i] = ioread32(card->regmap + CREG_DATA(i));
        }

        return 0;
}

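/*
 * Write the active command's address, count and, for writes, its data
 * buffer into the hardware command registers, then start the command by
 * writing the opcode to CREG_CMD. Nothing is written while the card is
 * in the EEH error state.
 */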
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
        int st;

        if (unlikely(card->eeh_state))
                return;

        iowrite32(cmd->addr, card->regmap + CREG_ADD);
        iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

        if (cmd->op == CREG_OP_WRITE) {
                if (cmd->buf) {
                        st = copy_to_creg_data(card, cmd->cnt8,
                                               cmd->buf, cmd->stream);
                        if (st)
                                return;
                }
        }

        if (unlikely(card->eeh_state))
                return;

        /* Writing the opcode to CREG_CMD starts the command. */
        iowrite32(cmd->op, card->regmap + CREG_CMD);
}

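/*
 * Pull the next command off the queue and issue it if the interface is
 * idle. Callers must hold creg_ctrl.lock.
 */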
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
        if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
                return;

        card->creg_ctrl.active = 1;
        card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
                                                      struct creg_cmd, list);
        list_del(&card->creg_ctrl.active_cmd->list);
        card->creg_ctrl.q_depth--;

        /*
         * The timer has to be set before the command is pushed. Otherwise
         * a stale timer could expire after the new command was pushed but
         * before it was issued to hardware.
         */
        mod_timer(&card->creg_ctrl.cmd_timer,
                  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

        creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}

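/*
 * Allocate and queue a creg command, then kick the queue so it is issued
 * as soon as the interface goes idle. The callback, if supplied, is
 * invoked when the command completes, times out, or is cancelled.
 */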
static int creg_queue_cmd(struct rsxx_cardinfo *card,
                          unsigned int op,
                          unsigned int addr,
                          unsigned int cnt8,
                          void *buf,
                          int stream,
                          creg_cmd_cb callback,
                          void *cb_private)
{
        struct creg_cmd *cmd;

        /* Don't queue anything up if we're halted. */
        if (unlikely(card->halt))
                return -EINVAL;

        if (card->creg_ctrl.reset)
                return -EAGAIN;

        if (cnt8 > MAX_CREG_DATA8)
                return -EINVAL;

        cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        INIT_LIST_HEAD(&cmd->list);

        cmd->op = op;
        cmd->addr = addr;
        cmd->cnt8 = cnt8;
        cmd->buf = buf;
        cmd->stream = stream;
        cmd->cb = callback;
        cmd->cb_private = cb_private;
        cmd->status = 0;

        spin_lock_bh(&card->creg_ctrl.lock);
        list_add_tail(&cmd->list, &card->creg_ctrl.queue);
        card->creg_ctrl.q_depth++;
        creg_kick_queue(card);
        spin_unlock_bh(&card->creg_ctrl.lock);

        return 0;
}

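/*
 * Timer callback: the active command did not complete within
 * CREG_TIMEOUT_MSEC. Detach it, report -ETIMEDOUT to its callback, and
 * restart the queue.
 */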
static void creg_cmd_timed_out(struct timer_list *t)
{
        struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
        struct creg_cmd *cmd;

        spin_lock(&card->creg_ctrl.lock);
        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;
        spin_unlock(&card->creg_ctrl.lock);

        if (cmd == NULL) {
                card->creg_ctrl.creg_stats.creg_timeout++;
                dev_warn(CARD_TO_DEV(card),
                         "No active command associated with timeout!\n");
                return;
        }

        if (cmd->cb)
                cmd->cb(card, cmd, -ETIMEDOUT);

        kmem_cache_free(creg_cmd_pool, cmd);

        spin_lock(&card->creg_ctrl.lock);
        card->creg_ctrl.active = 0;
        creg_kick_queue(card);
        spin_unlock(&card->creg_ctrl.lock);
}

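/*
 * Work item run after the hardware signals that the active command has
 * finished: cancel the timeout timer, read back the status (and, for
 * reads, the data), invoke the callback, and start the next queued
 * command.
 */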
static void creg_cmd_done(struct work_struct *work)
{
        struct rsxx_cardinfo *card;
        struct creg_cmd *cmd;
        int st = 0;

        card = container_of(work, struct rsxx_cardinfo,
                            creg_ctrl.done_work);

        /*
         * del_timer_sync() returns 0 when the timer already expired,
         * i.e. the timeout handler has already claimed this command.
         */
        if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
                card->creg_ctrl.creg_stats.failed_cancel_timer++;

        spin_lock_bh(&card->creg_ctrl.lock);
        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;
        spin_unlock_bh(&card->creg_ctrl.lock);

        if (cmd == NULL) {
                dev_err(CARD_TO_DEV(card),
                        "Spurious creg interrupt!\n");
                return;
        }

        card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
        cmd->status = card->creg_ctrl.creg_stats.stat;
        if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
                dev_err(CARD_TO_DEV(card),
                        "Invalid status on creg command\n");
                /*
                 * The status register looks like garbage; don't trust
                 * anything else from the hardware, just fail the command.
                 */
                st = -EIO;
                goto creg_done;
        } else if (cmd->status & CREG_STAT_ERROR) {
                st = -EIO;
        }

        if (cmd->op == CREG_OP_READ) {
                unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

                /* Paranoid sanity checks */
                if (!cmd->buf) {
                        dev_err(CARD_TO_DEV(card),
                                "Buffer not given for read.\n");
                        st = -EIO;
                        goto creg_done;
                }
                if (cnt8 != cmd->cnt8) {
                        dev_err(CARD_TO_DEV(card),
                                "count mismatch\n");
                        st = -EIO;
                        goto creg_done;
                }

                st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
        }

creg_done:
        if (cmd->cb)
                cmd->cb(card, cmd, st);

        kmem_cache_free(creg_cmd_pool, cmd);

        spin_lock_bh(&card->creg_ctrl.lock);
        card->creg_ctrl.active = 0;
        creg_kick_queue(card);
        spin_unlock_bh(&card->creg_ctrl.lock);
}

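/*
 * Recover the creg interface after a stuck command: cancel every queued
 * and active command with -ECANCELED, with creg interrupts masked while
 * the cleanup runs. The trylock on reset_lock collapses concurrent reset
 * requests into one.
 */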
static void creg_reset(struct rsxx_cardinfo *card)
{
        struct creg_cmd *cmd = NULL;
        struct creg_cmd *tmp;
        unsigned long flags;

        /* If a reset is already in progress, there is nothing more to do. */
        if (!mutex_trylock(&card->creg_ctrl.reset_lock))
                return;

        card->creg_ctrl.reset = 1;
        spin_lock_irqsave(&card->irq_lock, flags);
        rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
        spin_unlock_irqrestore(&card->irq_lock, flags);

        dev_warn(CARD_TO_DEV(card),
                 "Resetting creg interface for recovery\n");

        /* Cancel outstanding commands */
        spin_lock_bh(&card->creg_ctrl.lock);
        list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
                list_del(&cmd->list);
                card->creg_ctrl.q_depth--;
                if (cmd->cb)
                        cmd->cb(card, cmd, -ECANCELED);
                kmem_cache_free(creg_cmd_pool, cmd);
        }

        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;
        if (cmd) {
                if (timer_pending(&card->creg_ctrl.cmd_timer))
                        del_timer_sync(&card->creg_ctrl.cmd_timer);

                if (cmd->cb)
                        cmd->cb(card, cmd, -ECANCELED);
                kmem_cache_free(creg_cmd_pool, cmd);

                card->creg_ctrl.active = 0;
        }
        spin_unlock_bh(&card->creg_ctrl.lock);

        card->creg_ctrl.reset = 0;
        spin_lock_irqsave(&card->irq_lock, flags);
        rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
        spin_unlock_irqrestore(&card->irq_lock, flags);

        mutex_unlock(&card->creg_ctrl.reset_lock);
}

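/*
 * Synchronous wrappers: creg_cmd_done_cb() records the result in a
 * creg_completion and wakes the waiter, which lets __issue_creg_rw()
 * queue a command and sleep until it finishes.
 */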
struct creg_completion {
        struct completion *cmd_done;
        int st;
        u32 creg_status;
};

static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
                             struct creg_cmd *cmd,
                             int st)
{
        struct creg_completion *cmd_completion;

        cmd_completion = cmd->cb_private;
        BUG_ON(!cmd_completion);

        cmd_completion->st = st;
        cmd_completion->creg_status = cmd->status;
        complete(cmd_completion->cmd_done);
}

static int __issue_creg_rw(struct rsxx_cardinfo *card,
                           unsigned int op,
                           unsigned int addr,
                           unsigned int cnt8,
                           void *buf,
                           int stream,
                           unsigned int *hw_stat)
{
        DECLARE_COMPLETION_ONSTACK(cmd_done);
        struct creg_completion completion;
        unsigned long timeout;
        int st;

        completion.cmd_done = &cmd_done;
        completion.st = 0;
        completion.creg_status = 0;

        st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
                            &completion);
        if (st)
                return st;

        /*
         * Scale the wait by the queue depth so every request ahead of this
         * one gets its full timeout, plus 20 extra seconds of margin for
         * unresponsive hardware.
         */
        timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
                                   card->creg_ctrl.q_depth + 20000);

        /*
         * The creg interface is guaranteed to complete. It has a timeout
         * mechanism that will kick in if the hardware does not respond.
         */
        st = wait_for_completion_timeout(completion.cmd_done, timeout);
        if (st == 0) {
                /*
                 * This is really bad, because the kernel timer did not
                 * expire and notify us of a timeout!
                 */
                dev_crit(CARD_TO_DEV(card),
                         "cregs timer failed\n");
                creg_reset(card);
                return -EIO;
        }

        *hw_stat = completion.creg_status;

        if (completion.st) {
                /*
                 * This dummy read checks for extreme errors such as EEH;
                 * iowrite32 will not detect EEH failures, so recovery has
                 * to be triggered from here if one occurred.
                 */
                ioread32(card->regmap + SCRATCH);

                dev_warn(CARD_TO_DEV(card),
                         "creg command failed(%d x%08x)\n",
                         completion.st, addr);
                return completion.st;
        }

        return 0;
}

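/*
 * Break a transfer into MAX_CREG_DATA8-sized chunks and issue each one
 * synchronously, advancing the buffer and register address as it goes.
 */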
static int issue_creg_rw(struct rsxx_cardinfo *card,
                         u32 addr,
                         unsigned int size8,
                         void *data,
                         int stream,
                         int read)
{
        unsigned int hw_stat;
        unsigned int xfer;
        unsigned int op;
        int st;

        op = read ? CREG_OP_READ : CREG_OP_WRITE;

        do {
                xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);

                st = __issue_creg_rw(card, op, addr, xfer,
                                     data, stream, &hw_stat);
                if (st)
                        return st;

                data = (char *)data + xfer;
                addr += xfer;
                size8 -= xfer;
        } while (size8);

        return 0;
}

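/*
 * Public creg accessors used by the rest of the driver. 'byte_stream'
 * selects the byte-stream data ordering handled in copy_to/from_creg_data;
 * sizes are in bytes.
 */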
int rsxx_creg_write(struct rsxx_cardinfo *card,
                    u32 addr,
                    unsigned int size8,
                    void *data,
                    int byte_stream)
{
        return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

int rsxx_creg_read(struct rsxx_cardinfo *card,
                   u32 addr,
                   unsigned int size8,
                   void *data,
                   int byte_stream)
{
        return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
        return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
                              sizeof(*state), state, 0);
}

int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
        unsigned int size;
        int st;

        st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
                            sizeof(size), &size, 0);
        if (st)
                return st;

        *size8 = (u64)size * RSXX_HW_BLK_SIZE;
        return 0;
}

int rsxx_get_num_targets(struct rsxx_cardinfo *card,
                         unsigned int *n_targets)
{
        return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
                              sizeof(*n_targets), n_targets, 0);
}

int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
                               u32 *capabilities)
{
        return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
                              sizeof(*capabilities), capabilities, 0);
}

int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
        return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
                               sizeof(cmd), &cmd, 0);
}

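/*
 * Hardware log handling: log text read from the card is routed to the
 * kernel log at the severity encoded in the message itself.
 */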
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
        static char level;

        /*
         * New messages start with "<#>", where # is the log level. Text
         * that arrives without a prefix keeps the previously seen level.
         */
        if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
                level = str[1];
                str += 3;
                len -= 3;
        }

        switch (level) {
        case '0':
                dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '1':
                dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '2':
                dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '3':
                dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '4':
                dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '5':
                dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '6':
                dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '7':
                dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        default:
                dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        }
}

/*
 * Copy at most 'count' bytes from src to dest, stopping after a
 * terminating '\0' has been copied. Returns the number of bytes copied,
 * including the '\0' if one was found.
 */
static int substrncpy(char *dest, const char *src, int count)
{
        int max_cnt = count;

        while (count) {
                count--;
                *dest = *src;
                if (*dest == '\0')
                        break;
                src++;
                dest++;
        }
        return max_cnt - count;
}

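/*
 * Completion callback for a hardware log read: accumulate the returned
 * text into card->log.buf and emit a kernel log line whenever a full
 * message (or a full buffer) has been collected. If the card reports
 * more log data pending, another read is queued immediately.
 */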
static void read_hw_log_done(struct rsxx_cardinfo *card,
                             struct creg_cmd *cmd,
                             int st)
{
        char *buf;
        char *log_str;
        int cnt;
        int len;
        int off;

        buf = cmd->buf;
        off = 0;

        /* Failed getting the log message */
        if (st)
                return;

        while (off < cmd->cnt8) {
                log_str = &card->log.buf[card->log.buf_len];
                cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
                len = substrncpy(log_str, &buf[off], cnt);

                off += len;
                card->log.buf_len += len;

                /*
                 * Flush the log if we've hit the end of a message or if
                 * our buffer is full.
                 */
                if ((log_str[len - 1] == '\0') ||
                    (card->log.buf_len == LOG_BUF_SIZE8)) {
                        if (card->log.buf_len != 1) /* Don't log blank lines. */
                                hw_log_msg(card, card->log.buf,
                                           card->log.buf_len);
                        card->log.buf_len = 0;
                }
        }

        if (cmd->status & CREG_STAT_LOG_PENDING)
                rsxx_read_hw_log(card);
}

int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
        int st;

        st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
                            sizeof(card->log.tmp), card->log.tmp,
                            1, read_hw_log_done, NULL);
        if (st)
                dev_err(CARD_TO_DEV(card),
                        "Failed getting log text\n");

        return st;
}

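/*
 * IOCTL register access: copy the request in from user space, run it
 * through the creg interface, and copy the status (and any read data)
 * back out.
 */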
static int issue_reg_cmd(struct rsxx_cardinfo *card,
                         struct rsxx_reg_access *cmd,
                         int read)
{
        unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

        return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
                               cmd->stream, &cmd->stat);
}

int rsxx_reg_access(struct rsxx_cardinfo *card,
                    struct rsxx_reg_access __user *ucmd,
                    int read)
{
        struct rsxx_reg_access cmd;
        int st;

        st = copy_from_user(&cmd, ucmd, sizeof(cmd));
        if (st)
                return -EFAULT;

        if (cmd.cnt > RSXX_MAX_REG_CNT)
                return -EFAULT;

        st = issue_reg_cmd(card, &cmd, read);
        if (st)
                return st;

        st = put_user(cmd.stat, &ucmd->stat);
        if (st)
                return -EFAULT;

        if (read) {
                st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
                if (st)
                        return -EFAULT;
        }

        return 0;
}

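/*
 * EEH support: if the card is frozen mid-command, put the active command
 * back at the head of the queue so it can be reissued later, and let
 * rsxx_kick_creg_queue() restart queue processing.
 */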
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
        struct creg_cmd *cmd = NULL;

        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;

        if (cmd) {
                del_timer_sync(&card->creg_ctrl.cmd_timer);

                spin_lock_bh(&card->creg_ctrl.lock);
                list_add(&cmd->list, &card->creg_ctrl.queue);
                card->creg_ctrl.q_depth++;
                card->creg_ctrl.active = 0;
                spin_unlock_bh(&card->creg_ctrl.lock);
        }
}

void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
        spin_lock_bh(&card->creg_ctrl.lock);
        if (!list_empty(&card->creg_ctrl.queue))
                creg_kick_queue(card);
        spin_unlock_bh(&card->creg_ctrl.lock);
}

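/*
 * Per-card setup and teardown of the creg machinery: the completion
 * workqueue, timeout timer, command queue, and locks.
 */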
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
        card->creg_ctrl.active_cmd = NULL;

        card->creg_ctrl.creg_wq =
                create_singlethread_workqueue(DRIVER_NAME"_creg");
        if (!card->creg_ctrl.creg_wq)
                return -ENOMEM;

        INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
        mutex_init(&card->creg_ctrl.reset_lock);
        INIT_LIST_HEAD(&card->creg_ctrl.queue);
        spin_lock_init(&card->creg_ctrl.lock);
        timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);

        return 0;
}

void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
        struct creg_cmd *cmd;
        struct creg_cmd *tmp;
        int cnt = 0;

        /* Cancel outstanding commands */
        spin_lock_bh(&card->creg_ctrl.lock);
        list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
                list_del(&cmd->list);
                if (cmd->cb)
                        cmd->cb(card, cmd, -ECANCELED);
                kmem_cache_free(creg_cmd_pool, cmd);
                cnt++;
        }

        if (cnt)
                dev_info(CARD_TO_DEV(card),
                         "Canceled %d queued creg commands\n", cnt);

        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;
        if (cmd) {
                if (timer_pending(&card->creg_ctrl.cmd_timer))
                        del_timer_sync(&card->creg_ctrl.cmd_timer);

                if (cmd->cb)
                        cmd->cb(card, cmd, -ECANCELED);
                dev_info(CARD_TO_DEV(card),
                         "Canceled active creg command\n");
                kmem_cache_free(creg_cmd_pool, cmd);
        }
        spin_unlock_bh(&card->creg_ctrl.lock);

        cancel_work_sync(&card->creg_ctrl.done_work);
}

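/*
 * Module-level init/cleanup for the creg command slab cache.
 */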
int rsxx_creg_init(void)
{
        creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
        if (!creg_cmd_pool)
                return -ENOMEM;

        return 0;
}

void rsxx_creg_cleanup(void)
{
        kmem_cache_destroy(creg_cmd_pool);
}