// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#include "rtsx.h"

/***********************************************************************
 * Scatter-gather transfer buffer access routines
 ***********************************************************************/

/*
 * Copy up to buflen bytes to/from the srb's transfer buffer.  For
 * scatter-gather transfers (scsi_sg_count(srb) > 0) the data is copied
 * entry by entry; otherwise scsi_sglist(srb) points at the flat buffer
 * itself.  *index and *offset are updated so that the next call picks
 * up where this one left off.
 */
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
				       unsigned int buflen,
				       struct scsi_cmnd *srb,
				       unsigned int *index,
				       unsigned int *offset,
				       enum xfer_buf_dir dir)
{
	unsigned int cnt;

	/* If not using scatter-gather, just transfer the data directly. */
	if (scsi_sg_count(srb) == 0) {
		unsigned char *sgbuffer;

		if (*offset >= scsi_bufflen(srb))
			return 0;
		cnt = min(buflen, scsi_bufflen(srb) - *offset);

		sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;

		if (dir == TO_XFER_BUF)
			memcpy(sgbuffer, buffer, cnt);
		else
			memcpy(buffer, sgbuffer, cnt);
		*offset += cnt;

	/*
	 * Using scatter-gather.  We have to go through the list one entry
	 * at a time.  Each s-g entry contains some number of pages which
	 * have to be kmap()'ed individually.
	 */
	} else {
		struct scatterlist *sg =
				(struct scatterlist *)scsi_sglist(srb)
				+ *index;

		/*
		 * This loop handles a single s-g list entry, which may
		 * include multiple pages.  Find the initial page structure
		 * and the starting offset within the page, and update
		 * the *index and *offset values for the next loop.
		 */
		cnt = 0;
		while (cnt < buflen && *index < scsi_sg_count(srb)) {
			struct page *page = sg_page(sg) +
					((sg->offset + *offset) >> PAGE_SHIFT);
			unsigned int poff = (sg->offset + *offset) &
					    (PAGE_SIZE - 1);
			unsigned int sglen = sg->length - *offset;

			if (sglen > buflen - cnt) {
				/* Transfer ends within this s-g entry */
				sglen = buflen - cnt;
				*offset += sglen;
			} else {
				/* Transfer continues to the next s-g entry */
				*offset = 0;
				++*index;
				++sg;
			}

			while (sglen > 0) {
				unsigned int plen = min(sglen, (unsigned int)
						PAGE_SIZE - poff);
				unsigned char *ptr = kmap(page);

				if (dir == TO_XFER_BUF)
					memcpy(ptr + poff, buffer + cnt, plen);
				else
					memcpy(buffer + cnt, ptr + poff, plen);
				kunmap(page);

				/* Start at the beginning of the next page */
				poff = 0;
				++page;
				cnt += plen;
				sglen -= plen;
			}
		}
	}

	/* Return the amount actually transferred */
	return cnt;
}

/*
 * Store the contents of buffer into srb's transfer buffer and set the
 * SCSI residue; rtsx_stor_get_xfer_buf() below does the reverse.
 */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  TO_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

void rtsx_stor_get_xfer_buf(unsigned char *buffer,
			    unsigned int buflen, struct scsi_cmnd *srb)
{
	unsigned int index = 0, offset = 0;

	rtsx_stor_access_xfer_buf(buffer, buflen, srb, &index, &offset,
				  FROM_XFER_BUF);
	if (buflen < scsi_bufflen(srb))
		scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}

/***********************************************************************
 * Transport routines
 ***********************************************************************/

/*
 * Invoke the transport and basic error-handling/recovery methods.
 *
 * This is used to send the message to the device and receive the response.
 */
void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
	int result;

	result = rtsx_scsi_handler(srb, chip);

	/*
	 * If the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing.
	 */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
		dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto handle_errors;
	}

	/* If there is a transport error, reset and don't auto-sense. */
	if (result == TRANSPORT_ERROR) {
		dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto handle_errors;
	}

	srb->result = SAM_STAT_GOOD;

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == TRANSPORT_FAILED) {
		/* Set the result so the higher layers expect this data. */
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer,
		       (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
		       sizeof(struct sense_data_t));
	}

	return;

handle_errors:
	return;
}

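/*
 * Queue one register command in the host command buffer.  Each entry is a
 * single little-endian 32-bit word that packs the command type (2 bits),
 * register address (14 bits), mask and data bytes for the card reader's
 * command engine.
 */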
void rtsx_add_cmd(struct rtsx_chip *chip,
		  u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	__le32 *cb = (__le32 *)(chip->host_cmds_ptr);
	u32 val = 0;

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irq(&chip->rtsx->reg_lock);
	if (chip->ci < (HOST_CMDS_BUF_LEN / 4))
		cb[(chip->ci)++] = cpu_to_le32(val);

	spin_unlock_irq(&chip->rtsx->reg_lock);
}

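/*
 * Kick off execution of the queued command buffer without waiting for the
 * completion interrupt.  Bit 31 of HCBCTLR appears to act as the "start"
 * trigger, and the low 24 bits carry the buffer length in bytes.
 */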
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
	u32 val = BIT(31);

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware handles this part of the host command buffer */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);
}

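/*
 * Execute the queued command buffer and sleep until the command engine
 * signals completion or the timeout (in milliseconds) expires.
 * Returns 0 on success, -ETIMEDOUT or -EIO on failure.
 */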
int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u32 val = BIT(31);
	long timeleft;
	int err = 0;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;
	rtsx->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx->trans_state = STATE_TRANS_CMD;

	rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);

	val |= (u32)(chip->ci * 4) & 0x00FFFFFF;
	/* Hardware handles this part of the host command buffer */
	val |= 0x40000000;
	rtsx_writel(chip, RTSX_HCBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for the TRANS_OK or TRANS_FAIL interrupt */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		rtsx_trace(chip);
		goto finish_send_cmd;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

finish_send_cmd:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

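/*
 * Append one mapped region to the host scatter-gather descriptor table.
 * Each 64-bit entry packs the DMA address into the upper 32 bits, the
 * length into bits 12-31 and the option flags into the low bits.
 * Regions larger than 0x80000 bytes are split across several entries,
 * and RTSX_SG_END is kept only on the final one.
 */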
static inline void rtsx_add_sg_tbl(
	struct rtsx_chip *chip, u32 addr, u32 len, u8 option)
{
	__le64 *sgb = (__le64 *)(chip->host_sg_tbl_ptr);
	u64 val = 0;
	u32 temp_len = 0;
	u8 temp_opt = 0;

	do {
		if (len > 0x80000) {
			temp_len = 0x80000;
			temp_opt = option & (~RTSX_SG_END);
		} else {
			temp_len = len;
			temp_opt = option;
		}
		val = ((u64)addr << 32) | ((u64)temp_len << 12) | temp_opt;

		if (chip->sgi < (HOST_SG_TBL_BUF_LEN / 8))
			sgb[(chip->sgi)++] = cpu_to_le64(val);

		len -= temp_len;
		addr += temp_len;
	} while (len);
}

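/*
 * Transfer "size" bytes of a scatter-gather list via ADMA, starting at
 * entry *index and byte *offset within that entry.  Both cursors are
 * updated so that the caller can resume the transfer with a later call.
 * Returns 0 on success and a negative errno on failure.
 */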
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
					     struct scatterlist *sg, int num_sg,
					     unsigned int *index,
					     unsigned int *offset, int size,
					     enum dma_data_direction dma_dir,
					     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int sg_cnt, i, resid;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;
	u32 val = TRIG_DMA;

	if (!sg || (num_sg <= 0) || !offset || !index)
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	resid = size;
	sg_ptr = sg;
	chip->sgi = 0;
	/*
	 * Usually the next entry will be sg_ptr + 1, but if this element
	 * is part of a chained scatterlist, it could jump to the start
	 * of a new scatterlist array.  So use sg_next() to move to the
	 * proper entry.
	 */
	for (i = 0; i < *index; i++)
		sg_ptr = sg_next(sg_ptr);
	for (i = *index; i < sg_cnt; i++) {
		dma_addr_t addr;
		unsigned int len;
		u8 option;

		addr = sg_dma_address(sg_ptr);
		len = sg_dma_len(sg_ptr);

		dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
			(unsigned int)addr, len);
		dev_dbg(rtsx_dev(chip), "*index = %d, *offset = %d\n",
			*index, *offset);

		addr += *offset;

		if ((len - *offset) > resid) {
			/* Transfer ends within this s-g entry */
			*offset += resid;
			len = resid;
			resid = 0;
		} else {
			/* Entry fully consumed: advance to the next one */
			resid -= (len - *offset);
			len -= *offset;
			*offset = 0;
			*index = *index + 1;
		}
		if ((i == (sg_cnt - 1)) || !resid)
			option = RTSX_SG_VALID | RTSX_SG_END |
				 RTSX_SG_TRANS_DATA;
		else
			option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

		rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

		if (!resid)
			break;

		sg_ptr = sg_next(sg_ptr);
	}

	dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

	val |= (u32)(dir & 0x01) << 29;
	val |= ADMA_MODE;

	spin_lock_irq(&rtsx->reg_lock);

	init_completion(&trans_done);

	rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL) {
		err = -EIO;
		spin_unlock_irq(&rtsx->reg_lock);
		goto out;
	}
	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait again if the completion interrupt has not arrived yet */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

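/*
 * Transfer a whole scatter-gather list via ADMA.  Because the host
 * descriptor table holds at most HOST_SG_TBL_BUF_LEN / 8 entries, longer
 * lists are sent in successive rounds, each followed by its own
 * completion wait.  Returns 0 on success and a negative errno on failure.
 */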
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
				     struct scatterlist *sg, int num_sg,
				     enum dma_data_direction dma_dir,
				     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	u8 dir;
	int buf_cnt, i;
	int err = 0;
	long timeleft;
	struct scatterlist *sg_ptr;

	if (!sg || (num_sg <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	rtsx->trans_state = STATE_TRANS_SG;
	rtsx->trans_result = TRANS_NOT_READY;

	spin_unlock_irq(&rtsx->reg_lock);

	buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	sg_ptr = sg;

	/* Send the list in rounds of at most HOST_SG_TBL_BUF_LEN / 8 entries */
	for (i = 0; i <= buf_cnt / (HOST_SG_TBL_BUF_LEN / 8); i++) {
		u32 val = TRIG_DMA;
		int sg_cnt, j;

		if (i == buf_cnt / (HOST_SG_TBL_BUF_LEN / 8))
			sg_cnt = buf_cnt % (HOST_SG_TBL_BUF_LEN / 8);
		else
			sg_cnt = HOST_SG_TBL_BUF_LEN / 8;

		chip->sgi = 0;
		for (j = 0; j < sg_cnt; j++) {
			dma_addr_t addr = sg_dma_address(sg_ptr);
			unsigned int len = sg_dma_len(sg_ptr);
			u8 option;

			dev_dbg(rtsx_dev(chip), "DMA addr: 0x%x, Len: 0x%x\n",
				(unsigned int)addr, len);

			if (j == (sg_cnt - 1))
				option = RTSX_SG_VALID | RTSX_SG_END |
					 RTSX_SG_TRANS_DATA;
			else
				option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

			rtsx_add_sg_tbl(chip, (u32)addr, (u32)len, option);

			sg_ptr = sg_next(sg_ptr);
		}

		dev_dbg(rtsx_dev(chip), "SG table count = %d\n", chip->sgi);

		val |= (u32)(dir & 0x01) << 29;
		val |= ADMA_MODE;

		spin_lock_irq(&rtsx->reg_lock);

		init_completion(&trans_done);

		rtsx_writel(chip, RTSX_HDBAR, chip->host_sg_tbl_addr);
		rtsx_writel(chip, RTSX_HDBCTLR, val);

		spin_unlock_irq(&rtsx->reg_lock);

		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}

		spin_lock_irq(&rtsx->reg_lock);
		if (rtsx->trans_result == TRANS_RESULT_FAIL) {
			err = -EIO;
			spin_unlock_irq(&rtsx->reg_lock);
			goto out;
		}
		spin_unlock_irq(&rtsx->reg_lock);

		/*
		 * sg_ptr was already advanced to the next unprocessed
		 * entry by the sg_next() calls in the loop above.
		 */
	}

	/* Wait again if the completion interrupt has not arrived yet */
	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_NOT_READY) {
		init_completion(&trans_done);
		spin_unlock_irq(&rtsx->reg_lock);
		timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
		if (timeleft <= 0) {
			dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
				__func__, __LINE__);
			dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
				chip->int_reg);
			err = -ETIMEDOUT;
			goto out;
		}
	} else {
		spin_unlock_irq(&rtsx->reg_lock);
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

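/*
 * Transfer a single contiguous buffer via DMA.  The buffer is mapped with
 * dma_map_single(), the transfer is started by programming HDBAR/HDBCTLR,
 * and the function sleeps until the completion interrupt or the timeout.
 */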
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
			     size_t len, enum dma_data_direction dma_dir,
			     int timeout)
{
	struct rtsx_dev *rtsx = chip->rtsx;
	struct completion trans_done;
	dma_addr_t addr;
	u8 dir;
	int err = 0;
	u32 val = BIT(31);
	long timeleft;

	if (!buf || (len <= 0))
		return -EIO;

	if (dma_dir == DMA_TO_DEVICE)
		dir = HOST_TO_DEVICE;
	else if (dma_dir == DMA_FROM_DEVICE)
		dir = DEVICE_TO_HOST;
	else
		return -ENXIO;

	addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
	if (dma_mapping_error(&rtsx->pci->dev, addr))
		return -ENOMEM;

	if (card == SD_CARD)
		rtsx->check_card_cd = SD_EXIST;
	else if (card == MS_CARD)
		rtsx->check_card_cd = MS_EXIST;
	else if (card == XD_CARD)
		rtsx->check_card_cd = XD_EXIST;
	else
		rtsx->check_card_cd = 0;

	val |= (u32)(dir & 0x01) << 29;
	val |= (u32)(len & 0x00FFFFFF);

	spin_lock_irq(&rtsx->reg_lock);

	/* set up data structures for the wakeup system */
	rtsx->done = &trans_done;

	init_completion(&trans_done);

	rtsx->trans_state = STATE_TRANS_BUF;
	rtsx->trans_result = TRANS_NOT_READY;

	rtsx_writel(chip, RTSX_HDBAR, addr);
	rtsx_writel(chip, RTSX_HDBCTLR, val);

	spin_unlock_irq(&rtsx->reg_lock);

	/* Wait for the TRANS_OK or TRANS_FAIL interrupt */
	timeleft = wait_for_completion_interruptible_timeout(
		&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		dev_dbg(rtsx_dev(chip), "Timeout (%s %d)\n",
			__func__, __LINE__);
		dev_dbg(rtsx_dev(chip), "chip->int_reg = 0x%x\n",
			chip->int_reg);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irq(&rtsx->reg_lock);
	if (rtsx->trans_result == TRANS_RESULT_FAIL)
		err = -EIO;
	else if (rtsx->trans_result == TRANS_RESULT_OK)
		err = 0;

	spin_unlock_irq(&rtsx->reg_lock);

out:
	rtsx->done = NULL;
	rtsx->trans_state = STATE_TRANS_NONE;
	dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);

	if (err < 0)
		rtsx_stop_cmd(chip, card);

	return err;
}

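/*
 * Public entry point for partial transfers.  When use_sg is non-zero it
 * carries the number of scatter-gather entries in buf; otherwise buf is a
 * flat buffer of len bytes.  On error, a detected delink condition
 * triggers a reinitialization of all card slots.
 */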
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
			       void *buf, size_t len, int use_sg,
			       unsigned int *index, unsigned int *offset,
			       enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		struct scatterlist *sg = buf;

		err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
							index, offset, (int)len,
							dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card,
					buf, len, dma_dir, timeout);
	}
	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}

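/*
 * Public entry point for complete transfers; the scatter-gather variant
 * of the helper is chosen when use_sg is non-zero.  The same delink
 * recovery as in rtsx_transfer_data_partial() applies on failure.
 */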
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
		       int use_sg, enum dma_data_direction dma_dir, int timeout)
{
	int err = 0;

	dev_dbg(rtsx_dev(chip), "use_sg = %d\n", use_sg);

	/* don't transfer data during abort processing */
	if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
		return -EIO;

	if (use_sg) {
		err = rtsx_transfer_sglist_adma(chip, card, buf,
						use_sg, dma_dir, timeout);
	} else {
		err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
	}

	if (err < 0) {
		if (RTSX_TST_DELINK(chip)) {
			RTSX_CLR_DELINK(chip);
			chip->need_reinit = SD_CARD | MS_CARD | XD_CARD;
			rtsx_reinit_cards(chip, 1);
		}
	}

	return err;
}