1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/fs.h>
21#include <linux/slab.h>
22
23#include "flash.h"
24#include "ffsdefs.h"
25#include "lld.h"
26#include "lld_nand.h"
27#if CMD_DMA
28#include "lld_cdma.h"
29#endif
30
/* Split a linear flash byte address into a block number, and into the
 * page number within that block (Block is the value BLK_FROM_ADDR gave). */
#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
	DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))

/* The classifiers below read status flags kept in the high bits of a
 * block-table entry; they all expect a "u32 *pbt" (block table) in scope
 * at the point of use.
 * Spare: SPARE_BLOCK flag set and the entry is not marked bad. */
#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
	BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))

/* Data block: none of the BAD_BLOCK flag bits are set. */
#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))

/* Discarded (awaiting garbage collection): DISCARD_BLOCK set, not bad. */
#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
	BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))

/* Bad block: every BAD_BLOCK flag bit is set. */
#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
44
#if DEBUG_BNDRY
/*
 * Report a boundary-check violation: log the offending value, the limit
 * it reached, the source location and an extra info word.  Compiled in
 * only when the DEBUG_BNDRY switch is set; quiet when chnl is in range.
 */
void debug_boundary_lineno_error(int chnl, int limit, int no,
				int lineno, char *filename)
{
	if (chnl < limit)
		return;

	printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
		"at %s:%d. Other info:%d. Aborting...\n",
		chnl, limit, filename, lineno, no);
}

#endif
56
/* --- L1 cache management ------------------------------------------- */
static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
static int FTL_Cache_Read(u64 dwPageAddr);
static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
				u16 cache_blk);
static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
				u8 cache_blk, u16 flag);
static int FTL_Cache_Write(void);
static void FTL_Calculate_LRU(void);
static u32 FTL_Get_Block_Index(u32 wBlockNum);

/* --- Block-table persistence and integrity ------------------------- */
static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
						u8 BT_Tag, u16 *Page);
static int FTL_Read_Block_Table(void);
static int FTL_Write_Block_Table(int wForce);
static int FTL_Write_Block_Table_Data(void);
static int FTL_Check_Block_Table(int wOldTable);
static int FTL_Static_Wear_Leveling(void);
static u32 FTL_Replace_Block_Table(void);
static int FTL_Write_IN_Progress_Block_Table_Page(void);

/* --- Address helpers ----------------------------------------------- */
static u32 FTL_Get_Page_Num(u64 length);
static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);

/* --- Block replacement / wear leveling ----------------------------- */
static u32 FTL_Replace_OneBlock(u32 wBlockNum,
				u32 wReplaceNum);
static u32 FTL_Replace_LWBlock(u32 wBlockNum,
				int *pGarbageCollect);
static u32 FTL_Replace_MWBlock(void);
static int FTL_Replace_Block(u64 blk_addr);
static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
87
struct device_info_tag DeviceInfo;	/* geometry of the attached NAND */
struct flash_cache_tag Cache;		/* L1 page cache */
static struct spectra_l2_cache_info cache_l2;	/* L2 block cache state */

/* Scratch pages used when flushing/assembling L2 cache contents */
static u8 *cache_l2_page_buf;
static u8 *cache_l2_blk_buf;

/* Block table, wear counters and read counters all live in the single
 * allocation behind g_pBlockTable (see allocate_memory()). */
u8 *g_pBlockTable;
u8 *g_pWearCounter;
u16 *g_pReadCounter;
u32 *g_pBTBlocks;			/* flash blocks holding the block table */
static u16 g_wBlockTableOffset;		/* page offset inside the BT block */
static u32 g_wBlockTableIndex;		/* flash block currently holding the BT */
static u8 g_cBlockTableStatus;

/* Pre-allocated scratch buffers, one per routine that needs one, so no
 * allocation happens on the I/O paths. */
static u8 *g_pTempBuf;
static u8 *flag_check_blk_table;
static u8 *tmp_buf_search_bt_in_block;
static u8 *spare_buf_search_bt_in_block;
static u8 *spare_buf_bt_search_bt_in_block;
static u8 *tmp_buf1_read_blk_table;
static u8 *tmp_buf2_read_blk_table;
static u8 *flags_static_wear_leveling;
static u8 *tmp_buf_write_blk_table_data;
static u8 *tmp_buf_read_disturbance;

/* Buffers shared with the low-level driver for main/spare transfers */
u8 *buf_read_page_main_spare;
u8 *buf_write_page_main_spare;
u8 *buf_read_page_spare;
u8 *buf_get_bad_block;

#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
/* Rollback copies of the cache state, used to undo the effects of a
 * failed CDMA command chain. */
struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
struct flash_cache_tag cache_start_copy;
#endif

int g_wNumFreeBlocks;
u8 g_SBDCmdIndex;

static u8 *g_pIPF;			/* "in progress flag" page buffer */
static u8 bt_flag = FIRST_BT_ID;	/* tag of the current block-table copy */
static u8 bt_block_changed;

static u16 cache_block_to_write;
static u8 last_erased = FIRST_BT_ID;

static u8 GC_Called;
static u8 BT_GC_Called;
136
#if CMD_DMA
/* Number of copy-back scratch buffers kept for a CDMA chain. */
#define COPY_BACK_BUF_NUM 10

static u8 ftl_cmd_cnt;	/* FTL commands queued in the current CDMA chain */
u8 *g_pBTDelta;		/* journal of block-table changes for the chain */
u8 *g_pBTDelta_Free;	/* next free record slot inside g_pBTDelta */
u8 *g_pBTStartingCopy;	/* block-table snapshot taken at chain start */
u8 *g_pWearCounterCopy;
u16 *g_pReadCounterCopy;
u8 *g_pBlockTableCopies;	/* pool of per-command block-table copies */
u8 *g_pNextBlockTable;		/* next free slot in g_pBlockTableCopies */
static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
static int cp_back_buf_idx;

static u8 *g_temp_buf;

#pragma pack(push, 1)
#pragma pack(1)
/*
 * One journal record describing a block-table mutation performed while
 * a CDMA chain is pending.  Packed (no padding) so records can be laid
 * end to end inside g_pBTDelta and walked with pointer arithmetic.
 */
struct BTableChangesDelta {
	u8 ftl_cmd_cnt;		/* chain position this delta belongs to */
	u8 ValidFields;		/* bitmask selecting which fields below apply */
	u16 g_wBlockTableOffset;
	u32 g_wBlockTableIndex;
	u32 BT_Index;		/* block-table entry index ... */
	u32 BT_Entry_Value;	/* ... and its new value */
	u32 WC_Index;		/* wear-counter index ... */
	u8 WC_Entry_Value;	/* ... and its new value */
	u32 RC_Index;		/* read-counter index ... */
	u16 RC_Entry_Value;	/* ... and its new value */
};

#pragma pack(pop)

struct BTableChangesDelta *p_BTableChangesDelta;
#endif
172
173
/* Set the bad-block flag bits on a block-table entry (lvalue macro). */
#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
/* Retire a spare block: drop SPARE_BLOCK, raise DISCARD_BLOCK. */
#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)

/* In-RAM sizes of the three per-block tables. */
#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u32))
#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u8))
#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u16))
/* On-flash size of the LBA->PBA table: 3 bytes per entry when large
 * block numbers are supported, otherwise 2 bytes per entry. */
#if SUPPORT_LARGE_BLOCKNUM
#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u8) * 3)
#else
#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
						sizeof(u16))
#endif
/* Wear/read counters are stored on flash exactly as in memory. */
#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
	FTL_Get_WearCounter_Table_Mem_Size_Bytes
#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
	FTL_Get_ReadCounter_Table_Mem_Size_Bytes
194
195static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
196{
197 u32 byte_num;
198
199 if (DeviceInfo.MLCDevice) {
200 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
201 DeviceInfo.wDataBlockNum * sizeof(u8) +
202 DeviceInfo.wDataBlockNum * sizeof(u16);
203 } else {
204 byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
205 DeviceInfo.wDataBlockNum * sizeof(u8);
206 }
207
208 byte_num += 4 * sizeof(u8);
209
210 return byte_num;
211}
212
213static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
214{
215 return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
216}
217
/*
 * Serialize a slice of the in-RAM block table into flashBuf for writing
 * to flash.  sizeTxed is how many bytes of the serialized image were
 * already emitted by earlier calls (resume offset); at most sizeToTx
 * further bytes are produced.  Layout: LBA->PBA entries first, then the
 * wear counters, then (MLC only) the read counters.
 * Returns the number of bytes placed in flashBuf.
 */
static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
			u32 sizeTxed)
{
	u32 wBytesCopied, blk_tbl_size, wBytes;
	u32 *pbt = (u32 *)g_pBlockTable;

	/* LBA->PBA table: each u32 entry is emitted most-significant
	 * byte first, 3 bytes per entry (large-blocknum builds) or 2. */
	blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
	for (wBytes = 0;
	(wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
	wBytes++) {
#if SUPPORT_LARGE_BLOCKNUM
		/* byte 0/1/2 of an entry -> shift 16/8/0 */
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
		>> (((wBytes + sizeTxed) % 3) ?
		((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
#else
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
		>> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
#endif
	}

	/* Rebase the resume offset past the LBA->PBA region, then copy
	 * as much of the wear-counter table as fits. */
	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
	blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
	wBytesCopied = wBytes;
	wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
		(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
	memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);

	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;

	if (DeviceInfo.MLCDevice) {
		/* Read counters: 16-bit values, high byte first. */
		blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
		wBytesCopied += wBytes;
		for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
			((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
			flashBuf[wBytes + wBytesCopied] =
				(g_pReadCounter[(wBytes + sizeTxed) / 2] >>
				(((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
	}

	return wBytesCopied + wBytes;
}
259
260static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
261 u32 sizeToTx, u32 sizeTxed)
262{
263 u32 wBytesCopied, blk_tbl_size, wBytes;
264 u32 *pbt = (u32 *)g_pBlockTable;
265
266 blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
267 for (wBytes = 0; (wBytes < sizeToTx) &&
268 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
269#if SUPPORT_LARGE_BLOCKNUM
270 if (!((wBytes + sizeTxed) % 3))
271 pbt[(wBytes + sizeTxed) / 3] = 0;
272 pbt[(wBytes + sizeTxed) / 3] |=
273 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
274 ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
275#else
276 if (!((wBytes + sizeTxed) % 2))
277 pbt[(wBytes + sizeTxed) / 2] = 0;
278 pbt[(wBytes + sizeTxed) / 2] |=
279 (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
280 0 : 8));
281#endif
282 }
283
284 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
285 blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
286 wBytesCopied = wBytes;
287 wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
288 (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
289 memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
290 sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
291
292 if (DeviceInfo.MLCDevice) {
293 wBytesCopied += wBytes;
294 blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
295 for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
296 ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
297 if (((wBytes + sizeTxed) % 2))
298 g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
299 g_pReadCounter[(wBytes + sizeTxed) / 2] |=
300 (flashBuf[wBytes] <<
301 (((wBytes + sizeTxed) % 2) ? 0 : 8));
302 }
303 }
304
305 return wBytesCopied+wBytes;
306}
307
308static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
309{
310 int i;
311
312 for (i = 0; i < BTSIG_BYTES; i++)
313 buf[BTSIG_OFFSET + i] =
314 ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
315 (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
316
317 return PASS;
318}
319
/*
 * Recover candidate block-table tags from a (possibly bit-damaged)
 * signature area in buf.  Pairs of signature bytes are checked for the
 * expected BTSIG_DELTA spacing; each consistent pair yields a candidate
 * tag, de-duplicated into the static tag[] array.
 * *tagarray is pointed at that static array (NOT reentrant), and the
 * number of distinct candidates found is returned.
 */
static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
{
	static u8 tag[BTSIG_BYTES >> 1];
	int i, j, k, tagi, tagtemp, status;

	*tagarray = (u8 *)tag;
	tagi = 0;

	for (i = 0; i < (BTSIG_BYTES - 1); i++) {
		for (j = i + 1; (j < BTSIG_BYTES) &&
			(tagi < (BTSIG_BYTES >> 1)); j++) {
			/* Bytes i and j agree iff their difference is a
			 * nonzero multiple of BTSIG_DELTA. */
			tagtemp = buf[BTSIG_OFFSET + j] -
				buf[BTSIG_OFFSET + i];
			if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
				/* Undo byte i's i*BTSIG_DELTA offset and
				 * wrap back into the BT_ID range. */
				tagtemp = (buf[BTSIG_OFFSET + i] +
					(1 + LAST_BT_ID - FIRST_BT_ID) -
					(i * BTSIG_DELTA)) %
					(1 + LAST_BT_ID - FIRST_BT_ID);
				status = FAIL;
				for (k = 0; k < tagi; k++) {
					if (tagtemp == tag[k])
						status = PASS;
				}

				if (status == FAIL) {
					tag[tagi++] = tagtemp;
					/* NOTE(review): this advances the
					 * scan past an adjacent matching
					 * pair; the j assignment reads i
					 * AFTER i may have changed, which
					 * looks intentional but fragile —
					 * preserved as-is. */
					i = (j == (i + 1)) ? i + 1 : i;
					j = (j == (i + 1)) ? i + 1 : i;
				}
			}
		}
	}

	return tagi;
}
355
356
/*
 * Post-SPL (boot loader) recovery pass: erase every block the block
 * table marks as spare, so stale data left by an interrupted session is
 * cleared.  Blocks that fail to erase are marked bad in the table.
 * Always returns PASS (erase failures are logged, not propagated).
 */
static int FTL_Execute_SPL_Recovery(void)
{
	u32 j, block, blks;
	u32 *pbt = (u32 *)g_pBlockTable;
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Inclusive range: blks+1 entries are scanned below. */
	blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
	for (j = 0; j <= blks; j++) {
		block = (pbt[j]);
		/* Spare and not bad (same test as IS_SPARE_BLOCK). */
		if (((block & BAD_BLOCK) != BAD_BLOCK) &&
			((block & SPARE_BLOCK) == SPARE_BLOCK)) {
			/* Strip flag bits to get the physical block. */
			ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
			if (FAIL == ret) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, new Bad Block %d "
					"generated!\n",
					__FILE__, __LINE__, __func__,
					(int)(block & ~BAD_BLOCK));
				MARK_BLOCK_AS_BAD(pbt[j]);
			}
		}
	}

	return PASS;
}
386
387
388
389
390
391
392
393
394int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
395{
396 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
397 __FILE__, __LINE__, __func__);
398
399 dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
400 dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
401 dev_data->PageDataSize = DeviceInfo.wPageDataSize;
402 dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
403 dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
404
405 return PASS;
406}
407
408
/*
 * Allocate every buffer the FTL layer needs, sized from DeviceInfo.
 * Returns PASS on success.  On any failure the goto ladder at the
 * bottom unwinds every allocation made so far and -ENOMEM is returned.
 * All allocations use GFP_ATOMIC.  Cache.cache_item_size must already
 * be set by the caller (GLOB_FTL_Init does this).
 */
static int allocate_memory(void)
{
	u32 block_table_size, page_size, block_size, mem_size;
	u32 total_bytes = 0;	/* running total, reported at the end */
	int i;
#if CMD_DMA
	int j;
#endif

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	page_size = DeviceInfo.wPageSize;
	block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	/* Block table = LBA->PBA entries (u32) + wear counters (u8) +
	 * read counters (u16) per data block, rounded up to a whole
	 * data page. */
	block_table_size = DeviceInfo.wDataBlockNum *
		(sizeof(u32) + sizeof(u8) + sizeof(u16));
	block_table_size += (DeviceInfo.wPageDataSize -
		(block_table_size % DeviceInfo.wPageDataSize)) %
		DeviceInfo.wPageDataSize;

	/* One allocation backs the block table, the wear counters and
	 * (MLC only) the read counters; the latter two are carved out
	 * of its tail just below. */
	g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBlockTable)
		goto block_table_fail;
	memset(g_pBlockTable, 0, block_table_size);
	total_bytes += block_table_size;

	g_pWearCounter = (u8 *)(g_pBlockTable +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounter = (u16 *)(g_pBlockTable +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* L1 cache items, each one cache_item_size bytes. */
	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		Cache.array[i].address = NAND_CACHE_INIT_ADDR;
		Cache.array[i].use_cnt = 0;
		Cache.array[i].changed = CLEAR;
		Cache.array[i].buf = kmalloc(Cache.cache_item_size,
			GFP_ATOMIC);
		if (!Cache.array[i].buf)
			goto cache_item_fail;
		memset(Cache.array[i].buf, 0, Cache.cache_item_size);
		total_bytes += Cache.cache_item_size;
	}

	/* "In progress flag" page buffer. */
	g_pIPF = kmalloc(page_size, GFP_ATOMIC);
	if (!g_pIPF)
		goto ipf_fail;
	memset(g_pIPF, 0, page_size);
	total_bytes += page_size;

	/* L2 cache scratch: one page and one whole block. */
	cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
	if (!cache_l2_page_buf)
		goto cache_l2_page_buf_fail;
	memset(cache_l2_page_buf, 0xff, page_size);
	total_bytes += page_size;

	cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!cache_l2_blk_buf)
		goto cache_l2_blk_buf_fail;
	memset(cache_l2_blk_buf, 0xff, block_size);
	total_bytes += block_size;

	/* General-purpose cache-item-sized scratch buffer. */
	g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
	if (!g_pTempBuf)
		goto Temp_buf_fail;
	memset(g_pTempBuf, 0, Cache.cache_item_size);
	total_bytes += Cache.cache_item_size;

	/* One slot per possible block-table copy (FIRST..LAST_BT_ID). */
	mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
	g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTBlocks)
		goto bt_blocks_fail;
	memset(g_pBTBlocks, 0xff, mem_size);
	total_bytes += mem_size;

	/* Per-routine scratch buffers (see the declarations up top). */
	flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
	if (!flag_check_blk_table)
		goto flag_check_blk_table_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf_search_bt_in_block)
		goto tmp_buf_search_bt_in_block_fail;
	memset(tmp_buf_search_bt_in_block, 0xff, page_size);
	total_bytes += page_size;

	/* Spare-area sized buffers. */
	mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
	spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_search_bt_in_block)
		goto spare_buf_search_bt_in_block_fail;
	memset(spare_buf_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_bt_search_bt_in_block)
		goto spare_buf_bt_search_bt_in_block_fail;
	memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf1_read_blk_table)
		goto tmp_buf1_read_blk_table_fail;
	memset(tmp_buf1_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf2_read_blk_table)
		goto tmp_buf2_read_blk_table_fail;
	memset(tmp_buf2_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
					GFP_ATOMIC);
	if (!flags_static_wear_leveling)
		goto flags_static_wear_leveling_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Buffer for the block-table pages beyond the first two, or a
	 * single page when the whole table fits in three pages. */
	if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
		mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
			2 * DeviceInfo.wPageSize;
	else
		mem_size = DeviceInfo.wPageSize;
	tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
	if (!tmp_buf_write_blk_table_data)
		goto tmp_buf_write_blk_table_data_fail;
	memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
	total_bytes += mem_size;

	tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
	if (!tmp_buf_read_disturbance)
		goto tmp_buf_read_disturbance_fail;
	memset(tmp_buf_read_disturbance, 0xff, block_size);
	total_bytes += block_size;

	/* Buffers shared with the low-level driver. */
	buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_read_page_main_spare)
		goto buf_read_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_write_page_main_spare)
		goto buf_write_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_read_page_spare)
		goto buf_read_page_spare_fail;
	memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

	buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_get_bad_block)
		goto buf_get_bad_block_fail;
	memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

#if CMD_DMA
	g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!g_temp_buf)
		goto temp_buf_fail;
	memset(g_temp_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Snapshot of the block table taken at CDMA chain start; the
	 * wear/read counter copies alias its tail like the originals. */
	g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBTStartingCopy)
		goto bt_starting_copy;
	memset(g_pBTStartingCopy, 0, block_table_size);
	total_bytes += block_table_size;

	g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Pool of up to five per-command block-table copies. */
	mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
		5 * DeviceInfo.wDataBlockNum * sizeof(u8);
	if (DeviceInfo.MLCDevice)
		mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
	g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBlockTableCopies)
		goto blk_table_copies_fail;
	memset(g_pBlockTableCopies, 0, mem_size);
	total_bytes += mem_size;
	g_pNextBlockTable = g_pBlockTableCopies;

	/* Journal of block-table changes, one record per descriptor. */
	mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
	g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTDelta)
		goto bt_delta_fail;
	memset(g_pBTDelta, 0, mem_size);
	total_bytes += mem_size;
	g_pBTDelta_Free = g_pBTDelta;

	/* Copy-back scratch blocks for CDMA chains. */
	for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
		cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
		if (!cp_back_buf_copies[j])
			goto cp_back_buf_copies_fail;
		memset(cp_back_buf_copies[j], 0, block_size);
		total_bytes += block_size;
	}
	cp_back_buf_idx = 0;

	/* CDMA descriptor arrays (GFP_KERNEL: still init context here). */
	mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
	info.pcmds = kzalloc(mem_size, GFP_KERNEL);
	if (!info.pcmds)
		goto pending_cmds_buf_fail;
	total_bytes += mem_size;

	mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
	info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.cdma_desc_buf)
		goto cdma_desc_buf_fail;
	total_bytes += mem_size;

	mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
	info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.memcp_desc_buf)
		goto memcp_desc_buf_fail;
	total_bytes += mem_size;
#endif

	nand_dbg_print(NAND_DBG_WARN,
		"Total memory allocated in FTL layer: %d\n", total_bytes);

	return PASS;

	/* Unwind ladder: each label frees everything allocated before
	 * the failure point, in reverse order of allocation. */
#if CMD_DMA
memcp_desc_buf_fail:
	kfree(info.cdma_desc_buf);
cdma_desc_buf_fail:
	kfree(info.pcmds);
pending_cmds_buf_fail:
cp_back_buf_copies_fail:
	/* j indexes the slot that failed (or COPY_BACK_BUF_NUM when all
	 * succeeded); free the ones before it. */
	j--;
	for (; j >= 0; j--)
		kfree(cp_back_buf_copies[j]);
	kfree(g_pBTDelta);
bt_delta_fail:
	kfree(g_pBlockTableCopies);
blk_table_copies_fail:
	kfree(g_pBTStartingCopy);
bt_starting_copy:
	kfree(g_temp_buf);
temp_buf_fail:
	kfree(buf_get_bad_block);
#endif

buf_get_bad_block_fail:
	kfree(buf_read_page_spare);
buf_read_page_spare_fail:
	kfree(buf_write_page_main_spare);
buf_write_page_main_spare_fail:
	kfree(buf_read_page_main_spare);
buf_read_page_main_spare_fail:
	kfree(tmp_buf_read_disturbance);
tmp_buf_read_disturbance_fail:
	kfree(tmp_buf_write_blk_table_data);
tmp_buf_write_blk_table_data_fail:
	kfree(flags_static_wear_leveling);
flags_static_wear_leveling_fail:
	kfree(tmp_buf2_read_blk_table);
tmp_buf2_read_blk_table_fail:
	kfree(tmp_buf1_read_blk_table);
tmp_buf1_read_blk_table_fail:
	kfree(spare_buf_bt_search_bt_in_block);
spare_buf_bt_search_bt_in_block_fail:
	kfree(spare_buf_search_bt_in_block);
spare_buf_search_bt_in_block_fail:
	kfree(tmp_buf_search_bt_in_block);
tmp_buf_search_bt_in_block_fail:
	kfree(flag_check_blk_table);
flag_check_blk_table_fail:
	kfree(g_pBTBlocks);
bt_blocks_fail:
	kfree(g_pTempBuf);
Temp_buf_fail:
	kfree(cache_l2_blk_buf);
cache_l2_blk_buf_fail:
	kfree(cache_l2_page_buf);
cache_l2_page_buf_fail:
	kfree(g_pIPF);
ipf_fail:
cache_item_fail:
	/* i indexes the cache slot that failed (or CACHE_ITEM_NUM). */
	i--;
	for (; i >= 0; i--)
		kfree(Cache.array[i].buf);
	kfree(g_pBlockTable);
block_table_fail:
	printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
		__FILE__, __LINE__);

	return -ENOMEM;
}
731
732
733static int free_memory(void)
734{
735 int i;
736
737#if CMD_DMA
738 kfree(info.memcp_desc_buf);
739 kfree(info.cdma_desc_buf);
740 kfree(info.pcmds);
741 for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
742 kfree(cp_back_buf_copies[i]);
743 kfree(g_pBTDelta);
744 kfree(g_pBlockTableCopies);
745 kfree(g_pBTStartingCopy);
746 kfree(g_temp_buf);
747 kfree(buf_get_bad_block);
748#endif
749 kfree(buf_read_page_spare);
750 kfree(buf_write_page_main_spare);
751 kfree(buf_read_page_main_spare);
752 kfree(tmp_buf_read_disturbance);
753 kfree(tmp_buf_write_blk_table_data);
754 kfree(flags_static_wear_leveling);
755 kfree(tmp_buf2_read_blk_table);
756 kfree(tmp_buf1_read_blk_table);
757 kfree(spare_buf_bt_search_bt_in_block);
758 kfree(spare_buf_search_bt_in_block);
759 kfree(tmp_buf_search_bt_in_block);
760 kfree(flag_check_blk_table);
761 kfree(g_pBTBlocks);
762 kfree(g_pTempBuf);
763 kfree(g_pIPF);
764 for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
765 kfree(Cache.array[i].buf);
766 kfree(g_pBlockTable);
767
768 return 0;
769}
770
771static void dump_cache_l2_table(void)
772{
773 struct list_head *p;
774 struct spectra_l2_cache_list *pnd;
775 int n;
776
777 n = 0;
778 list_for_each(p, &cache_l2.table.list) {
779 pnd = list_entry(p, struct spectra_l2_cache_list, list);
780 nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
781
782
783
784
785
786
787 n++;
788 }
789}
790
791
792
793
794
795
796
797
798
799
/*
 * Initialize the FTL layer: size and allocate all buffers, restore the
 * block table from flash, and reset the L2 cache bookkeeping.
 * Returns 0 on success, FAIL otherwise (note the mixed convention:
 * success is 0 here, not PASS).
 */
int GLOB_FTL_Init(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Must be set before allocate_memory(), which sizes the L1
	 * cache buffers from cache_item_size. */
	Cache.pages_per_item = 1;
	Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;

	if (allocate_memory() != PASS)
		return FAIL;

#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* Seed the rollback copy of the cache and invalidate the
	 * per-descriptor delta list (all bytes 0xff => item == -1). */
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
	memset((void *)&int_cache, -1,
		sizeof(struct flash_cache_delta_list_tag) *
		(MAX_CHANS + MAX_DESCS));
#endif
	ftl_cmd_cnt = 0;
#endif

	if (FTL_Read_Block_Table() != PASS)
		return FAIL;

	/* Mark every L2-cache block slot unused and reset the list. */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	dump_cache_l2_table();

	return 0;
}
839
840
841#if CMD_DMA
842#if 0
/*
 * (Currently compiled out under "#if 0".)
 * Replay the journaled block-table deltas for the command at PendingCMD
 * slot idx into the starting-copy tables, committing the changes made
 * by a command that completed successfully.  p_BTableChangesDelta is a
 * cursor into the journal and is advanced past this command's records.
 */
static void save_blk_table_changes(u16 idx)
{
	u8 ftl_cmd;
	u32 *pbt = (u32 *)g_pBTStartingCopy;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u16 id;
	u8 cache_blks;

	/* Commit this descriptor's cache-state delta, if it has one. */
	id = idx - MAX_CHANS;
	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		cache_start_copy.array[cache_blks].address =
			int_cache[id].cache.address;
		cache_start_copy.array[cache_blks].changed =
			int_cache[id].cache.changed;
	}
#endif

	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;

	/* Apply every journal record tagged for this command.
	 * ValidFields is a bitmask selecting which delta fields apply
	 * (see struct BTableChangesDelta). */
	while (ftl_cmd <= PendingCMD[idx].Tag) {
		if (p_BTableChangesDelta->ValidFields == 0x01) {
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
		} else if (p_BTableChangesDelta->ValidFields == 0x0C) {
			pbt[p_BTableChangesDelta->BT_Index] =
				p_BTableChangesDelta->BT_Entry_Value;
			debug_boundary_error(((
				p_BTableChangesDelta->BT_Index)),
				DeviceInfo.wDataBlockNum, 0);
		} else if (p_BTableChangesDelta->ValidFields == 0x03) {
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
			g_wBlockTableIndex =
				p_BTableChangesDelta->g_wBlockTableIndex;
		} else if (p_BTableChangesDelta->ValidFields == 0x30) {
			g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
				p_BTableChangesDelta->WC_Entry_Value;
		} else if ((DeviceInfo.MLCDevice) &&
			(p_BTableChangesDelta->ValidFields == 0xC0)) {
			g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
				p_BTableChangesDelta->RC_Entry_Value;
			nand_dbg_print(NAND_DBG_DEBUG,
				"In event status setting read counter "
				"GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
				ftl_cmd,
				p_BTableChangesDelta->RC_Entry_Value,
				(unsigned int)p_BTableChangesDelta->RC_Index);
		} else {
			nand_dbg_print(NAND_DBG_DEBUG,
				"This should never occur \n");
		}
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}
}
900
/*
 * (Currently compiled out under "#if 0".)
 * Discard the effects of PendingCMD slot n after an earlier command in
 * the chain has failed: mark any block the command wrote as discarded,
 * skip its journal records, and roll its cache delta back.
 */
static void discard_cmds(u16 n)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long k;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	/* A write that never took effect leaves stale data: mark the
	 * target block discarded so GC reclaims it. */
	if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
		(PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
		for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
			if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
				MARK_BLK_AS_DISCARD(pbt[k]);
		}
	}

	/* Skip (do not apply) this command's journal records. */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[n].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = n - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if (PendingCMD[n].CMD == MEMCOPY_CMD) {
			/* A memcopy INTO a cache buffer left it with
			 * indeterminate contents: invalidate the slot. */
			if ((cache_start_copy.array[cache_blks].buf <=
				PendingCMD[n].DataDestAddr) &&
				((cache_start_copy.array[cache_blks].buf +
				Cache.cache_item_size) >
				PendingCMD[n].DataDestAddr)) {
				cache_start_copy.array[cache_blks].address =
						NAND_CACHE_INIT_ADDR;
				cache_start_copy.array[cache_blks].use_cnt =
								0;
				cache_start_copy.array[cache_blks].changed =
								CLEAR;
			}
		} else {
			/* Otherwise restore the recorded pre-command
			 * cache state for the slot. */
			cache_start_copy.array[cache_blks].address =
				int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed =
				int_cache[id].cache.changed;
		}
	}
#endif
}
952
953static void process_cmd_pass(int *first_failed_cmd, u16 idx)
954{
955 if (0 == *first_failed_cmd)
956 save_blk_table_changes(idx);
957 else
958 discard_cmds(idx);
959}
960
/*
 * (Currently compiled out under "#if 0".)
 * Handle a command that reported CMD_FAIL or CMD_ABORT: record the
 * first failure, skip the command's journal records, restore/invalidate
 * the affected cache slot, and on erase/program failure mark the
 * physical block bad in the starting-copy block table.
 */
static void process_cmd_fail_abort(int *first_failed_cmd,
				u16 idx, int event)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long i;
	int erase_fail, program_fail;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	/* Remember only the FIRST failure in the chain. */
	if (0 == *first_failed_cmd)
		*first_failed_cmd = PendingCMD[idx].SBDCmdIndex;

	nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occured "
		"while executing %u Command %u accesing Block %u\n",
		(unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
		PendingCMD[idx].CMD,
		(unsigned int)PendingCMD[idx].Block);

	/* Skip (do not apply) this command's journal records. */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[idx].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = idx - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
			/* Failed write: restore the address but keep the
			 * slot dirty so the data is written again. */
			cache_start_copy.array[cache_blks].address =
					int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed = SET;
		} else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
			/* Failed read: the slot contents are garbage —
			 * invalidate it. */
			cache_start_copy.array[cache_blks].address =
					NAND_CACHE_INIT_ADDR;
			cache_start_copy.array[cache_blks].use_cnt = 0;
			cache_start_copy.array[cache_blks].changed =
							CLEAR;
		} else if (PendingCMD[idx].CMD == ERASE_CMD) {
			/* nothing to roll back for a failed erase */
		} else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
			/* nothing to roll back for a failed memcopy */
		}
	}
#endif

	erase_fail = (event == EVENT_ERASE_FAILURE) &&
			(PendingCMD[idx].CMD == ERASE_CMD);

	program_fail = (event == EVENT_PROGRAM_FAILURE) &&
			((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
			(PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));

	/* Retire the physical block that failed to erase/program. */
	if (erase_fail || program_fail) {
		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
			if (PendingCMD[idx].Block ==
				(pbt[i] & (~BAD_BLOCK)))
				MARK_BLOCK_AS_BAD(pbt[i]);
		}
	}
}
1026
/*
 * (Currently compiled out under "#if 0".)
 * Dispatch one PendingCMD slot by its completion status: committed on
 * PASS, rolled back on FAIL/ABORT, journal skipped when NOT_DONE.
 */
static void process_cmd(int *first_failed_cmd, u16 idx, int event)
{
	u8 ftl_cmd;
	/* NOTE(review): cmd_match is computed but never read below —
	 * dead store, preserved as-is since this block is disabled. */
	int cmd_match = 0;

	if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
		cmd_match = 1;

	if (PendingCMD[idx].Status == CMD_PASS) {
		process_cmd_pass(first_failed_cmd, idx);
	} else if ((PendingCMD[idx].Status == CMD_FAIL) ||
			(PendingCMD[idx].Status == CMD_ABORT)) {
		process_cmd_fail_abort(first_failed_cmd, idx, event);
	} else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
					PendingCMD[idx].Tag) {
		nand_dbg_print(NAND_DBG_DEBUG,
			" Command no. %hu is not executed\n",
			(unsigned int)PendingCMD[idx].Tag);
		/* Skip the never-executed command's journal records. */
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
		while (ftl_cmd <= PendingCMD[idx].Tag) {
			p_BTableChangesDelta += 1;
			ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
		}
	}
}
1052#endif
1053
/*
 * Temporary stand-in for the real process_cmd() above, which is
 * compiled out under "#if 0".  GLOB_FTL_Event_Status() still calls
 * process_cmd(), so this stub keeps the build linking; it only logs
 * that it should never actually run.
 */
static void process_cmd(int *first_failed_cmd, u16 idx, int event)
{
	printk(KERN_ERR "temporary workaround function. "
		"Should not be called! \n");
}
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
/*
 * Collect the completion status of the last CDMA chain from the LLD.
 * On program/erase failure, every descriptor is replayed through
 * process_cmd() to commit or roll back its journaled block-table and
 * cache changes, and the live tables are restored from the chain-start
 * copies.  In all cases the rollback copies are then re-synchronized
 * with the live tables and the journal is reset for the next chain.
 * *first_failed_cmd receives the SBD index of the first failed command
 * (0 if none).  Returns the (possibly adjusted) LLD event code.
 */
int GLOB_FTL_Event_Status(int *first_failed_cmd)
{
	int event_code = PASS;
	u16 i_P;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	*first_failed_cmd = 0;

	event_code = GLOB_LLD_Event_Status();

	switch (event_code) {
	case EVENT_PASS:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
		break;
	case EVENT_UNCORRECTABLE_DATA_ERROR:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
		break;
	case EVENT_PROGRAM_FAILURE:
	case EVENT_ERASE_FAILURE:
		nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
			"Event code: 0x%x\n", event_code);
		/* Walk the journal from the start and process every
		 * descriptor of the chain. */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta;
		for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
				i_P++)
			process_cmd(first_failed_cmd, i_P, event_code);
		/* Adopt the (now corrected) chain-start copies as the
		 * live tables. */
		memcpy(g_pBlockTable, g_pBTStartingCopy,
			DeviceInfo.wDataBlockNum * sizeof(u32));
		memcpy(g_pWearCounter, g_pWearCounterCopy,
			DeviceInfo.wDataBlockNum * sizeof(u8));
		if (DeviceInfo.MLCDevice)
			memcpy(g_pReadCounter, g_pReadCounterCopy,
				DeviceInfo.wDataBlockNum * sizeof(u16));

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
		/* Same for the cache state; clear the delta list. */
		memcpy((void *)&Cache, (void *)&cache_start_copy,
			sizeof(struct flash_cache_tag));
		memset((void *)&int_cache, -1,
			sizeof(struct flash_cache_delta_list_tag) *
			(MAX_DESCS + MAX_CHANS));
#endif
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Handling unexpected event code - 0x%x\n",
			event_code);
		event_code = ERR;
		break;
	}

	/* Re-seed the rollback copies from the live tables and reset
	 * the journal for the next chain. */
	memcpy(g_pBTStartingCopy, g_pBlockTable,
		DeviceInfo.wDataBlockNum * sizeof(u32));
	memcpy(g_pWearCounterCopy, g_pWearCounter,
		DeviceInfo.wDataBlockNum * sizeof(u8));
	if (DeviceInfo.MLCDevice)
		memcpy(g_pReadCounterCopy, g_pReadCounter,
			DeviceInfo.wDataBlockNum * sizeof(u16));

	g_pBTDelta_Free = g_pBTDelta;
	ftl_cmd_cnt = 0;
	g_pNextBlockTable = g_pBlockTableCopies;
	cp_back_buf_idx = 0;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
	memset((void *)&int_cache, -1,
		sizeof(struct flash_cache_delta_list_tag) *
		(MAX_DESCS + MAX_CHANS));
#endif

	return event_code;
}
1149
1150
1151
1152
1153
1154
1155
1156u16 glob_ftl_execute_cmds(void)
1157{
1158 nand_dbg_print(NAND_DBG_TRACE,
1159 "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
1160 (unsigned int)ftl_cmd_cnt);
1161 g_SBDCmdIndex = 0;
1162 return glob_lld_execute_cmds();
1163}
1164
1165#endif
1166
1167#if !CMD_DMA
1168
1169
1170
1171
1172
1173
1174
1175
1176
/*
 * GLOB_FTL_Read_Immediate - read one page directly from flash,
 * bypassing the FTL cache.
 * @read_data: destination buffer (one page).
 * @addr:      logical address of the page.
 *
 * Only reads from blocks whose table entry is marked SPARE; any
 * other state returns FAIL. On MLC devices the per-block read
 * counter is bumped and read-disturbance handling is triggered at
 * MAX_READ_COUNTER.
 *
 * NOTE(review): pbt[Block] is used here without masking off the
 * status bits (other call sites mask with ~BAD_BLOCK before passing
 * a block number to the LLD or indexing g_pReadCounter) - confirm
 * the flag bits of a SPARE entry are harmless in these uses.
 *
 * Return: PASS/FAIL from the low-level read, or FAIL if the block is
 * not spare.
 */
int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
{
	int wResult = FAIL;
	u32 Block;
	u16 Page;
	u32 phy_blk;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	Block = BLK_FROM_ADDR(addr);
	Page = PAGE_FROM_ADDR(addr, Block);

	if (!IS_SPARE_BLOCK(Block))
		return FAIL;

	phy_blk = pbt[Block];
	wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);

	if (DeviceInfo.MLCDevice) {
		/* Account the read for read-disturbance management */
		g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
			>= MAX_READ_COUNTER)
			FTL_Read_Disturbance(phy_blk);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}

	return wResult;
}
1210#endif
1211
1212#ifdef SUPPORT_BIG_ENDIAN
1213
1214
1215
1216
1217
1218
1219
/*
 * FTL_Invert_Block_Table - byte-swap the in-memory block table and
 * wear counters (compiled only with SUPPORT_BIG_ENDIAN).
 *
 * NOTE(review): g_pWearCounter is copied elsewhere in this file with
 * sizeof(u8) element size, so applying INVERTUINT32/INVERTUINT16 to
 * its entries looks suspicious - confirm the intended element width.
 * NOTE(review): in the !SUPPORT_LARGE_BLOCKNUM branch pbt is still a
 * u32 array yet its entries are swapped with INVERTUINT16 - verify.
 */
static void FTL_Invert_Block_Table(void)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

#ifdef SUPPORT_LARGE_BLOCKNUM
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		pbt[i] = INVERTUINT32(pbt[i]);
		g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
	}
#else
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		pbt[i] = INVERTUINT16(pbt[i]);
		g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
	}
#endif
}
1240#endif
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252int GLOB_FTL_Flash_Init(void)
1253{
1254 int status = FAIL;
1255
1256 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1257 __FILE__, __LINE__, __func__);
1258
1259 g_SBDCmdIndex = 0;
1260
1261 status = GLOB_LLD_Flash_Init();
1262
1263 return status;
1264}
1265
1266
1267
1268
1269
1270
1271int GLOB_FTL_Flash_Release(void)
1272{
1273 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1274 __FILE__, __LINE__, __func__);
1275
1276 return GLOB_LLD_Flash_Release();
1277}
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287void GLOB_FTL_Cache_Release(void)
1288{
1289 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1290 __FILE__, __LINE__, __func__);
1291
1292 free_memory();
1293}
1294
1295
1296
1297
1298
1299
1300
1301static u16 FTL_Cache_If_Hit(u64 page_addr)
1302{
1303 u16 item;
1304 u64 addr;
1305 int i;
1306
1307 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1308 __FILE__, __LINE__, __func__);
1309
1310 item = UNHIT_CACHE_ITEM;
1311 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1312 addr = Cache.array[i].address;
1313 if ((page_addr >= addr) &&
1314 (page_addr < (addr + Cache.cache_item_size))) {
1315 item = i;
1316 break;
1317 }
1318 }
1319
1320 return item;
1321}
1322
1323
1324
1325
1326
1327
1328
1329
1330static void FTL_Calculate_LRU(void)
1331{
1332 u16 i, bCurrentLRU, bTempCount;
1333
1334 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1335 __FILE__, __LINE__, __func__);
1336
1337 bCurrentLRU = 0;
1338 bTempCount = MAX_WORD_VALUE;
1339
1340 for (i = 0; i < CACHE_ITEM_NUM; i++) {
1341 if (Cache.array[i].use_cnt < bTempCount) {
1342 bCurrentLRU = i;
1343 bTempCount = Cache.array[i].use_cnt;
1344 }
1345 }
1346
1347 Cache.LRU = bCurrentLRU;
1348}
1349
1350
1351
1352
1353
1354
1355
/*
 * FTL_Cache_Read_Page - copy one page out of a cache item.
 * @data_buf:   destination buffer (one page).
 * @logic_addr: logical address of the page; must fall inside the
 *              given cache item's address range.
 * @cache_item: index of the cache item holding the page.
 *
 * Bumps the item's use count (saturating at MAX_WORD_VALUE) so the
 * LRU scan keeps frequently-read items resident.
 */
static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
{
	u8 *start_addr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Locate the requested page inside the item's buffer */
	start_addr = Cache.array[cache_item].buf;
	start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
		DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);

#if CMD_DMA
	/* Queue the copy as a CDMA memcopy command */
	GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
		DeviceInfo.wPageDataSize, 0);
	ftl_cmd_cnt++;
#else
	memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
#endif

	if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
		Cache.array[cache_item].use_cnt++;
}
1378
1379
1380
1381
1382
1383
1384
/*
 * FTL_Cache_Read_All - read one cache item's worth of pages from
 * flash into pData.
 * @pData:    destination buffer (Cache.pages_per_item pages).
 * @phy_addr: physical flash address to read from.
 *
 * If the physical block belongs to a logical entry that is currently
 * spare/bad/discarded there is no valid data to read, so the buffer
 * is filled with 0xFF instead. On MLC devices the per-block read
 * counter is incremented and read-disturbance handling is triggered
 * at MAX_READ_COUNTER; with CMD_DMA the counter change is journaled
 * in the delta area for rollback.
 *
 * Return: PASS, or FAIL from the low-level read.
 */
static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
{
	int wResult = PASS;
	u32 Block;
	u32 lba;
	u16 Page;
	u16 PageCount;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 i;

	Block = BLK_FROM_ADDR(phy_addr);
	Page = PAGE_FROM_ADDR(phy_addr, Block);
	PageCount = Cache.pages_per_item;

	nand_dbg_print(NAND_DBG_DEBUG,
		"%s, Line %d, Function: %s, Block: 0x%x\n",
		__FILE__, __LINE__, __func__, Block);

	/* Find the logical entry mapping to this physical block */
	lba = 0xffffffff;
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if ((pbt[i] & (~BAD_BLOCK)) == Block) {
			lba = i;
			if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
				IS_DISCARDED_BLOCK(i)) {
				/* No valid data; hand back erased (0xFF) content */
#if CMD_DMA
				GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
					PageCount * DeviceInfo.wPageDataSize, 0);
				ftl_cmd_cnt++;
#else
				memset(pData, 0xFF,
					PageCount * DeviceInfo.wPageDataSize);
#endif
				return wResult;
			} else {
				continue; /* Matched a data block; keep scanning */
			}
		}
	}

	if (0xffffffff == lba)
		printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");

#if CMD_DMA
	wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
		PageCount, LLD_CMD_FLAG_MODE_CDMA);
	if (DeviceInfo.MLCDevice) {
		/* Read-disturbance accounting, journaled for rollback */
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Read Counter modified in ftl_cmd_cnt %u"
			" Block %u Counter%u\n",
			ftl_cmd_cnt, (unsigned int)Block,
			g_pReadCounter[Block -
			DeviceInfo.wSpectraStartBlock]);

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			Block - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
		p_BTableChangesDelta->ValidFields = 0xC0;

		ftl_cmd_cnt++;

		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	} else {
		ftl_cmd_cnt++;
	}
#else
	wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
	if (wResult == FAIL)
		return wResult;

	if (DeviceInfo.MLCDevice) {
		/* Read-disturbance accounting */
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}
#endif
	return wResult;
}
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
/*
 * FTL_Cache_Write_All - program one cache item's worth of pages.
 * @pData:    source buffer (Cache.pages_per_item pages).
 * @blk_addr: physical flash address to program.
 *
 * Return: PASS, or FAIL if the low-level program operation failed
 * (the caller is expected to handle bad-block replacement).
 */
static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
{
	u16 wResult = PASS;
	u32 Block;
	u16 Page;
	u16 PageCount;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
		"on %d\n", cache_block_to_write,
		(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));

	Block = BLK_FROM_ADDR(blk_addr);
	Page = PAGE_FROM_ADDR(blk_addr, Block);
	PageCount = Cache.pages_per_item;

#if CMD_DMA
	/* Queue the program as a CDMA command */
	if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
		Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated! "
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
	ftl_cmd_cnt++;
#else
	if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
			" Line %d, Function %s, new Bad Block %d generated!"
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
#endif
	return wResult;
}
1534
1535
1536
1537
1538
1539
1540
1541
1542
/*
 * FTL_Copy_Block - copy a block's content, one cache item at a time.
 * @old_blk_addr: physical address of the source block.
 * @blk_addr:     physical address of the destination block.
 *
 * NOTE(review): the read result is compared against ERR while the
 * write result is compared against FAIL, so a FAIL-ing read does not
 * stop the copy - confirm this asymmetry is intentional.
 *
 * Return: PASS, or FAIL on the first detected error.
 */
int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
{
	int i, r1, r2, wResult = PASS;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
		r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
			i * DeviceInfo.wPageDataSize);
		r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
			i * DeviceInfo.wPageDataSize);
		if ((ERR == r1) || (FAIL == r2)) {
			wResult = FAIL;
			break;
		}
	}

	return wResult;
}
1563
1564
1565static u32 find_least_worn_blk_for_l2_cache(void)
1566{
1567 int i;
1568 u32 *pbt = (u32 *)g_pBlockTable;
1569 u8 least_wear_cnt = MAX_BYTE_VALUE;
1570 u32 least_wear_blk_idx = MAX_U32_VALUE;
1571 u32 phy_idx;
1572
1573 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
1574 if (IS_SPARE_BLOCK(i)) {
1575 phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
1576 if (phy_idx > DeviceInfo.wSpectraEndBlock)
1577 printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
1578 "Too big phy block num (%d)\n", phy_idx);
1579 if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
1580 least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
1581 least_wear_blk_idx = i;
1582 }
1583 }
1584 }
1585
1586 nand_dbg_print(NAND_DBG_WARN,
1587 "find_least_worn_blk_for_l2_cache: "
1588 "find block %d with least worn counter (%d)\n",
1589 least_wear_blk_idx, least_wear_cnt);
1590
1591 return least_wear_blk_idx;
1592}
1593
1594
1595
1596
/*
 * get_l2_cache_blks - reserve NAND blocks for the L2 write cache.
 *
 * Picks the BLK_NUM_FOR_L2_CACHE least-worn spare blocks, flags each
 * as DISCARD in the block table (so normal allocation skips them)
 * and records their physical numbers in cache_l2.blk_array.
 *
 * Return: PASS, or FAIL when not enough spare blocks are available.
 */
static int get_l2_cache_blks(void)
{
	int n;
	u32 blk;
	u32 *pbt = (u32 *)g_pBlockTable;

	for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
		blk = find_least_worn_blk_for_l2_cache();
		if (blk >= DeviceInfo.wDataBlockNum) {
			nand_dbg_print(NAND_DBG_WARN,
				"find_least_worn_blk_for_l2_cache: "
				"No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
			return FAIL;
		}

		/* Mark it as discarded so it won't be reallocated */
		pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;

		/* Record the physical block number (status bits stripped) */
		cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
	}

	return PASS;
}
1619
1620static int erase_l2_cache_blocks(void)
1621{
1622 int i, ret = PASS;
1623 u32 pblk, lblk = BAD_BLOCK;
1624 u64 addr;
1625 u32 *pbt = (u32 *)g_pBlockTable;
1626
1627 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
1628 __FILE__, __LINE__, __func__);
1629
1630 for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
1631 pblk = cache_l2.blk_array[i];
1632
1633
1634 if (MAX_U32_VALUE == pblk)
1635 continue;
1636
1637 BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
1638
1639 addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
1640 if (PASS == GLOB_FTL_Block_Erase(addr)) {
1641
1642 lblk = FTL_Get_Block_Index(pblk);
1643 BUG_ON(BAD_BLOCK == lblk);
1644
1645 pbt[lblk] &= (u32)(~DISCARD_BLOCK);
1646 pbt[lblk] |= (u32)(SPARE_BLOCK);
1647 } else {
1648 MARK_BLOCK_AS_BAD(pbt[lblk]);
1649 ret = ERR;
1650 }
1651 }
1652
1653 return ret;
1654}
1655
1656
1657
1658
/*
 * flush_l2_cache - write all pages staged in the L2 cache back to
 * their data blocks.
 *
 * For every logical block with pages in the L2 cache: build a full
 * block image in cache_l2_blk_buf (seeded from the current flash
 * content, or 0xFF if the block holds no valid data), overlay the
 * newer pages held in the L2 cache blocks, map the logical block to
 * a fresh physical block via FTL_Replace_Block() and program the
 * image. A failed program discards the target and retries once on
 * another replacement before giving up and marking the entry bad.
 * Finally the mapping list is freed, the L2 cache blocks are erased
 * and the L2 state is reset.
 *
 * Return: PASS, or the status of the last failing sub-operation.
 */
static int flush_l2_cache(void)
{
	struct list_head *p;
	struct spectra_l2_cache_list *pnd, *tmp_pnd;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 phy_blk, l2_blk;
	u64 addr;
	u16 l2_page;
	int i, ret = PASS;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (list_empty(&cache_l2.table.list)) /* No data to flush */
		return ret;

	/* The block table will change; mark an update in progress */
	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
		g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
		FTL_Write_IN_Progress_Block_Table_Page();
	}

	list_for_each(p, &cache_l2.table.list) {
		pnd = list_entry(p, struct spectra_l2_cache_list, list);
		if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
			IS_BAD_BLOCK(pnd->logical_blk_num) ||
			IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
			/* No valid data on flash; start from erased state */
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
		} else {
			/* Seed the image with the block's current content */
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
				phy_blk, 0, DeviceInfo.wPagesPerBlock);
			if (ret == FAIL) {
				printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
			}
		}

		/* Overlay the newer page copies held in the L2 cache */
		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
			if (pnd->pages_array[i] != MAX_U32_VALUE) {
				l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
				l2_page = pnd->pages_array[i] & 0xffff;
				ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
				if (ret == FAIL) {
					printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
				}
				memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
			}
		}

		/* Map the logical block to a fresh physical block */
		addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
		ret = FTL_Replace_Block(addr);
		if (ret == FAIL) {
			printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
		}

		/* Program the rebuilt block image */
		phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
		if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
			nand_dbg_print(NAND_DBG_WARN,
				"Program NAND block %d fail in %s, Line %d\n",
				phy_blk, __FILE__, __LINE__);
			/*
			 * Likely a bad block: discard it, take yet another
			 * replacement and retry once before declaring the
			 * data lost.
			 */
			MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);

			FTL_Replace_Block(addr);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
				printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
					"Some data will be lost!\n", phy_blk);
				MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
			}
		} else {
			/* Write succeeded; the block now holds live data */
			pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
		}
	}

	/* Free the per-block mapping list */
	list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
		list_del(&pnd->list);
		kfree(pnd);
	}

	/* Erase the stale L2 cache blocks and return them as spares */
	if (erase_l2_cache_blocks() != PASS)
		nand_dbg_print(NAND_DBG_WARN,
			" Erase L2 cache blocks error in %s, Line %d\n",
			__FILE__, __LINE__);

	/* Reset the L2 cache state */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	return ret;
}
1765
1766
1767
1768
1769
1770
1771static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
1772{
1773 u32 logical_blk_num;
1774 u16 logical_page_num;
1775 struct list_head *p;
1776 struct spectra_l2_cache_list *pnd, *pnd_new;
1777 u32 node_size;
1778 int i, found;
1779
1780 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
1781 __FILE__, __LINE__, __func__);
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792 if (list_empty(&cache_l2.table.list)) {
1793 BUG_ON(cache_l2.cur_blk_idx != 0);
1794 BUG_ON(cache_l2.cur_page_num!= 0);
1795 BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
1796 if (FAIL == get_l2_cache_blks()) {
1797 GLOB_FTL_Garbage_Collection();
1798 if (FAIL == get_l2_cache_blks()) {
1799 printk(KERN_ALERT "Fail to get L2 cache blks!\n");
1800 return FAIL;
1801 }
1802 }
1803 }
1804
1805 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1806 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1807 BUG_ON(logical_blk_num == MAX_U32_VALUE);
1808
1809
1810#if CMD_DMA
1811
1812
1813
1814#else
1815 if (FAIL == GLOB_LLD_Write_Page_Main(buf,
1816 cache_l2.blk_array[cache_l2.cur_blk_idx],
1817 cache_l2.cur_page_num, 1)) {
1818 nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
1819 "%s, Line %d, new Bad Block %d generated!\n",
1820 __FILE__, __LINE__,
1821 cache_l2.blk_array[cache_l2.cur_blk_idx]);
1822
1823
1824
1825 return FAIL;
1826 }
1827#endif
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837 found = 0;
1838 list_for_each(p, &cache_l2.table.list) {
1839 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1840 if (pnd->logical_blk_num == logical_blk_num) {
1841 pnd->pages_array[logical_page_num] =
1842 (cache_l2.cur_blk_idx << 16) |
1843 cache_l2.cur_page_num;
1844 found = 1;
1845 break;
1846 }
1847 }
1848 if (!found) {
1849
1850
1851
1852
1853 node_size = sizeof(struct spectra_l2_cache_list) +
1854 sizeof(u32) * DeviceInfo.wPagesPerBlock;
1855 pnd_new = kmalloc(node_size, GFP_ATOMIC);
1856 if (!pnd_new) {
1857 printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
1858 __FILE__, __LINE__);
1859
1860
1861
1862
1863 }
1864 pnd_new->logical_blk_num = logical_blk_num;
1865 for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
1866 pnd_new->pages_array[i] = MAX_U32_VALUE;
1867 pnd_new->pages_array[logical_page_num] =
1868 (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
1869 list_add(&pnd_new->list, &cache_l2.table.list);
1870 }
1871
1872
1873 cache_l2.cur_page_num++;
1874 if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
1875 cache_l2.cur_blk_idx++;
1876 if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
1877
1878 nand_dbg_print(NAND_DBG_WARN,
1879 "L2 Cache is full, will start to flush it\n");
1880 flush_l2_cache();
1881 } else {
1882 cache_l2.cur_page_num = 0;
1883 }
1884 }
1885
1886 return PASS;
1887}
1888
1889
1890
1891
1892
1893
1894static int search_l2_cache(u8 *buf, u64 logical_addr)
1895{
1896 u32 logical_blk_num;
1897 u16 logical_page_num;
1898 struct list_head *p;
1899 struct spectra_l2_cache_list *pnd;
1900 u32 tmp = MAX_U32_VALUE;
1901 u32 phy_blk;
1902 u16 phy_page;
1903 int ret = FAIL;
1904
1905 logical_blk_num = BLK_FROM_ADDR(logical_addr);
1906 logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
1907
1908 list_for_each(p, &cache_l2.table.list) {
1909 pnd = list_entry(p, struct spectra_l2_cache_list, list);
1910 if (pnd->logical_blk_num == logical_blk_num) {
1911 tmp = pnd->pages_array[logical_page_num];
1912 break;
1913 }
1914 }
1915
1916 if (tmp != MAX_U32_VALUE) {
1917 phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
1918 phy_page = tmp & 0xFFFF;
1919#if CMD_DMA
1920
1921#else
1922 ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
1923#endif
1924 }
1925
1926 return ret;
1927}
1928
1929
1930
1931
1932
1933
1934
/*
 * FTL_Cache_Write_Page - copy one page of data into a cache item.
 * @pData:     source buffer (one page).
 * @page_addr: logical address of the page; must lie inside the cache
 *             item's address range.
 * @cache_blk: index of the target cache item.
 * @flag:      passed through to the CDMA memcopy command (CMD_DMA
 *             builds only).
 *
 * Marks the item dirty and bumps its use count (saturating at
 * MAX_WORD_VALUE).
 */
static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
				u8 cache_blk, u16 flag)
{
	u8 *pDest;
	u64 addr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	addr = Cache.array[cache_blk].address;
	pDest = Cache.array[cache_blk].buf;

	/* Byte offset of the page inside the item's buffer */
	pDest += (unsigned long)(page_addr - addr);
	Cache.array[cache_blk].changed = SET;
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* Journal the cache state so a failed chain can be rolled back */
	int_cache[ftl_cmd_cnt].item = cache_blk;
	int_cache[ftl_cmd_cnt].cache.address =
		Cache.array[cache_blk].address;
	int_cache[ftl_cmd_cnt].cache.changed =
		Cache.array[cache_blk].changed;
#endif
	GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
	ftl_cmd_cnt++;
#else
	memcpy(pDest, pData, DeviceInfo.wPageDataSize);
#endif
	if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
		Cache.array[cache_blk].use_cnt++;
}
1965
1966
1967
1968
1969
1970
1971
1972
/*
 * FTL_Cache_Write - write back the least-recently-used cache item.
 *
 * Recomputes the LRU item; a clean item needs no write. A dirty item
 * is pushed into the L2 cache and, unless that returned ERR, marked
 * clean. All other items are then aged by subtracting the LRU item's
 * use count, preserving relative recency.
 *
 * Return: PASS if nothing needed writing, otherwise the result of
 * write_back_to_l2_cache().
 */
static int FTL_Cache_Write(void)
{
	int i, bResult = PASS;
	u16 bNO, least_count = 0xFFFF;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	FTL_Calculate_LRU();

	bNO = Cache.LRU;
	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
		"Least used cache block is %d\n", bNO);

	if (Cache.array[bNO].changed != SET)
		return bResult; /* Clean item - nothing to write */

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
		" Block %d containing logical block %d is dirty\n",
		bNO,
		(u32)(Cache.array[bNO].address >>
		DeviceInfo.nBitsInBlockDataSize));
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* Journal the item going clean for possible rollback */
	int_cache[ftl_cmd_cnt].item = bNO;
	int_cache[ftl_cmd_cnt].cache.address =
		Cache.array[bNO].address;
	int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
#endif
#endif
	bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
		Cache.array[bNO].address);
	if (bResult != ERR)
		Cache.array[bNO].changed = CLEAR;

	/* Age the remaining items by the LRU item's use count */
	least_count = Cache.array[bNO].use_cnt;

	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		if (i == bNO)
			continue;
		if (Cache.array[i].use_cnt > 0)
			Cache.array[i].use_cnt -= least_count;
	}

	return bResult;
}
2019
2020
2021
2022
2023
2024
2025
2026
2027
/*
 * FTL_Cache_Read - load the cache item containing logical_addr into
 * the current LRU slot.
 *
 * The slot's address is aligned down to a cache-item boundary and
 * its use count / dirty flag are reset. The data is taken from the
 * L2 cache when present there, otherwise read from flash.
 *
 * Return: PASS/FAIL from the underlying read.
 */
static int FTL_Cache_Read(u64 logical_addr)
{
	u64 item_addr, phy_addr;
	u16 num;
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	num = Cache.LRU; /* The LRU item will be overwritten */

	/* Align the address down to a cache-item boundary */
	item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
		Cache.cache_item_size;
	Cache.array[num].address = item_addr;
	Cache.array[num].use_cnt = 1;
	Cache.array[num].changed = CLEAR;

#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	/* Journal the new item state for possible rollback */
	int_cache[ftl_cmd_cnt].item = num;
	int_cache[ftl_cmd_cnt].cache.address =
		Cache.array[num].address;
	int_cache[ftl_cmd_cnt].cache.changed =
		Cache.array[num].changed;
#endif
#endif

	/* The newest copy may still live in the L2 cache */
	ret = search_l2_cache(Cache.array[num].buf, logical_addr);
	if (PASS == ret)
		return ret;

	/*
	 * Not in L2 cache; read from flash.
	 * NOTE(review): the magic argument 2 to GLOB_u64_Remainder is
	 * presumably a mode selector (remainder within a block) -
	 * confirm against that helper's contract.
	 */
	phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
		GLOB_u64_Remainder(item_addr, 2);

	return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
}
2069
2070
2071
2072
2073
2074
2075
/*
 * FTL_Check_Block_Table - sanity-check the in-memory block table.
 * @wOldTable: unused in the checks below (kept for the caller's
 *             interface).
 *
 * Every entry (status bits masked off) must address a physical block
 * no greater than wSpectraEndBlock.
 *
 * NOTE(review): the "PASS == pFlag[i]" test can never fire as
 * written - pFlag is memset to FAIL and pFlag[i] is only assigned at
 * the same index i being tested, which is visited exactly once. A
 * duplicate-mapping check would presumably need to index pFlag by
 * blk_idx; confirm the intent before changing it.
 *
 * Return: PASS if all entries look valid, FAIL otherwise.
 */
static int FTL_Check_Block_Table(int wOldTable)
{
	u32 i;
	int wResult = PASS;
	u32 blk_idx;
	u32 *pbt = (u32 *)g_pBlockTable;
	u8 *pFlag = flag_check_blk_table;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (NULL != pFlag) {
		memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
			blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));

			/* Reject entries beyond the managed block range */
			if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
				PASS == pFlag[i]) {
				wResult = FAIL;
				break;
			} else {
				pFlag[i] = PASS;
			}
		}
	}

	return wResult;
}
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
/*
 * FTL_Write_Block_Table - flush the block table to flash if an
 * update is pending.
 * @wForce: PASS forces the write into the last bt_pages of the
 *          current block-table block.
 *
 * Advances g_wBlockTableOffset within the dedicated block-table
 * block; when the block is exhausted, or a write fails, a
 * replacement block is obtained via FTL_Replace_Block_Table(). With
 * CMD_DMA every table-state change is journaled in the delta area.
 *
 * Return: 0 if no update was pending, 1 on success, ERR when no
 * replacement block-table block could be obtained.
 */
static int FTL_Write_Block_Table(int wForce)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	int wSuccess = PASS;
	u32 wTempBlockTableIndex;
	u16 bt_pages, new_bt_offset;
	u8 blockchangeoccured = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Number of flash pages one block-table image occupies */
	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
		return 0; /* No table update pending */

	if (PASS == wForce) {
		/* Force the write to the end of the block */
		g_wBlockTableOffset =
			(u16)(DeviceInfo.wPagesPerBlock - bt_pages);
#if CMD_DMA
		/* Journal the new offset */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->g_wBlockTableOffset =
			g_wBlockTableOffset;
		p_BTableChangesDelta->ValidFields = 0x01;
#endif
	}

	nand_dbg_print(NAND_DBG_DEBUG,
		"Inside FTL_Write_Block_Table: block %d Page:%d\n",
		g_wBlockTableIndex, g_wBlockTableOffset);

	do {
		/* +1 page for the IN_PROGRESS marker page */
		new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
		if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
			(new_bt_offset > DeviceInfo.wPagesPerBlock) ||
			(FAIL == wSuccess)) {
			/* Current BT block exhausted or bad; replace it */
			wTempBlockTableIndex = FTL_Replace_Block_Table();
			if (BAD_BLOCK == wTempBlockTableIndex)
				return ERR;
			if (!blockchangeoccured) {
				bt_block_changed = 1;
				blockchangeoccured = 1;
			}

			g_wBlockTableIndex = wTempBlockTableIndex;
			g_wBlockTableOffset = 0;
			pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
#if CMD_DMA
			/* Journal the new block-table index and offset */
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->g_wBlockTableOffset =
				g_wBlockTableOffset;
			p_BTableChangesDelta->g_wBlockTableIndex =
				g_wBlockTableIndex;
			p_BTableChangesDelta->ValidFields = 0x03;

			/* Journal the changed table entry too */
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free +=
				sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index =
				BLOCK_TABLE_INDEX;
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[BLOCK_TABLE_INDEX];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		}

		wSuccess = FTL_Write_Block_Table_Data();
		if (FAIL == wSuccess)
			MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
	} while (FAIL == wSuccess);

	g_cBlockTableStatus = CURRENT_BLOCK_TABLE;

	return 1;
}
2213
2214static int force_format_nand(void)
2215{
2216 u32 i;
2217
2218
2219 printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
2220 printk(KERN_ALERT "From phyical block %d to %d\n",
2221 DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
2222 for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
2223 if (GLOB_LLD_Erase_Block(i))
2224 printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
2225 }
2226 printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
2227 while(1);
2228
2229 return PASS;
2230}
2231
/*
 * Format the NAND device by force-erasing all Spectra-managed blocks.
 * Note: force_format_nand() never returns (it spins after erasing).
 */
int GLOB_FTL_Flash_Format(void)
{
	return force_format_nand();
}
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
2250 u8 BT_Tag, u16 *Page)
2251{
2252 u16 i, j, k;
2253 u16 Result = PASS;
2254 u16 Last_IPF = 0;
2255 u8 BT_Found = 0;
2256 u8 *tagarray;
2257 u8 *tempbuf = tmp_buf_search_bt_in_block;
2258 u8 *pSpareBuf = spare_buf_search_bt_in_block;
2259 u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
2260 u8 bt_flag_last_page = 0xFF;
2261 u8 search_in_previous_pages = 0;
2262 u16 bt_pages;
2263
2264 nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
2265 __FILE__, __LINE__, __func__);
2266
2267 nand_dbg_print(NAND_DBG_DEBUG,
2268 "Searching block table in %u block\n",
2269 (unsigned int)BT_Block);
2270
2271 bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
2272
2273 for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
2274 i += (bt_pages + 1)) {
2275 nand_dbg_print(NAND_DBG_DEBUG,
2276 "Searching last IPF: %d\n", i);
2277 Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
2278 BT_Block, i, 1);
2279
2280 if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
2281 if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
2282 continue;
2283 } else {
2284 search_in_previous_pages = 1;
2285 Last_IPF = i;
2286 }
2287 }
2288
2289 if (!search_in_previous_pages) {
2290 if (i != bt_pages) {
2291 i -= (bt_pages + 1);
2292 Last_IPF = i;
2293 }
2294 }
2295
2296 if (0 == Last_IPF)
2297 break;
2298
2299 if (!search_in_previous_pages) {
2300 i = i + 1;
2301 nand_dbg_print(NAND_DBG_DEBUG,
2302 "Reading the spare area of Block %u Page %u",
2303 (unsigned int)BT_Block, i);
2304 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
2305 BT_Block, i, 1);
2306 nand_dbg_print(NAND_DBG_DEBUG,
2307 "Reading the spare area of Block %u Page %u",
2308 (unsigned int)BT_Block, i + bt_pages - 1);
2309 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2310 BT_Block, i + bt_pages - 1, 1);
2311
2312 k = 0;
2313 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2314 if (j) {
2315 for (; k < j; k++) {
2316 if (tagarray[k] == BT_Tag)
2317 break;
2318 }
2319 }
2320
2321 if (k < j)
2322 bt_flag = tagarray[k];
2323 else
2324 Result = FAIL;
2325
2326 if (Result == PASS) {
2327 k = 0;
2328 j = FTL_Extract_Block_Table_Tag(
2329 pSpareBufBTLastPage, &tagarray);
2330 if (j) {
2331 for (; k < j; k++) {
2332 if (tagarray[k] == BT_Tag)
2333 break;
2334 }
2335 }
2336
2337 if (k < j)
2338 bt_flag_last_page = tagarray[k];
2339 else
2340 Result = FAIL;
2341
2342 if (Result == PASS) {
2343 if (bt_flag == bt_flag_last_page) {
2344 nand_dbg_print(NAND_DBG_DEBUG,
2345 "Block table is found"
2346 " in page after IPF "
2347 "at block %d "
2348 "page %d\n",
2349 (int)BT_Block, i);
2350 BT_Found = 1;
2351 *Page = i;
2352 g_cBlockTableStatus =
2353 CURRENT_BLOCK_TABLE;
2354 break;
2355 } else {
2356 Result = FAIL;
2357 }
2358 }
2359 }
2360 }
2361
2362 if (search_in_previous_pages)
2363 i = i - bt_pages;
2364 else
2365 i = i - (bt_pages + 1);
2366
2367 Result = PASS;
2368
2369 nand_dbg_print(NAND_DBG_DEBUG,
2370 "Reading the spare area of Block %d Page %d",
2371 (int)BT_Block, i);
2372
2373 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2374 nand_dbg_print(NAND_DBG_DEBUG,
2375 "Reading the spare area of Block %u Page %u",
2376 (unsigned int)BT_Block, i + bt_pages - 1);
2377
2378 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2379 BT_Block, i + bt_pages - 1, 1);
2380
2381 k = 0;
2382 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2383 if (j) {
2384 for (; k < j; k++) {
2385 if (tagarray[k] == BT_Tag)
2386 break;
2387 }
2388 }
2389
2390 if (k < j)
2391 bt_flag = tagarray[k];
2392 else
2393 Result = FAIL;
2394
2395 if (Result == PASS) {
2396 k = 0;
2397 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2398 &tagarray);
2399 if (j) {
2400 for (; k < j; k++) {
2401 if (tagarray[k] == BT_Tag)
2402 break;
2403 }
2404 }
2405
2406 if (k < j) {
2407 bt_flag_last_page = tagarray[k];
2408 } else {
2409 Result = FAIL;
2410 break;
2411 }
2412
2413 if (Result == PASS) {
2414 if (bt_flag == bt_flag_last_page) {
2415 nand_dbg_print(NAND_DBG_DEBUG,
2416 "Block table is found "
2417 "in page prior to IPF "
2418 "at block %u page %d\n",
2419 (unsigned int)BT_Block, i);
2420 BT_Found = 1;
2421 *Page = i;
2422 g_cBlockTableStatus =
2423 IN_PROGRESS_BLOCK_TABLE;
2424 break;
2425 } else {
2426 Result = FAIL;
2427 break;
2428 }
2429 }
2430 }
2431 }
2432
2433 if (Result == FAIL) {
2434 if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
2435 BT_Found = 1;
2436 *Page = i - (bt_pages + 1);
2437 }
2438 if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
2439 goto func_return;
2440 }
2441
2442 if (Last_IPF == 0) {
2443 i = 0;
2444 Result = PASS;
2445 nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
2446 "Block %u Page %u", (unsigned int)BT_Block, i);
2447
2448 Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
2449 nand_dbg_print(NAND_DBG_DEBUG,
2450 "Reading the spare area of Block %u Page %u",
2451 (unsigned int)BT_Block, i + bt_pages - 1);
2452 Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
2453 BT_Block, i + bt_pages - 1, 1);
2454
2455 k = 0;
2456 j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
2457 if (j) {
2458 for (; k < j; k++) {
2459 if (tagarray[k] == BT_Tag)
2460 break;
2461 }
2462 }
2463
2464 if (k < j)
2465 bt_flag = tagarray[k];
2466 else
2467 Result = FAIL;
2468
2469 if (Result == PASS) {
2470 k = 0;
2471 j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
2472 &tagarray);
2473 if (j) {
2474 for (; k < j; k++) {
2475 if (tagarray[k] == BT_Tag)
2476 break;
2477 }
2478 }
2479
2480 if (k < j)
2481 bt_flag_last_page = tagarray[k];
2482 else
2483 Result = FAIL;
2484
2485 if (Result == PASS) {
2486 if (bt_flag == bt_flag_last_page) {
2487 nand_dbg_print(NAND_DBG_DEBUG,
2488 "Block table is found "
2489 "in page after IPF at "
2490 "block %u page %u\n",
2491 (unsigned int)BT_Block,
2492 (unsigned int)i);
2493 BT_Found = 1;
2494 *Page = i;
2495 g_cBlockTableStatus =
2496 CURRENT_BLOCK_TABLE;
2497 goto func_return;
2498 } else {
2499 Result = FAIL;
2500 }
2501 }
2502 }
2503
2504 if (Result == FAIL)
2505 goto func_return;
2506 }
2507func_return:
2508 return Result;
2509}
2510
2511u8 *get_blk_table_start_addr(void)
2512{
2513 return g_pBlockTable;
2514}
2515
2516unsigned long get_blk_table_len(void)
2517{
2518 return DeviceInfo.wDataBlockNum * sizeof(u32);
2519}
2520
2521u8 *get_wear_leveling_table_start_addr(void)
2522{
2523 return g_pWearCounter;
2524}
2525
2526unsigned long get_wear_leveling_table_len(void)
2527{
2528 return DeviceInfo.wDataBlockNum * sizeof(u8);
2529}
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
/*
 * Locate and load the block table from flash at startup.
 *
 * Pass 1 scans the Spectra block range for blocks whose page-0 spare
 * area carries a block-table tag and records them in g_pBTBlocks,
 * indexed by tag ID.  Pass 2 walks the recorded copies from newest to
 * oldest (the tag IDs form a circular sequence FIRST_BT_ID..LAST_BT_ID)
 * and loads the first copy that passes FTL_Check_Block_Table().
 * On success it also sets g_wBlockTableOffset/g_wBlockTableIndex for
 * subsequent table writes.
 *
 * Returns PASS when a valid table was loaded (or, with
 * AUTO_FORMAT_FLASH, after a successful format), FAIL otherwise.
 */
static int FTL_Read_Block_Table(void)
{
	u16 i = 0;
	int k, j;
	u8 *tempBuf, *tagarray;
	int wResult = FAIL;
	int status = FAIL;
	u8 block_table_found = 0;
	int search_result;
	u32 Block;
	u16 Page = 0;
	u16 PageCount;
	u16 bt_pages;
	int wBytesCopied = 0, tempvar;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	tempBuf = tmp_buf1_read_blk_table;
	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	/* Pass 1: find every block carrying a block-table tag. */
	for (j = DeviceInfo.wSpectraStartBlock;
		j <= (int)DeviceInfo.wSpectraEndBlock;
		j++) {
		status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
		k = 0;
		i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
		if (i) {
			/* Cross-check the spare tag against byte 3 of
			 * the main area (the tag stored in the table
			 * header). */
			status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
					j, 0, 1);
			for (; k < i; k++) {
				if (tagarray[k] == tempBuf[3])
					break;
			}
		}

		if (k < i)
			k = tagarray[k];
		else
			continue;

		nand_dbg_print(NAND_DBG_DEBUG,
			"Block table is contained in Block %d %d\n",
			(unsigned int)j, (unsigned int)k);

		if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
			g_pBTBlocks[k-FIRST_BT_ID] = j;
			block_table_found = 1;
		} else {
			printk(KERN_ERR "FTL_Read_Block_Table -"
				"This should never happens. "
				"Two block table have same counter %u!\n", k);
		}
	}

	if (block_table_found) {
		/* Determine the newest copy (left in j) and the oldest
		 * (last_erased) from the pattern of valid entries. */
		if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
		g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
			/* Both ends valid: the ID sequence has wrapped. */
			j = LAST_BT_ID;
			while ((j > FIRST_BT_ID) &&
			(g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
				j--;
			if (j == FIRST_BT_ID) {
				j = LAST_BT_ID;
				last_erased = LAST_BT_ID;
			} else {
				last_erased = (u8)j + 1;
				while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
				g_pBTBlocks[j - FIRST_BT_ID]))
					j--;
			}
		} else {
			/* Contiguous run of valid IDs: oldest is the
			 * first valid entry, newest the last. */
			j = FIRST_BT_ID;
			while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
				j++;
			last_erased = (u8)j;
			while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
			g_pBTBlocks[j - FIRST_BT_ID]))
				j++;
			if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
				j--;
		}

		/* Unwrap so the newest-to-oldest loop below can simply
		 * count down. */
		if (last_erased > j)
			j += (1 + LAST_BT_ID - FIRST_BT_ID);

		/* Pass 2: try each copy, newest first, until one
		 * validates. */
		for (; (j >= last_erased) && (FAIL == wResult); j--) {
			i = (j - FIRST_BT_ID) %
				(1 + LAST_BT_ID - FIRST_BT_ID);
			search_result =
			FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
						i + FIRST_BT_ID, &Page);
			if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
				block_table_found = 0;

			while ((search_result == PASS) && (FAIL == wResult)) {
				nand_dbg_print(NAND_DBG_DEBUG,
					"FTL_Read_Block_Table:"
					"Block: %u Page: %u "
					"contains block table\n",
					(unsigned int)g_pBTBlocks[i],
					(unsigned int)Page);

				tempBuf = tmp_buf2_read_blk_table;

				/* Read the table body page by page; only
				 * the first page carries the 4-byte
				 * header that must be skipped. */
				for (k = 0; k < bt_pages; k++) {
					Block = g_pBTBlocks[i];
					PageCount = 1;

					status =
					GLOB_LLD_Read_Page_Main_Polling(
					tempBuf, Block, Page, PageCount);

					tempvar = k ? 0 : 4;

					wBytesCopied +=
					FTL_Copy_Block_Table_From_Flash(
					tempBuf + tempvar,
					DeviceInfo.wPageDataSize - tempvar,
					wBytesCopied);

					Page++;
				}

				wResult = FTL_Check_Block_Table(FAIL);
				if (FAIL == wResult) {
					block_table_found = 0;
					/* Step back to the previous copy
					 * within the same block, if any. */
					if (Page > bt_pages)
						Page -= ((bt_pages<<1) + 1);
					else
						search_result = FAIL;
				}
			}
		}
	}

	if (PASS == wResult) {
		if (!block_table_found)
			FTL_Execute_SPL_Recovery();

		if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
			g_wBlockTableOffset = (u16)Page + 1;
		else
			g_wBlockTableOffset = (u16)Page - bt_pages;

		/* NOTE(review): 'i' is the index of the last copy tried
		 * in the loop above — verify it is always assigned on
		 * every path that reaches here. */
		g_wBlockTableIndex = (u32)g_pBTBlocks[i];

#if CMD_DMA
		/* Refresh the CDMA shadow copy of the table (entries +
		 * wear counters; the extra u16 array on MLC devices is
		 * presumably per-block read counts — confirm). */
		if (DeviceInfo.MLCDevice)
			memcpy(g_pBTStartingCopy, g_pBlockTable,
				DeviceInfo.wDataBlockNum * sizeof(u32)
				+ DeviceInfo.wDataBlockNum * sizeof(u8)
				+ DeviceInfo.wDataBlockNum * sizeof(u16));
		else
			memcpy(g_pBTStartingCopy, g_pBlockTable,
				DeviceInfo.wDataBlockNum * sizeof(u32)
				+ DeviceInfo.wDataBlockNum * sizeof(u8));
#endif
	}

	if (FAIL == wResult)
		printk(KERN_ERR "Yunpeng - "
		"Can not find valid spectra block table!\n");

#if AUTO_FORMAT_FLASH
	if (FAIL == wResult) {
		nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
		wResult = FTL_Format_Flash(0);
	}
#endif

	return wResult;
}
2721
2722
2723
2724
2725
2726
2727
2728static u32 FTL_Get_Page_Num(u64 length)
2729{
2730 return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
2731 (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
2732}
2733
2734
2735
2736
2737
2738
2739
2740
2741static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
2742{
2743 u32 *pbt;
2744 u64 physical_addr;
2745
2746 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2747 __FILE__, __LINE__, __func__);
2748
2749 pbt = (u32 *)g_pBlockTable;
2750 physical_addr = (u64) DeviceInfo.wBlockDataSize *
2751 (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
2752
2753 return physical_addr;
2754}
2755
2756
2757
2758
2759
2760
2761
2762static u32 FTL_Get_Block_Index(u32 wBlockNum)
2763{
2764 u32 *pbt = (u32 *)g_pBlockTable;
2765 u32 i;
2766
2767 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2768 __FILE__, __LINE__, __func__);
2769
2770 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
2771 if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
2772 return i;
2773
2774 return BAD_BLOCK;
2775}
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785int GLOB_FTL_Wear_Leveling(void)
2786{
2787 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2788 __FILE__, __LINE__, __func__);
2789
2790 FTL_Static_Wear_Leveling();
2791 GLOB_FTL_Garbage_Collection();
2792
2793 return PASS;
2794}
2795
/*
 * Scan the block table for the least-worn in-use (data) block and the
 * most-worn free (spare) block, skipping bad blocks and blocks already
 * handled in this wear-leveling pass (chg[i] == PASS).
 *
 * @chg:       per-index "already changed" flags for this pass
 * @least_idx: in/out — block number of the least worn data block
 * @least_cnt: in/out — its wear count (caller seeds with 0xFF)
 * @most_idx:  in/out — block number of the most worn spare block
 * @most_cnt:  in/out — its wear count (caller seeds with 0)
 *
 * NOTE(review): idx is the physical block number (flags stripped),
 * yet the caller also uses *least_idx/*most_idx to index chg[] and
 * the block table — the two index spaces appear to be conflated;
 * verify against move_blks_for_wear_leveling().
 */
static void find_least_most_worn(u8 *chg,
	u32 *least_idx, u8 *least_cnt,
	u32 *most_idx, u8 *most_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 idx;
	u8 cnt;
	int i;

	for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_BAD_BLOCK(i) || PASS == chg[i])
			continue;

		/* Physical block number with status flag bits removed. */
		idx = (u32) ((~BAD_BLOCK) & pbt[i]);
		cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];

		if (IS_SPARE_BLOCK(i)) {
			if (cnt > *most_cnt) {
				*most_cnt = cnt;
				*most_idx = idx;
			}
		}

		if (IS_DATA_BLOCK(i)) {
			if (cnt < *least_cnt) {
				*least_cnt = cnt;
				*least_idx = idx;
			}
		}

		/* NOTE(review): the 'continue' below is the last
		 * statement of the loop body and therefore a no-op;
		 * this block only performs the boundary debug checks. */
		if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
			debug_boundary_error(*most_idx,
				DeviceInfo.wDataBlockNum, 0);
			debug_boundary_error(*least_idx,
				DeviceInfo.wDataBlockNum, 0);
			continue;
		}
	}
}
2835
/*
 * Move the contents of the least-worn data block (*least_idx) into a
 * freshly chosen most-worn spare block, then swap their block-table
 * entries.  With CMD_DMA enabled, every table change is also logged
 * as a BTableChangesDelta record.
 *
 * Returns PASS to continue the wear-leveling loop, FAIL to stop
 * (either on error, on running out of spares, or after
 * WEAR_LEVELING_BLOCK_NUM replacements).  *result is set to FAIL only
 * when the copy ultimately failed.
 */
static int move_blks_for_wear_leveling(u8 *chg,
	u32 *least_idx, u32 *rep_blk_num, int *result)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 rep_blk;
	int j, ret_cp_blk, ret_erase;
	int ret = PASS;

	/* Mark the source block as handled for this pass. */
	chg[*least_idx] = PASS;
	debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);

	rep_blk = FTL_Replace_MWBlock();
	if (rep_blk != BAD_BLOCK) {
		nand_dbg_print(NAND_DBG_DEBUG,
			"More than two spare blocks exist so do it\n");
		nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
			rep_blk);

		chg[rep_blk] = PASS;

		/* Flag the table as in flux before touching entries. */
		if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}

		/* Copy data into the replacement; on failure erase it
		 * and retry, marking it bad if even the erase fails. */
		for (j = 0; j < RETRY_TIMES; j++) {
			ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
				DeviceInfo.wBlockDataSize,
				(u64)rep_blk * DeviceInfo.wBlockDataSize);
			if (FAIL == ret_cp_blk) {
				ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
					* DeviceInfo.wBlockDataSize);
				if (FAIL == ret_erase)
					MARK_BLOCK_AS_BAD(pbt[rep_blk]);
			} else {
				nand_dbg_print(NAND_DBG_DEBUG,
					"FTL_Copy_Block == OK\n");
				break;
			}
		}

		if (j < RETRY_TIMES) {
			/* Copy succeeded: swap the two table entries.
			 * The old physical block becomes DISCARD (to be
			 * reclaimed by GC), the new one loses SPARE. */
			u32 tmp;
			u32 old_idx = FTL_Get_Block_Index(*least_idx);
			u32 rep_idx = FTL_Get_Block_Index(rep_blk);
			tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
			pbt[old_idx] = (u32)((~SPARE_BLOCK) &
				pbt[rep_idx]);
			pbt[rep_idx] = tmp;
#if CMD_DMA
			/* Log both entry changes for CDMA replay. */
			p_BTableChangesDelta = (struct BTableChangesDelta *)
				g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = old_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;

			p_BTableChangesDelta = (struct BTableChangesDelta *)
				g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = rep_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		} else {
			/* All retries failed: retire the replacement. */
			pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
#if CMD_DMA
			p_BTableChangesDelta = (struct BTableChangesDelta *)
				g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index =
				FTL_Get_Block_Index(rep_blk);
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[FTL_Get_Block_Index(rep_blk)];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
			*result = FAIL;
			ret = FAIL;
		}

		/* Cap the number of replacements per leveling pass. */
		if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
			ret = FAIL;
	} else {
		printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
		ret = FAIL;
	}

	return ret;
}
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946int FTL_Static_Wear_Leveling(void)
2947{
2948 u8 most_worn_cnt;
2949 u8 least_worn_cnt;
2950 u32 most_worn_idx;
2951 u32 least_worn_idx;
2952 int result = PASS;
2953 int go_on = PASS;
2954 u32 replaced_blks = 0;
2955 u8 *chang_flag = flags_static_wear_leveling;
2956
2957 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2958 __FILE__, __LINE__, __func__);
2959
2960 if (!chang_flag)
2961 return FAIL;
2962
2963 memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
2964 while (go_on == PASS) {
2965 nand_dbg_print(NAND_DBG_DEBUG,
2966 "starting static wear leveling\n");
2967 most_worn_cnt = 0;
2968 least_worn_cnt = 0xFF;
2969 least_worn_idx = BLOCK_TABLE_INDEX;
2970 most_worn_idx = BLOCK_TABLE_INDEX;
2971
2972 find_least_most_worn(chang_flag, &least_worn_idx,
2973 &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
2974
2975 nand_dbg_print(NAND_DBG_DEBUG,
2976 "Used and least worn is block %u, whos count is %u\n",
2977 (unsigned int)least_worn_idx,
2978 (unsigned int)least_worn_cnt);
2979
2980 nand_dbg_print(NAND_DBG_DEBUG,
2981 "Free and most worn is block %u, whos count is %u\n",
2982 (unsigned int)most_worn_idx,
2983 (unsigned int)most_worn_cnt);
2984
2985 if ((most_worn_cnt > least_worn_cnt) &&
2986 (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
2987 go_on = move_blks_for_wear_leveling(chang_flag,
2988 &least_worn_idx, &replaced_blks, &result);
2989 else
2990 go_on = FAIL;
2991 }
2992
2993 return result;
2994}
2995
2996#if CMD_DMA
/*
 * CDMA build: erase up to discard_cnt discard-marked blocks and return
 * them to the spare pool, logging each table change as a
 * BTableChangesDelta record.  Stops early when the CDMA command budget
 * would be exceeded (ftl_cmd_cnt + 28 headroom check against 256).
 * Blocks that currently hold a block-table copy are skipped — the BT
 * garbage collector owns those.
 *
 * Returns PASS when at least one block was reclaimed, FAIL otherwise.
 */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
			((ftl_cmd_cnt + 28) < 256)) {
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			/* Flag the table as in flux before changes. */
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/* Skip blocks holding a block-table copy. */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %u\n",
						(unsigned int)pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}

			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Erased: flip DISCARD -> SPARE and log
				 * the entry change for CDMA replay. */
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);
				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt - 1;
				p_BTableChangesDelta->BT_Index = i;
				p_BTableChangesDelta->BT_Entry_Value = pbt[i];
				p_BTableChangesDelta->ValidFields = 0x0C;
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
3064
3065#else
/*
 * Non-CDMA build: erase up to discard_cnt discard-marked blocks and
 * return them to the spare pool.  Blocks currently holding a
 * block-table copy or sitting in the L2 cache are skipped (they are
 * reclaimed elsewhere).
 *
 * Returns PASS when at least one block was reclaimed, FAIL otherwise.
 */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			/* Flag the table as in flux before changes. */
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/* Skip blocks holding a block-table copy. */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %d\n",
						pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}

			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			/* Skip blocks that are still L2-cache backed. */
			for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
				if (cache_l2.blk_array[cnt] == pba) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase L2 cache blk %d\n",
						pba);
					break;
				}
			}
			if (cnt < BLK_NUM_FOR_L2_CACHE) {
				discard_cnt--;
				i++;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Erased: flip DISCARD -> SPARE. */
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
3137#endif
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150int GLOB_FTL_Garbage_Collection(void)
3151{
3152 u32 i;
3153 u32 wDiscard = 0;
3154 int wResult = FAIL;
3155 u32 *pbt = (u32 *)g_pBlockTable;
3156
3157 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3158 __FILE__, __LINE__, __func__);
3159
3160 if (GC_Called) {
3161 printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
3162 "has been re-entered! Exit.\n");
3163 return PASS;
3164 }
3165
3166 GC_Called = 1;
3167
3168 GLOB_FTL_BT_Garbage_Collection();
3169
3170 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3171 if (IS_DISCARDED_BLOCK(i))
3172 wDiscard++;
3173 }
3174
3175 if (wDiscard <= 0) {
3176 GC_Called = 0;
3177 return wResult;
3178 }
3179
3180 nand_dbg_print(NAND_DBG_DEBUG,
3181 "Found %d discarded blocks\n", wDiscard);
3182
3183 FTL_Write_Block_Table(FAIL);
3184
3185 wResult = do_garbage_collection(wDiscard);
3186
3187 FTL_Write_Block_Table(FAIL);
3188
3189 GC_Called = 0;
3190
3191 return wResult;
3192}
3193
3194
3195#if CMD_DMA
/*
 * CDMA build: walk the circular list of retired block-table blocks
 * starting at last_erased, erase each discard-marked one, return it to
 * the spare pool, and invalidate its g_pBTBlocks slot.  Each table
 * change is logged as a BTableChangesDelta record; the loop stops when
 * the CDMA command budget (ftl_cmd_cnt + 28 vs 256) would be exceeded.
 * Re-entry is guarded by BT_GC_Called.
 *
 * Returns PASS if at least one BT block was reclaimed (or on
 * re-entry), FAIL otherwise.
 */
static int do_bt_garbage_collection(void)
{
	u32 pba, lba;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
	u64 addr;
	int i, ret = FAIL;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (BT_GC_Called)
		return PASS;

	BT_GC_Called = 1;

	/* Only proceed while the entry two ahead (mod the BT ID range)
	 * is still valid, i.e. enough retired copies remain. */
	for (i = last_erased; (i <= LAST_BT_ID) &&
		(g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
		FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
		((ftl_cmd_cnt + 28)) < 256; i++) {
		pba = pBTBlocksNode[i - FIRST_BT_ID];
		lba = FTL_Get_Block_Index(pba);
		nand_dbg_print(NAND_DBG_DEBUG,
			"do_bt_garbage_collection: pba %d, lba %d\n",
			pba, lba);
		nand_dbg_print(NAND_DBG_DEBUG,
			"Block Table Entry: %d", pbt[lba]);

		if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
			(pbt[lba] & DISCARD_BLOCK)) {
			nand_dbg_print(NAND_DBG_DEBUG,
				"do_bt_garbage_collection_cdma: "
				"Erasing Block tables present in block %d\n",
				pba);
			addr = FTL_Get_Physical_Block_Addr((u64)lba *
						DeviceInfo.wBlockDataSize);
			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Erased: DISCARD -> SPARE, plus a
				 * CDMA delta record for replay. */
				pbt[lba] &= (u32)(~DISCARD_BLOCK);
				pbt[lba] |= (u32)(SPARE_BLOCK);

				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);

				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt - 1;
				p_BTableChangesDelta->BT_Index = lba;
				p_BTableChangesDelta->BT_Entry_Value =
					pbt[lba];

				p_BTableChangesDelta->ValidFields = 0x0C;

				ret = PASS;
				/* Retire the oldest slot and advance
				 * last_erased around the ring. */
				pBTBlocksNode[last_erased - FIRST_BT_ID] =
					BTBLOCK_INVAL;
				nand_dbg_print(NAND_DBG_DEBUG,
					"resetting bt entry at index %d "
					"value %d\n", i,
					pBTBlocksNode[i - FIRST_BT_ID]);
				if (last_erased == LAST_BT_ID)
					last_erased = FIRST_BT_ID;
				else
					last_erased++;
			} else {
				MARK_BLOCK_AS_BAD(pbt[lba]);
			}
		}
	}

	BT_GC_Called = 0;

	return ret;
}
3271
3272#else
3273static int do_bt_garbage_collection(void)
3274{
3275 u32 pba, lba;
3276 u32 *pbt = (u32 *)g_pBlockTable;
3277 u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
3278 u64 addr;
3279 int i, ret = FAIL;
3280
3281 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3282 __FILE__, __LINE__, __func__);
3283
3284 if (BT_GC_Called)
3285 return PASS;
3286
3287 BT_GC_Called = 1;
3288
3289 for (i = last_erased; (i <= LAST_BT_ID) &&
3290 (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
3291 FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
3292 pba = pBTBlocksNode[i - FIRST_BT_ID];
3293 lba = FTL_Get_Block_Index(pba);
3294 nand_dbg_print(NAND_DBG_DEBUG,
3295 "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
3296 pba, lba);
3297 nand_dbg_print(NAND_DBG_DEBUG,
3298 "Block Table Entry: %d", pbt[lba]);
3299
3300 if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
3301 (pbt[lba] & DISCARD_BLOCK)) {
3302 nand_dbg_print(NAND_DBG_DEBUG,
3303 "do_bt_garbage_collection: "
3304 "Erasing Block tables present in block %d\n",
3305 pba);
3306 addr = FTL_Get_Physical_Block_Addr((u64)lba *
3307 DeviceInfo.wBlockDataSize);
3308 if (PASS == GLOB_FTL_Block_Erase(addr)) {
3309 pbt[lba] &= (u32)(~DISCARD_BLOCK);
3310 pbt[lba] |= (u32)(SPARE_BLOCK);
3311 ret = PASS;
3312 pBTBlocksNode[last_erased - FIRST_BT_ID] =
3313 BTBLOCK_INVAL;
3314 nand_dbg_print(NAND_DBG_DEBUG,
3315 "resetting bt entry at index %d "
3316 "value %d\n", i,
3317 pBTBlocksNode[i - FIRST_BT_ID]);
3318 if (last_erased == LAST_BT_ID)
3319 last_erased = FIRST_BT_ID;
3320 else
3321 last_erased++;
3322 } else {
3323 MARK_BLOCK_AS_BAD(pbt[lba]);
3324 }
3325 }
3326 }
3327
3328 BT_GC_Called = 0;
3329
3330 return ret;
3331}
3332
3333#endif
3334
3335
3336
3337
3338
3339
3340
3341
/*
 * Public wrapper for block-table garbage collection; dispatches to the
 * build-specific (CDMA or polling) implementation.
 */
int GLOB_FTL_BT_Garbage_Collection(void)
{
	int ret = do_bt_garbage_collection();

	return ret;
}
3346
3347
3348
3349
3350
3351
3352
3353
3354
/*
 * Swap the block-table entries of a worn-out block (blk) and its
 * replacement (rep_blk).  The old physical block is marked DISCARD
 * (unless it was bad, in which case its entry is kept as-is) and the
 * replacement loses its SPARE flag.  With CMD_DMA, both table changes
 * are logged as BTableChangesDelta records.
 *
 * Returns the new entry installed at pbt[blk] (physical block with
 * SPARE stripped), or BAD_BLOCK when rep_blk itself is BAD_BLOCK.
 */
static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
{
	u32 tmp_blk;
	u32 replace_node = BAD_BLOCK;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (rep_blk != BAD_BLOCK) {
		/* A bad block keeps its entry; a live one is retired
		 * as DISCARD for later garbage collection. */
		if (IS_BAD_BLOCK(blk))
			tmp_blk = pbt[blk];
		else
			tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);

		replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
		pbt[blk] = replace_node;
		pbt[rep_blk] = tmp_blk;

#if CMD_DMA
		/* Log both entry changes for CDMA replay. */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[blk];

		p_BTableChangesDelta->ValidFields = 0x0C;

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = rep_blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
	}

	return replace_node;
}
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
/*
 * Write the in-memory block table to flash at the position given by
 * g_wBlockTableIndex/g_wBlockTableOffset.  The first and last pages
 * are written main+spare with the table tag embedded; any middle pages
 * are written main-only.  When the table moved to a new block
 * (bt_block_changed), the tag counter bt_flag is advanced and the new
 * block is recorded in g_pBTBlocks, possibly triggering BT garbage
 * collection.  With CMD_DMA, data is staged through g_pNextBlockTable
 * and queued as CDMA commands instead of written synchronously.
 *
 * Always returns PASS — write failures only log a warning and skip the
 * remaining pages (NOTE(review): callers cannot detect the failure).
 */
static int FTL_Write_Block_Table_Data(void)
{
	u64 dwBlockTableAddr, pTempAddr;
	u32 Block;
	u16 Page, PageCount;
	u8 *tempBuf = tmp_buf_write_blk_table_data;
	int wBytesCopied;
	u16 bt_pages;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Byte address of the write position inside the BT block. */
	dwBlockTableAddr =
		(u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
		(u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
	pTempAddr = dwBlockTableAddr;

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
			       "page= %d BlockTableIndex= %d "
			       "BlockTableOffset=%d\n", bt_pages,
			       g_wBlockTableIndex, g_wBlockTableOffset);

	Block = BLK_FROM_ADDR(pTempAddr);
	Page = PAGE_FROM_ADDR(pTempAddr, Block);
	PageCount = 1;

	if (bt_block_changed) {
		/* New BT block: advance the tag counter (wrapping from
		 * LAST_BT_ID back to FIRST_BT_ID) and register the
		 * block under the new tag. */
		if (bt_flag == LAST_BT_ID) {
			bt_flag = FIRST_BT_ID;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		} else if (bt_flag < LAST_BT_ID) {
			bt_flag++;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		}

		/* Near the end of the tag range, reclaim old BT blocks
		 * so the ring does not fill up. */
		if ((bt_flag > (LAST_BT_ID-4)) &&
			g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
			BTBLOCK_INVAL) {
			bt_block_changed = 0;
			GLOB_FTL_BT_Garbage_Collection();
		}

		/* NOTE(review): redundant — bt_block_changed was
		 * already cleared on the GC path above. */
		bt_block_changed = 0;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Block Table Counter is %u Block %u\n",
			bt_flag, (unsigned int)Block);
	}

	/* First page: 4-byte header (3 zero bytes + tag), then table
	 * data, padded with 0xFF, with the tag signature in the spare
	 * area. */
	memset(tempBuf, 0, 3);
	tempBuf[3] = bt_flag;
	wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
			DeviceInfo.wPageDataSize - 4, 0);
	memset(&tempBuf[wBytesCopied + 4], 0xff,
		DeviceInfo.wPageSize - (wBytesCopied + 4));
	FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
		bt_flag);

#if CMD_DMA
	memcpy(g_pNextBlockTable, tempBuf,
		DeviceInfo.wPageSize * sizeof(u8));
	nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
		"Block %u Page %u\n", (unsigned int)Block, Page);
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
		Block, Page, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
			"%s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}

	ftl_cmd_cnt++;
	g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
#else
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}
#endif

	if (bt_pages > 1) {
		PageCount = bt_pages - 1;
		/* Middle pages (if any): table data only, main area. */
		if (PageCount > 1) {
			wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize * (PageCount - 1),
				wBytesCopied);

#if CMD_DMA
			memcpy(g_pNextBlockTable, tempBuf,
				(PageCount - 1) * DeviceInfo.wPageDataSize);
			if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
				g_pNextBlockTable, Block, Page + 1,
				PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}

			ftl_cmd_cnt++;
			g_pNextBlockTable += (PageCount - 1) *
				DeviceInfo.wPageDataSize * sizeof(u8);
#else
			if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
				Block, Page + 1, PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}
#endif
		}

		/* Last page: remaining table data, 0xFF padding, and
		 * the tag signature in the spare area again. */
		wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize, wBytesCopied);
		memset(&tempBuf[wBytesCopied], 0xff,
			DeviceInfo.wPageSize-wBytesCopied);
		FTL_Insert_Block_Table_Signature(
			&tempBuf[DeviceInfo.wPageDataSize], bt_flag);
#if CMD_DMA
		memcpy(g_pNextBlockTable, tempBuf,
			DeviceInfo.wPageSize * sizeof(u8));
		nand_dbg_print(NAND_DBG_DEBUG,
			"Writing the last Page of Block Table "
			"Block %u Page %u\n",
			(unsigned int)Block, Page + bt_pages - 1);
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
			g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
			LLD_CMD_FLAG_MODE_CDMA |
			LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
		ftl_cmd_cnt++;
#else
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
			Block, Page+bt_pages - 1, 1)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, "
				"new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
#endif
	}

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");

func_return:
	return PASS;
}
3578
3579
3580
3581
3582
3583
3584
3585static u32 FTL_Replace_Block_Table(void)
3586{
3587 u32 blk;
3588 int gc;
3589
3590 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3591 __FILE__, __LINE__, __func__);
3592
3593 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3594
3595 if ((BAD_BLOCK == blk) && (PASS == gc)) {
3596 GLOB_FTL_Garbage_Collection();
3597 blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
3598 }
3599 if (BAD_BLOCK == blk)
3600 printk(KERN_ERR "%s, %s: There is no spare block. "
3601 "It should never happen\n",
3602 __FILE__, __func__);
3603
3604 nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
3605
3606 return blk;
3607}
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
/*
 * Replace wBlockNum with the least-worn spare block.
 *
 * Special case: if wBlockNum is itself still a spare block, it is
 * simply claimed (SPARE flag cleared) and its entry returned, with no
 * garbage collection requested.  Otherwise the table is scanned for
 * the least-worn spare, *pGarbageCollect is set to PASS when the pool
 * is low (by NUM_FREE_BLOCKS_GATE) or enough discards have piled up,
 * and FTL_Replace_OneBlock() performs the swap.
 *
 * Returns the new table entry for wBlockNum, or BAD_BLOCK when no
 * spare block exists.
 */
static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;
	u8 wLeastWornCounter = 0xFF;
	u32 wLeastWornIndex = BAD_BLOCK;
	u32 wSpareBlockNum = 0;
	u32 wDiscardBlockNum = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (IS_SPARE_BLOCK(wBlockNum)) {
		/* Block is still spare: claim it in place. */
		*pGarbageCollect = FAIL;
		pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
#if CMD_DMA
		/* Log the entry change for CDMA replay. */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
		p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		return pbt[wBlockNum];
	}

	/* Scan for the least-worn spare; count spares and discards to
	 * decide whether garbage collection is needed. */
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_DISCARDED_BLOCK(i))
			wDiscardBlockNum++;

		if (IS_SPARE_BLOCK(i)) {
			u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
			if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
				printk(KERN_ERR "FTL_Replace_LWBlock: "
					"This should never occur!\n");
			if (g_pWearCounter[wPhysicalIndex -
				DeviceInfo.wSpectraStartBlock] <
				wLeastWornCounter) {
				wLeastWornCounter =
					g_pWearCounter[wPhysicalIndex -
					DeviceInfo.wSpectraStartBlock];
				wLeastWornIndex = i;
			}
			wSpareBlockNum++;
		}
	}

	nand_dbg_print(NAND_DBG_WARN,
		"FTL_Replace_LWBlock: Least Worn Counter %d\n",
		(int)wLeastWornCounter);

	if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
		(wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
		*pGarbageCollect = PASS;
	else
		*pGarbageCollect = FAIL;

	nand_dbg_print(NAND_DBG_DEBUG,
		"FTL_Replace_LWBlock: Discarded Blocks %u Spare"
		" Blocks %u\n",
		(unsigned int)wDiscardBlockNum,
		(unsigned int)wSpareBlockNum);

	return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
}
3688
3689
3690
3691
3692
3693
3694
3695static u32 FTL_Replace_MWBlock(void)
3696{
3697 u32 i;
3698 u32 *pbt = (u32 *)g_pBlockTable;
3699 u8 wMostWornCounter = 0;
3700 u32 wMostWornIndex = BAD_BLOCK;
3701 u32 wSpareBlockNum = 0;
3702
3703 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3704 __FILE__, __LINE__, __func__);
3705
3706 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
3707 if (IS_SPARE_BLOCK(i)) {
3708 u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
3709 if (g_pWearCounter[wPhysicalIndex -
3710 DeviceInfo.wSpectraStartBlock] >
3711 wMostWornCounter) {
3712 wMostWornCounter =
3713 g_pWearCounter[wPhysicalIndex -
3714 DeviceInfo.wSpectraStartBlock];
3715 wMostWornIndex = wPhysicalIndex;
3716 }
3717 wSpareBlockNum++;
3718 }
3719 }
3720
3721 if (wSpareBlockNum <= 2)
3722 return BAD_BLOCK;
3723
3724 return wMostWornIndex;
3725}
3726
3727
3728
3729
3730
3731
3732
3733
3734static int FTL_Replace_Block(u64 blk_addr)
3735{
3736 u32 current_blk = BLK_FROM_ADDR(blk_addr);
3737 u32 *pbt = (u32 *)g_pBlockTable;
3738 int wResult = PASS;
3739 int GarbageCollect = FAIL;
3740
3741 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3742 __FILE__, __LINE__, __func__);
3743
3744 if (IS_SPARE_BLOCK(current_blk)) {
3745 pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
3746#if CMD_DMA
3747 p_BTableChangesDelta =
3748 (struct BTableChangesDelta *)g_pBTDelta_Free;
3749 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
3750 p_BTableChangesDelta->ftl_cmd_cnt =
3751 ftl_cmd_cnt;
3752 p_BTableChangesDelta->BT_Index = current_blk;
3753 p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
3754 p_BTableChangesDelta->ValidFields = 0x0C ;
3755#endif
3756 return wResult;
3757 }
3758
3759 FTL_Replace_LWBlock(current_blk, &GarbageCollect);
3760
3761 if (PASS == GarbageCollect)
3762 wResult = GLOB_FTL_Garbage_Collection();
3763
3764 return wResult;
3765}
3766
3767
3768
3769
3770
3771
3772
3773int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
3774{
3775 u32 *pbt = (u32 *)g_pBlockTable;
3776
3777 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3778 __FILE__, __LINE__, __func__);
3779
3780 if (wBlockNum >= DeviceInfo.wSpectraStartBlock
3781 && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
3782 return PASS;
3783 else
3784 return FAIL;
3785}
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797int GLOB_FTL_Flush_Cache(void)
3798{
3799 int i, ret;
3800
3801 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
3802 __FILE__, __LINE__, __func__);
3803
3804 for (i = 0; i < CACHE_ITEM_NUM; i++) {
3805 if (SET == Cache.array[i].changed) {
3806#if CMD_DMA
3807#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
3808 int_cache[ftl_cmd_cnt].item = i;
3809 int_cache[ftl_cmd_cnt].cache.address =
3810 Cache.array[i].address;
3811 int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
3812#endif
3813#endif
3814 ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
3815 if (PASS == ret) {
3816 Cache.array[i].changed = CLEAR;
3817 } else {
3818 printk(KERN_ALERT "Failed when write back to L2 cache!\n");
3819
3820 }
3821 }
3822 }
3823
3824 flush_l2_cache();
3825
3826 return FTL_Write_Block_Table(FAIL);
3827}
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
3838{
3839 u16 cache_item;
3840 int res = PASS;
3841
3842 nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
3843 "page_addr: %llu\n", logical_addr);
3844
3845 cache_item = FTL_Cache_If_Hit(logical_addr);
3846
3847 if (UNHIT_CACHE_ITEM == cache_item) {
3848 nand_dbg_print(NAND_DBG_DEBUG,
3849 "GLOB_FTL_Page_Read: Cache not hit\n");
3850 res = FTL_Cache_Write();
3851 if (ERR == FTL_Cache_Read(logical_addr))
3852 res = ERR;
3853 cache_item = Cache.LRU;
3854 }
3855
3856 FTL_Cache_Read_Page(data, logical_addr, cache_item);
3857
3858 return res;
3859}
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
3872{
3873 u16 cache_blk;
3874 u32 *pbt = (u32 *)g_pBlockTable;
3875 int wResult = PASS;
3876
3877 nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
3878 "dwPageAddr: %llu\n", dwPageAddr);
3879
3880 cache_blk = FTL_Cache_If_Hit(dwPageAddr);
3881
3882 if (UNHIT_CACHE_ITEM == cache_blk) {
3883 wResult = FTL_Cache_Write();
3884 if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
3885 wResult = FTL_Replace_Block(dwPageAddr);
3886 pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
3887 if (wResult == FAIL)
3888 return FAIL;
3889 }
3890 if (ERR == FTL_Cache_Read(dwPageAddr))
3891 wResult = ERR;
3892 cache_blk = Cache.LRU;
3893 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
3894 } else {
3895#if CMD_DMA
3896 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
3897 LLD_CMD_FLAG_ORDER_BEFORE_REST);
3898#else
3899 FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
3900#endif
3901 }
3902
3903 return wResult;
3904}
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
/*
 * GLOB_FTL_Block_Erase - erase one physical block and update wear state.
 * @blk_addr: byte address within the block to erase (physical space).
 *
 * Issues the low-level erase (CDMA or direct build), resets the MLC
 * read-disturb counter, bumps the block's wear counter, records the
 * counter changes in the CDMA delta log when CMD_DMA is enabled, and
 * triggers wear-count renormalization when the counter nears saturation.
 *
 * Returns the low-level erase status; FAIL early if @blk_addr is below
 * the Spectra-managed block range.
 */
int GLOB_FTL_Block_Erase(u64 blk_addr)
{
	int status;
	u32 BlkIdx;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Convert byte address to physical block index */
	BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);

	if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
		printk(KERN_ERR "GLOB_FTL_Block_Erase: "
			"This should never occur\n");
		return FAIL;
	}

#if CMD_DMA
	/* CDMA build: erase is queued; a failure is only logged here,
	 * the counters below are still updated. */
	status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
	if (status == FAIL)
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, BlkIdx);
#else
	/* Direct build: bail out immediately on erase failure */
	status = GLOB_LLD_Erase_Block(BlkIdx);
	if (status == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, BlkIdx);
		return status;
	}
#endif

	if (DeviceInfo.MLCDevice) {
		/* Erase clears read disturb; restart the read counter and
		 * make sure an in-progress block-table page gets written. */
		g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}

	g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;

#if CMD_DMA
	/* Log the wear-counter change for CDMA recovery */
	p_BTableChangesDelta =
		(struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->WC_Index =
		BlkIdx - DeviceInfo.wSpectraStartBlock;
	p_BTableChangesDelta->WC_Entry_Value =
		g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
	p_BTableChangesDelta->ValidFields = 0x30;

	if (DeviceInfo.MLCDevice) {
		/* Also log the read-counter reset for MLC devices */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			BlkIdx - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[BlkIdx -
				DeviceInfo.wSpectraStartBlock];
		p_BTableChangesDelta->ValidFields = 0xC0;
	}

	ftl_cmd_cnt++;
#endif

	/* Counter about to saturate (u8): renormalize all wear counters */
	if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
		FTL_Adjust_Relative_Erase_Count(BlkIdx);

	return status;
}
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
4004{
4005 u8 wLeastWornCounter = MAX_BYTE_VALUE;
4006 u8 wWearCounter;
4007 u32 i, wWearIndex;
4008 u32 *pbt = (u32 *)g_pBlockTable;
4009 int wResult = PASS;
4010
4011 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
4012 __FILE__, __LINE__, __func__);
4013
4014 for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
4015 if (IS_BAD_BLOCK(i))
4016 continue;
4017 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4018
4019 if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
4020 printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
4021 "This should never occur\n");
4022 wWearCounter = g_pWearCounter[wWearIndex -
4023 DeviceInfo.wSpectraStartBlock];
4024 if (wWearCounter < wLeastWornCounter)
4025 wLeastWornCounter = wWearCounter;
4026 }
4027
4028 if (wLeastWornCounter == 0) {
4029 nand_dbg_print(NAND_DBG_WARN,
4030 "Adjusting Wear Levelling Counters: Special Case\n");
4031 g_pWearCounter[Index_of_MAX -
4032 DeviceInfo.wSpectraStartBlock]--;
4033#if CMD_DMA
4034 p_BTableChangesDelta =
4035 (struct BTableChangesDelta *)g_pBTDelta_Free;
4036 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
4037 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
4038 p_BTableChangesDelta->WC_Index =
4039 Index_of_MAX - DeviceInfo.wSpectraStartBlock;
4040 p_BTableChangesDelta->WC_Entry_Value =
4041 g_pWearCounter[Index_of_MAX -
4042 DeviceInfo.wSpectraStartBlock];
4043 p_BTableChangesDelta->ValidFields = 0x30;
4044#endif
4045 FTL_Static_Wear_Leveling();
4046 } else {
4047 for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
4048 if (!IS_BAD_BLOCK(i)) {
4049 wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
4050 g_pWearCounter[wWearIndex -
4051 DeviceInfo.wSpectraStartBlock] =
4052 (u8)(g_pWearCounter
4053 [wWearIndex -
4054 DeviceInfo.wSpectraStartBlock] -
4055 wLeastWornCounter);
4056#if CMD_DMA
4057 p_BTableChangesDelta =
4058 (struct BTableChangesDelta *)g_pBTDelta_Free;
4059 g_pBTDelta_Free +=
4060 sizeof(struct BTableChangesDelta);
4061
4062 p_BTableChangesDelta->ftl_cmd_cnt =
4063 ftl_cmd_cnt;
4064 p_BTableChangesDelta->WC_Index = wWearIndex -
4065 DeviceInfo.wSpectraStartBlock;
4066 p_BTableChangesDelta->WC_Entry_Value =
4067 g_pWearCounter[wWearIndex -
4068 DeviceInfo.wSpectraStartBlock];
4069 p_BTableChangesDelta->ValidFields = 0x30;
4070#endif
4071 }
4072 }
4073
4074 return wResult;
4075}
4076
4077
4078
4079
4080
4081
4082
4083
/*
 * FTL_Write_IN_Progress_Block_Table_Page - write the IPF marker page.
 *
 * Writes the in-progress-flag (IPF) page immediately after the block
 * table pages in the current block-table block, then advances
 * g_wBlockTableOffset past it.  In the CMD_DMA build a write failure is
 * only logged and the change is recorded in the CDMA delta log; in the
 * direct build a failure marks the block bad, picks a replacement
 * block-table block and returns ERR/FAIL.
 *
 * Returns PASS on success; FAIL or ERR on write failure (non-CDMA path).
 */
static int FTL_Write_IN_Progress_Block_Table_Page(void)
{
	int wResult = PASS;
	u16 bt_pages;
	u16 dwIPFPageAddr;
#if CMD_DMA
#else
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 wTempBlockTableIndex;
#endif

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	/* IPF page sits directly after the block-table pages */
	dwIPFPageAddr = g_wBlockTableOffset + bt_pages;

	nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
			   "Block %d Page %d\n",
			   g_wBlockTableIndex, dwIPFPageAddr);

#if CMD_DMA
	wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
	if (wResult == FAIL) {
		/* Failure is only logged; recovery happens via CDMA
		 * chain handling, not here. */
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__,
			g_wBlockTableIndex);
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
	/* Record the new offset in the CDMA delta log */
	p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
	p_BTableChangesDelta->ValidFields = 0x01;
	ftl_cmd_cnt++;
#else
	wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1);
	if (wResult == FAIL) {
		/* Mark the current block-table block bad and switch to a
		 * replacement block for subsequent table writes. */
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__,
			(int)g_wBlockTableIndex);
		MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
		wTempBlockTableIndex = FTL_Replace_Block_Table();
		bt_block_changed = 1;
		if (BAD_BLOCK == wTempBlockTableIndex)
			return ERR;
		g_wBlockTableIndex = wTempBlockTableIndex;
		g_wBlockTableOffset = 0;
		/* Block table move: the table must be rewritten there */
		pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
		return FAIL;
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
#endif
	return wResult;
}
4148
4149
4150
4151
4152
4153
4154
4155
/*
 * FTL_Read_Disturbance - relocate a block suffering read disturbance.
 * @blk_addr: physical address of the disturbed block.
 *
 * Copies the disturbed block's contents to the least-read spare block
 * and swaps the two entries in the block table (the old block becomes a
 * DISCARD_BLOCK spare).  If too few spare blocks remain, garbage
 * collection is run first and the whole selection retried.
 *
 * Returns PASS on success, FAIL on copy/GC failure or (CDMA build)
 * copy-back buffer exhaustion.
 *
 * NOTE(review): wLeastReadCounter/wLeastReadIndex/wSpareBlockNum are not
 * reset at the top of the retry loop, so a second pass after garbage
 * collection accumulates counts from the first — looks unintended;
 * confirm before changing.
 */
int FTL_Read_Disturbance(u32 blk_addr)
{
	int wResult = FAIL;
	u32 *pbt = (u32 *) g_pBlockTable;
	u32 dwOldBlockAddr = blk_addr;
	u32 wBlockNum;
	u32 i;
	u32 wLeastReadCounter = 0xFFFF;
	u32 wLeastReadIndex = BAD_BLOCK;
	u32 wSpareBlockNum = 0;
	u32 wTempNode;
	u32 wReplacedNode;
	u8 *g_pTempBuf;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

#if CMD_DMA
	/* Grab one of the pre-allocated copy-back buffers; they are only
	 * recycled when the CDMA chain completes. */
	g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
	cp_back_buf_idx++;
	if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
		printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
		"Maybe too many pending commands in your CDMA chain.\n");
		return FAIL;
	}
#else
	g_pTempBuf = tmp_buf_read_disturbance;
#endif

	wBlockNum = FTL_Get_Block_Index(blk_addr);

	do {
		/* Find the spare block with the lowest read counter.
		 * NOTE(review): scan starts at i = 1, skipping entry 0 —
		 * presumably reserved; confirm against the table layout. */
		for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
			if (IS_SPARE_BLOCK(i)) {
				u32 wPhysicalIndex =
					(u32)((~SPARE_BLOCK) & pbt[i]);
				if (g_pReadCounter[wPhysicalIndex -
					DeviceInfo.wSpectraStartBlock] <
					wLeastReadCounter) {
					wLeastReadCounter =
						g_pReadCounter[wPhysicalIndex -
						DeviceInfo.wSpectraStartBlock];
					wLeastReadIndex = i;
				}
				wSpareBlockNum++;
			}
		}

		if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
			/* Not enough spares: garbage-collect and retry,
			 * or give up if GC itself failed. */
			wResult = GLOB_FTL_Garbage_Collection();
			if (PASS == wResult)
				continue;
			else
				break;
		} else {
			/* Old entry becomes a discarded spare; the chosen
			 * spare entry becomes the data block. */
			wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
			wReplacedNode = (u32)((~SPARE_BLOCK) &
					pbt[wLeastReadIndex]);
#if CMD_DMA
			/* CDMA path: swap table entries up front and log
			 * both changes in the delta log. */
			pbt[wBlockNum] = wReplacedNode;
			pbt[wLeastReadIndex] = wTempNode;
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = wBlockNum;
			p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
			p_BTableChangesDelta->ValidFields = 0x0C;

			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = wLeastReadIndex;
			p_BTableChangesDelta->BT_Entry_Value =
					pbt[wLeastReadIndex];
			p_BTableChangesDelta->ValidFields = 0x0C;

			/* Copy whole block: read old block into buffer ... */
			wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
				dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
				LLD_CMD_FLAG_MODE_CDMA);
			if (wResult == FAIL)
				return wResult;

			ftl_cmd_cnt++;

			/* NOTE(review): wResult != FAIL is always true here
			 * (FAIL returned above) — dead condition kept as-is. */
			if (wResult != FAIL) {
				/* ... then program it into the new block */
				if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
					g_pTempBuf, pbt[wBlockNum], 0,
					DeviceInfo.wPagesPerBlock)) {
					nand_dbg_print(NAND_DBG_WARN,
						"NAND Program fail in "
						"%s, Line %d, Function: %s, "
						"new Bad Block %d "
						"generated!\n",
						__FILE__, __LINE__, __func__,
						(int)pbt[wBlockNum]);
					wResult = FAIL;
					MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
				}
				ftl_cmd_cnt++;
			}
#else
			/* Direct path: copy first, swap table entries only
			 * after the write succeeded. */
			wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
				dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
			if (wResult == FAIL)
				return wResult;

			/* NOTE(review): always true after the early return
			 * above — dead condition kept as-is. */
			if (wResult != FAIL) {
				wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
					wReplacedNode, 0,
					DeviceInfo.wPagesPerBlock);
				if (wResult == FAIL) {
					nand_dbg_print(NAND_DBG_WARN,
						"NAND Program fail in "
						"%s, Line %d, Function: %s, "
						"new Bad Block %d "
						"generated!\n",
						__FILE__, __LINE__, __func__,
						(int)wReplacedNode);
					MARK_BLOCK_AS_BAD(wReplacedNode);
				} else {
					pbt[wBlockNum] = wReplacedNode;
					pbt[wLeastReadIndex] = wTempNode;
				}
			}

			/* Table changed: make sure an in-progress block
			 * table page is written out. */
			if ((wResult == PASS) && (g_cBlockTableStatus !=
				IN_PROGRESS_BLOCK_TABLE)) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}
#endif
		}
	} while (wResult != PASS)
	;

#if CMD_DMA
	/* Buffer is released when the CDMA chain completes */
#endif

	return wResult;
}
4313
4314