1
2
3
4
5
6
7
8
9
10#define pr_fmt(fmt) "nand: " fmt
11
12#include <linux/module.h>
13#include <linux/mtd/nand.h>
14
15
16
17
18
19
20
21
22bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
23{
24 if (nanddev_bbt_is_initialized(nand)) {
25 unsigned int entry;
26 int status;
27
28 entry = nanddev_bbt_pos_to_entry(nand, pos);
29 status = nanddev_bbt_get_block_status(nand, entry);
30
31 if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
32 if (nand->ops->isbad(nand, pos))
33 status = NAND_BBT_BLOCK_FACTORY_BAD;
34 else
35 status = NAND_BBT_BLOCK_GOOD;
36
37 nanddev_bbt_set_block_status(nand, entry, status);
38 }
39
40 if (status == NAND_BBT_BLOCK_WORN ||
41 status == NAND_BBT_BLOCK_FACTORY_BAD)
42 return true;
43
44 return false;
45 }
46
47 return nand->ops->isbad(nand, pos);
48}
49EXPORT_SYMBOL_GPL(nanddev_isbad);
50
51
52
53
54
55
56
57
58
59
60
/**
 * nanddev_markbad() - Write a bad block marker to a block
 * @nand: NAND device
 * @pos: position of the block to mark bad
 *
 * Marks a block bad through the low-level ->markbad() hook and, when a
 * bad block table is in use, records the block as worn there and flushes
 * the table.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	unsigned int entry;
	int ret = 0;

	/* Nothing to do if the block is already known to be bad. */
	if (nanddev_isbad(nand, pos))
		return 0;

	/* Write the bad block marker to the flash itself. */
	ret = nand->ops->markbad(nand, pos);
	if (ret)
		pr_warn("failed to write BBM to block @%llx (err = %d)\n",
			nanddev_pos_to_offs(nand, pos), ret);

	if (!nanddev_bbt_is_initialized(nand))
		goto out;

	/*
	 * Record the block as worn in the BBT and flush the table. Note
	 * that a ->markbad() failure above is intentionally superseded by
	 * the result of the BBT update: once the table tracks the block,
	 * its state is considered authoritative.
	 */
	entry = nanddev_bbt_pos_to_entry(nand, pos);
	ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
	if (ret)
		goto out;

	ret = nanddev_bbt_update(nand);

out:
	/* Only account the new bad block if everything went well. */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
EXPORT_SYMBOL_GPL(nanddev_markbad);
92
93
94
95
96
97
98
99
100
101
102bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
103{
104 unsigned int entry;
105 int status;
106
107 if (!nanddev_bbt_is_initialized(nand))
108 return false;
109
110
111 entry = nanddev_bbt_pos_to_entry(nand, pos);
112 status = nanddev_bbt_get_block_status(nand, entry);
113 return status == NAND_BBT_BLOCK_RESERVED;
114}
115EXPORT_SYMBOL_GPL(nanddev_isreserved);
116
117
118
119
120
121
122
123
124
125
126int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
127{
128 if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
129 pr_warn("attempt to erase a bad/reserved block @%llx\n",
130 nanddev_pos_to_offs(nand, pos));
131 return -EIO;
132 }
133
134 return nand->ops->erase(nand, pos);
135}
136EXPORT_SYMBOL_GPL(nanddev_erase);
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
154{
155 struct nand_device *nand = mtd_to_nanddev(mtd);
156 struct nand_pos pos, last;
157 int ret;
158
159 nanddev_offs_to_pos(nand, einfo->addr, &pos);
160 nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
161 while (nanddev_pos_cmp(&pos, &last) <= 0) {
162 ret = nanddev_erase(nand, &pos);
163 if (ret) {
164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
165
166 return ret;
167 }
168
169 nanddev_pos_next_eraseblock(nand, &pos);
170 }
171
172 return 0;
173}
174EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
190{
191 struct nand_device *nand = mtd_to_nanddev(mtd);
192 struct nand_pos pos, end;
193 unsigned int max_bb = 0;
194
195 if (!nand->memorg.max_bad_eraseblocks_per_lun)
196 return -ENOTSUPP;
197
198 nanddev_offs_to_pos(nand, offs, &pos);
199 nanddev_offs_to_pos(nand, offs + len, &end);
200
201 for (nanddev_offs_to_pos(nand, offs, &pos);
202 nanddev_pos_cmp(&pos, &end) < 0;
203 nanddev_pos_next_lun(nand, &pos))
204 max_bb += nand->memorg.max_bad_eraseblocks_per_lun;
205
206 return max_bb;
207}
208EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
209
210
211
212
213
/*
 * Select the ECC engine matching the user-requested configuration, falling
 * back to the device defaults when no explicit engine type was requested.
 *
 * Returns 0 on success (including when no ECC engine is wanted at all), a
 * negative error code otherwise.
 */
static int nanddev_get_ecc_engine(struct nand_device *nand)
{
	int engine_type;

	/* Retrieve the user-requested ECC configuration. */
	of_get_nand_ecc_user_config(nand);

	/* Fall back to the default engine type if the user made no choice. */
	engine_type = nand->ecc.user_conf.engine_type;
	if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		engine_type = nand->ecc.defaults.engine_type;

	switch (engine_type) {
	case NAND_ECC_ENGINE_TYPE_NONE:
		/* No correction wanted: leave nand->ecc.engine unset. */
		return 0;
	case NAND_ECC_ENGINE_TYPE_SOFT:
		nand->ecc.engine = nand_ecc_get_sw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand);
		break;
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		/* Not implemented: falls through to the -EINVAL check below. */
		pr_err("On-host hardware ECC engines not supported yet\n");
		break;
	default:
		pr_err("Missing ECC engine type\n");
	}

	/* Every path that failed to provide an engine is caught here. */
	if (!nand->ecc.engine)
		return -EINVAL;

	return 0;
}
246
247
248
249
250
251static int nanddev_put_ecc_engine(struct nand_device *nand)
252{
253 switch (nand->ecc.ctx.conf.engine_type) {
254 case NAND_ECC_ENGINE_TYPE_ON_HOST:
255 pr_err("On-host hardware ECC engines not supported yet\n");
256 break;
257 case NAND_ECC_ENGINE_TYPE_NONE:
258 case NAND_ECC_ENGINE_TYPE_SOFT:
259 case NAND_ECC_ENGINE_TYPE_ON_DIE:
260 default:
261 break;
262 }
263
264 return 0;
265}
266
267
268
269
270
271static int nanddev_find_ecc_configuration(struct nand_device *nand)
272{
273 int ret;
274
275 if (!nand->ecc.engine)
276 return -ENOTSUPP;
277
278 ret = nand_ecc_init_ctx(nand);
279 if (ret)
280 return ret;
281
282 if (!nand_ecc_is_strong_enough(nand))
283 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
284 nand->mtd.name);
285
286 return 0;
287}
288
289
290
291
292
293int nanddev_ecc_engine_init(struct nand_device *nand)
294{
295 int ret;
296
297
298 ret = nanddev_get_ecc_engine(nand);
299 if (ret) {
300 pr_err("No ECC engine found\n");
301 return ret;
302 }
303
304
305 if (!nand->ecc.engine)
306 return 0;
307
308
309 ret = nanddev_find_ecc_configuration(nand);
310 if (ret) {
311 pr_err("No suitable ECC configuration\n");
312 nanddev_put_ecc_engine(nand);
313
314 return ret;
315 }
316
317 return 0;
318}
319EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init);
320
321
322
323
324
/**
 * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations
 * @nand: NAND device
 */
void nanddev_ecc_engine_cleanup(struct nand_device *nand)
{
	/* Only tear down the ECC context if an engine was actually set up. */
	if (nand->ecc.engine)
		nand_ecc_cleanup_ctx(nand);

	nanddev_put_ecc_engine(nand);
}
EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup);
333
334
335
336
337
338
339
340
341
342
343
344
345int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
346 struct module *owner)
347{
348 struct mtd_info *mtd = nanddev_to_mtd(nand);
349 struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
350
351 if (!nand || !ops)
352 return -EINVAL;
353
354 if (!ops->erase || !ops->markbad || !ops->isbad)
355 return -EINVAL;
356
357 if (!memorg->bits_per_cell || !memorg->pagesize ||
358 !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
359 !memorg->planes_per_lun || !memorg->luns_per_target ||
360 !memorg->ntargets)
361 return -EINVAL;
362
363 nand->rowconv.eraseblock_addr_shift =
364 fls(memorg->pages_per_eraseblock - 1);
365 nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
366 nand->rowconv.eraseblock_addr_shift;
367
368 nand->ops = ops;
369
370 mtd->type = memorg->bits_per_cell == 1 ?
371 MTD_NANDFLASH : MTD_MLCNANDFLASH;
372 mtd->flags = MTD_CAP_NANDFLASH;
373 mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
374 mtd->writesize = memorg->pagesize;
375 mtd->writebufsize = memorg->pagesize;
376 mtd->oobsize = memorg->oobsize;
377 mtd->size = nanddev_size(nand);
378 mtd->owner = owner;
379
380 return nanddev_bbt_init(nand);
381}
382EXPORT_SYMBOL_GPL(nanddev_init);
383
384
385
386
387
388
389
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 */
void nanddev_cleanup(struct nand_device *nand)
{
	/* Only the BBT needs tearing down, and only if it was set up. */
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
396
397MODULE_DESCRIPTION("Generic NAND framework");
398MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
399MODULE_LICENSE("GPL v2");
400