// SPDX-License-Identifier: GPL-2.0
/*
 * sharpslpart.c - MTD partition parser for NAND flash on the Sharp SL Series,
 * using the Sharp FTL logical block addressing scheme.
 *
 * Author: Andrea Adami <andrea.adami@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* OOB bytes 8 to 13 hold three copies of the 16-bit logical block number */
#define NAND_NOOB_LOGADDR_00	8
#define NAND_NOOB_LOGADDR_01	9
#define NAND_NOOB_LOGADDR_10	10
#define NAND_NOOB_LOGADDR_11	11
#define NAND_NOOB_LOGADDR_20	12
#define NAND_NOOB_LOGADDR_21	13

#define BLOCK_IS_RESERVED	0xffff
#define BLOCK_UNMASK_COMPLEMENT	1

/* fixed layout: size of the FTL partition and the two partition table copies */
#define SHARPSL_NAND_PARTS	3
#define SHARPSL_FTL_PART_SIZE	(7 * SZ_1M)
#define SHARPSL_PARTINFO1_LADDR	0x00060000
#define SHARPSL_PARTINFO2_LADDR	0x00064000

#define BOOT_MAGIC		0x424f4f54
#define FSRO_MAGIC		0x4653524f
#define FSRW_MAGIC		0x46535257
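
/*
 * struct sharpsl_ftl - logical-to-physical block table for the Sharp FTL
 * @logmax:	number of logical blocks in the FTL partition
 * @log2phy:	logical-to-physical block translation table;
 *		UINT_MAX marks a logical block with no physical block assigned
 */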
struct sharpsl_ftl {
	unsigned int logmax;
	unsigned int *log2phy;
};
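
/*
 * The FTL metadata is kept in the OOB area of the first page of each erase
 * block, so the ECC layout must leave OOB bytes 8 to 15 free for it.
 */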
static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
{
	u8 freebytes = 0;
	int section = 0;

	while (true) {
		struct mtd_oob_region oobfree = { };
		int ret, i;

		ret = mtd_ooblayout_free(mtd, section++, &oobfree);
		if (ret)
			break;

		if (!oobfree.length || oobfree.offset > 15 ||
		    (oobfree.offset + oobfree.length) < 8)
			continue;

		i = oobfree.offset >= 8 ? oobfree.offset : 8;
		for (; i < oobfree.offset + oobfree.length && i < 16; i++)
			freebytes |= BIT(i - 8);

		if (freebytes == 0xff)
			return 0;
	}

	return -ENOTSUPP;
}
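
/* read the complete OOB area of the page at @offs */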
static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
{
	struct mtd_oob_ops ops = { };
	int ret;

	ops.mode = MTD_OPS_PLACE_OOB;
	ops.ooblen = mtd->oobsize;
	ops.oobbuf = buf;

	ret = mtd_read_oob(mtd, offs, &ops);
	if (ret != 0 || mtd->oobsize != ops.oobretlen)
		return -1;

	return 0;
}
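
/*
 * The logical block number is stored as a 16-bit value, repeated three times
 * in OOB bytes 8/9, 10/11 and 12/13. The first pair of copies that agree is
 * used. The value must have even parity; 0xffff marks a reserved block, and
 * the logical number itself is taken from bits 1 to 10.
 */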
static int sharpsl_nand_get_logical_num(u8 *oob)
{
	u16 us;
	int good0, good1;

	if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
	    oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
		good0 = NAND_NOOB_LOGADDR_00;
		good1 = NAND_NOOB_LOGADDR_01;
	} else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
		   oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
		good0 = NAND_NOOB_LOGADDR_10;
		good1 = NAND_NOOB_LOGADDR_11;
	} else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
		   oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
		good0 = NAND_NOOB_LOGADDR_20;
		good1 = NAND_NOOB_LOGADDR_21;
	} else {
		return -EINVAL;
	}

	us = oob[good0] | oob[good1] << 8;

	/* parity check: a valid value has an even number of bits set */
	if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
		return -EINVAL;

	/* reserved block, not part of the logical mapping */
	if (us == BLOCK_IS_RESERVED)
		return BLOCK_IS_RESERVED;

	return (us >> 1) & GENMASK(9, 0);
}
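
/*
 * Scan the physical blocks of the FTL partition (the first
 * SHARPSL_FTL_PART_SIZE bytes of the device) and build the
 * logical-to-physical table. Roughly 5% of the blocks, plus one
 * spare, are kept in reserve and never mapped.
 */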
static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
{
	unsigned int block_num, log_num, phymax;
	loff_t block_adr;
	u8 *oob;
	int i, ret;

	oob = kzalloc(mtd->oobsize, GFP_KERNEL);
	if (!oob)
		return -ENOMEM;

	phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);

	/* the FTL keeps 5% of the blocks plus one spare in reserve */
	ftl->logmax = ((phymax * 95) / 100) - 1;

	ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
				     GFP_KERNEL);
	if (!ftl->log2phy) {
		ret = -ENOMEM;
		goto exit;
	}

	/* start with every logical block unmapped */
	for (i = 0; i < ftl->logmax; i++)
		ftl->log2phy[i] = UINT_MAX;

	/* walk the physical blocks and record the first match for each entry */
	for (block_num = 0; block_num < phymax; block_num++) {
		block_adr = (loff_t)block_num * mtd->erasesize;

		if (mtd_block_isbad(mtd, block_adr))
			continue;

		if (sharpsl_nand_read_oob(mtd, block_adr, oob))
			continue;

		/* errors and reserved blocks fall outside the valid range */
		log_num = sharpsl_nand_get_logical_num(oob);
		if (log_num > 0 && log_num < ftl->logmax) {
			if (ftl->log2phy[log_num] == UINT_MAX)
				ftl->log2phy[log_num] = block_num;
		}
	}

	pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
		phymax, ftl->logmax, phymax - ftl->logmax);

	ret = 0;
exit:
	kfree(oob);
	return ret;
}

static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
{
	kfree(ftl->log2phy);
}
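
/*
 * Read @len bytes from logical address @from. The FTL is only used to fetch
 * the partition tables, so a read must not cross an erase block boundary.
 */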
static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
				   loff_t from,
				   size_t len,
				   void *buf,
				   struct sharpsl_ftl *ftl)
{
	unsigned int log_num, final_log_num;
	unsigned int block_num;
	loff_t block_adr;
	loff_t block_ofs;
	size_t retlen;
	int err;

	log_num = mtd_div_by_eb((u32)from, mtd);
	final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);

	if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
		return -EINVAL;

	block_num = ftl->log2phy[log_num];
	block_adr = (loff_t)block_num * mtd->erasesize;
	block_ofs = mtd_mod_by_eb((u32)from, mtd);

	err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);

	if (mtd_is_bitflip(err))
		err = 0;

	if (!err && retlen != len)
		err = -EIO;

	if (err)
		pr_err("sharpslpart: error, read failed at %#llx\n",
		       block_adr + block_ofs);

	return err;
}
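
/*
 * On-flash partition table: three sharpsl_nand_partinfo records, stored at
 * logical addresses SHARPSL_PARTINFO1_LADDR and SHARPSL_PARTINFO2_LADDR.
 * start/end are little-endian byte offsets of each partition; magic is the
 * big-endian ASCII tag "BOOT", "FSRO" or "FSRW".
 */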
struct sharpsl_nand_partinfo {
	__le32 start;
	__le32 end;
	__be32 magic;
	u32 reserved;
};

static int sharpsl_nand_read_partinfo(struct mtd_info *master,
				      loff_t from,
				      size_t len,
				      struct sharpsl_nand_partinfo *buf,
				      struct sharpsl_ftl *ftl)
{
	int ret;

	ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
	if (ret)
		return ret;

	/* check that all three magic values are in place */
	if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
	    be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
	    be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
		pr_err("sharpslpart: magic values mismatch\n");
		return -EINVAL;
	}

	/* override the stored end of the last partition with the device size */
	buf[2].end = cpu_to_le32(master->size);

	/* the partitions must be in order and must not overlap */
	if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
	    le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
	    le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
	    le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
	    le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
		pr_err("sharpslpart: partition sizes mismatch\n");
		return -EINVAL;
	}

	return 0;
}
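
/*
 * Parser entry point: check the OOB layout, build the FTL table, then read
 * the partition table at SHARPSL_PARTINFO1_LADDR, falling back to the second
 * copy at SHARPSL_PARTINFO2_LADDR. On success, report the three partitions
 * "smf", "root" and "home".
 */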
static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
					const struct mtd_partition **pparts,
					struct mtd_part_parser_data *data)
{
	struct sharpsl_ftl ftl;
	struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
	struct mtd_partition *sharpsl_nand_parts;
	int err;

	/* check that OOB bytes 8 to 15 used by the FTL are actually free */
	err = sharpsl_nand_check_ooblayout(master);
	if (err)
		return err;

	/* build the logical-to-physical mapping */
	err = sharpsl_nand_init_ftl(master, &ftl);
	if (err)
		return err;

	/* read and validate the first partition table */
	pr_info("sharpslpart: try reading first partition table\n");
	err = sharpsl_nand_read_partinfo(master,
					 SHARPSL_PARTINFO1_LADDR,
					 sizeof(buf), buf, &ftl);
	if (err) {
		/* fall back to the second partition table */
		pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
		err = sharpsl_nand_read_partinfo(master,
						 SHARPSL_PARTINFO2_LADDR,
						 sizeof(buf), buf, &ftl);
	}

	/* the FTL table is no longer needed */
	sharpsl_nand_cleanup_ftl(&ftl);

	if (err) {
		pr_err("sharpslpart: both partition tables are invalid\n");
		return err;
	}

	sharpsl_nand_parts = kcalloc(SHARPSL_NAND_PARTS,
				     sizeof(*sharpsl_nand_parts),
				     GFP_KERNEL);
	if (!sharpsl_nand_parts)
		return -ENOMEM;

	/* the three standard Sharp SL partitions */
	sharpsl_nand_parts[0].name = "smf";
	sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
	sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
				     le32_to_cpu(buf[0].start);

	sharpsl_nand_parts[1].name = "root";
	sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
	sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
				     le32_to_cpu(buf[1].start);

	sharpsl_nand_parts[2].name = "home";
	sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
	sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
				     le32_to_cpu(buf[2].start);

	*pparts = sharpsl_nand_parts;
	return SHARPSL_NAND_PARTS;
}

static struct mtd_part_parser sharpsl_mtd_parser = {
	.parse_fn = sharpsl_parse_mtd_partitions,
	.name = "sharpslpart",
};
module_mtd_part_parser(sharpsl_mtd_parser);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>");
MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");