1 | // SPDX-License-Identifier: GPL-2.0+ |
---|---|
2 | /* |
3 | * Freescale GPMI NAND Flash Driver |
4 | * |
5 | * Copyright (C) 2010-2015 Freescale Semiconductor, Inc. |
6 | * Copyright (C) 2008 Embedded Alley Solutions, Inc. |
7 | */ |
8 | #include <linux/clk.h> |
9 | #include <linux/delay.h> |
10 | #include <linux/slab.h> |
11 | #include <linux/sched/task_stack.h> |
12 | #include <linux/interrupt.h> |
13 | #include <linux/module.h> |
14 | #include <linux/mtd/partitions.h> |
15 | #include <linux/of.h> |
16 | #include <linux/platform_device.h> |
17 | #include <linux/pm_runtime.h> |
18 | #include <linux/pinctrl/consumer.h> |
19 | #include <linux/dma/mxs-dma.h> |
20 | #include <linux/string_choices.h> |
21 | #include "gpmi-nand.h" |
22 | #include "gpmi-regs.h" |
23 | #include "bch-regs.h" |
24 | |
25 | /* Resource names for the GPMI NAND driver. */ |
26 | #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand" |
27 | #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" |
28 | #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" |
29 | |
30 | /* Converts time to clock cycles */ |
31 | #define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period) |
32 | |
33 | #define MXS_SET_ADDR 0x4 |
34 | #define MXS_CLR_ADDR 0x8 |
35 | /* |
36 | * Clear the bit and poll it cleared. This is usually called with |
37 | * a reset address and mask being either SFTRST(bit 31) or CLKGATE |
38 | * (bit 30). |
39 | */ |
40 | static int clear_poll_bit(void __iomem *addr, u32 mask) |
41 | { |
42 | int timeout = 0x400; |
43 | |
44 | /* clear the bit */ |
45 | writel(val: mask, addr: addr + MXS_CLR_ADDR); |
46 | |
47 | /* |
48 | * SFTRST needs 3 GPMI clocks to settle, the reference manual |
49 | * recommends to wait 1us. |
50 | */ |
51 | udelay(usec: 1); |
52 | |
53 | /* poll the bit becoming clear */ |
54 | while ((readl(addr) & mask) && --timeout) |
55 | /* nothing */; |
56 | |
57 | return !timeout; |
58 | } |
59 | |
60 | #define MODULE_CLKGATE (1 << 30) |
61 | #define MODULE_SFTRST (1 << 31) |
62 | /* |
63 | * The current mxs_reset_block() will do two things: |
64 | * [1] enable the module. |
65 | * [2] reset the module. |
66 | * |
67 | * In most of the cases, it's ok. |
68 | * But in MX23, there is a hardware bug in the BCH block (see erratum #2847). |
69 | * If you try to soft reset the BCH block, it becomes unusable until |
70 | * the next hard reset. This case occurs in the NAND boot mode. When the board |
71 | * boots by NAND, the ROM of the chip will initialize the BCH blocks itself. |
72 | * So If the driver tries to reset the BCH again, the BCH will not work anymore. |
73 | * You will see a DMA timeout in this case. The bug has been fixed |
74 | * in the following chips, such as MX28. |
75 | * |
76 | * To avoid this bug, just add a new parameter `just_enable` for |
77 | * the mxs_reset_block(), and rewrite it here. |
78 | */ |
79 | static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) |
80 | { |
81 | int ret; |
82 | int timeout = 0x400; |
83 | |
84 | /* clear and poll SFTRST */ |
85 | ret = clear_poll_bit(addr: reset_addr, MODULE_SFTRST); |
86 | if (unlikely(ret)) |
87 | goto error; |
88 | |
89 | /* clear CLKGATE */ |
90 | writel(MODULE_CLKGATE, addr: reset_addr + MXS_CLR_ADDR); |
91 | |
92 | if (!just_enable) { |
93 | /* set SFTRST to reset the block */ |
94 | writel(MODULE_SFTRST, addr: reset_addr + MXS_SET_ADDR); |
95 | udelay(usec: 1); |
96 | |
97 | /* poll CLKGATE becoming set */ |
98 | while ((!(readl(addr: reset_addr) & MODULE_CLKGATE)) && --timeout) |
99 | /* nothing */; |
100 | if (unlikely(!timeout)) |
101 | goto error; |
102 | } |
103 | |
104 | /* clear and poll SFTRST */ |
105 | ret = clear_poll_bit(addr: reset_addr, MODULE_SFTRST); |
106 | if (unlikely(ret)) |
107 | goto error; |
108 | |
109 | /* clear and poll CLKGATE */ |
110 | ret = clear_poll_bit(addr: reset_addr, MODULE_CLKGATE); |
111 | if (unlikely(ret)) |
112 | goto error; |
113 | |
114 | return 0; |
115 | |
116 | error: |
117 | pr_err("%s(%p): module reset timeout\n", __func__, reset_addr); |
118 | return -ETIMEDOUT; |
119 | } |
120 | |
121 | static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v) |
122 | { |
123 | struct clk *clk; |
124 | int ret; |
125 | int i; |
126 | |
127 | for (i = 0; i < GPMI_CLK_MAX; i++) { |
128 | clk = this->resources.clock[i]; |
129 | if (!clk) |
130 | break; |
131 | |
132 | if (v) { |
133 | ret = clk_prepare_enable(clk); |
134 | if (ret) |
135 | goto err_clk; |
136 | } else { |
137 | clk_disable_unprepare(clk); |
138 | } |
139 | } |
140 | return 0; |
141 | |
142 | err_clk: |
143 | for (; i > 0; i--) |
144 | clk_disable_unprepare(clk: this->resources.clock[i - 1]); |
145 | return ret; |
146 | } |
147 | |
148 | static int gpmi_init(struct gpmi_nand_data *this) |
149 | { |
150 | struct resources *r = &this->resources; |
151 | int ret; |
152 | |
153 | ret = pm_runtime_resume_and_get(dev: this->dev); |
154 | if (ret < 0) |
155 | return ret; |
156 | |
157 | ret = gpmi_reset_block(reset_addr: r->gpmi_regs, just_enable: false); |
158 | if (ret) |
159 | goto err_out; |
160 | |
161 | /* |
162 | * Reset BCH here, too. We got failures otherwise :( |
163 | * See later BCH reset for explanation of MX23 and MX28 handling |
164 | */ |
165 | ret = gpmi_reset_block(reset_addr: r->bch_regs, GPMI_IS_MXS(this)); |
166 | if (ret) |
167 | goto err_out; |
168 | |
169 | /* Choose NAND mode. */ |
170 | writel(BM_GPMI_CTRL1_GPMI_MODE, addr: r->gpmi_regs + HW_GPMI_CTRL1_CLR); |
171 | |
172 | /* Set the IRQ polarity. */ |
173 | writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY, |
174 | addr: r->gpmi_regs + HW_GPMI_CTRL1_SET); |
175 | |
176 | /* Disable Write-Protection. */ |
177 | writel(BM_GPMI_CTRL1_DEV_RESET, addr: r->gpmi_regs + HW_GPMI_CTRL1_SET); |
178 | |
179 | /* Select BCH ECC. */ |
180 | writel(BM_GPMI_CTRL1_BCH_MODE, addr: r->gpmi_regs + HW_GPMI_CTRL1_SET); |
181 | |
182 | /* |
183 | * Decouple the chip select from dma channel. We use dma0 for all |
184 | * the chips, force all NAND RDY_BUSY inputs to be sourced from |
185 | * RDY_BUSY0. |
186 | */ |
187 | writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY, |
188 | addr: r->gpmi_regs + HW_GPMI_CTRL1_SET); |
189 | |
190 | err_out: |
191 | pm_runtime_mark_last_busy(dev: this->dev); |
192 | pm_runtime_put_autosuspend(dev: this->dev); |
193 | return ret; |
194 | } |
195 | |
196 | /* This function is very useful. It is called only when the bug occur. */ |
197 | static void gpmi_dump_info(struct gpmi_nand_data *this) |
198 | { |
199 | struct resources *r = &this->resources; |
200 | struct bch_geometry *geo = &this->bch_geometry; |
201 | u32 reg; |
202 | int i; |
203 | |
204 | dev_err(this->dev, "Show GPMI registers :\n"); |
205 | for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) { |
206 | reg = readl(addr: r->gpmi_regs + i * 0x10); |
207 | dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); |
208 | } |
209 | |
210 | /* start to print out the BCH info */ |
211 | dev_err(this->dev, "Show BCH registers :\n"); |
212 | for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) { |
213 | reg = readl(addr: r->bch_regs + i * 0x10); |
214 | dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); |
215 | } |
216 | dev_err(this->dev, "BCH Geometry :\n" |
217 | "GF length : %u\n" |
218 | "ECC Strength : %u\n" |
219 | "Page Size in Bytes : %u\n" |
220 | "Metadata Size in Bytes : %u\n" |
221 | "ECC0 Chunk Size in Bytes: %u\n" |
222 | "ECCn Chunk Size in Bytes: %u\n" |
223 | "ECC Chunk Count : %u\n" |
224 | "Payload Size in Bytes : %u\n" |
225 | "Auxiliary Size in Bytes: %u\n" |
226 | "Auxiliary Status Offset: %u\n" |
227 | "Block Mark Byte Offset : %u\n" |
228 | "Block Mark Bit Offset : %u\n", |
229 | geo->gf_len, |
230 | geo->ecc_strength, |
231 | geo->page_size, |
232 | geo->metadata_size, |
233 | geo->ecc0_chunk_size, |
234 | geo->eccn_chunk_size, |
235 | geo->ecc_chunk_count, |
236 | geo->payload_size, |
237 | geo->auxiliary_size, |
238 | geo->auxiliary_status_offset, |
239 | geo->block_mark_byte_offset, |
240 | geo->block_mark_bit_offset); |
241 | } |
242 | |
243 | static bool gpmi_check_ecc(struct gpmi_nand_data *this) |
244 | { |
245 | struct nand_chip *chip = &this->nand; |
246 | struct bch_geometry *geo = &this->bch_geometry; |
247 | struct nand_device *nand = &chip->base; |
248 | struct nand_ecc_props *conf = &nand->ecc.ctx.conf; |
249 | |
250 | conf->step_size = geo->eccn_chunk_size; |
251 | conf->strength = geo->ecc_strength; |
252 | |
253 | /* Do the sanity check. */ |
254 | if (GPMI_IS_MXS(this)) { |
255 | /* The mx23/mx28 only support the GF13. */ |
256 | if (geo->gf_len == 14) |
257 | return false; |
258 | } |
259 | |
260 | if (geo->ecc_strength > this->devdata->bch_max_ecc_strength) |
261 | return false; |
262 | |
263 | if (!nand_ecc_is_strong_enough(nand)) |
264 | return false; |
265 | |
266 | return true; |
267 | } |
268 | |
269 | /* check if bbm locates in data chunk rather than ecc chunk */ |
270 | static bool bbm_in_data_chunk(struct gpmi_nand_data *this, |
271 | unsigned int *chunk_num) |
272 | { |
273 | struct bch_geometry *geo = &this->bch_geometry; |
274 | struct nand_chip *chip = &this->nand; |
275 | struct mtd_info *mtd = nand_to_mtd(chip); |
276 | unsigned int i, j; |
277 | |
278 | if (geo->ecc0_chunk_size != geo->eccn_chunk_size) { |
279 | dev_err(this->dev, |
280 | "The size of ecc0_chunk must equal to eccn_chunk\n"); |
281 | return false; |
282 | } |
283 | |
284 | i = (mtd->writesize * 8 - geo->metadata_size * 8) / |
285 | (geo->gf_len * geo->ecc_strength + |
286 | geo->eccn_chunk_size * 8); |
287 | |
288 | j = (mtd->writesize * 8 - geo->metadata_size * 8) - |
289 | (geo->gf_len * geo->ecc_strength + |
290 | geo->eccn_chunk_size * 8) * i; |
291 | |
292 | if (j < geo->eccn_chunk_size * 8) { |
293 | *chunk_num = i+1; |
294 | dev_dbg(this->dev, "Set ecc to %d and bbm in chunk %d\n", |
295 | geo->ecc_strength, *chunk_num); |
296 | return true; |
297 | } |
298 | |
299 | return false; |
300 | } |
301 | |
302 | /* |
303 | * If we can get the ECC information from the nand chip, we do not |
304 | * need to calculate them ourselves. |
305 | * |
306 | * We may have available oob space in this case. |
307 | */ |
308 | static int set_geometry_by_ecc_info(struct gpmi_nand_data *this, |
309 | unsigned int ecc_strength, |
310 | unsigned int ecc_step) |
311 | { |
312 | struct bch_geometry *geo = &this->bch_geometry; |
313 | struct nand_chip *chip = &this->nand; |
314 | struct mtd_info *mtd = nand_to_mtd(chip); |
315 | unsigned int block_mark_bit_offset; |
316 | |
317 | switch (ecc_step) { |
318 | case SZ_512: |
319 | geo->gf_len = 13; |
320 | break; |
321 | case SZ_1K: |
322 | geo->gf_len = 14; |
323 | break; |
324 | default: |
325 | dev_err(this->dev, |
326 | "unsupported nand chip. ecc bits : %d, ecc size : %d\n", |
327 | nanddev_get_ecc_requirements(&chip->base)->strength, |
328 | nanddev_get_ecc_requirements(&chip->base)->step_size); |
329 | return -EINVAL; |
330 | } |
331 | geo->ecc0_chunk_size = ecc_step; |
332 | geo->eccn_chunk_size = ecc_step; |
333 | geo->ecc_strength = round_up(ecc_strength, 2); |
334 | if (!gpmi_check_ecc(this)) |
335 | return -EINVAL; |
336 | |
337 | /* Keep the C >= O */ |
338 | if (geo->eccn_chunk_size < mtd->oobsize) { |
339 | dev_err(this->dev, |
340 | "unsupported nand chip. ecc size: %d, oob size : %d\n", |
341 | ecc_step, mtd->oobsize); |
342 | return -EINVAL; |
343 | } |
344 | |
345 | /* The default value, see comment in the legacy_set_geometry(). */ |
346 | geo->metadata_size = 10; |
347 | |
348 | geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size; |
349 | |
350 | /* |
351 | * Now, the NAND chip with 2K page(data chunk is 512byte) shows below: |
352 | * |
353 | * | P | |
354 | * |<----------------------------------------------------->| |
355 | * | | |
356 | * | (Block Mark) | |
357 | * | P' | | | | |
358 | * |<-------------------------------------------->| D | | O' | |
359 | * | |<---->| |<--->| |
360 | * V V V V V |
361 | * +---+----------+-+----------+-+----------+-+----------+-+-----+ |
362 | * | M | data |E| data |E| data |E| data |E| | |
363 | * +---+----------+-+----------+-+----------+-+----------+-+-----+ |
364 | * ^ ^ |
365 | * | O | |
366 | * |<------------>| |
367 | * | | |
368 | * |
369 | * P : the page size for BCH module. |
370 | * E : The ECC strength. |
371 | * G : the length of Galois Field. |
372 | * N : The chunk count of per page. |
373 | * M : the metasize of per page. |
374 | * C : the ecc chunk size, aka the "data" above. |
375 | * P': the nand chip's page size. |
376 | * O : the nand chip's oob size. |
377 | * O': the free oob. |
378 | * |
379 | * The formula for P is : |
380 | * |
381 | * E * G * N |
382 | * P = ------------ + P' + M |
383 | * 8 |
384 | * |
385 | * The position of block mark moves forward in the ECC-based view |
386 | * of page, and the delta is: |
387 | * |
388 | * E * G * (N - 1) |
389 | * D = (---------------- + M) |
390 | * 8 |
391 | * |
392 | * Please see the comment in legacy_set_geometry(). |
393 | * With the condition C >= O , we still can get same result. |
394 | * So the bit position of the physical block mark within the ECC-based |
395 | * view of the page is : |
396 | * (P' - D) * 8 |
397 | */ |
398 | geo->page_size = mtd->writesize + geo->metadata_size + |
399 | (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8; |
400 | |
401 | geo->payload_size = mtd->writesize; |
402 | |
403 | geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4); |
404 | geo->auxiliary_size = ALIGN(geo->metadata_size, 4) |
405 | + ALIGN(geo->ecc_chunk_count, 4); |
406 | |
407 | if (!this->swap_block_mark) |
408 | return 0; |
409 | |
410 | /* For bit swap. */ |
411 | block_mark_bit_offset = mtd->writesize * 8 - |
412 | (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1) |
413 | + geo->metadata_size * 8); |
414 | |
415 | geo->block_mark_byte_offset = block_mark_bit_offset / 8; |
416 | geo->block_mark_bit_offset = block_mark_bit_offset % 8; |
417 | return 0; |
418 | } |
419 | |
420 | /* |
421 | * Calculate the ECC strength by hand: |
422 | * E : The ECC strength. |
423 | * G : the length of Galois Field. |
424 | * N : The chunk count of per page. |
425 | * O : the oobsize of the NAND chip. |
426 | * M : the metasize of per page. |
427 | * |
428 | * The formula is : |
429 | * E * G * N |
430 | * ------------ <= (O - M) |
431 | * 8 |
432 | * |
433 | * So, we get E by: |
434 | * (O - M) * 8 |
435 | * E <= ------------- |
436 | * G * N |
437 | */ |
438 | static inline int get_ecc_strength(struct gpmi_nand_data *this) |
439 | { |
440 | struct bch_geometry *geo = &this->bch_geometry; |
441 | struct mtd_info *mtd = nand_to_mtd(chip: &this->nand); |
442 | int ecc_strength; |
443 | |
444 | ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8) |
445 | / (geo->gf_len * geo->ecc_chunk_count); |
446 | |
447 | /* We need the minor even number. */ |
448 | return round_down(ecc_strength, 2); |
449 | } |
450 | |
451 | static int set_geometry_for_large_oob(struct gpmi_nand_data *this) |
452 | { |
453 | struct bch_geometry *geo = &this->bch_geometry; |
454 | struct nand_chip *chip = &this->nand; |
455 | struct mtd_info *mtd = nand_to_mtd(chip); |
456 | const struct nand_ecc_props *requirements = |
457 | nanddev_get_ecc_requirements(nand: &chip->base); |
458 | unsigned int block_mark_bit_offset; |
459 | unsigned int max_ecc; |
460 | unsigned int bbm_chunk; |
461 | unsigned int i; |
462 | |
463 | /* sanity check for the minimum ecc nand required */ |
464 | if (!(requirements->strength > 0 && |
465 | requirements->step_size > 0)) |
466 | return -EINVAL; |
467 | geo->ecc_strength = requirements->strength; |
468 | |
469 | /* check if platform can support this nand */ |
470 | if (!gpmi_check_ecc(this)) { |
471 | dev_err(this->dev, |
472 | "unsupported NAND chip, minimum ecc required %d\n", |
473 | geo->ecc_strength); |
474 | return -EINVAL; |
475 | } |
476 | |
477 | /* calculate the maximum ecc platform can support*/ |
478 | geo->metadata_size = 10; |
479 | geo->gf_len = 14; |
480 | geo->ecc0_chunk_size = 1024; |
481 | geo->eccn_chunk_size = 1024; |
482 | geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size; |
483 | max_ecc = min(get_ecc_strength(this), |
484 | this->devdata->bch_max_ecc_strength); |
485 | |
486 | /* |
487 | * search a supported ecc strength that makes bbm |
488 | * located in data chunk |
489 | */ |
490 | geo->ecc_strength = max_ecc; |
491 | while (!(geo->ecc_strength < requirements->strength)) { |
492 | if (bbm_in_data_chunk(this, chunk_num: &bbm_chunk)) |
493 | goto geo_setting; |
494 | geo->ecc_strength -= 2; |
495 | } |
496 | |
497 | /* if none of them works, keep using the minimum ecc */ |
498 | /* nand required but changing ecc page layout */ |
499 | geo->ecc_strength = requirements->strength; |
500 | /* add extra ecc for meta data */ |
501 | geo->ecc0_chunk_size = 0; |
502 | geo->ecc_chunk_count = (mtd->writesize / geo->eccn_chunk_size) + 1; |
503 | geo->ecc_for_meta = 1; |
504 | /* check if oob can afford this extra ecc chunk */ |
505 | if (mtd->oobsize * 8 < geo->metadata_size * 8 + |
506 | geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) { |
507 | dev_err(this->dev, "unsupported NAND chip with new layout\n"); |
508 | return -EINVAL; |
509 | } |
510 | |
511 | /* calculate in which chunk bbm located */ |
512 | bbm_chunk = (mtd->writesize * 8 - geo->metadata_size * 8 - |
513 | geo->gf_len * geo->ecc_strength) / |
514 | (geo->gf_len * geo->ecc_strength + |
515 | geo->eccn_chunk_size * 8) + 1; |
516 | |
517 | geo_setting: |
518 | |
519 | geo->page_size = mtd->writesize + geo->metadata_size + |
520 | (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8; |
521 | geo->payload_size = mtd->writesize; |
522 | |
523 | /* |
524 | * The auxiliary buffer contains the metadata and the ECC status. The |
525 | * metadata is padded to the nearest 32-bit boundary. The ECC status |
526 | * contains one byte for every ECC chunk, and is also padded to the |
527 | * nearest 32-bit boundary. |
528 | */ |
529 | geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4); |
530 | geo->auxiliary_size = ALIGN(geo->metadata_size, 4) |
531 | + ALIGN(geo->ecc_chunk_count, 4); |
532 | |
533 | if (!this->swap_block_mark) |
534 | return 0; |
535 | |
536 | /* calculate the number of ecc chunk behind the bbm */ |
537 | i = (mtd->writesize / geo->eccn_chunk_size) - bbm_chunk + 1; |
538 | |
539 | block_mark_bit_offset = mtd->writesize * 8 - |
540 | (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i) |
541 | + geo->metadata_size * 8); |
542 | |
543 | geo->block_mark_byte_offset = block_mark_bit_offset / 8; |
544 | geo->block_mark_bit_offset = block_mark_bit_offset % 8; |
545 | |
546 | dev_dbg(this->dev, "BCH Geometry :\n" |
547 | "GF length : %u\n" |
548 | "ECC Strength : %u\n" |
549 | "Page Size in Bytes : %u\n" |
550 | "Metadata Size in Bytes : %u\n" |
551 | "ECC0 Chunk Size in Bytes: %u\n" |
552 | "ECCn Chunk Size in Bytes: %u\n" |
553 | "ECC Chunk Count : %u\n" |
554 | "Payload Size in Bytes : %u\n" |
555 | "Auxiliary Size in Bytes: %u\n" |
556 | "Auxiliary Status Offset: %u\n" |
557 | "Block Mark Byte Offset : %u\n" |
558 | "Block Mark Bit Offset : %u\n" |
559 | "Block Mark in chunk : %u\n" |
560 | "Ecc for Meta data : %u\n", |
561 | geo->gf_len, |
562 | geo->ecc_strength, |
563 | geo->page_size, |
564 | geo->metadata_size, |
565 | geo->ecc0_chunk_size, |
566 | geo->eccn_chunk_size, |
567 | geo->ecc_chunk_count, |
568 | geo->payload_size, |
569 | geo->auxiliary_size, |
570 | geo->auxiliary_status_offset, |
571 | geo->block_mark_byte_offset, |
572 | geo->block_mark_bit_offset, |
573 | bbm_chunk, |
574 | geo->ecc_for_meta); |
575 | |
576 | return 0; |
577 | } |
578 | |
579 | static int legacy_set_geometry(struct gpmi_nand_data *this) |
580 | { |
581 | struct bch_geometry *geo = &this->bch_geometry; |
582 | struct mtd_info *mtd = nand_to_mtd(chip: &this->nand); |
583 | unsigned int metadata_size; |
584 | unsigned int status_size; |
585 | unsigned int block_mark_bit_offset; |
586 | |
587 | /* |
588 | * The size of the metadata can be changed, though we set it to 10 |
589 | * bytes now. But it can't be too large, because we have to save |
590 | * enough space for BCH. |
591 | */ |
592 | geo->metadata_size = 10; |
593 | |
594 | /* The default for the length of Galois Field. */ |
595 | geo->gf_len = 13; |
596 | |
597 | /* The default for chunk size. */ |
598 | geo->ecc0_chunk_size = 512; |
599 | geo->eccn_chunk_size = 512; |
600 | while (geo->eccn_chunk_size < mtd->oobsize) { |
601 | geo->ecc0_chunk_size *= 2; /* keep C >= O */ |
602 | geo->eccn_chunk_size *= 2; /* keep C >= O */ |
603 | geo->gf_len = 14; |
604 | } |
605 | |
606 | geo->ecc_chunk_count = mtd->writesize / geo->eccn_chunk_size; |
607 | |
608 | /* We use the same ECC strength for all chunks. */ |
609 | geo->ecc_strength = get_ecc_strength(this); |
610 | if (!gpmi_check_ecc(this)) { |
611 | dev_err(this->dev, |
612 | "ecc strength: %d cannot be supported by the controller (%d)\n" |
613 | "try to use minimum ecc strength that NAND chip required\n", |
614 | geo->ecc_strength, |
615 | this->devdata->bch_max_ecc_strength); |
616 | return -EINVAL; |
617 | } |
618 | |
619 | geo->page_size = mtd->writesize + geo->metadata_size + |
620 | (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8; |
621 | geo->payload_size = mtd->writesize; |
622 | |
623 | /* |
624 | * The auxiliary buffer contains the metadata and the ECC status. The |
625 | * metadata is padded to the nearest 32-bit boundary. The ECC status |
626 | * contains one byte for every ECC chunk, and is also padded to the |
627 | * nearest 32-bit boundary. |
628 | */ |
629 | metadata_size = ALIGN(geo->metadata_size, 4); |
630 | status_size = ALIGN(geo->ecc_chunk_count, 4); |
631 | |
632 | geo->auxiliary_size = metadata_size + status_size; |
633 | geo->auxiliary_status_offset = metadata_size; |
634 | |
635 | if (!this->swap_block_mark) |
636 | return 0; |
637 | |
638 | /* |
639 | * We need to compute the byte and bit offsets of |
640 | * the physical block mark within the ECC-based view of the page. |
641 | * |
642 | * NAND chip with 2K page shows below: |
643 | * (Block Mark) |
644 | * | | |
645 | * | D | |
646 | * |<---->| |
647 | * V V |
648 | * +---+----------+-+----------+-+----------+-+----------+-+ |
649 | * | M | data |E| data |E| data |E| data |E| |
650 | * +---+----------+-+----------+-+----------+-+----------+-+ |
651 | * |
652 | * The position of block mark moves forward in the ECC-based view |
653 | * of page, and the delta is: |
654 | * |
655 | * E * G * (N - 1) |
656 | * D = (---------------- + M) |
657 | * 8 |
658 | * |
659 | * With the formula to compute the ECC strength, and the condition |
660 | * : C >= O (C is the ecc chunk size) |
661 | * |
662 | * It's easy to deduce to the following result: |
663 | * |
664 | * E * G (O - M) C - M C - M |
665 | * ----------- <= ------- <= -------- < --------- |
666 | * 8 N N (N - 1) |
667 | * |
668 | * So, we get: |
669 | * |
670 | * E * G * (N - 1) |
671 | * D = (---------------- + M) < C |
672 | * 8 |
673 | * |
674 | * The above inequality means the position of block mark |
675 | * within the ECC-based view of the page is still in the data chunk, |
676 | * and it's NOT in the ECC bits of the chunk. |
677 | * |
678 | * Use the following to compute the bit position of the |
679 | * physical block mark within the ECC-based view of the page: |
680 | * (page_size - D) * 8 |
681 | * |
682 | * --Huang Shijie |
683 | */ |
684 | block_mark_bit_offset = mtd->writesize * 8 - |
685 | (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1) |
686 | + geo->metadata_size * 8); |
687 | |
688 | geo->block_mark_byte_offset = block_mark_bit_offset / 8; |
689 | geo->block_mark_bit_offset = block_mark_bit_offset % 8; |
690 | return 0; |
691 | } |
692 | |
693 | static int common_nfc_set_geometry(struct gpmi_nand_data *this) |
694 | { |
695 | struct nand_chip *chip = &this->nand; |
696 | struct mtd_info *mtd = nand_to_mtd(chip: &this->nand); |
697 | const struct nand_ecc_props *requirements = |
698 | nanddev_get_ecc_requirements(nand: &chip->base); |
699 | bool use_minimun_ecc; |
700 | int err; |
701 | |
702 | use_minimun_ecc = of_property_read_bool(np: this->dev->of_node, |
703 | propname: "fsl,use-minimum-ecc"); |
704 | |
705 | /* use legacy bch geometry settings by default*/ |
706 | if ((!use_minimun_ecc && mtd->oobsize < 1024) || |
707 | !(requirements->strength > 0 && requirements->step_size > 0)) { |
708 | dev_dbg(this->dev, "use legacy bch geometry\n"); |
709 | err = legacy_set_geometry(this); |
710 | if (!err) |
711 | return 0; |
712 | } |
713 | |
714 | /* for large oob nand */ |
715 | if (mtd->oobsize > 1024) { |
716 | dev_dbg(this->dev, "use large oob bch geometry\n"); |
717 | err = set_geometry_for_large_oob(this); |
718 | if (!err) |
719 | return 0; |
720 | } |
721 | |
722 | /* otherwise use the minimum ecc nand chip required */ |
723 | dev_dbg(this->dev, "use minimum ecc bch geometry\n"); |
724 | err = set_geometry_by_ecc_info(this, ecc_strength: requirements->strength, |
725 | ecc_step: requirements->step_size); |
726 | if (err) |
727 | dev_err(this->dev, "none of the bch geometry setting works\n"); |
728 | |
729 | return err; |
730 | } |
731 | |
732 | /* Configures the geometry for BCH. */ |
733 | static int bch_set_geometry(struct gpmi_nand_data *this) |
734 | { |
735 | struct resources *r = &this->resources; |
736 | int ret; |
737 | |
738 | ret = common_nfc_set_geometry(this); |
739 | if (ret) |
740 | return ret; |
741 | |
742 | ret = pm_runtime_resume_and_get(dev: this->dev); |
743 | if (ret < 0) { |
744 | return ret; |
745 | } |
746 | |
747 | /* |
748 | * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this |
749 | * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. |
750 | * and MX28. |
751 | */ |
752 | ret = gpmi_reset_block(reset_addr: r->bch_regs, GPMI_IS_MXS(this)); |
753 | if (ret) |
754 | goto err_out; |
755 | |
756 | /* Set *all* chip selects to use layout 0. */ |
757 | writel(val: 0, addr: r->bch_regs + HW_BCH_LAYOUTSELECT); |
758 | |
759 | ret = 0; |
760 | err_out: |
761 | pm_runtime_mark_last_busy(dev: this->dev); |
762 | pm_runtime_put_autosuspend(dev: this->dev); |
763 | |
764 | return ret; |
765 | } |
766 | |
767 | /* |
768 | * <1> Firstly, we should know what's the GPMI-clock means. |
769 | * The GPMI-clock is the internal clock in the gpmi nand controller. |
770 | * If you set 100MHz to gpmi nand controller, the GPMI-clock's period |
771 | * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period. |
772 | * |
773 | * <2> Secondly, we should know what's the frequency on the nand chip pins. |
774 | * The frequency on the nand chip pins is derived from the GPMI-clock. |
775 | * We can get it from the following equation: |
776 | * |
777 | * F = G / (DS + DH) |
778 | * |
779 | * F : the frequency on the nand chip pins. |
780 | * G : the GPMI clock, such as 100MHz. |
781 | * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP |
782 | * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD |
783 | * |
784 | * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz, |
785 | * the nand EDO(extended Data Out) timing could be applied. |
786 | * The GPMI implements a feedback read strobe to sample the read data. |
787 | * The feedback read strobe can be delayed to support the nand EDO timing |
788 | * where the read strobe may deasserts before the read data is valid, and |
789 | * read data is valid for some time after read strobe. |
790 | * |
791 | * The following figure illustrates some aspects of a NAND Flash read: |
792 | * |
793 | * |<---tREA---->| |
794 | * | | |
795 | * | | | |
796 | * |<--tRP-->| | |
797 | * | | | |
798 | * __ ___|__________________________________ |
799 | * RDN \________/ | |
800 | * | |
801 | * /---------\ |
802 | * Read Data --------------< >--------- |
803 | * \---------/ |
804 | * | | |
805 | * |<-D->| |
806 | * FeedbackRDN ________ ____________ |
807 | * \___________/ |
808 | * |
809 | * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY. |
810 | * |
811 | * |
812 | * <4> Now, we begin to describe how to compute the right RDN_DELAY. |
813 | * |
814 | * 4.1) From the aspect of the nand chip pins: |
815 | * Delay = (tREA + C - tRP) {1} |
816 | * |
817 | * tREA : the maximum read access time. |
818 | * C : a constant to adjust the delay. default is 4000ps. |
819 | * tRP : the read pulse width, which is exactly: |
820 | * tRP = (GPMI-clock-period) * DATA_SETUP |
821 | * |
822 | * 4.2) From the aspect of the GPMI nand controller: |
823 | * Delay = RDN_DELAY * 0.125 * RP {2} |
824 | * |
825 | * RP : the DLL reference period. |
826 | * if (GPMI-clock-period > DLL_THRETHOLD) |
827 | * RP = GPMI-clock-period / 2; |
828 | * else |
829 | * RP = GPMI-clock-period; |
830 | * |
831 | * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period |
832 | * is greater DLL_THRETHOLD. In other SOCs, the DLL_THRETHOLD |
833 | * is 16000ps, but in mx6q, we use 12000ps. |
834 | * |
835 | * 4.3) since {1} equals {2}, we get: |
836 | * |
837 | * (tREA + 4000 - tRP) * 8 |
838 | * RDN_DELAY = ----------------------- {3} |
839 | * RP |
840 | */ |
841 | static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, |
842 | const struct nand_sdr_timings *sdr) |
843 | { |
844 | struct gpmi_nfc_hardware_timing *hw = &this->hw; |
845 | struct resources *r = &this->resources; |
846 | unsigned int dll_threshold_ps = this->devdata->max_chain_delay; |
847 | unsigned int period_ps, reference_period_ps; |
848 | unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; |
849 | unsigned int tRP_ps; |
850 | bool use_half_period; |
851 | int sample_delay_ps, sample_delay_factor; |
852 | unsigned int busy_timeout_cycles; |
853 | u8 wrn_dly_sel; |
854 | unsigned long clk_rate, min_rate; |
855 | u64 busy_timeout_ps; |
856 | |
857 | if (sdr->tRC_min >= 30000) { |
858 | /* ONFI non-EDO modes [0-3] */ |
859 | hw->clk_rate = 22000000; |
860 | min_rate = 0; |
861 | wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS; |
862 | } else if (sdr->tRC_min >= 25000) { |
863 | /* ONFI EDO mode 4 */ |
864 | hw->clk_rate = 80000000; |
865 | min_rate = 22000000; |
866 | wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; |
867 | } else { |
868 | /* ONFI EDO mode 5 */ |
869 | hw->clk_rate = 100000000; |
870 | min_rate = 80000000; |
871 | wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; |
872 | } |
873 | |
874 | clk_rate = clk_round_rate(clk: r->clock[0], rate: hw->clk_rate); |
875 | if (clk_rate <= min_rate) { |
876 | dev_err(this->dev, "clock setting: expected %ld, got %ld\n", |
877 | hw->clk_rate, clk_rate); |
878 | return -ENOTSUPP; |
879 | } |
880 | |
881 | hw->clk_rate = clk_rate; |
882 | /* SDR core timings are given in picoseconds */ |
883 | period_ps = div_u64(dividend: (u64)NSEC_PER_SEC * 1000, divisor: hw->clk_rate); |
884 | |
885 | addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); |
886 | data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); |
887 | data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); |
888 | busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max); |
889 | busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps); |
890 | |
891 | hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | |
892 | BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | |
893 | BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles); |
894 | hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096)); |
895 | |
896 | /* |
897 | * Derive NFC ideal delay from {3}: |
898 | * |
899 | * (tREA + 4000 - tRP) * 8 |
900 | * RDN_DELAY = ----------------------- |
901 | * RP |
902 | */ |
903 | if (period_ps > dll_threshold_ps) { |
904 | use_half_period = true; |
905 | reference_period_ps = period_ps / 2; |
906 | } else { |
907 | use_half_period = false; |
908 | reference_period_ps = period_ps; |
909 | } |
910 | |
911 | tRP_ps = data_setup_cycles * period_ps; |
912 | sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8; |
913 | if (sample_delay_ps > 0) |
914 | sample_delay_factor = sample_delay_ps / reference_period_ps; |
915 | else |
916 | sample_delay_factor = 0; |
917 | |
918 | hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel); |
919 | if (sample_delay_factor) |
920 | hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) | |
921 | BM_GPMI_CTRL1_DLL_ENABLE | |
922 | (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0); |
923 | return 0; |
924 | } |
925 | |
/*
 * Program the timing parameters precomputed by gpmi_nfc_compute_timings()
 * into the GPMI block: set the IO clock rate, write TIMING0/TIMING1, and
 * reprogram CTRL1 (write-delay select, RDN_DELAY, DLL enable, half-period).
 *
 * Returns 0 on success or a negative errno from the clock framework.
 */
static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
{
	struct gpmi_nfc_hardware_timing *hw = &this->hw;
	struct resources *r = &this->resources;
	void __iomem *gpmi_regs = r->gpmi_regs;
	unsigned int dll_wait_time_us;
	int ret;

	/* Clock dividers do NOT guarantee a clean clock signal on its output
	 * during the change of the divide factor on i.MX6Q/UL/SX. On i.MX7/8,
	 * all clock dividers provide these guarantee.
	 */
	if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
		clk_disable_unprepare(clk: r->clock[0]);

	/*
	 * NOTE(review): if clk_set_rate() fails here on MX6Q/MX6SX, the
	 * clock that was disabled above is left disabled on return —
	 * confirm the caller treats this as a fatal error.
	 */
	ret = clk_set_rate(clk: r->clock[0], rate: hw->clk_rate);
	if (ret) {
		dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
		return ret;
	}

	if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
		ret = clk_prepare_enable(clk: r->clock[0]);
		if (ret)
			return ret;
	}

	/* Latch the precomputed address/data setup+hold and busy timeout. */
	writel(val: hw->timing0, addr: gpmi_regs + HW_GPMI_TIMING0);
	writel(val: hw->timing1, addr: gpmi_regs + HW_GPMI_TIMING1);

	/*
	 * Clear several CTRL1 fields, DLL must be disabled when setting
	 * RDN_DELAY or HALF_PERIOD.
	 */
	writel(BM_GPMI_CTRL1_CLEAR_MASK, addr: gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(val: hw->ctrl1n, addr: gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Wait 64 clock cycles before using the GPMI after enabling the DLL */
	dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
	if (!dll_wait_time_us)
		dll_wait_time_us = 1;	/* integer division rounds to 0 above 1 MHz */

	/* Wait for the DLL to settle. */
	udelay(usec: dll_wait_time_us);

	return 0;
}
973 | |
974 | static int gpmi_setup_interface(struct nand_chip *chip, int chipnr, |
975 | const struct nand_interface_config *conf) |
976 | { |
977 | struct gpmi_nand_data *this = nand_get_controller_data(chip); |
978 | const struct nand_sdr_timings *sdr; |
979 | int ret; |
980 | |
981 | /* Retrieve required NAND timings */ |
982 | sdr = nand_get_sdr_timings(conf); |
983 | if (IS_ERR(ptr: sdr)) |
984 | return PTR_ERR(ptr: sdr); |
985 | |
986 | /* Only MX28/MX6 GPMI controller can reach EDO timings */ |
987 | if (sdr->tRC_min <= 25000 && !this->devdata->support_edo_timing) |
988 | return -ENOTSUPP; |
989 | |
990 | /* Stop here if this call was just a check */ |
991 | if (chipnr < 0) |
992 | return 0; |
993 | |
994 | /* Do the actual derivation of the controller timings */ |
995 | ret = gpmi_nfc_compute_timings(this, sdr); |
996 | if (ret) |
997 | return ret; |
998 | |
999 | this->hw.must_apply_timings = true; |
1000 | |
1001 | return 0; |
1002 | } |
1003 | |
1004 | /* Clears a BCH interrupt. */ |
1005 | static void gpmi_clear_bch(struct gpmi_nand_data *this) |
1006 | { |
1007 | struct resources *r = &this->resources; |
1008 | writel(BM_BCH_CTRL_COMPLETE_IRQ, addr: r->bch_regs + HW_BCH_CTRL_CLR); |
1009 | } |
1010 | |
1011 | static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) |
1012 | { |
1013 | /* We use the DMA channel 0 to access all the nand chips. */ |
1014 | return this->dma_chans[0]; |
1015 | } |
1016 | |
1017 | /* This will be called after the DMA operation is finished. */ |
1018 | static void dma_irq_callback(void *param) |
1019 | { |
1020 | struct gpmi_nand_data *this = param; |
1021 | struct completion *dma_c = &this->dma_done; |
1022 | |
1023 | complete(dma_c); |
1024 | } |
1025 | |
/* BCH interrupt handler: acknowledge the IRQ and wake the ECC waiter. */
static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	gpmi_clear_bch(this);
	complete(&this->bch_done);
	return IRQ_HANDLED;
}
1034 | |
1035 | static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len) |
1036 | { |
1037 | /* |
1038 | * raw_len is the length to read/write including bch data which |
1039 | * we are passed in exec_op. Calculate the data length from it. |
1040 | */ |
1041 | if (this->bch) |
1042 | return ALIGN_DOWN(raw_len, this->bch_geometry.eccn_chunk_size); |
1043 | else |
1044 | return raw_len; |
1045 | } |
1046 | |
1047 | /* Can we use the upper's buffer directly for DMA? */ |
1048 | static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, |
1049 | int raw_len, struct scatterlist *sgl, |
1050 | enum dma_data_direction dr) |
1051 | { |
1052 | int ret; |
1053 | int len = gpmi_raw_len_to_len(this, raw_len); |
1054 | |
1055 | /* first try to map the upper buffer directly */ |
1056 | if (virt_addr_valid(buf) && !object_is_on_stack(obj: buf)) { |
1057 | sg_init_one(sgl, buf, len); |
1058 | ret = dma_map_sg(this->dev, sgl, 1, dr); |
1059 | if (ret == 0) |
1060 | goto map_fail; |
1061 | |
1062 | return true; |
1063 | } |
1064 | |
1065 | map_fail: |
1066 | /* We have to use our own DMA buffer. */ |
1067 | sg_init_one(sgl, this->data_buffer_dma, len); |
1068 | |
1069 | if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma) |
1070 | memcpy(this->data_buffer_dma, buf, len); |
1071 | |
1072 | dma_map_sg(this->dev, sgl, 1, dr); |
1073 | |
1074 | return false; |
1075 | } |
1076 | |
/*
 * Our own bad-block-table descriptor: a single 0xff byte at OOB offset 0
 * marks a good block (factory bad blocks have a non-0xff marker there).
 */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options = 0,
	.offs = 0,
	.len = 1,
	.pattern = scan_ff_pattern
};
1085 | |
1086 | /* |
1087 | * We may change the layout if we can get the ECC info from the datasheet, |
1088 | * else we will use all the (page + OOB). |
1089 | */ |
1090 | static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section, |
1091 | struct mtd_oob_region *oobregion) |
1092 | { |
1093 | struct nand_chip *chip = mtd_to_nand(mtd); |
1094 | struct gpmi_nand_data *this = nand_get_controller_data(chip); |
1095 | struct bch_geometry *geo = &this->bch_geometry; |
1096 | |
1097 | if (section) |
1098 | return -ERANGE; |
1099 | |
1100 | oobregion->offset = 0; |
1101 | oobregion->length = geo->page_size - mtd->writesize; |
1102 | |
1103 | return 0; |
1104 | } |
1105 | |
1106 | static int gpmi_ooblayout_free(struct mtd_info *mtd, int section, |
1107 | struct mtd_oob_region *oobregion) |
1108 | { |
1109 | struct nand_chip *chip = mtd_to_nand(mtd); |
1110 | struct gpmi_nand_data *this = nand_get_controller_data(chip); |
1111 | struct bch_geometry *geo = &this->bch_geometry; |
1112 | |
1113 | if (section) |
1114 | return -ERANGE; |
1115 | |
1116 | /* The available oob size we have. */ |
1117 | if (geo->page_size < mtd->writesize + mtd->oobsize) { |
1118 | oobregion->offset = geo->page_size - mtd->writesize; |
1119 | oobregion->length = mtd->oobsize - oobregion->offset; |
1120 | } |
1121 | |
1122 | return 0; |
1123 | } |
1124 | |
/* Clock names required by the MX23/MX28 flavours of the controller. */
static const char * const gpmi_clks_for_mx2x[] = {
	"gpmi_io",
};

static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
	.ecc = gpmi_ooblayout_ecc,
	.free = gpmi_ooblayout_free,
};

/*
 * Per-SoC capability tables: maximum BCH correction strength, propagation
 * delay of the chip select chain (ps), EDO-timing support and the list of
 * clocks gpmi_get_clks() must acquire.
 */
static const struct gpmi_devdata gpmi_devdata_imx23 = {
	.type = IS_MX23,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const struct gpmi_devdata gpmi_devdata_imx28 = {
	.type = IS_MX28,
	.bch_max_ecc_strength = 20,
	.max_chain_delay = 16000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx2x,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
};

static const char * const gpmi_clks_for_mx6[] = {
	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
};

static const struct gpmi_devdata gpmi_devdata_imx6q = {
	.type = IS_MX6Q,
	.bch_max_ecc_strength = 40,
	.max_chain_delay = 12000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const struct gpmi_devdata gpmi_devdata_imx6sx = {
	.type = IS_MX6SX,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx6,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
};

static const char * const gpmi_clks_for_mx7d[] = {
	"gpmi_io", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx7d = {
	.type = IS_MX7D,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx7d,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
};

static const char *gpmi_clks_for_mx8qxp[GPMI_CLK_MAX] = {
	"gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb",
};

static const struct gpmi_devdata gpmi_devdata_imx8qxp = {
	.type = IS_MX8QXP,
	.bch_max_ecc_strength = 62,
	.max_chain_delay = 12000,
	.support_edo_timing = true,
	.clks = gpmi_clks_for_mx8qxp,
	.clks_count = ARRAY_SIZE(gpmi_clks_for_mx8qxp),
};
1198 | |
1199 | static int acquire_register_block(struct gpmi_nand_data *this, |
1200 | const char *res_name) |
1201 | { |
1202 | struct platform_device *pdev = this->pdev; |
1203 | struct resources *res = &this->resources; |
1204 | void __iomem *p; |
1205 | |
1206 | p = devm_platform_ioremap_resource_byname(pdev, name: res_name); |
1207 | if (IS_ERR(ptr: p)) |
1208 | return PTR_ERR(ptr: p); |
1209 | |
1210 | if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME)) |
1211 | res->gpmi_regs = p; |
1212 | else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME)) |
1213 | res->bch_regs = p; |
1214 | else |
1215 | dev_err(this->dev, "unknown resource name : %s\n", res_name); |
1216 | |
1217 | return 0; |
1218 | } |
1219 | |
1220 | static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h) |
1221 | { |
1222 | struct platform_device *pdev = this->pdev; |
1223 | const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME; |
1224 | int err; |
1225 | |
1226 | err = platform_get_irq_byname(pdev, res_name); |
1227 | if (err < 0) |
1228 | return err; |
1229 | |
1230 | err = devm_request_irq(dev: this->dev, irq: err, handler: irq_h, irqflags: 0, devname: res_name, dev_id: this); |
1231 | if (err) |
1232 | dev_err(this->dev, "error requesting BCH IRQ\n"); |
1233 | |
1234 | return err; |
1235 | } |
1236 | |
1237 | static void release_dma_channels(struct gpmi_nand_data *this) |
1238 | { |
1239 | unsigned int i; |
1240 | for (i = 0; i < DMA_CHANS; i++) |
1241 | if (this->dma_chans[i]) { |
1242 | dma_release_channel(chan: this->dma_chans[i]); |
1243 | this->dma_chans[i] = NULL; |
1244 | } |
1245 | } |
1246 | |
1247 | static int acquire_dma_channels(struct gpmi_nand_data *this) |
1248 | { |
1249 | struct platform_device *pdev = this->pdev; |
1250 | struct dma_chan *dma_chan; |
1251 | int ret = 0; |
1252 | |
1253 | /* request dma channel */ |
1254 | dma_chan = dma_request_chan(dev: &pdev->dev, name: "rx-tx"); |
1255 | if (IS_ERR(ptr: dma_chan)) { |
1256 | ret = dev_err_probe(dev: this->dev, err: PTR_ERR(ptr: dma_chan), |
1257 | fmt: "DMA channel request failed\n"); |
1258 | release_dma_channels(this); |
1259 | } else { |
1260 | this->dma_chans[0] = dma_chan; |
1261 | } |
1262 | |
1263 | return ret; |
1264 | } |
1265 | |
1266 | static int gpmi_get_clks(struct gpmi_nand_data *this) |
1267 | { |
1268 | struct resources *r = &this->resources; |
1269 | struct clk *clk; |
1270 | int err, i; |
1271 | |
1272 | for (i = 0; i < this->devdata->clks_count; i++) { |
1273 | clk = devm_clk_get(dev: this->dev, id: this->devdata->clks[i]); |
1274 | if (IS_ERR(ptr: clk)) { |
1275 | err = PTR_ERR(ptr: clk); |
1276 | goto err_clock; |
1277 | } |
1278 | |
1279 | r->clock[i] = clk; |
1280 | } |
1281 | |
1282 | return 0; |
1283 | |
1284 | err_clock: |
1285 | dev_dbg(this->dev, "failed in finding the clocks.\n"); |
1286 | return err; |
1287 | } |
1288 | |
1289 | static int acquire_resources(struct gpmi_nand_data *this) |
1290 | { |
1291 | int ret; |
1292 | |
1293 | ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME); |
1294 | if (ret) |
1295 | goto exit_regs; |
1296 | |
1297 | ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME); |
1298 | if (ret) |
1299 | goto exit_regs; |
1300 | |
1301 | ret = acquire_bch_irq(this, irq_h: bch_irq); |
1302 | if (ret) |
1303 | goto exit_regs; |
1304 | |
1305 | ret = acquire_dma_channels(this); |
1306 | if (ret) |
1307 | goto exit_regs; |
1308 | |
1309 | ret = gpmi_get_clks(this); |
1310 | if (ret) |
1311 | goto exit_clock; |
1312 | return 0; |
1313 | |
1314 | exit_clock: |
1315 | release_dma_channels(this); |
1316 | exit_regs: |
1317 | return ret; |
1318 | } |
1319 | |
/*
 * Undo acquire_resources(). Only the DMA channel needs explicit release;
 * the register mappings, IRQ and clocks were acquired with devm_* helpers
 * and are freed automatically on device teardown.
 */
static void release_resources(struct gpmi_nand_data *this)
{
	release_dma_channels(this);
}
1324 | |
1325 | static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) |
1326 | { |
1327 | struct device *dev = this->dev; |
1328 | struct bch_geometry *geo = &this->bch_geometry; |
1329 | |
1330 | if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt)) |
1331 | dma_free_coherent(dev, size: geo->auxiliary_size, |
1332 | cpu_addr: this->auxiliary_virt, |
1333 | dma_handle: this->auxiliary_phys); |
1334 | kfree(objp: this->data_buffer_dma); |
1335 | kfree(objp: this->raw_buffer); |
1336 | |
1337 | this->data_buffer_dma = NULL; |
1338 | this->raw_buffer = NULL; |
1339 | } |
1340 | |
1341 | /* Allocate the DMA buffers */ |
1342 | static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) |
1343 | { |
1344 | struct bch_geometry *geo = &this->bch_geometry; |
1345 | struct device *dev = this->dev; |
1346 | struct mtd_info *mtd = nand_to_mtd(chip: &this->nand); |
1347 | |
1348 | /* |
1349 | * [2] Allocate a read/write data buffer. |
1350 | * The gpmi_alloc_dma_buffer can be called twice. |
1351 | * We allocate a PAGE_SIZE length buffer if gpmi_alloc_dma_buffer |
1352 | * is called before the NAND identification; and we allocate a |
1353 | * buffer of the real NAND page size when the gpmi_alloc_dma_buffer |
1354 | * is called after. |
1355 | */ |
1356 | this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE, |
1357 | GFP_DMA | GFP_KERNEL); |
1358 | if (this->data_buffer_dma == NULL) |
1359 | goto error_alloc; |
1360 | |
1361 | this->auxiliary_virt = dma_alloc_coherent(dev, size: geo->auxiliary_size, |
1362 | dma_handle: &this->auxiliary_phys, GFP_DMA); |
1363 | if (!this->auxiliary_virt) |
1364 | goto error_alloc; |
1365 | |
1366 | this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL); |
1367 | if (!this->raw_buffer) |
1368 | goto error_alloc; |
1369 | |
1370 | return 0; |
1371 | |
1372 | error_alloc: |
1373 | gpmi_free_dma_buffer(this); |
1374 | return -ENOMEM; |
1375 | } |
1376 | |
1377 | /* |
1378 | * Handles block mark swapping. |
1379 | * It can be called in swapping the block mark, or swapping it back, |
1380 | * because the operations are the same. |
1381 | */ |
1382 | static void block_mark_swapping(struct gpmi_nand_data *this, |
1383 | void *payload, void *auxiliary) |
1384 | { |
1385 | struct bch_geometry *nfc_geo = &this->bch_geometry; |
1386 | unsigned char *p; |
1387 | unsigned char *a; |
1388 | unsigned int bit; |
1389 | unsigned char mask; |
1390 | unsigned char from_data; |
1391 | unsigned char from_oob; |
1392 | |
1393 | if (!this->swap_block_mark) |
1394 | return; |
1395 | |
1396 | /* |
1397 | * If control arrives here, we're swapping. Make some convenience |
1398 | * variables. |
1399 | */ |
1400 | bit = nfc_geo->block_mark_bit_offset; |
1401 | p = payload + nfc_geo->block_mark_byte_offset; |
1402 | a = auxiliary; |
1403 | |
1404 | /* |
1405 | * Get the byte from the data area that overlays the block mark. Since |
1406 | * the ECC engine applies its own view to the bits in the page, the |
1407 | * physical block mark won't (in general) appear on a byte boundary in |
1408 | * the data. |
1409 | */ |
1410 | from_data = (p[0] >> bit) | (p[1] << (8 - bit)); |
1411 | |
1412 | /* Get the byte from the OOB. */ |
1413 | from_oob = a[0]; |
1414 | |
1415 | /* Swap them. */ |
1416 | a[0] = from_data; |
1417 | |
1418 | mask = (0x1 << bit) - 1; |
1419 | p[0] = (p[0] & mask) | (from_oob << bit); |
1420 | |
1421 | mask = ~0 << bit; |
1422 | p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); |
1423 | } |
1424 | |
/*
 * Walk the per-chunk BCH status bytes for chunks [first, last) and
 * accumulate ECC statistics into mtd->ecc_stats.
 *
 * For chunks the hardware flags as uncorrectable, re-read the raw ECC
 * bytes and run nand_check_erased_ecc_chunk() to distinguish an erased
 * page with a few bitflips from a genuine ECC failure.
 *
 * @buf:   payload buffer holding the chunks that were just read
 * @first: index of the first chunk covered by @buf
 * @last:  one past the last chunk covered by @buf
 * @meta:  metadata size in bytes (status bytes follow it, 4-byte aligned)
 *
 * Returns the maximum number of bitflips seen in any single chunk.
 */
static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
			       int last, int meta)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i;
	unsigned char *status;
	unsigned int max_bitflips = 0;

	/* Loop over status bytes, accumulating ECC status. */
	status = this->auxiliary_virt + ALIGN(meta, 4);

	for (i = first; i < last; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
			u8 *eccbuf = this->raw_buffer;
			int offset, bitoffset;
			int eccbytes;
			int flips;

			/* Read ECC bytes into our internal raw_buffer */
			offset = nfc_geo->metadata_size * 8;
			offset += ((8 * nfc_geo->eccn_chunk_size) + eccbits) * (i + 1);
			offset -= eccbits;
			bitoffset = offset % 8;
			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
			offset /= 8;
			eccbytes -= offset;
			nand_change_read_column_op(chip, offset_in_page: offset, buf: eccbuf,
						   len: eccbytes, force_8bit: false);

			/*
			 * ECC data are not byte aligned and we may have
			 * in-band data in the first and last byte of
			 * eccbuf. Set non-eccbits to one so that
			 * nand_check_erased_ecc_chunk() does not count them
			 * as bitflips.
			 */
			if (bitoffset)
				eccbuf[0] |= GENMASK(bitoffset - 1, 0);

			bitoffset = (bitoffset + eccbits) % 8;
			if (bitoffset)
				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);

			/*
			 * The ECC hardware has an uncorrectable ECC status
			 * code in case we have bitflips in an erased page. As
			 * nothing was written into this subpage the ECC is
			 * obviously wrong and we can not trust it. We assume
			 * at this point that we are reading an erased page and
			 * try to correct the bitflips in buffer up to
			 * ecc_strength bitflips. If this is a page with random
			 * data, we exceed this number of bitflips and have a
			 * ECC failure. Otherwise we use the corrected buffer.
			 */
			if (i == 0) {
				/* The first block includes metadata */
				flips = nand_check_erased_ecc_chunk(
						data: buf + i * nfc_geo->eccn_chunk_size,
						datalen: nfc_geo->eccn_chunk_size,
						ecc: eccbuf, ecclen: eccbytes,
						extraoob: this->auxiliary_virt,
						extraooblen: nfc_geo->metadata_size,
						threshold: nfc_geo->ecc_strength);
			} else {
				flips = nand_check_erased_ecc_chunk(
						data: buf + i * nfc_geo->eccn_chunk_size,
						datalen: nfc_geo->eccn_chunk_size,
						ecc: eccbuf, ecclen: eccbytes,
						NULL, extraooblen: 0,
						threshold: nfc_geo->ecc_strength);
			}

			if (flips > 0) {
				max_bitflips = max_t(unsigned int, max_bitflips,
						     flips);
				mtd->ecc_stats.corrected += flips;
				continue;
			}

			/* Not an erased page: a real uncorrectable error. */
			mtd->ecc_stats.failed++;
			continue;
		}

		/* Correctable chunk: *status is the bitflip count. */
		mtd->ecc_stats.corrected += *status;
		max_bitflips = max_t(unsigned int, max_bitflips, *status);
	}

	return max_bitflips;
}
1520 | |
/*
 * Compute the standard (full-page) BCH FLASH0LAYOUT0/1 register values
 * from the current bch_geometry and cache them in this->bch_flashlayout0/1.
 * The hardware encodes ECC strength divided by two, hence the shift.
 */
static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	unsigned int ecc_strength = geo->ecc_strength >> 1;
	unsigned int gf_len = geo->gf_len;
	unsigned int block0_size = geo->ecc0_chunk_size;
	unsigned int blockn_size = geo->eccn_chunk_size;

	this->bch_flashlayout0 =
		BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
		BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
		BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
		BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block0_size, this);

	this->bch_flashlayout1 =
		BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
		BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
		BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(blockn_size, this);
}
1542 | |
/*
 * ->read_page() with hardware ECC: program the standard BCH layout, read
 * the whole BCH page, gather bitflip statistics, undo block mark swapping
 * and optionally synthesize the OOB. Returns max bitflips or a negative
 * errno.
 */
static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
			      int oob_required, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct bch_geometry *geo = &this->bch_geometry;
	unsigned int max_bitflips;
	int ret;

	gpmi_bch_layout_std(this);
	this->bch = true;

	ret = nand_read_page_op(chip, page, offset_in_page: 0, buf, len: geo->page_size);
	if (ret)
		return ret;

	max_bitflips = gpmi_count_bitflips(chip, buf, first: 0,
					   last: geo->ecc_chunk_count,
					   meta: geo->auxiliary_status_offset);

	/* handle the block mark swapping */
	block_mark_swapping(this, payload: buf, auxiliary: this->auxiliary_virt);

	if (oob_required) {
		/*
		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
		 * for details about our policy for delivering the OOB.
		 *
		 * We fill the caller's buffer with set bits, and then copy the
		 * block mark to the caller's buffer. Note that, if block mark
		 * swapping was necessary, it has already been done, so we can
		 * rely on the first byte of the auxiliary buffer to contain
		 * the block mark.
		 */
		memset(chip->oob_poi, ~0, mtd->oobsize);
		chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
	}

	return max_bitflips;
}
1583 | |
/*
 * Fake a virtual small page for the subpage read: reprogram the BCH layout
 * so that only the chunks covering [offs, offs+len) are transferred and
 * decoded, then read and count bitflips as usual. Falls back to a full
 * gpmi_ecc_read_page() when block mark swapping would interfere.
 */
static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
				 uint32_t len, uint8_t *buf, int page)
{
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *geo = &this->bch_geometry;
	int size = chip->ecc.size;	/* ECC chunk size */
	int meta, n, page_size;
	unsigned int max_bitflips;
	unsigned int ecc_strength;
	int first, last, marker_pos;
	int ecc_parity_size;
	int col = 0;
	int ret;

	/* The size of ECC parity */
	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;

	/* Align it with the chunk size */
	first = offs / size;
	last = (offs + len - 1) / size;

	if (this->swap_block_mark) {
		/*
		 * Find the chunk which contains the Block Marker.
		 * If this chunk is in the range of [first, last],
		 * we have to read out the whole page.
		 * Why? since we had swapped the data at the position of Block
		 * Marker to the metadata which is bound with the chunk 0.
		 */
		marker_pos = geo->block_mark_byte_offset / size;
		if (last >= marker_pos && first <= marker_pos) {
			dev_dbg(this->dev,
				"page:%d, first:%d, last:%d, marker at:%d\n",
				page, first, last, marker_pos);
			return gpmi_ecc_read_page(chip, buf, oob_required: 0, page);
		}
	}

	/*
	 * if there is an ECC dedicate for meta:
	 *	- need to add an extra ECC size when calculating col and page_size,
	 *	  if the meta size is NOT zero.
	 *	- ecc0_chunk size need to set to the same size as other chunks,
	 *	  if the meta size is zero.
	 */

	meta = geo->metadata_size;
	if (first) {
		/* Starting mid-page: skip metadata and the earlier chunks. */
		if (geo->ecc_for_meta)
			col = meta + ecc_parity_size
				+ (size + ecc_parity_size) * first;
		else
			col = meta + (size + ecc_parity_size) * first;

		meta = 0;
		buf = buf + first * size;
	}

	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
	n = last - first + 1;

	/* Size of the shrunken "virtual page" the BCH engine will see. */
	if (geo->ecc_for_meta && meta)
		page_size = meta + ecc_parity_size
			    + (size + ecc_parity_size) * n;
	else
		page_size = meta + (size + ecc_parity_size) * n;

	ecc_strength = geo->ecc_strength >> 1;

	this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(
		(geo->ecc_for_meta ? n : n - 1)) |
		BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
		BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
		BF_BCH_FLASH0LAYOUT0_DATA0_SIZE((geo->ecc_for_meta ?
		0 : geo->ecc0_chunk_size), this);

	this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
		BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
		BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
		BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->eccn_chunk_size, this);

	this->bch = true;

	ret = nand_read_page_op(chip, page, offset_in_page: col, buf, len: page_size);
	if (ret)
		return ret;

	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
		page, offs, len, col, first, n, page_size);

	max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);

	return max_bitflips;
}
1680 | |
/*
 * ->write_page() with hardware ECC: program the standard BCH layout, stage
 * the OOB into the auxiliary buffer and, when block mark swapping is in
 * use, swap via a private copy (the input buffer is const).
 */
static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
			       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;

	dev_dbg(this->dev, "ecc write page.\n");

	gpmi_bch_layout_std(this);
	this->bch = true;

	memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);

	if (this->swap_block_mark) {
		/*
		 * When doing bad block marker swapping we must always copy the
		 * input buffer as we can't modify the const buffer.
		 */
		memcpy(this->data_buffer_dma, buf, mtd->writesize);
		buf = this->data_buffer_dma;
		block_mark_swapping(this, payload: this->data_buffer_dma,
				    auxiliary: this->auxiliary_virt);
	}

	return nand_prog_page_op(chip, page, offset_in_page: 0, buf, len: nfc_geo->page_size);
}
1708 | |
1709 | /* |
1710 | * There are several places in this driver where we have to handle the OOB and |
1711 | * block marks. This is the function where things are the most complicated, so |
1712 | * this is where we try to explain it all. All the other places refer back to |
1713 | * here. |
1714 | * |
1715 | * These are the rules, in order of decreasing importance: |
1716 | * |
1717 | * 1) Nothing the caller does can be allowed to imperil the block mark. |
1718 | * |
1719 | * 2) In read operations, the first byte of the OOB we return must reflect the |
1720 | * true state of the block mark, no matter where that block mark appears in |
1721 | * the physical page. |
1722 | * |
1723 | * 3) ECC-based read operations return an OOB full of set bits (since we never |
1724 | * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads |
1725 | * return). |
1726 | * |
1727 | * 4) "Raw" read operations return a direct view of the physical bytes in the |
1728 | * page, using the conventional definition of which bytes are data and which |
1729 | * are OOB. This gives the caller a way to see the actual, physical bytes |
1730 | * in the page, without the distortions applied by our ECC engine. |
1731 | * |
1732 | * |
1733 | * What we do for this specific read operation depends on two questions: |
1734 | * |
1735 | * 1) Are we doing a "raw" read, or an ECC-based read? |
1736 | * |
1737 | * 2) Are we using block mark swapping or transcription? |
1738 | * |
1739 | * There are four cases, illustrated by the following Karnaugh map: |
1740 | * |
1741 | * | Raw | ECC-based | |
1742 | * -------------+-------------------------+-------------------------+ |
1743 | * | Read the conventional | | |
1744 | * | OOB at the end of the | | |
1745 | * Swapping | page and return it. It | | |
1746 | * | contains exactly what | | |
1747 | * | we want. | Read the block mark and | |
1748 | * -------------+-------------------------+ return it in a buffer | |
1749 | * | Read the conventional | full of set bits. | |
1750 | * | OOB at the end of the | | |
1751 | * | page and also the block | | |
1752 | * Transcribing | mark in the metadata. | | |
1753 | * | Copy the block mark | | |
1754 | * | into the first byte of | | |
1755 | * | the OOB. | | |
1756 | * -------------+-------------------------+-------------------------+ |
1757 | * |
1758 | * Note that we break rule #4 in the Transcribing/Raw case because we're not |
1759 | * giving an accurate view of the actual, physical bytes in the page (we're |
1760 | * overwriting the block mark). That's OK because it's more important to follow |
1761 | * rule #2. |
1762 | * |
1763 | * It turns out that knowing whether we want an "ECC-based" or "raw" read is not |
1764 | * easy. When reading a page, for example, the NAND Flash MTD code calls our |
1765 | * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an |
1766 | * ECC-based or raw view of the page is implicit in which function it calls |
1767 | * (there is a similar pair of ECC-based/raw functions for writing). |
1768 | */ |
1769 | static int gpmi_ecc_read_oob(struct nand_chip *chip, int page) |
1770 | { |
1771 | struct mtd_info *mtd = nand_to_mtd(chip); |
1772 | struct gpmi_nand_data *this = nand_get_controller_data(chip); |
1773 | int ret; |
1774 | |
1775 | /* clear the OOB buffer */ |
1776 | memset(chip->oob_poi, ~0, mtd->oobsize); |
1777 | |
1778 | /* Read out the conventional OOB. */ |
1779 | ret = nand_read_page_op(chip, page, offset_in_page: mtd->writesize, buf: chip->oob_poi, |
1780 | len: mtd->oobsize); |
1781 | if (ret) |
1782 | return ret; |
1783 | |
1784 | /* |
1785 | * Now, we want to make sure the block mark is correct. In the |
1786 | * non-transcribing case (!GPMI_IS_MX23()), we already have it. |
1787 | * Otherwise, we need to explicitly read it. |
1788 | */ |
1789 | if (GPMI_IS_MX23(this)) { |
1790 | /* Read the block mark into the first byte of the OOB buffer. */ |
1791 | ret = nand_read_page_op(chip, page, offset_in_page: 0, buf: chip->oob_poi, len: 1); |
1792 | if (ret) |
1793 | return ret; |
1794 | } |
1795 | |
1796 | return 0; |
1797 | } |
1798 | |
1799 | static int gpmi_ecc_write_oob(struct nand_chip *chip, int page) |
1800 | { |
1801 | struct mtd_info *mtd = nand_to_mtd(chip); |
1802 | struct mtd_oob_region of = { }; |
1803 | |
1804 | /* Do we have available oob area? */ |
1805 | mtd_ooblayout_free(mtd, section: 0, oobfree: &of); |
1806 | if (!of.length) |
1807 | return -EPERM; |
1808 | |
1809 | if (!nand_is_slc(chip)) |
1810 | return -EPERM; |
1811 | |
1812 | return nand_prog_page_op(chip, page, offset_in_page: mtd->writesize + of.offset, |
1813 | buf: chip->oob_poi + of.offset, len: of.length); |
1814 | } |
1815 | |
1816 | /* |
1817 | * This function reads a NAND page without involving the ECC engine (no HW |
1818 | * ECC correction). |
1819 | * The tricky part in the GPMI/BCH controller is that it stores ECC bits |
1820 | * inline (interleaved with payload DATA), and do not align data chunk on |
1821 | * byte boundaries. |
1822 | * We thus need to take care moving the payload data and ECC bits stored in the |
1823 | * page into the provided buffers, which is why we're using nand_extract_bits(). |
1824 | * |
1825 | * See set_geometry_by_ecc_info inline comments to have a full description |
1826 | * of the layout used by the GPMI controller. |
1827 | */ |
static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	/* Payload bytes per ECC chunk and ECC bits per chunk (may be unaligned). */
	int eccsize = nfc_geo->eccn_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	size_t src_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	uint8_t *oob = chip->oob_poi;
	int step;
	int ret;

	/* Pull the whole raw page (data + OOB) into the scratch buffer. */
	ret = nand_read_page_op(chip, page, offset_in_page: 0, buf: tmp_buf,
				len: mtd->writesize + mtd->oobsize);
	if (ret)
		return ret;

	/*
	 * If required, swap the bad block marker and the data stored in the
	 * metadata section, so that we don't wrongly consider a block as bad.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	/*
	 * Copy the metadata section into the oob buffer (this section is
	 * guaranteed to be aligned on a byte boundary).
	 */
	if (oob_required)
		memcpy(oob, tmp_buf, nfc_geo->metadata_size);

	oob_bit_off = nfc_geo->metadata_size * 8;
	src_bit_off = oob_bit_off;

	/* Extract interleaved payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		/* Payload chunk: always a whole number of bytes in dst. */
		if (buf)
			nand_extract_bits(dst: buf, dst_off: step * eccsize * 8, src: tmp_buf,
					  src_off: src_bit_off, nbits: eccsize * 8);
		src_bit_off += eccsize * 8;

		/* Align last ECC block to align a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		/* ECC bits go into the OOB buffer at a bit granularity. */
		if (oob_required)
			nand_extract_bits(dst: oob, dst_off: oob_bit_off, src: tmp_buf,
					  src_off: src_bit_off, nbits: eccbits);

		src_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	/* Copy whatever trails the last ECC chunk (now byte-aligned). */
	if (oob_required) {
		oob_byte_off = oob_bit_off / 8;

		if (oob_byte_off < mtd->oobsize)
			memcpy(oob + oob_byte_off,
			       tmp_buf + mtd->writesize + oob_byte_off,
			       mtd->oobsize - oob_byte_off);
	}

	return 0;
}
1900 | |
1901 | /* |
1902 | * This function writes a NAND page without involving the ECC engine (no HW |
1903 | * ECC generation). |
1904 | * The tricky part in the GPMI/BCH controller is that it stores ECC bits |
1905 | * inline (interleaved with payload DATA), and do not align data chunk on |
1906 | * byte boundaries. |
1907 | * We thus need to take care moving the OOB area at the right place in the |
1908 | * final page, which is why we're using nand_extract_bits(). |
1909 | * |
1910 | * See set_geometry_by_ecc_info inline comments to have a full description |
1911 | * of the layout used by the GPMI controller. |
1912 | */ |
static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	/* Payload bytes per ECC chunk and ECC bits per chunk (may be unaligned). */
	int eccsize = nfc_geo->eccn_chunk_size;
	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
	u8 *tmp_buf = this->raw_buffer;
	uint8_t *oob = chip->oob_poi;
	size_t dst_bit_off;
	size_t oob_bit_off;
	size_t oob_byte_off;
	int step;

	/*
	 * Initialize all bits to 1 in case we don't have a buffer for the
	 * payload or oob data in order to leave unspecified bits of data
	 * to their initial state.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);

	/*
	 * First copy the metadata section (stored in oob buffer) at the
	 * beginning of the page, as imposed by the GPMI layout.
	 */
	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
	oob_bit_off = nfc_geo->metadata_size * 8;
	dst_bit_off = oob_bit_off;

	/* Interleave payload data and ECC bits */
	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
		/* Payload chunk comes from the data buffer, byte-aligned in src. */
		if (buf)
			nand_extract_bits(dst: tmp_buf, dst_off: dst_bit_off, src: buf,
					  src_off: step * eccsize * 8, nbits: eccsize * 8);
		dst_bit_off += eccsize * 8;

		/* Align last ECC block to align a byte boundary */
		if (step == nfc_geo->ecc_chunk_count - 1 &&
		    (oob_bit_off + eccbits) % 8)
			eccbits += 8 - ((oob_bit_off + eccbits) % 8);

		/* ECC bits come from the OOB buffer at a bit granularity. */
		if (oob_required)
			nand_extract_bits(dst: tmp_buf, dst_off: dst_bit_off, src: oob,
					  src_off: oob_bit_off, nbits: eccbits);

		dst_bit_off += eccbits;
		oob_bit_off += eccbits;
	}

	oob_byte_off = oob_bit_off / 8;

	/* Copy whatever trails the last ECC chunk (now byte-aligned). */
	if (oob_required && oob_byte_off < mtd->oobsize)
		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);

	/*
	 * If required, swap the bad block marker and the first byte of the
	 * metadata section, so that we don't modify the bad block marker.
	 *
	 * See the layout description for a detailed explanation on why this
	 * is needed.
	 */
	if (this->swap_block_mark)
		swap(tmp_buf[0], tmp_buf[mtd->writesize]);

	return nand_prog_page_op(chip, page, offset_in_page: 0, buf: tmp_buf,
				 len: mtd->writesize + mtd->oobsize);
}
1983 | |
/* Raw OOB read: delegate to the raw page read with no data buffer. */
static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_read_page_raw(chip, NULL, oob_required: 1, page);
}
1988 | |
/* Raw OOB write: delegate to the raw page write with no data buffer. */
static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
{
	return gpmi_ecc_write_page_raw(chip, NULL, oob_required: 1, page);
}
1993 | |
1994 | static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs) |
1995 | { |
1996 | struct mtd_info *mtd = nand_to_mtd(chip); |
1997 | struct gpmi_nand_data *this = nand_get_controller_data(chip); |
1998 | int ret = 0; |
1999 | uint8_t *block_mark; |
2000 | int column, page, chipnr; |
2001 | |
2002 | chipnr = (int)(ofs >> chip->chip_shift); |
2003 | nand_select_target(chip, cs: chipnr); |
2004 | |
2005 | column = !GPMI_IS_MX23(this) ? mtd->writesize : 0; |
2006 | |
2007 | /* Write the block mark. */ |
2008 | block_mark = this->data_buffer_dma; |
2009 | block_mark[0] = 0; /* bad block marker */ |
2010 | |
2011 | /* Shift to get page */ |
2012 | page = (int)(ofs >> chip->page_shift); |
2013 | |
2014 | ret = nand_prog_page_op(chip, page, offset_in_page: column, buf: block_mark, len: 1); |
2015 | |
2016 | nand_deselect_target(chip); |
2017 | |
2018 | return ret; |
2019 | } |
2020 | |
2021 | static int nand_boot_set_geometry(struct gpmi_nand_data *this) |
2022 | { |
2023 | struct boot_rom_geometry *geometry = &this->rom_geometry; |
2024 | |
2025 | /* |
2026 | * Set the boot block stride size. |
2027 | * |
2028 | * In principle, we should be reading this from the OTP bits, since |
2029 | * that's where the ROM is going to get it. In fact, we don't have any |
2030 | * way to read the OTP bits, so we go with the default and hope for the |
2031 | * best. |
2032 | */ |
2033 | geometry->stride_size_in_pages = 64; |
2034 | |
2035 | /* |
2036 | * Set the search area stride exponent. |
2037 | * |
2038 | * In principle, we should be reading this from the OTP bits, since |
2039 | * that's where the ROM is going to get it. In fact, we don't have any |
2040 | * way to read the OTP bits, so we go with the default and hope for the |
2041 | * best. |
2042 | */ |
2043 | geometry->search_area_stride_exponent = 2; |
2044 | return 0; |
2045 | } |
2046 | |
/* Four-byte marker the boot ROM looks for in an NCB page. */
static const char *fingerprint = "STMP";

/*
 * Scan the first boot ROM search area for an NCB fingerprint.
 *
 * Returns true (non-zero) if a fingerprint was found, which means the
 * block marks have already been transcribed.
 */
static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
{
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	unsigned int search_area_size_in_strides;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int found_an_ncb_fingerprint = false;
	int ret;

	/* Compute the number of strides in a search area. */
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;

	nand_select_target(chip, cs: 0);

	/*
	 * Loop through the first search area, looking for the NCB fingerprint.
	 */
	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");

	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);

		/*
		 * Read the NCB fingerprint. The fingerprint is four bytes long
		 * and starts in the 12th byte of the page.
		 */
		ret = nand_read_page_op(chip, page, offset_in_page: 12, buf: buffer,
					strlen(fingerprint));
		if (ret)
			continue;

		/* Look for the fingerprint. */
		if (!memcmp(p: buffer, q: fingerprint, strlen(fingerprint))) {
			found_an_ncb_fingerprint = true;
			break;
		}

	}

	nand_deselect_target(chip);

	if (found_an_ncb_fingerprint)
		dev_dbg(dev, "\tFound a fingerprint\n");
	else
		dev_dbg(dev, "\tNo fingerprint found\n");
	return found_an_ncb_fingerprint;
}
2101 | |
2102 | /* Writes a transcription stamp. */ |
/* Writes a transcription stamp. */
static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_size_in_pages;
	unsigned int search_area_size_in_strides;
	unsigned int search_area_size_in_pages;
	unsigned int search_area_size_in_blocks;
	unsigned int block;
	unsigned int stride;
	unsigned int page;
	u8 *buffer = nand_get_data_buf(chip);
	int status;

	/* Compute the search area geometry. */
	block_size_in_pages = mtd->erasesize / mtd->writesize;
	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
	search_area_size_in_pages = search_area_size_in_strides *
					rom_geo->stride_size_in_pages;
	/* Round up to whole erase blocks. */
	search_area_size_in_blocks =
		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
				    block_size_in_pages;

	dev_dbg(dev, "Search Area Geometry :\n");
	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);

	nand_select_target(chip, cs: 0);

	/* Loop over blocks in the first search area, erasing them. */
	dev_dbg(dev, "Erasing the search area...\n");

	for (block = 0; block < search_area_size_in_blocks; block++) {
		/* Erase this block. */
		dev_dbg(dev, "\tErasing block 0x%x\n", block);
		status = nand_erase_op(chip, eraseblock: block);
		/* Erase failures are logged but do not abort the stamping. */
		if (status)
			dev_err(dev, "[%s] Erase failed.\n", __func__);
	}

	/* Write the NCB fingerprint into the page buffer. */
	memset(buffer, ~0, mtd->writesize);
	memcpy(buffer + 12, fingerprint, strlen(fingerprint));

	/* Loop through the first search area, writing NCB fingerprints. */
	dev_dbg(dev, "Writing NCB fingerprints...\n");
	for (stride = 0; stride < search_area_size_in_strides; stride++) {
		/* Compute the page addresses. */
		page = stride * rom_geo->stride_size_in_pages;

		/* Write the first page of the current stride. */
		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);

		/* Raw write: the boot ROM reads these pages without ECC. */
		status = chip->ecc.write_page_raw(chip, buffer, 0, page);
		if (status)
			dev_err(dev, "[%s] Write failed.\n", __func__);
	}

	nand_deselect_target(chip);

	return 0;
}
2168 | |
/*
 * MX23-specific boot initialization: transcribe conventional bad block
 * marks into the GPMI layout, then stamp the medium so this is only done
 * once.
 */
static int mx23_boot_init(struct gpmi_nand_data  *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int block_count;
	unsigned int block;
	int     chipnr;
	int     page;
	loff_t  byte;
	uint8_t block_mark;
	int     ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = nanddev_eraseblocks_per_target(nand: &chip->base);

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block <<  chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		nand_select_target(chip, cs: chipnr);
		ret = nand_read_page_op(chip, page, offset_in_page: mtd->writesize, buf: &block_mark,
					len: 1);
		nand_deselect_target(chip);

		/* An unreadable mark is skipped rather than treated as bad. */
		if (ret)
			continue;

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->legacy.block_markbad(chip, byte);
			if (ret)
				dev_err(dev,
					"Failed to mark block bad with ret %d\n",
					ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}
2241 | |
/* Set up boot ROM geometry and run SoC-specific boot preparation. */
static int nand_boot_init(struct gpmi_nand_data  *this)
{
	nand_boot_set_geometry(this);

	/* This is ROM arch-specific initialization before the BBT scanning. */
	if (GPMI_IS_MX23(this))
		return mx23_boot_init(this);
	return 0;
}
2251 | |
2252 | static int gpmi_set_geometry(struct gpmi_nand_data *this) |
2253 | { |
2254 | int ret; |
2255 | |
2256 | /* Free the temporary DMA memory for reading ID. */ |
2257 | gpmi_free_dma_buffer(this); |
2258 | |
2259 | /* Set up the NFC geometry which is used by BCH. */ |
2260 | ret = bch_set_geometry(this); |
2261 | if (ret) { |
2262 | dev_err(this->dev, "Error setting BCH geometry : %d\n", ret); |
2263 | return ret; |
2264 | } |
2265 | |
2266 | /* Alloc the new DMA buffers according to the pagesize and oobsize */ |
2267 | return gpmi_alloc_dma_buffer(this); |
2268 | } |
2269 | |
/*
 * Final initialization step once the chip has been identified: set the
 * medium geometry and wire up the ECC callbacks.
 */
static int gpmi_init_last(struct gpmi_nand_data *this)
{
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct bch_geometry *bch_geo = &this->bch_geometry;
	int ret;

	/* Set up the medium geometry */
	ret = gpmi_set_geometry(this);
	if (ret)
		return ret;

	/* Init the nand_ecc_ctrl{} */
	ecc->read_page	= gpmi_ecc_read_page;
	ecc->write_page	= gpmi_ecc_write_page;
	ecc->read_oob	= gpmi_ecc_read_oob;
	ecc->write_oob	= gpmi_ecc_write_oob;
	ecc->read_page_raw = gpmi_ecc_read_page_raw;
	ecc->write_page_raw = gpmi_ecc_write_page_raw;
	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
	ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
	ecc->size	= bch_geo->eccn_chunk_size;
	ecc->strength	= bch_geo->ecc_strength;
	mtd_set_ooblayout(mtd, ooblayout: &gpmi_ooblayout_ops);

	/*
	 * We only enable the subpage read when:
	 *  (1) the chip is imx6, and
	 *  (2) the size of the ECC parity is byte aligned.
	 */
	if (GPMI_IS_MX6(this) &&
		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
		ecc->read_subpage = gpmi_ecc_read_subpage;
		chip->options |= NAND_SUBPAGE_READ;
	}

	return 0;
}
2310 | |
2311 | static int gpmi_nand_attach_chip(struct nand_chip *chip) |
2312 | { |
2313 | struct gpmi_nand_data *this = nand_get_controller_data(chip); |
2314 | int ret; |
2315 | |
2316 | if (chip->bbt_options & NAND_BBT_USE_FLASH) { |
2317 | chip->bbt_options |= NAND_BBT_NO_OOB; |
2318 | |
2319 | if (of_property_read_bool(np: this->dev->of_node, |
2320 | propname: "fsl,no-blockmark-swap")) |
2321 | this->swap_block_mark = false; |
2322 | } |
2323 | dev_dbg(this->dev, "Blockmark swapping %s\n", |
2324 | str_enabled_disabled(this->swap_block_mark)); |
2325 | |
2326 | ret = gpmi_init_last(this); |
2327 | if (ret) |
2328 | return ret; |
2329 | |
2330 | chip->options |= NAND_SKIP_BBTSCAN; |
2331 | |
2332 | return 0; |
2333 | } |
2334 | |
2335 | static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this) |
2336 | { |
2337 | struct gpmi_transfer *transfer = &this->transfers[this->ntransfers]; |
2338 | |
2339 | this->ntransfers++; |
2340 | |
2341 | if (this->ntransfers == GPMI_MAX_TRANSFERS) |
2342 | return NULL; |
2343 | |
2344 | return transfer; |
2345 | } |
2346 | |
/*
 * Chain a command cycle (and optional address cycles) onto the DMA
 * program: one PIO descriptor to configure the GPMI, then a slave-sg
 * descriptor carrying the opcode and address bytes.
 */
static struct dma_async_tx_descriptor *gpmi_chain_command(
	struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	int chip = this->nand.cur_cs;
	u32 pio[3];

	/* [1] send out the PIO words */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
	pio[1] = 0;
	pio[2] = 0;
	desc = mxs_dmaengine_prep_pio(chan: channel, pio, ARRAY_SIZE(pio),
				      dir: DMA_TRANS_NONE, flags: 0);
	if (!desc)
		return NULL;

	/* NOTE(review): if this fails the PIO desc above is abandoned. */
	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	/* Command opcode followed by the address bytes, in one buffer. */
	transfer->cmdbuf[0] = cmd;
	if (naddr)
		memcpy(&transfer->cmdbuf[1], addr, naddr);

	sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
	dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);

	transfer->direction = DMA_TO_DEVICE;

	desc = dmaengine_prep_slave_sg(chan: channel, sgl: &transfer->sgl, sg_len: 1, dir: DMA_MEM_TO_DEV,
				       MXS_DMA_CTRL_WAIT4END);
	return desc;
}
2388 | |
/*
 * Chain a wait-for-ready onto the DMA program: a zero-length PIO
 * transfer that stalls the channel until the NAND ready line asserts.
 */
static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
	struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	u32 pio[2];

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;

	return mxs_dmaengine_prep_pio(chan: channel, pio, npio: 2, dir: DMA_TRANS_NONE,
				      MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
}
2406 | |
/*
 * Chain a data-in transfer onto the DMA program. When BCH is active the
 * engine writes corrected data straight to the mapped buffer via the PIO
 * words; otherwise a plain slave-sg read is appended. *direct reports
 * whether the caller's buffer was DMA-mapped directly (no bounce copy).
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_read(
	struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_FROM_DEVICE;

	*direct = prepare_data_dma(this, buf, raw_len, sgl: &transfer->sgl,
				   dr: DMA_FROM_DEVICE);

	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	/* With BCH, the decode target and auxiliary buffers go in the PIO. */
	if (this->bch) {
		pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
				| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(chan: channel, pio, ARRAY_SIZE(pio),
				      dir: DMA_TRANS_NONE, flags: 0);
	if (!desc)
		return NULL;

	/* Without BCH, append an ordinary DMA read of the raw data. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(chan: channel, sgl: &transfer->sgl, sg_len: 1,
					       dir: DMA_DEV_TO_MEM,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}
2453 | |
/*
 * Chain a data-out transfer onto the DMA program; mirror image of
 * gpmi_chain_data_read() with BCH encoding instead of decoding.
 */
static struct dma_async_tx_descriptor *gpmi_chain_data_write(
	struct gpmi_nand_data *this, const void *buf, int raw_len)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct gpmi_transfer *transfer;
	u32 pio[6] = {};

	transfer = get_next_transfer(this);
	if (!transfer)
		return NULL;

	transfer->direction = DMA_TO_DEVICE;

	prepare_data_dma(this, buf, raw_len, sgl: &transfer->sgl, dr: DMA_TO_DEVICE);

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(raw_len);

	/* With BCH, the encode source and auxiliary buffers go in the PIO. */
	if (this->bch) {
		pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
			| BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
			| BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
					BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
		pio[3] = raw_len;
		pio[4] = transfer->sgl.dma_address;
		pio[5] = this->auxiliary_phys;
	}

	desc = mxs_dmaengine_prep_pio(chan: channel, pio, ARRAY_SIZE(pio),
				      dir: DMA_TRANS_NONE,
				      flags: (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
	if (!desc)
		return NULL;

	/* Without BCH, append an ordinary DMA write of the raw data. */
	if (!this->bch)
		desc = dmaengine_prep_slave_sg(chan: channel, sgl: &transfer->sgl, sg_len: 1,
					       dir: DMA_MEM_TO_DEV,
					       MXS_DMA_CTRL_WAIT4END);

	return desc;
}
2500 | |
/*
 * nand_controller_ops.exec_op hook: translate a generic NAND operation
 * into a chained DMA program (command/address, wait-ready, data in/out),
 * run it, and wait for DMA -- and, when BCH is active on a read, BCH --
 * completion.
 */
static int gpmi_nfc_exec_op(struct nand_chip *chip,
			    const struct nand_operation *op,
			    bool check_only)
{
	const struct nand_op_instr *instr;
	struct gpmi_nand_data *this = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc = NULL;
	int i, ret, buf_len = 0, nbufs = 0;
	u8 cmd = 0;
	void *buf_read = NULL;
	const void *buf_write = NULL;
	bool direct = false;
	struct completion *dma_completion, *bch_completion;
	unsigned long to;

	/* Every op this driver can build is supported; nothing to check. */
	if (check_only)
		return 0;

	/* Reset the transfer bookkeeping for this operation. */
	this->ntransfers = 0;
	for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
		this->transfers[i].direction = DMA_NONE;

	ret = pm_runtime_resume_and_get(dev: this->dev);
	if (ret < 0)
		return ret;

	/*
	 * This driver currently supports only one NAND chip. Plus, dies share
	 * the same configuration. So once timings have been applied on the
	 * controller side, they will not change anymore. When the time will
	 * come, the check on must_apply_timings will have to be dropped.
	 */
	if (this->hw.must_apply_timings) {
		this->hw.must_apply_timings = false;
		ret = gpmi_nfc_apply_timings(this);
		if (ret)
			goto out_pm;
	}

	dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);

	/* Build one DMA descriptor chain covering all instructions. */
	for (i = 0; i < op->ninstrs; i++) {
		instr = &op->instrs[i];

		nand_op_trace(prefix: "  ", instr);

		switch (instr->type) {
		case NAND_OP_WAITRDY_INSTR:
			desc = gpmi_chain_wait_ready(this);
			break;
		case NAND_OP_CMD_INSTR:
			cmd = instr->ctx.cmd.opcode;

			/*
			 * When this command has an address cycle chain it
			 * together with the address cycle
			 */
			if (i + 1 != op->ninstrs &&
			    op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
				continue;

			desc = gpmi_chain_command(this, cmd, NULL, naddr: 0);

			break;
		case NAND_OP_ADDR_INSTR:
			desc = gpmi_chain_command(this, cmd, addr: instr->ctx.addr.addrs,
						  naddr: instr->ctx.addr.naddrs);
			break;
		case NAND_OP_DATA_OUT_INSTR:
			buf_write = instr->ctx.data.buf.out;
			buf_len = instr->ctx.data.len;
			nbufs++;

			desc = gpmi_chain_data_write(this, buf: buf_write, raw_len: buf_len);

			break;
		case NAND_OP_DATA_IN_INSTR:
			if (!instr->ctx.data.len)
				break;
			buf_read = instr->ctx.data.buf.in;
			buf_len = instr->ctx.data.len;
			nbufs++;

			desc = gpmi_chain_data_read(this, buf: buf_read, raw_len: buf_len,
						   direct: &direct);
			break;
		}

		if (!desc) {
			ret = -ENXIO;
			goto unmap;
		}
	}

	dev_dbg(this->dev, "%s setup done\n", __func__);

	if (nbufs > 1) {
		dev_err(this->dev, "Multiple data instructions not supported\n");
		ret = -EINVAL;
		goto unmap;
	}

	/* Program the layout decided when the op was prepared, if any. */
	if (this->bch) {
		writel(val: this->bch_flashlayout0,
		       addr: this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
		writel(val: this->bch_flashlayout1,
		       addr: this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
	}

	/* Completion fires from the DMA IRQ on the last descriptor. */
	desc->callback = dma_irq_callback;
	desc->callback_param = this;
	dma_completion = &this->dma_done;
	bch_completion = NULL;

	init_completion(x: dma_completion);

	/* BCH reads additionally complete via the BCH IRQ. */
	if (this->bch && buf_read) {
		writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
		       addr: this->resources.bch_regs + HW_BCH_CTRL_SET);
		bch_completion = &this->bch_done;
		init_completion(x: bch_completion);
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan: get_dma_chan(this));

	to = wait_for_completion_timeout(x: dma_completion, timeout: msecs_to_jiffies(m: 1000));
	if (!to) {
		dev_err(this->dev, "DMA timeout, last DMA\n");
		gpmi_dump_info(this);
		ret = -ETIMEDOUT;
		goto unmap;
	}

	if (this->bch && buf_read) {
		to = wait_for_completion_timeout(x: bch_completion, timeout: msecs_to_jiffies(m: 1000));
		if (!to) {
			dev_err(this->dev, "BCH timeout, last DMA\n");
			gpmi_dump_info(this);
			ret = -ETIMEDOUT;
			goto unmap;
		}
	}

	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
	       addr: this->resources.bch_regs + HW_BCH_CTRL_CLR);
	gpmi_clear_bch(this);

	ret = 0;

unmap:
	/* Unmap every transfer that was mapped during chain building. */
	for (i = 0; i < this->ntransfers; i++) {
		struct gpmi_transfer *transfer = &this->transfers[i];

		if (transfer->direction != DMA_NONE)
			dma_unmap_sg(this->dev, &transfer->sgl, 1,
				     transfer->direction);
	}

	/* Bounce the data back to the caller if it wasn't DMA'd directly. */
	if (!ret && buf_read && !direct)
		memcpy(buf_read, this->data_buffer_dma,
		       gpmi_raw_len_to_len(this, buf_len));

	this->bch = false;

out_pm:
	pm_runtime_mark_last_busy(dev: this->dev);
	pm_runtime_put_autosuspend(dev: this->dev);

	return ret;
}
2672 | |
/* Controller hooks handed to the NAND core. */
static const struct nand_controller_ops gpmi_nand_controller_ops = {
	.attach_chip = gpmi_nand_attach_chip,
	.setup_interface = gpmi_setup_interface,
	.exec_op = gpmi_nfc_exec_op,
};
2678 | |
2679 | static int gpmi_nand_init(struct gpmi_nand_data *this) |
2680 | { |
2681 | struct nand_chip *chip = &this->nand; |
2682 | struct mtd_info *mtd = nand_to_mtd(chip); |
2683 | int ret; |
2684 | |
2685 | /* init the MTD data structures */ |
2686 | mtd->name = "gpmi-nand"; |
2687 | mtd->dev.parent = this->dev; |
2688 | |
2689 | /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */ |
2690 | nand_set_controller_data(chip, priv: this); |
2691 | nand_set_flash_node(chip, np: this->pdev->dev.of_node); |
2692 | chip->legacy.block_markbad = gpmi_block_markbad; |
2693 | chip->badblock_pattern = &gpmi_bbt_descr; |
2694 | chip->options |= NAND_NO_SUBPAGE_WRITE; |
2695 | |
2696 | /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ |
2697 | this->swap_block_mark = !GPMI_IS_MX23(this); |
2698 | |
2699 | /* |
2700 | * Allocate a temporary DMA buffer for reading ID in the |
2701 | * nand_scan_ident(). |
2702 | */ |
2703 | this->bch_geometry.payload_size = 1024; |
2704 | this->bch_geometry.auxiliary_size = 128; |
2705 | ret = gpmi_alloc_dma_buffer(this); |
2706 | if (ret) |
2707 | return ret; |
2708 | |
2709 | nand_controller_init(nfc: &this->base); |
2710 | this->base.ops = &gpmi_nand_controller_ops; |
2711 | chip->controller = &this->base; |
2712 | |
2713 | ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1); |
2714 | if (ret) |
2715 | goto err_out; |
2716 | |
2717 | ret = nand_boot_init(this); |
2718 | if (ret) |
2719 | goto err_nand_cleanup; |
2720 | ret = nand_create_bbt(chip); |
2721 | if (ret) |
2722 | goto err_nand_cleanup; |
2723 | |
2724 | ret = mtd_device_register(mtd, NULL, 0); |
2725 | if (ret) |
2726 | goto err_nand_cleanup; |
2727 | return 0; |
2728 | |
2729 | err_nand_cleanup: |
2730 | nand_cleanup(chip); |
2731 | err_out: |
2732 | gpmi_free_dma_buffer(this); |
2733 | return ret; |
2734 | } |
2735 | |
2736 | static const struct of_device_id gpmi_nand_id_table[] = { |
2737 | { .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, }, |
2738 | { .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, }, |
2739 | { .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, }, |
2740 | { .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, }, |
2741 | { .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,}, |
2742 | { .compatible = "fsl,imx8qxp-gpmi-nand", .data = &gpmi_devdata_imx8qxp, }, |
2743 | {} |
2744 | }; |
2745 | MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); |
2746 | |
2747 | static int gpmi_nand_probe(struct platform_device *pdev) |
2748 | { |
2749 | struct gpmi_nand_data *this; |
2750 | int ret; |
2751 | |
2752 | this = devm_kzalloc(dev: &pdev->dev, size: sizeof(*this), GFP_KERNEL); |
2753 | if (!this) |
2754 | return -ENOMEM; |
2755 | |
2756 | this->devdata = of_device_get_match_data(dev: &pdev->dev); |
2757 | platform_set_drvdata(pdev, data: this); |
2758 | this->pdev = pdev; |
2759 | this->dev = &pdev->dev; |
2760 | |
2761 | ret = acquire_resources(this); |
2762 | if (ret) |
2763 | goto exit_acquire_resources; |
2764 | |
2765 | pm_runtime_enable(dev: &pdev->dev); |
2766 | pm_runtime_set_autosuspend_delay(dev: &pdev->dev, delay: 500); |
2767 | pm_runtime_use_autosuspend(dev: &pdev->dev); |
2768 | |
2769 | ret = gpmi_init(this); |
2770 | if (ret) |
2771 | goto exit_nfc_init; |
2772 | |
2773 | ret = gpmi_nand_init(this); |
2774 | if (ret) |
2775 | goto exit_nfc_init; |
2776 | |
2777 | dev_info(this->dev, "driver registered.\n"); |
2778 | |
2779 | return 0; |
2780 | |
2781 | exit_nfc_init: |
2782 | pm_runtime_dont_use_autosuspend(dev: &pdev->dev); |
2783 | pm_runtime_disable(dev: &pdev->dev); |
2784 | release_resources(this); |
2785 | exit_acquire_resources: |
2786 | |
2787 | return ret; |
2788 | } |
2789 | |
2790 | static void gpmi_nand_remove(struct platform_device *pdev) |
2791 | { |
2792 | struct gpmi_nand_data *this = platform_get_drvdata(pdev); |
2793 | struct nand_chip *chip = &this->nand; |
2794 | int ret; |
2795 | |
2796 | ret = mtd_device_unregister(master: nand_to_mtd(chip)); |
2797 | WARN_ON(ret); |
2798 | nand_cleanup(chip); |
2799 | gpmi_free_dma_buffer(this); |
2800 | release_resources(this); |
2801 | pm_runtime_dont_use_autosuspend(dev: &pdev->dev); |
2802 | pm_runtime_disable(dev: &pdev->dev); |
2803 | } |
2804 | |
/*
 * System-sleep suspend: park the pins in their sleep state, then force
 * the device into runtime suspend (gates the clocks).
 */
static int gpmi_pm_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);

	return pm_runtime_force_suspend(dev);
}
2814 | |
2815 | static int gpmi_pm_resume(struct device *dev) |
2816 | { |
2817 | struct gpmi_nand_data *this = dev_get_drvdata(dev); |
2818 | int ret; |
2819 | |
2820 | ret = pm_runtime_force_resume(dev); |
2821 | if (ret) { |
2822 | dev_err(this->dev, "Error in resume %d\n", ret); |
2823 | return ret; |
2824 | } |
2825 | |
2826 | pinctrl_pm_select_default_state(dev); |
2827 | |
2828 | /* re-init the GPMI registers */ |
2829 | ret = gpmi_init(this); |
2830 | if (ret) { |
2831 | dev_err(this->dev, "Error setting GPMI : %d\n", ret); |
2832 | return ret; |
2833 | } |
2834 | |
2835 | /* Set flag to get timing setup restored for next exec_op */ |
2836 | if (this->hw.clk_rate) |
2837 | this->hw.must_apply_timings = true; |
2838 | |
2839 | /* re-init the BCH registers */ |
2840 | ret = bch_set_geometry(this); |
2841 | if (ret) { |
2842 | dev_err(this->dev, "Error setting BCH : %d\n", ret); |
2843 | return ret; |
2844 | } |
2845 | |
2846 | return 0; |
2847 | } |
2848 | |
/* Convenience wrappers around __gpmi_enable_clk() for the runtime-PM hooks. */
#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
2851 | |
/* Runtime-PM suspend: gate the controller clocks. Never fails. */
static int gpmi_runtime_suspend(struct device *dev)
{
	struct gpmi_nand_data *data = dev_get_drvdata(dev);

	gpmi_disable_clk(data);
	return 0;
}
2860 | |
/*
 * Runtime-PM resume: ungate the controller clocks.
 *
 * Returns 0 on success or the clock-enable error. The original
 * `ret = ...; if (ret) return ret; return 0;` sequence (plus a stray
 * blank line before the closing brace) is collapsed into a direct
 * return, which is behaviorally identical.
 */
static int gpmi_runtime_resume(struct device *dev)
{
	struct gpmi_nand_data *this = dev_get_drvdata(dev);

	return gpmi_enable_clk(this);
}
2873 | |
/* System-sleep and runtime PM callbacks (no runtime-idle hook). */
static const struct dev_pm_ops gpmi_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
	RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
};
2878 | |
/* Platform driver glue; matched against the DT table above. */
static struct platform_driver gpmi_nand_driver = {
	.driver = {
		.name = "gpmi-nand",
		/* pm_ptr() drops the ops when CONFIG_PM is disabled. */
		.pm = pm_ptr(&gpmi_pm_ops),
		.of_match_table = gpmi_nand_id_table,
	},
	.probe = gpmi_nand_probe,
	.remove = gpmi_nand_remove,
};
module_platform_driver(gpmi_nand_driver);
2889 | |
2890 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); |
2891 | MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver"); |
2892 | MODULE_LICENSE("GPL"); |
2893 |
Definitions
- clear_poll_bit
- gpmi_reset_block
- __gpmi_enable_clk
- gpmi_init
- gpmi_dump_info
- gpmi_check_ecc
- bbm_in_data_chunk
- set_geometry_by_ecc_info
- get_ecc_strength
- set_geometry_for_large_oob
- legacy_set_geometry
- common_nfc_set_geometry
- bch_set_geometry
- gpmi_nfc_compute_timings
- gpmi_nfc_apply_timings
- gpmi_setup_interface
- gpmi_clear_bch
- get_dma_chan
- dma_irq_callback
- bch_irq
- gpmi_raw_len_to_len
- prepare_data_dma
- scan_ff_pattern
- gpmi_bbt_descr
- gpmi_ooblayout_ecc
- gpmi_ooblayout_free
- gpmi_clks_for_mx2x
- gpmi_ooblayout_ops
- gpmi_devdata_imx23
- gpmi_devdata_imx28
- gpmi_clks_for_mx6
- gpmi_devdata_imx6q
- gpmi_devdata_imx6sx
- gpmi_clks_for_mx7d
- gpmi_devdata_imx7d
- gpmi_clks_for_mx8qxp
- gpmi_devdata_imx8qxp
- acquire_register_block
- acquire_bch_irq
- release_dma_channels
- acquire_dma_channels
- gpmi_get_clks
- acquire_resources
- release_resources
- gpmi_free_dma_buffer
- gpmi_alloc_dma_buffer
- block_mark_swapping
- gpmi_count_bitflips
- gpmi_bch_layout_std
- gpmi_ecc_read_page
- gpmi_ecc_read_subpage
- gpmi_ecc_write_page
- gpmi_ecc_read_oob
- gpmi_ecc_write_oob
- gpmi_ecc_read_page_raw
- gpmi_ecc_write_page_raw
- gpmi_ecc_read_oob_raw
- gpmi_ecc_write_oob_raw
- gpmi_block_markbad
- nand_boot_set_geometry
- fingerprint
- mx23_check_transcription_stamp
- mx23_write_transcription_stamp
- mx23_boot_init
- nand_boot_init
- gpmi_set_geometry
- gpmi_init_last
- gpmi_nand_attach_chip
- get_next_transfer
- gpmi_chain_command
- gpmi_chain_wait_ready
- gpmi_chain_data_read
- gpmi_chain_data_write
- gpmi_nfc_exec_op
- gpmi_nand_controller_ops
- gpmi_nand_init
- gpmi_nand_id_table
- gpmi_nand_probe
- gpmi_nand_remove
- gpmi_pm_suspend
- gpmi_pm_resume
- gpmi_runtime_suspend
- gpmi_runtime_resume
- gpmi_pm_ops