1
2
3
4
5
6
7#include <linux/clk.h>
8#include <linux/completion.h>
9#include <linux/delay.h>
10#include <linux/dma-mapping.h>
11#include <linux/dmaengine.h>
12#include <linux/err.h>
13#include <linux/errno.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/iopoll.h>
17#include <linux/jiffies.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/partitions.h>
22#include <linux/mtd/spi-nor.h>
23#include <linux/of_device.h>
24#include <linux/of.h>
25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/reset.h>
28#include <linux/firmware/xlnx-zynqmp.h>
29#include <linux/sched.h>
30#include <linux/of_gpio.h>
31#include <linux/spi/spi.h>
32#include <linux/timer.h>
33#include <linux/workqueue.h>
34
35#define CQSPI_NAME "cadence-qspi"
36#define CQSPI_MAX_CHIPSELECT 16
37
38
39#define CQSPI_NEEDS_WR_DELAY BIT(0)
40#define CQSPI_HAS_DMA BIT(1)
41#define CQSPI_SUPPORT_RESET BIT(2)
42
43
44#define CQSPI_BASE_HWCAPS_MASK \
45 (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | \
46 SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 | \
47 SNOR_HWCAPS_PP)
48
49struct cqspi_st;
50
51struct cqspi_flash_pdata {
52 struct spi_nor nor;
53 struct cqspi_st *cqspi;
54 u32 clk_rate;
55 u32 read_delay;
56 u32 tshsl_ns;
57 u32 tsd2d_ns;
58 u32 tchsh_ns;
59 u32 tslch_ns;
60 u8 inst_width;
61 u8 addr_width;
62 u8 data_width;
63 u8 cs;
64 bool registered;
65 bool use_direct_mode;
66};
67
68struct cqspi_st {
69 struct platform_device *pdev;
70
71 struct clk *clk;
72 unsigned int sclk;
73
74 void __iomem *iobase;
75 void __iomem *ahb_base;
76 resource_size_t ahb_size;
77 struct completion transfer_complete;
78 struct mutex bus_mutex;
79
80 struct dma_chan *rx_chan;
81 struct completion rx_dma_complete;
82 dma_addr_t mmap_phys_base;
83
84 int current_cs;
85 int current_page_size;
86 int current_erase_size;
87 int current_addr_width;
88 unsigned long master_ref_clk_hz;
89 bool is_decoded_cs;
90 u32 fifo_depth;
91 u32 fifo_width;
92 bool rclk_en;
93 u32 trigger_address;
94 u32 wr_delay;
95 struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
96 bool read_dma;
97 void *rxbuf;
98 int bytes_to_rx;
99 int bytes_to_dma;
100 loff_t addr;
101 dma_addr_t dma_addr;
102 u8 edge_mode;
103 bool extra_dummy;
104 u8 access_mode;
105 bool unalined_byte_cnt;
106 u8 dll_mode;
107 struct completion tuning_complete;
108 struct completion request_complete;
109 int (*indirect_read_dma)(struct spi_nor *nor, u_char *rxbuf,
110 loff_t from_addr, size_t n_rx);
111 int (*flash_reset)(struct cqspi_st *cqspi, u8 reset_type);
112 const struct zynqmp_eemi_ops *eemi_ops;
113};
114
115struct cqspi_driver_platdata {
116 u32 hwcaps_mask;
117 u8 quirks;
118};
119
120
121#define CQSPI_TIMEOUT_MS 500
122#define CQSPI_READ_TIMEOUT_MS 10
123#define CQSPI_TUNING_TIMEOUT_MS 5000
124#define CQSPI_TUNING_PERIODICITY_MS 300000
125
126
127#define CQSPI_INST_TYPE_SINGLE 0
128#define CQSPI_INST_TYPE_DUAL 1
129#define CQSPI_INST_TYPE_QUAD 2
130#define CQSPI_INST_TYPE_OCTAL 3
131
132#define CQSPI_DUMMY_CLKS_PER_BYTE 8
133#define CQSPI_DUMMY_BYTES_MAX 4
134#define CQSPI_DUMMY_CLKS_MAX 31
135
136#define CQSPI_STIG_DATA_LEN_MAX 8
137
138
139#define CQSPI_EDGE_MODE_SDR 0
140#define CQSPI_EDGE_MODE_DDR 1
141
142
143#define CQSPI_REG_CONFIG 0x00
144#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
145#define CQSPI_REG_CONFIG_PHY_ENABLE_MASK BIT(3)
146#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
147#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
148#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
149#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
150#define CQSPI_REG_CONFIG_AHB_ADDR_REMAP_MASK BIT(16)
151#define CQSPI_REG_CONFIG_DTR_PROT_EN_MASK BIT(24)
152#define CQSPI_REG_CONFIG_BAUD_LSB 19
153#define CQSPI_REG_CONFIG_IDLE_LSB 31
154#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
155#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
156
157#define CQSPI_REG_RD_INSTR 0x04
158#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
159#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
160#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
161#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
162#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
163#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
164#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
165#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
166#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
167#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
168
169#define CQSPI_REG_WR_INSTR 0x08
170#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
171#define CQSPI_REG_WR_INSTR_OPCODE_MASK 0xFF
172#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
173#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
174
175#define CQSPI_REG_DELAY 0x0C
176#define CQSPI_REG_DELAY_TSLCH_LSB 0
177#define CQSPI_REG_DELAY_TCHSH_LSB 8
178#define CQSPI_REG_DELAY_TSD2D_LSB 16
179#define CQSPI_REG_DELAY_TSHSL_LSB 24
180#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
181#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
182#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
183#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
184
185#define CQSPI_REG_READCAPTURE 0x10
186#define CQSPI_REG_READCAPTURE_DQS_ENABLE BIT(8)
187#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
188#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
189#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF
190
191#define CQSPI_REG_SIZE 0x14
192#define CQSPI_REG_SIZE_ADDRESS_LSB 0
193#define CQSPI_REG_SIZE_PAGE_LSB 4
194#define CQSPI_REG_SIZE_BLOCK_LSB 16
195#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
196#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
197#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
198
199#define CQSPI_REG_SRAMPARTITION 0x18
200#define CQSPI_REG_INDIRECTTRIGGER 0x1C
201
202#define CQSPI_REG_DMA 0x20
203#define CQSPI_REG_DMA_SINGLE_LSB 0
204#define CQSPI_REG_DMA_BURST_LSB 8
205#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
206#define CQSPI_REG_DMA_BURST_MASK 0xFF
207#define CQSPI_REG_DMA_VAL 0x602
208
209#define CQSPI_REG_REMAP 0x24
210#define CQSPI_REG_MODE_BIT 0x28
211
212#define CQSPI_REG_SDRAMLEVEL 0x2C
213#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
214#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
215#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
216#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
217
218#define CQSPI_REG_WRCOMPLETION 0x38
219#define CQSPI_REG_WRCOMPLETION_POLLCNT_MASK 0xFF0000
220#define CQSPI_REG_WRCOMPLETION_POLLCNY_LSB 16
221
222#define CQSPI_REG_IRQSTATUS 0x40
223#define CQSPI_REG_IRQMASK 0x44
224#define CQSPI_REG_ECO 0x48
225
226#define CQSPI_REG_INDIRECTRD 0x60
227#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
228#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
229#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)
230
231#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
232#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
233#define CQSPI_REG_INDIRECTRDBYTES 0x6C
234
235#define CQSPI_REG_CMDCTRL 0x90
236#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
237#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
238#define CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB 7
239#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
240#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
241#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
242#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
243#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
244#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
245#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
246#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
247#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
248#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
249#define CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK 0x1F
250
251#define CQSPI_REG_INDIRECTWR 0x70
252#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
253#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
254#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)
255
256#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
257#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
258#define CQSPI_REG_INDIRECTWRBYTES 0x7C
259
260#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80
261#define CQSPI_REG_INDTRIG_ADDRRANGE_WIDTH 0x6
262
263#define CQSPI_REG_CMDADDRESS 0x94
264#define CQSPI_REG_CMDREADDATALOWER 0xA0
265#define CQSPI_REG_CMDREADDATAUPPER 0xA4
266#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
267#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
268
269#define CQSPI_REG_PHY_CONFIG 0xB4
270#define CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK 0x80000000
271#define CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK 0x40000000
272#define CQSPI_REG_PHY_CONFIG_TX_DLL_DLY_LSB 16
273
274#define CQSPI_REG_PHY_MASTER_CTRL 0xB8
275#define CQSPI_REG_DLL_LOWER 0xBC
276#define CQSPI_REG_DLL_LOWER_LPBK_LOCK_MASK 0x8000
277#define CQSPI_REG_DLL_LOWER_DLL_LOCK_MASK 0x1
278
279#define CQSPI_REG_DMA_SRC_ADDR 0x1000
280#define CQSPI_REG_DMA_DST_ADDR 0x1800
281#define CQSPI_REG_DMA_DST_SIZE 0x1804
282#define CQSPI_REG_DMA_DST_STS 0x1808
283#define CQSPI_REG_DMA_DST_CTRL 0x180C
284#define CQSPI_REG_DMA_DST_CTRL_VAL 0xF43FFA00
285
286#define CQSPI_REG_DMA_DTS_I_STS 0x1814
287#define CQSPI_REG_DMA_DST_I_EN 0x1818
288#define CQSPI_REG_DMA_DST_I_EN_DONE BIT(1)
289
290#define CQSPI_REG_DMA_DST_I_DIS 0x181C
291#define CQSPI_REG_DMA_DST_I_DIS_DONE BIT(1)
292#define CQSPI_REG_DMA_DST_ALL_I_DIS_MASK 0xFE
293#define CQSPI_REG_DMA_DST_I_MASK 0x1820
294#define CQSPI_REG_DMA_DST_ADDR_MSB 0x1828
295
296
297#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
298#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
299#define CQSPI_REG_IRQ_IND_COMP BIT(2)
300#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
301#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
302#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
303#define CQSPI_REG_IRQ_WATERMARK BIT(6)
304#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)
305
306#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
307 CQSPI_REG_IRQ_IND_SRAM_FULL | \
308 CQSPI_REG_IRQ_IND_COMP)
309
310#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
311 CQSPI_REG_IRQ_WATERMARK | \
312 CQSPI_REG_IRQ_UNDERFLOW)
313
314#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
315#define CQSPI_MIO_NODE_ID_12 0x14108027
316#define CQSPI_READ_ID 0x9F
317#define CQSPI_FAST_READ 0x0C
318#define CQSPI_READ_ID_LEN 6
319#define TERA_MACRO 1000000000000l
320
321#define CQSPI_RESET_TYPE_HWPIN 0
322
323#define CQSPI_DMA_MODE 0
324#define CQSPI_LINEAR_MODE 1
325
326#define RESET_OSPI 0xc10402e
327#define DEV_OSPI 0x1822402a
328
329#define SILICON_VER_MASK 0xFF
330#define SILICON_VER_1 0x10
331#define CQSPI_DLL_MODE_MASTER 0
332#define CQSPI_DLL_MODE_BYPASS 1
333#define TAP_GRAN_SEL_MIN_FREQ 120000000
334#define CQSPI_TX_TAP_MASTER 0x19
335#define CQSPI_MAX_DLL_TAPS 128
336
337#define CQSPI_CS0 0
338#define CQSPI_CS1 1
339
340static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
341{
342 u32 val;
343
344 return readl_relaxed_poll_timeout(reg, val,
345 (((clr ? ~val : val) & mask) == mask),
346 10, CQSPI_TIMEOUT_MS * 1000);
347}
348
349static bool cqspi_is_idle(struct cqspi_st *cqspi)
350{
351 u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
352
353 return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
354}
355
356static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
357{
358 u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
359
360 reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
361 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
362}
363
364static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
365{
366 struct cqspi_flash_pdata *f_pdata = nor->priv;
367 u32 rdreg = 0;
368
369 rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
370 rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
371 rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
372
373 return rdreg;
374}
375
376static int cqspi_wait_idle(struct cqspi_st *cqspi)
377{
378 const unsigned int poll_idle_retry = 3;
379 unsigned int count = 0;
380 unsigned long timeout;
381
382 timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
383 while (1) {
384
385
386
387
388
389 if (cqspi_is_idle(cqspi))
390 count++;
391 else
392 count = 0;
393
394 if (count >= poll_idle_retry)
395 return 0;
396
397 if (time_after(jiffies, timeout)) {
398
399 dev_err(&cqspi->pdev->dev,
400 "QSPI is still busy after %dms timeout.\n",
401 CQSPI_TIMEOUT_MS);
402 return -ETIMEDOUT;
403 }
404
405 cpu_relax();
406 }
407}
408
409static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
410{
411 void __iomem *reg_base = cqspi->iobase;
412 int ret;
413
414
415 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
416
417 reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
418 writel(reg, reg_base + CQSPI_REG_CMDCTRL);
419
420
421 ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
422 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
423 if (ret) {
424 dev_err(&cqspi->pdev->dev,
425 "Flash command execution timed out.\n");
426 return ret;
427 }
428
429
430 return cqspi_wait_idle(cqspi);
431}
432
433static void process_dma_irq(struct cqspi_st *cqspi)
434{
435 struct platform_device *pdev = cqspi->pdev;
436 struct device *dev = &pdev->dev;
437 unsigned int rem;
438 unsigned int reg;
439 unsigned int data;
440 u8 addr_bytes;
441 u8 opcode;
442 u8 dummy_cycles;
443
444
445 writel(CQSPI_REG_DMA_DST_I_DIS_DONE,
446 cqspi->iobase + CQSPI_REG_DMA_DST_I_DIS);
447
448
449 writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
450 cqspi->iobase + CQSPI_REG_INDIRECTRD);
451 dma_unmap_single(dev, cqspi->dma_addr, cqspi->bytes_to_dma,
452 DMA_FROM_DEVICE);
453 rem = cqspi->bytes_to_rx - cqspi->bytes_to_dma;
454
455
456 if (rem) {
457 cqspi->rxbuf += cqspi->bytes_to_dma;
458 writel(cqspi->addr + cqspi->bytes_to_dma,
459 cqspi->iobase + CQSPI_REG_CMDADDRESS);
460 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
461 opcode = (u8)readl(cqspi->iobase + CQSPI_REG_RD_INSTR);
462 dummy_cycles = (readl(cqspi->iobase +
463 CQSPI_REG_RD_INSTR) >>
464 CQSPI_REG_RD_INSTR_DUMMY_LSB) &
465 CQSPI_REG_RD_INSTR_DUMMY_MASK;
466 } else {
467 opcode = CQSPI_FAST_READ;
468 dummy_cycles = 8;
469 writel((dummy_cycles << CQSPI_REG_RD_INSTR_DUMMY_LSB) |
470 opcode, cqspi->iobase + CQSPI_REG_RD_INSTR);
471 }
472 addr_bytes = readl(cqspi->iobase + CQSPI_REG_SIZE) &
473 CQSPI_REG_SIZE_ADDRESS_MASK;
474 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
475 reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
476 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
477 reg |= (addr_bytes & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
478 CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
479 reg |= (dummy_cycles & CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK) <<
480 CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB;
481 cqspi->unalined_byte_cnt = false;
482 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
483 ((rem % 2) != 0)) {
484 cqspi->unalined_byte_cnt = true;
485 }
486
487 reg |= (((rem - 1 + cqspi->unalined_byte_cnt) &
488 CQSPI_REG_CMDCTRL_RD_BYTES_MASK) <<
489 CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
490 cqspi_exec_flash_cmd(cqspi, reg);
491 data = readl(cqspi->iobase + CQSPI_REG_CMDREADDATALOWER);
492
493
494 memcpy(cqspi->rxbuf, &data, rem);
495 }
496}
497
498static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
499{
500 struct cqspi_st *cqspi = dev;
501 unsigned int irq_status;
502 unsigned int dma_status;
503
504
505 irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
506 irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
507
508
509 if (irq_status)
510 writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
511
512
513 dma_status = readl(cqspi->iobase + CQSPI_REG_DMA_DTS_I_STS);
514 dma_status &= CQSPI_REG_DMA_DST_I_EN_DONE;
515
516
517 if (dma_status)
518 writel(dma_status, cqspi->iobase + CQSPI_REG_DMA_DTS_I_STS);
519
520 if (irq_status || dma_status)
521 complete(&cqspi->transfer_complete);
522
523 return IRQ_HANDLED;
524}
525
526static int cqspi_command_read(struct spi_nor *nor,
527 const u8 *txbuf, const unsigned n_tx,
528 u8 *rxbuf, const unsigned n_rx)
529{
530 struct cqspi_flash_pdata *f_pdata = nor->priv;
531 struct cqspi_st *cqspi = f_pdata->cqspi;
532 void __iomem *reg_base = cqspi->iobase;
533 unsigned int rdreg;
534 unsigned int reg;
535 unsigned int read_len;
536 int status;
537 u8 dummy_cycles;
538
539 if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
540 dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
541 n_rx, rxbuf);
542 return -EINVAL;
543 }
544
545 reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
546
547 rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
548 writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
549
550 reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
551
552
553 reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
554 << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
555 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR)
556 dummy_cycles = 8;
557 else
558 dummy_cycles = 0;
559 if (cqspi->extra_dummy)
560 dummy_cycles++;
561 reg |= ((dummy_cycles & CQSPI_REG_CMDCTRL_DUMMY_BYTES_MASK)
562 << CQSPI_REG_CMDCTRL_DUMMY_BYTES_LSB);
563 status = cqspi_exec_flash_cmd(cqspi, reg);
564 if (status)
565 return status;
566
567 reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
568
569
570 read_len = (n_rx > 4) ? 4 : n_rx;
571 memcpy(rxbuf, ®, read_len);
572 rxbuf += read_len;
573
574 if (n_rx > 4) {
575 reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
576
577 read_len = n_rx - read_len;
578 memcpy(rxbuf, ®, read_len);
579 }
580
581 return 0;
582}
583
584static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
585 const u8 *txbuf, const unsigned n_tx)
586{
587 struct cqspi_flash_pdata *f_pdata = nor->priv;
588 struct cqspi_st *cqspi = f_pdata->cqspi;
589 void __iomem *reg_base = cqspi->iobase;
590 unsigned int reg;
591 unsigned int data;
592 u32 write_len;
593 int ret;
594
595 if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
596 dev_err(nor->dev,
597 "Invalid input argument, cmdlen %d txbuf 0x%p\n",
598 n_tx, txbuf);
599 return -EINVAL;
600 }
601
602 reg = f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
603 reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
604 writel(reg, reg_base + CQSPI_REG_WR_INSTR);
605 reg = cqspi_calc_rdreg(nor, opcode);
606 writel(reg, reg_base + CQSPI_REG_RD_INSTR);
607
608 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
609 if (n_tx) {
610 reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
611 reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
612 << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
613 if (nor->is_addrvalid) {
614 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
615 reg |= ((nor->addr_width - 1) &
616 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) <<
617 CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
618 writel(nor->reg_addr, reg_base + CQSPI_REG_CMDADDRESS);
619 }
620 data = 0;
621 write_len = (n_tx > 4) ? 4 : n_tx;
622 memcpy(&data, txbuf, write_len);
623 txbuf += write_len;
624 writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
625
626 if (n_tx > 4) {
627 data = 0;
628 write_len = n_tx - 4;
629 memcpy(&data, txbuf, write_len);
630 writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
631 }
632 }
633 ret = cqspi_exec_flash_cmd(cqspi, reg);
634 return ret;
635}
636
637static int cqspi_command_write_addr(struct spi_nor *nor,
638 const u8 opcode, const unsigned int addr)
639{
640 struct cqspi_flash_pdata *f_pdata = nor->priv;
641 struct cqspi_st *cqspi = f_pdata->cqspi;
642 void __iomem *reg_base = cqspi->iobase;
643 unsigned int reg;
644
645 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
646 reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
647 reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
648 << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
649
650 writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
651
652 return cqspi_exec_flash_cmd(cqspi, reg);
653}
654
655static int cqspi_read_setup(struct spi_nor *nor)
656{
657 struct cqspi_flash_pdata *f_pdata = nor->priv;
658 struct cqspi_st *cqspi = f_pdata->cqspi;
659 void __iomem *reg_base = cqspi->iobase;
660 unsigned int dummy_clk = 0;
661 unsigned int reg;
662
663 reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
664 reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
665
666
667 dummy_clk = nor->read_dummy;
668 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
669 dummy_clk = CQSPI_DUMMY_CLKS_MAX;
670
671 if (!(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
672 if (cqspi->extra_dummy)
673 dummy_clk++;
674 if (dummy_clk)
675 reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
676 << CQSPI_REG_RD_INSTR_DUMMY_LSB;
677 } else {
678 if (dummy_clk / 8) {
679 reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
680
681 writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);
682
683
684 if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
685 dummy_clk -= 8;
686
687 if (dummy_clk)
688 reg |= (dummy_clk &
689 CQSPI_REG_RD_INSTR_DUMMY_MASK)
690 << CQSPI_REG_RD_INSTR_DUMMY_LSB;
691 }
692 }
693
694 writel(reg, reg_base + CQSPI_REG_RD_INSTR);
695
696
697 reg = readl(reg_base + CQSPI_REG_SIZE);
698 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
699 reg |= (nor->addr_width - 1);
700 writel(reg, reg_base + CQSPI_REG_SIZE);
701 return 0;
702}
703
704static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
705 loff_t from_addr, const size_t n_rx)
706{
707 struct cqspi_flash_pdata *f_pdata = nor->priv;
708 struct cqspi_st *cqspi = f_pdata->cqspi;
709 void __iomem *reg_base = cqspi->iobase;
710 void __iomem *ahb_base = cqspi->ahb_base;
711 unsigned int remaining = n_rx;
712 unsigned int mod_bytes = n_rx % 4;
713 unsigned int bytes_to_read = 0;
714 u8 *rxbuf_end = rxbuf + n_rx;
715 u8 *rxbuf_start = rxbuf;
716 int ret = 0;
717 u32 reg;
718 u8 extra_bytes = 0;
719
720 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
721 reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
722 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
723
724 if (cqspi->eemi_ops && cqspi->access_mode == CQSPI_DMA_MODE) {
725 cqspi_wait_idle(cqspi);
726 cqspi->eemi_ops->ioctl(DEV_OSPI, IOCTL_OSPI_MUX_SELECT,
727 PM_OSPI_MUX_SEL_LINEAR, 0, NULL);
728 cqspi->access_mode = CQSPI_LINEAR_MODE;
729 cqspi_wait_idle(cqspi);
730 }
731
732 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
733 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
734 ((from_addr % 2) != 0) && !cqspi->unalined_byte_cnt) {
735 if (!cqspi->unalined_byte_cnt) {
736 extra_bytes = 2;
737 mod_bytes += 1;
738 } else if (((n_rx + 1) % 4) != 0) {
739 mod_bytes += 1;
740 }
741 }
742
743 writel(remaining + cqspi->unalined_byte_cnt +
744 extra_bytes, reg_base + CQSPI_REG_INDIRECTRDBYTES);
745
746
747 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
748
749 writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
750
751 reinit_completion(&cqspi->transfer_complete);
752 writel(CQSPI_REG_INDIRECTRD_START_MASK,
753 reg_base + CQSPI_REG_INDIRECTRD);
754
755 while (remaining > 0) {
756 if (!wait_for_completion_timeout(&cqspi->transfer_complete,
757 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
758 ret = -ETIMEDOUT;
759
760 bytes_to_read = cqspi_get_rd_sram_level(cqspi);
761
762 if (ret && bytes_to_read == 0) {
763 dev_err(nor->dev, "Indirect read timeout, no bytes\n");
764 goto failrd;
765 }
766
767 while (bytes_to_read != 0) {
768 unsigned int word_remain = round_down(remaining, 4);
769
770 bytes_to_read *= cqspi->fifo_width;
771 bytes_to_read = bytes_to_read > remaining ?
772 remaining : bytes_to_read;
773 bytes_to_read = round_down(bytes_to_read, 4);
774
775 if (bytes_to_read) {
776 u8 offset = 0;
777
778 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
779 ((from_addr % 2) != 0) && rxbuf ==
780 rxbuf_start) {
781 unsigned int temp = ioread32(ahb_base);
782
783 temp >>= 8;
784 memcpy(rxbuf, &temp, 3);
785 bytes_to_read -= 1;
786 offset = 3;
787 }
788 if (bytes_to_read >= 4)
789 ioread32_rep(ahb_base, rxbuf + offset,
790 (bytes_to_read / 4));
791 } else if (!word_remain && mod_bytes) {
792 unsigned int temp = ioread32(ahb_base);
793
794 bytes_to_read = remaining > mod_bytes ?
795 remaining : mod_bytes;
796 memcpy(rxbuf, &temp, min((unsigned int)
797 (rxbuf_end - rxbuf),
798 bytes_to_read));
799 }
800 rxbuf += bytes_to_read;
801 remaining -= bytes_to_read;
802 bytes_to_read = cqspi_get_rd_sram_level(cqspi);
803 }
804
805 if (remaining > 0)
806 reinit_completion(&cqspi->transfer_complete);
807 }
808
809
810 ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
811 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
812 if (ret) {
813 dev_err(nor->dev,
814 "Indirect read completion error (%i)\n", ret);
815 goto failrd;
816 }
817
818
819 writel(0, reg_base + CQSPI_REG_IRQMASK);
820
821
822 writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
823
824 return 0;
825
826failrd:
827
828 writel(0, reg_base + CQSPI_REG_IRQMASK);
829
830
831 writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
832 reg_base + CQSPI_REG_INDIRECTRD);
833 return ret;
834}
835
836static int cqspi_write_setup(struct spi_nor *nor, const u8 opcode)
837{
838 unsigned int reg;
839 struct cqspi_flash_pdata *f_pdata = nor->priv;
840 struct cqspi_st *cqspi = f_pdata->cqspi;
841 void __iomem *reg_base = cqspi->iobase;
842
843
844 reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
845 reg |= f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
846 reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
847 writel(reg, reg_base + CQSPI_REG_WR_INSTR);
848 reg = cqspi_calc_rdreg(nor, opcode);
849 writel(reg, reg_base + CQSPI_REG_RD_INSTR);
850
851 reg = readl(reg_base + CQSPI_REG_SIZE);
852 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
853 reg |= (nor->addr_width - 1);
854 writel(reg, reg_base + CQSPI_REG_SIZE);
855 return 0;
856}
857
858static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
859 const u8 *txbuf, const size_t n_tx)
860{
861 const unsigned int page_size = nor->page_size;
862 struct cqspi_flash_pdata *f_pdata = nor->priv;
863 struct cqspi_st *cqspi = f_pdata->cqspi;
864 void __iomem *reg_base = cqspi->iobase;
865 unsigned int remaining = n_tx;
866 unsigned int write_bytes;
867 int ret;
868 u32 reg;
869
870 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
871 reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
872 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
873
874 if (cqspi->eemi_ops && cqspi->access_mode == CQSPI_DMA_MODE) {
875 cqspi_wait_idle(cqspi);
876 cqspi->eemi_ops->ioctl(DEV_OSPI, IOCTL_OSPI_MUX_SELECT,
877 PM_OSPI_MUX_SEL_LINEAR, 0, NULL);
878 cqspi->access_mode = CQSPI_LINEAR_MODE;
879 cqspi_wait_idle(cqspi);
880 }
881
882 writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
883 writel(remaining + cqspi->unalined_byte_cnt,
884 reg_base + CQSPI_REG_INDIRECTWRBYTES);
885
886
887 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
888
889 writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);
890
891 reinit_completion(&cqspi->transfer_complete);
892 writel(CQSPI_REG_INDIRECTWR_START_MASK,
893 reg_base + CQSPI_REG_INDIRECTWR);
894
895
896
897
898
899
900
901 if (cqspi->wr_delay)
902 ndelay(cqspi->wr_delay);
903
904 while (remaining > 0) {
905 size_t write_words, mod_bytes;
906
907 write_bytes = remaining > page_size ? page_size : remaining;
908 write_words = write_bytes / 4;
909 mod_bytes = write_bytes % 4;
910
911 if (write_words) {
912 iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
913 txbuf += (write_words * 4);
914 }
915 if (mod_bytes) {
916 unsigned int temp = 0xFFFFFFFF;
917
918 memcpy(&temp, txbuf, mod_bytes);
919 iowrite32(temp, cqspi->ahb_base);
920 txbuf += mod_bytes;
921 }
922
923 if (!wait_for_completion_timeout(&cqspi->transfer_complete,
924 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
925 dev_err(nor->dev, "Indirect write timeout\n");
926 ret = -ETIMEDOUT;
927 goto failwr;
928 }
929
930 remaining -= write_bytes;
931
932 if (remaining > 0)
933 reinit_completion(&cqspi->transfer_complete);
934 }
935
936
937 ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
938 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
939 if (ret) {
940 dev_err(nor->dev,
941 "Indirect write completion error (%i)\n", ret);
942 goto failwr;
943 }
944
945
946 writel(0, reg_base + CQSPI_REG_IRQMASK);
947
948
949 writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);
950
951 cqspi_wait_idle(cqspi);
952
953 return 0;
954
955failwr:
956
957 writel(0, reg_base + CQSPI_REG_IRQMASK);
958
959
960 writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
961 reg_base + CQSPI_REG_INDIRECTWR);
962 return ret;
963}
964
965static void cqspi_chipselect(struct spi_nor *nor)
966{
967 struct cqspi_flash_pdata *f_pdata = nor->priv;
968 struct cqspi_st *cqspi = f_pdata->cqspi;
969 void __iomem *reg_base = cqspi->iobase;
970 unsigned int chip_select = f_pdata->cs;
971 unsigned int reg;
972
973 reg = readl(reg_base + CQSPI_REG_CONFIG);
974 if (cqspi->is_decoded_cs) {
975 reg |= CQSPI_REG_CONFIG_DECODE_MASK;
976 } else {
977 reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
978
979
980
981
982
983
984
985 chip_select = 0xF & ~(1 << chip_select);
986 }
987
988 reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
989 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
990 reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
991 << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
992 writel(reg, reg_base + CQSPI_REG_CONFIG);
993}
994
995static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
996{
997 struct cqspi_flash_pdata *f_pdata = nor->priv;
998 struct cqspi_st *cqspi = f_pdata->cqspi;
999 void __iomem *iobase = cqspi->iobase;
1000 unsigned int reg;
1001
1002
1003 reg = readl(iobase + CQSPI_REG_SIZE);
1004 reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
1005 reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
1006 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
1007 reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
1008 reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
1009 reg |= (nor->addr_width - 1);
1010 writel(reg, iobase + CQSPI_REG_SIZE);
1011
1012
1013 cqspi_chipselect(nor);
1014
1015
1016 cqspi->current_page_size = nor->page_size;
1017 cqspi->current_erase_size = nor->mtd.erasesize;
1018 cqspi->current_addr_width = nor->addr_width;
1019}
1020
1021static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
1022 const unsigned int ns_val)
1023{
1024 unsigned int ticks;
1025
1026 ticks = ref_clk_hz / 1000;
1027 ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
1028
1029 return ticks;
1030}
1031
1032static void cqspi_delay(struct spi_nor *nor)
1033{
1034 struct cqspi_flash_pdata *f_pdata = nor->priv;
1035 struct cqspi_st *cqspi = f_pdata->cqspi;
1036 void __iomem *iobase = cqspi->iobase;
1037 const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
1038 unsigned int tshsl, tchsh, tslch, tsd2d;
1039 unsigned int reg;
1040 unsigned int tsclk;
1041
1042
1043 tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
1044
1045 tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
1046
1047 if (tshsl < tsclk)
1048 tshsl = tsclk;
1049
1050 tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
1051 tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
1052 tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
1053
1054 reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
1055 << CQSPI_REG_DELAY_TSHSL_LSB;
1056 reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
1057 << CQSPI_REG_DELAY_TCHSH_LSB;
1058 reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
1059 << CQSPI_REG_DELAY_TSLCH_LSB;
1060 reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
1061 << CQSPI_REG_DELAY_TSD2D_LSB;
1062 writel(reg, iobase + CQSPI_REG_DELAY);
1063}
1064
1065static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
1066{
1067 const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
1068 void __iomem *reg_base = cqspi->iobase;
1069 u32 reg, div;
1070
1071
1072 div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
1073
1074 reg = readl(reg_base + CQSPI_REG_CONFIG);
1075 reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
1076 reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
1077 writel(reg, reg_base + CQSPI_REG_CONFIG);
1078}
1079
1080static void cqspi_readdata_capture(struct cqspi_st *cqspi,
1081 const bool bypass,
1082 const unsigned int delay)
1083{
1084 void __iomem *reg_base = cqspi->iobase;
1085 unsigned int reg;
1086
1087 reg = readl(reg_base + CQSPI_REG_READCAPTURE);
1088
1089 if (bypass)
1090 reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
1091 else
1092 reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
1093
1094 reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
1095 << CQSPI_REG_READCAPTURE_DELAY_LSB);
1096
1097 reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
1098 << CQSPI_REG_READCAPTURE_DELAY_LSB;
1099
1100 writel(reg, reg_base + CQSPI_REG_READCAPTURE);
1101}
1102
1103static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
1104{
1105 void __iomem *reg_base = cqspi->iobase;
1106 unsigned int reg;
1107
1108 reg = readl(reg_base + CQSPI_REG_CONFIG);
1109
1110 if (enable)
1111 reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
1112 else
1113 reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
1114
1115 writel(reg, reg_base + CQSPI_REG_CONFIG);
1116}
1117
1118static void cqspi_configure(struct spi_nor *nor)
1119{
1120 struct cqspi_flash_pdata *f_pdata = nor->priv;
1121 struct cqspi_st *cqspi = f_pdata->cqspi;
1122 const unsigned int sclk = f_pdata->clk_rate;
1123 int switch_cs = (cqspi->current_cs != f_pdata->cs);
1124 int switch_ck = (cqspi->sclk != sclk);
1125
1126 if ((cqspi->current_page_size != nor->page_size) ||
1127 (cqspi->current_erase_size != nor->mtd.erasesize) ||
1128 (cqspi->current_addr_width != nor->addr_width))
1129 switch_cs = 1;
1130
1131 if (switch_cs || switch_ck)
1132 cqspi_controller_enable(cqspi, 0);
1133
1134
1135 if (switch_cs) {
1136 cqspi->current_cs = f_pdata->cs;
1137 cqspi_configure_cs_and_sizes(nor);
1138 }
1139
1140
1141 if (switch_ck) {
1142 cqspi->sclk = sclk;
1143 cqspi_config_baudrate_div(cqspi);
1144 cqspi_delay(nor);
1145 cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
1146 f_pdata->read_delay);
1147 }
1148
1149 if (switch_cs || switch_ck)
1150 cqspi_controller_enable(cqspi, 1);
1151}
1152
1153static int cqspi_set_protocol(struct spi_nor *nor, const int read)
1154{
1155 struct cqspi_flash_pdata *f_pdata = nor->priv;
1156
1157 f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
1158 f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
1159 f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
1160
1161 if (nor->flags & SNOR_F_UPPER_CS)
1162 f_pdata->cs = CQSPI_CS1;
1163 else
1164 f_pdata->cs = CQSPI_CS0;
1165
1166 if (read) {
1167 switch (nor->read_proto) {
1168 case SNOR_PROTO_1_1_1:
1169 f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
1170 break;
1171 case SNOR_PROTO_1_1_2:
1172 f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
1173 break;
1174 case SNOR_PROTO_1_1_4:
1175 f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
1176 break;
1177 case SNOR_PROTO_1_1_8:
1178 f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
1179 break;
1180 case SNOR_PROTO_8_8_8:
1181 if (f_pdata->cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
1182 f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
1183 f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
1184 f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
1185 }
1186 break;
1187 default:
1188 return -EINVAL;
1189 }
1190 } else {
1191 switch (nor->write_proto) {
1192 case SNOR_PROTO_1_1_1:
1193 f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
1194 break;
1195 case SNOR_PROTO_1_1_2:
1196 f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
1197 break;
1198 case SNOR_PROTO_1_1_4:
1199 f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
1200 break;
1201 case SNOR_PROTO_1_1_8:
1202 f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
1203 break;
1204 case SNOR_PROTO_8_8_8:
1205 if (f_pdata->cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
1206 f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
1207 f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
1208 f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
1209 }
1210 break;
1211 default:
1212 return -EINVAL;
1213 }
1214 }
1215
1216 cqspi_configure(nor);
1217
1218 return 0;
1219}
1220
1221static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
1222 size_t len, const u_char *buf)
1223{
1224 struct cqspi_flash_pdata *f_pdata = nor->priv;
1225 struct cqspi_st *cqspi = f_pdata->cqspi;
1226 int ret;
1227
1228 reinit_completion(&cqspi->request_complete);
1229
1230 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
1231 !cqspi->tuning_complete.done) {
1232 if (!wait_for_completion_timeout(&cqspi->tuning_complete,
1233 msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
1234 return -ETIMEDOUT;
1235 }
1236 }
1237
1238 ret = cqspi_set_protocol(nor, 0);
1239 if (ret)
1240 return ret;
1241
1242 ret = cqspi_write_setup(nor, nor->program_opcode);
1243 if (ret)
1244 return ret;
1245
1246 cqspi->unalined_byte_cnt = false;
1247 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
1248 ((len % 2) != 0)) {
1249 cqspi->unalined_byte_cnt = true;
1250 }
1251
1252 if (f_pdata->use_direct_mode) {
1253 memcpy_toio(cqspi->ahb_base + to, buf, len);
1254 ret = cqspi_wait_idle(cqspi);
1255 } else {
1256 ret = cqspi_indirect_write_execute(nor, to, buf, len);
1257 }
1258 if (ret)
1259 return ret;
1260
1261 return len;
1262}
1263
1264static void cqspi_rx_dma_callback(void *param)
1265{
1266 struct cqspi_st *cqspi = param;
1267
1268 complete(&cqspi->rx_dma_complete);
1269}
1270
1271static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
1272 loff_t from, size_t len)
1273{
1274 struct cqspi_flash_pdata *f_pdata = nor->priv;
1275 struct cqspi_st *cqspi = f_pdata->cqspi;
1276 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
1277 dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
1278 int ret = 0;
1279 struct dma_async_tx_descriptor *tx;
1280 dma_cookie_t cookie;
1281 dma_addr_t dma_dst;
1282
1283 if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
1284 memcpy_fromio(buf, cqspi->ahb_base + from, len);
1285 return 0;
1286 }
1287
1288 dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE);
1289 if (dma_mapping_error(nor->dev, dma_dst)) {
1290 dev_err(nor->dev, "dma mapping failed\n");
1291 return -ENOMEM;
1292 }
1293 tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
1294 len, flags);
1295 if (!tx) {
1296 dev_err(nor->dev, "device_prep_dma_memcpy error\n");
1297 ret = -EIO;
1298 goto err_unmap;
1299 }
1300
1301 tx->callback = cqspi_rx_dma_callback;
1302 tx->callback_param = cqspi;
1303 cookie = tx->tx_submit(tx);
1304 reinit_completion(&cqspi->rx_dma_complete);
1305
1306 ret = dma_submit_error(cookie);
1307 if (ret) {
1308 dev_err(nor->dev, "dma_submit_error %d\n", cookie);
1309 ret = -EIO;
1310 goto err_unmap;
1311 }
1312
1313 dma_async_issue_pending(cqspi->rx_chan);
1314 if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
1315 msecs_to_jiffies(len))) {
1316 dmaengine_terminate_sync(cqspi->rx_chan);
1317 dev_err(nor->dev, "DMA wait_for_completion_timeout\n");
1318 ret = -ETIMEDOUT;
1319 goto err_unmap;
1320 }
1321
1322err_unmap:
1323 dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
1324
1325 return ret;
1326}
1327
1328static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
1329 size_t len, u_char *buf)
1330{
1331 struct cqspi_flash_pdata *f_pdata = nor->priv;
1332 struct cqspi_st *cqspi = f_pdata->cqspi;
1333 u64 dma_align = (u64)(uintptr_t)buf;
1334 int ret;
1335 bool use_dma = true;
1336
1337 reinit_completion(&cqspi->request_complete);
1338
1339 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
1340 !cqspi->tuning_complete.done) {
1341 if (!wait_for_completion_timeout(&cqspi->tuning_complete,
1342 msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
1343 return -ETIMEDOUT;
1344 }
1345 }
1346
1347 ret = cqspi_set_protocol(nor, 1);
1348 if (ret)
1349 return ret;
1350
1351 ret = cqspi_read_setup(nor);
1352 if (ret)
1353 return ret;
1354
1355 cqspi->unalined_byte_cnt = false;
1356 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
1357 if ((len % 2) != 0)
1358 cqspi->unalined_byte_cnt = true;
1359 if ((from % 2) != 0)
1360 use_dma = 0;
1361 }
1362
1363 if (f_pdata->use_direct_mode) {
1364 ret = cqspi_direct_read_execute(nor, buf, from, len);
1365 } else if (cqspi->read_dma && virt_addr_valid(buf) && use_dma &&
1366 cqspi->indirect_read_dma && len >= 4 &&
1367 ((dma_align & 0x3) == 0) && !is_vmalloc_addr(buf)) {
1368 ret = cqspi->indirect_read_dma(nor, buf, from, len);
1369 } else {
1370 ret = cqspi_indirect_read_execute(nor, buf, from, len);
1371 }
1372 if (ret)
1373 return ret;
1374
1375 complete(&cqspi->request_complete);
1376
1377 return len;
1378}
1379
1380static int cqspi_erase(struct spi_nor *nor, loff_t offs)
1381{
1382 struct cqspi_flash_pdata *f_pdata = nor->priv;
1383 struct cqspi_st *cqspi = f_pdata->cqspi;
1384 int ret;
1385
1386 reinit_completion(&cqspi->request_complete);
1387
1388 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
1389 !cqspi->tuning_complete.done) {
1390 if (!wait_for_completion_timeout(&cqspi->tuning_complete,
1391 msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
1392 return -ETIMEDOUT;
1393 }
1394 }
1395
1396 ret = cqspi_set_protocol(nor, 0);
1397 if (ret)
1398 return ret;
1399
1400 ret = cqspi_write_setup(nor, nor->erase_opcode);
1401 if (ret)
1402 return ret;
1403
1404
1405 ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
1406 if (ret)
1407 return ret;
1408
1409 return 0;
1410}
1411
1412static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
1413{
1414 struct cqspi_flash_pdata *f_pdata = nor->priv;
1415 struct cqspi_st *cqspi = f_pdata->cqspi;
1416
1417 mutex_lock(&cqspi->bus_mutex);
1418
1419 return 0;
1420}
1421
1422static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
1423{
1424 struct cqspi_flash_pdata *f_pdata = nor->priv;
1425 struct cqspi_st *cqspi = f_pdata->cqspi;
1426
1427 mutex_unlock(&cqspi->bus_mutex);
1428}
1429
1430static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
1431{
1432 struct cqspi_flash_pdata *f_pdata = nor->priv;
1433 struct cqspi_st *cqspi = f_pdata->cqspi;
1434 int ret;
1435
1436 reinit_completion(&cqspi->request_complete);
1437
1438 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
1439 !cqspi->tuning_complete.done) {
1440 if (!wait_for_completion_timeout(&cqspi->tuning_complete,
1441 msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
1442 return -ETIMEDOUT;
1443 }
1444 }
1445
1446 ret = cqspi_set_protocol(nor, 0);
1447 if (!ret) {
1448 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR)
1449 len = ((len % 2) != 0) ? (len + 1) : len;
1450 ret = cqspi_command_read(nor, &opcode, 1, buf, len);
1451 }
1452
1453 if ((opcode == SPINOR_OP_RDFSR && ((FSR_READY & buf[0]) != 0)) ||
1454 (SPINOR_OP_RDSR != opcode && SPINOR_OP_RDFSR != opcode)) {
1455 complete(&cqspi->request_complete);
1456 }
1457
1458 return ret;
1459}
1460
1461static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
1462{
1463 struct cqspi_flash_pdata *f_pdata = nor->priv;
1464 struct cqspi_st *cqspi = f_pdata->cqspi;
1465 int ret;
1466
1467 reinit_completion(&cqspi->request_complete);
1468
1469 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR &&
1470 !cqspi->tuning_complete.done) {
1471 if (!wait_for_completion_timeout(&cqspi->tuning_complete,
1472 msecs_to_jiffies(CQSPI_TUNING_TIMEOUT_MS))) {
1473 return -ETIMEDOUT;
1474 }
1475 }
1476
1477 ret = cqspi_set_protocol(nor, 0);
1478 if (!ret)
1479 ret = cqspi_command_write(nor, opcode, buf, len);
1480
1481 if (opcode != SPINOR_OP_WREN)
1482 complete(&cqspi->request_complete);
1483
1484 return ret;
1485}
1486
1487static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
1488 struct cqspi_flash_pdata *f_pdata,
1489 struct device_node *np)
1490{
1491 if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
1492 dev_err(&pdev->dev, "couldn't determine read-delay\n");
1493 return -ENXIO;
1494 }
1495
1496 if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
1497 dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
1498 return -ENXIO;
1499 }
1500
1501 if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
1502 dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
1503 return -ENXIO;
1504 }
1505
1506 if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
1507 dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
1508 return -ENXIO;
1509 }
1510
1511 if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
1512 dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
1513 return -ENXIO;
1514 }
1515
1516 if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
1517 dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
1518 return -ENXIO;
1519 }
1520
1521 return 0;
1522}
1523
1524static int cqspi_of_get_pdata(struct platform_device *pdev)
1525{
1526 struct device_node *np = pdev->dev.of_node;
1527 struct cqspi_st *cqspi = platform_get_drvdata(pdev);
1528
1529 cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
1530
1531 if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
1532 dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
1533 return -ENXIO;
1534 }
1535
1536 if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
1537 dev_err(&pdev->dev, "couldn't determine fifo-width\n");
1538 return -ENXIO;
1539 }
1540
1541 if (of_property_read_u32(np, "cdns,trigger-address",
1542 &cqspi->trigger_address)) {
1543 dev_err(&pdev->dev, "couldn't determine trigger-address\n");
1544 return -ENXIO;
1545 }
1546
1547 cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
1548
1549 return 0;
1550}
1551
1552static int cqspi_setdlldelay(struct spi_nor *nor)
1553{
1554 struct cqspi_flash_pdata *f_pdata = nor->priv;
1555 struct cqspi_st *cqspi = f_pdata->cqspi;
1556 int i;
1557 u8 j;
1558 int ret;
1559 u8 id[CQSPI_READ_ID_LEN];
1560 bool rxtapfound = false;
1561 u8 min_rxtap = 0;
1562 u8 max_rxtap = 0;
1563 u8 avg_rxtap;
1564 bool id_matched;
1565 u32 txtap = 0;
1566 u8 max_tap;
1567 s8 max_windowsize = -1;
1568 u8 windowsize;
1569 u8 dummy_incr;
1570 u8 dummy_flag = 0;
1571 u8 count;
1572 u8 opcode = CQSPI_READ_ID;
1573
1574 max_tap = ((TERA_MACRO / cqspi->master_ref_clk_hz) / 160);
1575 if (cqspi->dll_mode == CQSPI_DLL_MODE_MASTER) {
1576
1577 writel(0, cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1578
1579
1580 writel(0x4, cqspi->iobase + CQSPI_REG_PHY_MASTER_CTRL);
1581
1582
1583 writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK,
1584 cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1585
1586
1587 ret = cqspi_wait_for_bit(cqspi->iobase + CQSPI_REG_DLL_LOWER,
1588 CQSPI_REG_DLL_LOWER_LPBK_LOCK_MASK, 0);
1589 if (ret) {
1590 dev_err(nor->dev,
1591 "Loopback lock bit error (%i)\n", ret);
1592 return ret;
1593 }
1594
1595
1596 writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK,
1597 cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1598 writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK |
1599 CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK,
1600 cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1601
1602 txtap = CQSPI_TX_TAP_MASTER <<
1603 CQSPI_REG_PHY_CONFIG_TX_DLL_DLY_LSB;
1604 max_tap = CQSPI_MAX_DLL_TAPS;
1605 }
1606
1607 cqspi->extra_dummy = false;
1608 for (dummy_incr = 0; dummy_incr <= 1; dummy_incr++) {
1609 if (dummy_incr)
1610 cqspi->extra_dummy = true;
1611 for (i = 0; i <= max_tap; i++) {
1612 writel((txtap | i |
1613 CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK),
1614 cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1615 writel((CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK | txtap |
1616 i | CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK),
1617 cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1618 if (cqspi->dll_mode == CQSPI_DLL_MODE_MASTER) {
1619 ret = cqspi_wait_for_bit(cqspi->iobase +
1620 CQSPI_REG_DLL_LOWER,
1621 CQSPI_REG_DLL_LOWER_DLL_LOCK_MASK, 0);
1622 if (ret)
1623 return ret;
1624 }
1625 count = 0;
1626 do {
1627 count += 1;
1628 ret = cqspi_set_protocol(nor, 0);
1629 if (!ret)
1630 ret = cqspi_command_read(nor, &opcode,
1631 1, id, CQSPI_READ_ID_LEN);
1632 if (ret < 0) {
1633 dev_err(nor->dev,
1634 "error %d reading JEDEC ID\n",
1635 ret);
1636 return ret;
1637 }
1638 id_matched = true;
1639 for (j = 0; j < CQSPI_READ_ID_LEN; j++) {
1640 if (nor->device_id[j] != id[j]) {
1641 id_matched = false;
1642 break;
1643 }
1644 }
1645 } while (id_matched && (count <= 10));
1646
1647 if (id_matched) {
1648 if (!rxtapfound) {
1649 min_rxtap = i;
1650 max_rxtap = i;
1651 rxtapfound = true;
1652 } else {
1653 max_rxtap = i;
1654 }
1655 }
1656 if (!id_matched || i == max_tap) {
1657 if (rxtapfound) {
1658 windowsize = max_rxtap - min_rxtap + 1;
1659 if (windowsize > max_windowsize) {
1660 dummy_flag = dummy_incr;
1661 max_windowsize = windowsize;
1662 avg_rxtap = (max_rxtap +
1663 min_rxtap) / 2;
1664 }
1665 i = max_tap;
1666 rxtapfound = false;
1667 }
1668 }
1669 }
1670 if (!dummy_incr) {
1671 rxtapfound = false;
1672 min_rxtap = 0;
1673 max_rxtap = 0;
1674 }
1675 }
1676 if (!dummy_flag)
1677 cqspi->extra_dummy = false;
1678 if (max_windowsize < 3)
1679 return -EINVAL;
1680
1681 writel((txtap | avg_rxtap | CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK),
1682 cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1683 writel((CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK | txtap | avg_rxtap |
1684 CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK),
1685 cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1686 if (cqspi->dll_mode == CQSPI_DLL_MODE_MASTER) {
1687 ret = cqspi_wait_for_bit(cqspi->iobase + CQSPI_REG_DLL_LOWER,
1688 CQSPI_REG_DLL_LOWER_DLL_LOCK_MASK, 0);
1689 if (ret)
1690 return ret;
1691 }
1692
1693 return 0;
1694}
1695
1696static void cqspi_periodictuning(struct work_struct *work)
1697{
1698 struct delayed_work *d = to_delayed_work(work);
1699 struct spi_nor *nor = container_of(d, struct spi_nor, complete_work);
1700 struct cqspi_flash_pdata *f_pdata = nor->priv;
1701 struct cqspi_st *cqspi = f_pdata->cqspi;
1702 int ret;
1703
1704 if (!cqspi->request_complete.done)
1705 wait_for_completion(&cqspi->request_complete);
1706
1707 reinit_completion(&cqspi->tuning_complete);
1708 ret = cqspi_setdlldelay(nor);
1709 complete_all(&cqspi->tuning_complete);
1710 if (ret) {
1711 dev_err(nor->dev,
1712 "Setting dll delay error (%i)\n", ret);
1713 } else {
1714 schedule_delayed_work(&nor->complete_work,
1715 msecs_to_jiffies(CQSPI_TUNING_PERIODICITY_MS));
1716 }
1717}
1718
1719static void cqspi_setup_ddrmode(struct spi_nor *nor)
1720{
1721 struct cqspi_flash_pdata *f_pdata = nor->priv;
1722 struct cqspi_st *cqspi = f_pdata->cqspi;
1723 u32 reg;
1724
1725 cqspi_controller_enable(cqspi, 0);
1726
1727 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
1728 reg |= (CQSPI_REG_CONFIG_PHY_ENABLE_MASK);
1729 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
1730
1731
1732 reg = readl(cqspi->iobase + CQSPI_REG_WRCOMPLETION);
1733 reg &= ~CQSPI_REG_WRCOMPLETION_POLLCNT_MASK;
1734 writel(reg, cqspi->iobase + CQSPI_REG_WRCOMPLETION);
1735
1736 reg |= (0x3 << CQSPI_REG_WRCOMPLETION_POLLCNY_LSB);
1737 writel(reg, cqspi->iobase + CQSPI_REG_WRCOMPLETION);
1738
1739 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
1740 reg |= CQSPI_REG_CONFIG_DTR_PROT_EN_MASK;
1741 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
1742
1743 reg = readl(cqspi->iobase + CQSPI_REG_READCAPTURE);
1744 reg |= CQSPI_REG_READCAPTURE_DQS_ENABLE;
1745 writel(reg, cqspi->iobase + CQSPI_REG_READCAPTURE);
1746
1747 cqspi->edge_mode = CQSPI_EDGE_MODE_DDR;
1748
1749 cqspi_controller_enable(cqspi, 1);
1750}
1751
1752static int cqspi_setup_edgemode(struct spi_nor *nor)
1753{
1754 int ret;
1755
1756 cqspi_setup_ddrmode(nor);
1757
1758 ret = cqspi_setdlldelay(nor);
1759
1760 return ret;
1761}
1762
1763static void cqspi_controller_init(struct cqspi_st *cqspi)
1764{
1765 u32 reg;
1766
1767 cqspi_controller_enable(cqspi, 0);
1768
1769
1770 writel(0, cqspi->iobase + CQSPI_REG_REMAP);
1771
1772
1773 writel(CQSPI_REG_PHY_CONFIG_RESET_FLD_MASK,
1774 cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1775
1776
1777 writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
1778 writel(CQSPI_REG_DMA_DST_ALL_I_DIS_MASK,
1779 cqspi->iobase + CQSPI_REG_DMA_DST_I_DIS);
1780
1781
1782 writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
1783
1784
1785 writel(cqspi->trigger_address,
1786 cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
1787
1788
1789 writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
1790 cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
1791
1792 writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
1793 cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
1794
1795 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
1796 reg &= ~CQSPI_REG_CONFIG_DTR_PROT_EN_MASK;
1797 reg &= ~CQSPI_REG_CONFIG_PHY_ENABLE_MASK;
1798 if (cqspi->read_dma) {
1799 reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
1800 reg |= CQSPI_REG_CONFIG_DMA_MASK;
1801 } else {
1802
1803 reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
1804 }
1805 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
1806
1807 cqspi_controller_enable(cqspi, 1);
1808}
1809
1810static int cqspi_versal_flash_reset(struct cqspi_st *cqspi, u8 reset_type)
1811{
1812 struct platform_device *pdev = cqspi->pdev;
1813 int ret;
1814 int gpio;
1815 enum of_gpio_flags flags;
1816
1817 if (reset_type == CQSPI_RESET_TYPE_HWPIN) {
1818 gpio = of_get_named_gpio_flags(pdev->dev.of_node,
1819 "reset-gpios", 0, &flags);
1820 if (!gpio_is_valid(gpio))
1821 return -EIO;
1822 ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
1823 "flash-reset");
1824 if (ret) {
1825 dev_err(&pdev->dev,
1826 "failed to get reset-gpios: %d\n", ret);
1827 return -EIO;
1828 }
1829
1830
1831 cqspi->eemi_ops->pinctrl_request(CQSPI_MIO_NODE_ID_12);
1832
1833
1834 cqspi->eemi_ops->pinctrl_set_config(CQSPI_MIO_NODE_ID_12,
1835 PM_PINCTRL_CONFIG_SCHMITT_CMOS,
1836 PM_PINCTRL_INPUT_TYPE_SCHMITT);
1837
1838
1839 gpio_direction_output(gpio, 1);
1840
1841
1842 cqspi->eemi_ops->pinctrl_set_config(CQSPI_MIO_NODE_ID_12,
1843 PM_PINCTRL_CONFIG_TRI_STATE,
1844 PM_PINCTRL_TRI_STATE_DISABLE);
1845 udelay(1);
1846
1847
1848 gpio_set_value(gpio, 0);
1849 udelay(1);
1850
1851
1852 gpio_set_value(gpio, 1);
1853 udelay(1);
1854 } else {
1855 ret = -EINVAL;
1856 }
1857
1858 return ret;
1859}
1860
1861static int cqspi_versal_indirect_read_dma(struct spi_nor *nor, u_char *rxbuf,
1862 loff_t from_addr, size_t n_rx)
1863{
1864 struct cqspi_flash_pdata *f_pdata = nor->priv;
1865 struct cqspi_st *cqspi = f_pdata->cqspi;
1866 void __iomem *reg_base = cqspi->iobase;
1867 unsigned int rx_rem;
1868 int ret = 0;
1869 u32 reg;
1870
1871 rx_rem = n_rx % 4;
1872 cqspi->bytes_to_rx = n_rx;
1873 cqspi->bytes_to_dma = (n_rx - rx_rem);
1874 cqspi->addr = from_addr;
1875 cqspi->rxbuf = rxbuf;
1876
1877 if (cqspi->eemi_ops && cqspi->access_mode == CQSPI_LINEAR_MODE) {
1878 cqspi_wait_idle(cqspi);
1879 reg = readl(cqspi->iobase + CQSPI_REG_PHY_CONFIG);
1880
1881 if (cqspi->dll_mode != CQSPI_DLL_MODE_MASTER) {
1882
1883 cqspi->eemi_ops->reset_assert(RESET_OSPI,
1884 PM_RESET_ACTION_ASSERT);
1885 }
1886 cqspi->eemi_ops->ioctl(DEV_OSPI, IOCTL_OSPI_MUX_SELECT,
1887 PM_OSPI_MUX_SEL_DMA, 0, NULL);
1888 cqspi->access_mode = CQSPI_DMA_MODE;
1889 if (cqspi->dll_mode != CQSPI_DLL_MODE_MASTER) {
1890 cqspi->eemi_ops->reset_assert(RESET_OSPI,
1891 PM_RESET_ACTION_RELEASE);
1892 }
1893 cqspi_wait_idle(cqspi);
1894 if (cqspi->dll_mode != CQSPI_DLL_MODE_MASTER) {
1895 cqspi_controller_init(cqspi);
1896 cqspi->current_cs = -1;
1897 cqspi->sclk = 0;
1898
1899 ret = cqspi_set_protocol(nor, 1);
1900 if (ret)
1901 return ret;
1902
1903 if (cqspi->edge_mode == CQSPI_EDGE_MODE_DDR) {
1904 cqspi_setup_ddrmode(nor);
1905 writel(CQSPI_REG_PHY_CONFIG_RESYNC_FLD_MASK |
1906 reg, cqspi->iobase +
1907 CQSPI_REG_PHY_CONFIG);
1908 }
1909
1910 ret = cqspi_read_setup(nor);
1911 if (ret)
1912 return ret;
1913 }
1914 }
1915
1916 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
1917 reg |= CQSPI_REG_CONFIG_DMA_MASK;
1918 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
1919
1920 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
1921 writel(cqspi->bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
1922 writel(CQSPI_REG_INDTRIG_ADDRRANGE_WIDTH,
1923 reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);
1924
1925
1926 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
1927
1928
1929 writel(CQSPI_REG_DMA_DST_I_EN_DONE,
1930 reg_base + CQSPI_REG_DMA_DST_I_EN);
1931
1932
1933 writel(CQSPI_REG_DMA_VAL, reg_base + CQSPI_REG_DMA);
1934
1935 cqspi->dma_addr = dma_map_single(nor->dev, rxbuf, cqspi->bytes_to_dma,
1936 DMA_FROM_DEVICE);
1937 if (dma_mapping_error(nor->dev, cqspi->dma_addr)) {
1938 dev_err(nor->dev, "ERR:rxdma:memory not mapped\n");
1939 goto failrd;
1940 }
1941
1942 writel(lower_32_bits(cqspi->dma_addr),
1943 reg_base + CQSPI_REG_DMA_DST_ADDR);
1944 writel(upper_32_bits(cqspi->dma_addr),
1945 reg_base + CQSPI_REG_DMA_DST_ADDR_MSB);
1946
1947
1948 writel(cqspi->trigger_address, reg_base + CQSPI_REG_DMA_SRC_ADDR);
1949
1950
1951 writel(cqspi->bytes_to_dma, reg_base + CQSPI_REG_DMA_DST_SIZE);
1952
1953
1954 writel(CQSPI_REG_DMA_DST_CTRL_VAL, reg_base + CQSPI_REG_DMA_DST_CTRL);
1955
1956 writel(CQSPI_REG_INDIRECTRD_START_MASK,
1957 reg_base + CQSPI_REG_INDIRECTRD);
1958
1959 reinit_completion(&cqspi->transfer_complete);
1960
1961 if (!wait_for_completion_timeout(&cqspi->transfer_complete,
1962 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
1963 ret = -ETIMEDOUT;
1964 goto failrd;
1965 }
1966
1967
1968 ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
1969 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
1970 if (ret) {
1971 dev_err(nor->dev,
1972 "Indirect read completion error (%i)\n", ret);
1973 goto failrd;
1974 }
1975
1976 process_dma_irq(cqspi);
1977
1978 return 0;
1979
1980failrd:
1981
1982 writel(CQSPI_REG_DMA_DST_I_DIS_DONE,
1983 reg_base + CQSPI_REG_DMA_DST_I_DIS);
1984
1985 dma_unmap_single(nor->dev, cqspi->dma_addr, cqspi->bytes_to_dma,
1986 DMA_DEV_TO_MEM);
1987
1988
1989 writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
1990 reg_base + CQSPI_REG_INDIRECTRD);
1991
1992 return ret;
1993}
1994
1995static void cqspi_request_mmap_dma(struct cqspi_st *cqspi)
1996{
1997 dma_cap_mask_t mask;
1998
1999 dma_cap_zero(mask);
2000 dma_cap_set(DMA_MEMCPY, mask);
2001
2002 cqspi->rx_chan = dma_request_chan_by_mask(&mask);
2003 if (IS_ERR(cqspi->rx_chan)) {
2004 dev_err(&cqspi->pdev->dev, "No Rx DMA available\n");
2005 cqspi->rx_chan = NULL;
2006 }
2007 init_completion(&cqspi->rx_dma_complete);
2008}
2009
2010static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
2011{
2012 struct platform_device *pdev = cqspi->pdev;
2013 struct device *dev = &pdev->dev;
2014 const struct cqspi_driver_platdata *ddata;
2015 struct spi_nor_hwcaps hwcaps;
2016 struct cqspi_flash_pdata *f_pdata;
2017 struct spi_nor *nor = NULL;
2018 struct mtd_info *mtd;
2019 unsigned int cs;
2020 int i, ret;
2021
2022 ddata = of_device_get_match_data(dev);
2023 if (!ddata) {
2024 dev_err(dev, "Couldn't find driver data\n");
2025 return -EINVAL;
2026 }
2027 hwcaps.mask = ddata->hwcaps_mask;
2028
2029
2030 for_each_available_child_of_node(dev->of_node, np) {
2031 ret = of_property_read_u32(np, "reg", &cs);
2032 if (ret) {
2033 dev_err(dev, "Couldn't determine chip select.\n");
2034 goto err;
2035 }
2036
2037 if (cs >= CQSPI_MAX_CHIPSELECT) {
2038 ret = -EINVAL;
2039 dev_err(dev, "Chip select %d out of range.\n", cs);
2040 goto err;
2041 }
2042
2043 f_pdata = &cqspi->f_pdata[cs];
2044 f_pdata->cqspi = cqspi;
2045 f_pdata->cs = cs;
2046
2047 ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
2048 if (ret)
2049 goto err;
2050
2051 nor = &f_pdata->nor;
2052 mtd = &nor->mtd;
2053
2054 mtd->priv = nor;
2055
2056 nor->dev = dev;
2057 spi_nor_set_flash_node(nor, np);
2058 nor->priv = f_pdata;
2059
2060 nor->read_reg = cqspi_read_reg;
2061 nor->write_reg = cqspi_write_reg;
2062 nor->read = cqspi_read;
2063 nor->write = cqspi_write;
2064 nor->erase = cqspi_erase;
2065 nor->prepare = cqspi_prep;
2066 nor->unprepare = cqspi_unprep;
2067
2068 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
2069 dev_name(dev), cs);
2070 if (!mtd->name) {
2071 ret = -ENOMEM;
2072 goto err;
2073 }
2074
2075 if (ddata->quirks & CQSPI_SUPPORT_RESET) {
2076 ret = cqspi->flash_reset(cqspi,
2077 CQSPI_RESET_TYPE_HWPIN);
2078 if (ret)
2079 goto err;
2080 }
2081
2082 ret = spi_nor_scan(nor, NULL, &hwcaps);
2083 if (ret)
2084 goto err;
2085
2086 ret = mtd_device_register(mtd, NULL, 0);
2087 if (ret)
2088 goto err;
2089
2090 f_pdata->registered = true;
2091
2092 if (mtd->size <= cqspi->ahb_size && !cqspi->read_dma) {
2093 f_pdata->use_direct_mode = true;
2094 dev_dbg(nor->dev, "using direct mode for %s\n",
2095 mtd->name);
2096
2097 if (!cqspi->rx_chan)
2098 cqspi_request_mmap_dma(cqspi);
2099 }
2100 }
2101
2102 if (nor && !(nor->flags & SNOR_F_BROKEN_OCTAL_DDR)) {
2103 ret = cqspi_setup_edgemode(nor);
2104 if (ret)
2105 goto err;
2106 complete_all(&cqspi->tuning_complete);
2107 complete_all(&cqspi->request_complete);
2108 INIT_DELAYED_WORK(&nor->complete_work, cqspi_periodictuning);
2109 schedule_delayed_work(&nor->complete_work,
2110 msecs_to_jiffies(CQSPI_TUNING_PERIODICITY_MS));
2111 }
2112
2113 return 0;
2114
2115err:
2116 for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
2117 if (cqspi->f_pdata[i].registered)
2118 mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
2119 return ret;
2120}
2121
2122static int cqspi_probe(struct platform_device *pdev)
2123{
2124 struct device_node *np = pdev->dev.of_node;
2125 struct device *dev = &pdev->dev;
2126 struct cqspi_st *cqspi;
2127 struct resource *res;
2128 struct resource *res_ahb;
2129 struct reset_control *rstc, *rstc_ocp;
2130 const struct cqspi_driver_platdata *ddata;
2131 int ret;
2132 int irq;
2133 u32 idcode;
2134 u32 version;
2135
2136 cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
2137 if (!cqspi)
2138 return -ENOMEM;
2139
2140 mutex_init(&cqspi->bus_mutex);
2141 cqspi->pdev = pdev;
2142 platform_set_drvdata(pdev, cqspi);
2143
2144
2145 ret = cqspi_of_get_pdata(pdev);
2146 if (ret) {
2147 dev_err(dev, "Cannot get mandatory OF data.\n");
2148 return -ENODEV;
2149 }
2150
2151
2152 cqspi->clk = devm_clk_get(dev, NULL);
2153 if (IS_ERR(cqspi->clk)) {
2154 dev_err(dev, "Cannot claim QSPI clock.\n");
2155 return PTR_ERR(cqspi->clk);
2156 }
2157
2158
2159 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2160 cqspi->iobase = devm_ioremap_resource(dev, res);
2161 if (IS_ERR(cqspi->iobase)) {
2162 dev_err(dev, "Cannot remap controller address.\n");
2163 return PTR_ERR(cqspi->iobase);
2164 }
2165
2166
2167 res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2168 cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
2169 if (IS_ERR(cqspi->ahb_base)) {
2170 dev_err(dev, "Cannot remap AHB address.\n");
2171 return PTR_ERR(cqspi->ahb_base);
2172 }
2173 cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
2174 cqspi->ahb_size = resource_size(res_ahb);
2175
2176 init_completion(&cqspi->transfer_complete);
2177 init_completion(&cqspi->tuning_complete);
2178 init_completion(&cqspi->request_complete);
2179
2180
2181 irq = platform_get_irq(pdev, 0);
2182 if (irq < 0)
2183 return -ENXIO;
2184
2185 pm_runtime_enable(dev);
2186 ret = pm_runtime_get_sync(dev);
2187 if (ret < 0) {
2188 pm_runtime_put_noidle(dev);
2189 return ret;
2190 }
2191
2192 ret = clk_prepare_enable(cqspi->clk);
2193 if (ret) {
2194 dev_err(dev, "Cannot enable QSPI clock.\n");
2195 goto probe_clk_failed;
2196 }
2197
2198
2199 rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
2200 if (IS_ERR(rstc)) {
2201 dev_err(dev, "Cannot get QSPI reset.\n");
2202 return PTR_ERR(rstc);
2203 }
2204
2205 rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
2206 if (IS_ERR(rstc_ocp)) {
2207 dev_err(dev, "Cannot get QSPI OCP reset.\n");
2208 return PTR_ERR(rstc_ocp);
2209 }
2210
2211 reset_control_assert(rstc);
2212 reset_control_deassert(rstc);
2213
2214 reset_control_assert(rstc_ocp);
2215 reset_control_deassert(rstc_ocp);
2216
2217 cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
2218 ddata = of_device_get_match_data(dev);
2219 if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY))
2220 cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
2221 cqspi->master_ref_clk_hz);
2222
2223 if (ddata && (ddata->quirks & CQSPI_HAS_DMA)) {
2224 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2225 cqspi->read_dma = true;
2226 }
2227
2228 if (of_device_is_compatible(pdev->dev.of_node,
2229 "xlnx,versal-ospi-1.0")) {
2230 cqspi->eemi_ops = zynqmp_pm_get_eemi_ops();
2231 if (IS_ERR(cqspi->eemi_ops))
2232 return PTR_ERR(cqspi->eemi_ops);
2233 if (cqspi->read_dma)
2234 cqspi->indirect_read_dma =
2235 cqspi_versal_indirect_read_dma;
2236 cqspi->flash_reset = cqspi_versal_flash_reset;
2237 cqspi->access_mode = CQSPI_DMA_MODE;
2238 cqspi->dll_mode = CQSPI_DLL_MODE_BYPASS;
2239
2240 ret = cqspi->eemi_ops->get_chipid(&idcode, &version);
2241 if (ret < 0) {
2242 dev_err(dev, "Cannot get chipid is %d\n", ret);
2243 goto probe_clk_failed;
2244 }
2245 if ((version & SILICON_VER_MASK) != SILICON_VER_1) {
2246 cqspi->dll_mode = CQSPI_DLL_MODE_MASTER;
2247 if (cqspi->master_ref_clk_hz >= TAP_GRAN_SEL_MIN_FREQ)
2248 writel(0x1, cqspi->iobase + CQSPI_REG_ECO);
2249 }
2250 }
2251
2252 ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
2253 pdev->name, cqspi);
2254 if (ret) {
2255 dev_err(dev, "Cannot request IRQ.\n");
2256 goto probe_irq_failed;
2257 }
2258
2259 cqspi_wait_idle(cqspi);
2260 cqspi_controller_init(cqspi);
2261 cqspi->current_cs = -1;
2262 cqspi->sclk = 0;
2263 cqspi->extra_dummy = false;
2264 cqspi->edge_mode = CQSPI_EDGE_MODE_SDR;
2265 cqspi->unalined_byte_cnt = false;
2266
2267 ret = cqspi_setup_flash(cqspi, np);
2268 if (ret) {
2269 dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
2270 goto probe_setup_failed;
2271 }
2272
2273 return ret;
2274probe_setup_failed:
2275 cqspi_controller_enable(cqspi, 0);
2276probe_irq_failed:
2277 clk_disable_unprepare(cqspi->clk);
2278probe_clk_failed:
2279 pm_runtime_put_sync(dev);
2280 pm_runtime_disable(dev);
2281 return ret;
2282}
2283
2284static int cqspi_remove(struct platform_device *pdev)
2285{
2286 struct cqspi_st *cqspi = platform_get_drvdata(pdev);
2287 int i;
2288
2289 for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
2290 if (cqspi->f_pdata[i].registered)
2291 mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
2292
2293 cqspi_controller_enable(cqspi, 0);
2294
2295 if (cqspi->rx_chan)
2296 dma_release_channel(cqspi->rx_chan);
2297
2298 clk_disable_unprepare(cqspi->clk);
2299
2300 pm_runtime_put_sync(&pdev->dev);
2301 pm_runtime_disable(&pdev->dev);
2302
2303 return 0;
2304}
2305
2306#ifdef CONFIG_PM_SLEEP
2307static int cqspi_suspend(struct device *dev)
2308{
2309 struct cqspi_st *cqspi = dev_get_drvdata(dev);
2310
2311 cqspi_controller_enable(cqspi, 0);
2312 return 0;
2313}
2314
2315static int cqspi_resume(struct device *dev)
2316{
2317 struct cqspi_st *cqspi = dev_get_drvdata(dev);
2318
2319 cqspi_controller_enable(cqspi, 1);
2320 return 0;
2321}
2322
2323static const struct dev_pm_ops cqspi__dev_pm_ops = {
2324 .suspend = cqspi_suspend,
2325 .resume = cqspi_resume,
2326};
2327
2328#define CQSPI_DEV_PM_OPS (&cqspi__dev_pm_ops)
2329#else
2330#define CQSPI_DEV_PM_OPS NULL
2331#endif
2332
2333static const struct cqspi_driver_platdata cdns_qspi = {
2334 .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
2335};
2336
2337static const struct cqspi_driver_platdata k2g_qspi = {
2338 .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK,
2339 .quirks = CQSPI_NEEDS_WR_DELAY,
2340};
2341
2342static const struct cqspi_driver_platdata am654_ospi = {
2343 .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8,
2344 .quirks = CQSPI_NEEDS_WR_DELAY,
2345};
2346
2347static const struct cqspi_driver_platdata versal_ospi = {
2348 .hwcaps_mask = (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |
2349 SNOR_HWCAPS_PP | SNOR_HWCAPS_PP_8_8_8 |
2350 SNOR_HWCAPS_READ_1_1_8 | SNOR_HWCAPS_READ_8_8_8),
2351 .quirks = CQSPI_HAS_DMA | CQSPI_SUPPORT_RESET,
2352};
2353
2354static const struct of_device_id cqspi_dt_ids[] = {
2355 {
2356 .compatible = "cdns,qspi-nor",
2357 .data = &cdns_qspi,
2358 },
2359 {
2360 .compatible = "ti,k2g-qspi",
2361 .data = &k2g_qspi,
2362 },
2363 {
2364 .compatible = "ti,am654-ospi",
2365 .data = &am654_ospi,
2366 },
2367 {
2368 .compatible = "xlnx,versal-ospi-1.0",
2369 .data = (void *)&versal_ospi,
2370 },
2371 { }
2372};
2373
2374MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
2375
2376static struct platform_driver cqspi_platform_driver = {
2377 .probe = cqspi_probe,
2378 .remove = cqspi_remove,
2379 .driver = {
2380 .name = CQSPI_NAME,
2381 .pm = CQSPI_DEV_PM_OPS,
2382 .of_match_table = cqspi_dt_ids,
2383 },
2384};
2385
2386module_platform_driver(cqspi_platform_driver);
2387
2388MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
2389MODULE_LICENSE("GPL v2");
2390MODULE_ALIAS("platform:" CQSPI_NAME);
2391MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
2392MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
2393