// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2022 Nuvoton Technology Corp.
 * NPCM Flash Interface Unit (FIU) SPI master controller driver.
 */

#include <clk.h>
#include <dm.h>
#include <spi.h>
#include <spi-mem.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include <linux/log2.h>
#include <linux/iopoll.h>
#include <power/regulator.h>

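/*
 * A UMA (user-mode access) transaction moves data through four 32-bit data
 * registers, so each transaction carries at most CHUNK_SIZE (16) payload
 * bytes in DW_SIZE (4) byte words. XFER_TIMEOUT is the completion-poll
 * timeout in microseconds.
 */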
#define DW_SIZE			4
#define CHUNK_SIZE		16
#define XFER_TIMEOUT		1000000

/* FIU UMA Configuration Register (UMA_CFG) */
#define UMA_CFG_RDATSIZ_MASK	GENMASK(28, 24)
#define UMA_CFG_DBSIZ_MASK	GENMASK(23, 21)
#define UMA_CFG_WDATSIZ_MASK	GENMASK(20, 16)
#define UMA_CFG_ADDSIZ_MASK	GENMASK(13, 11)
#define UMA_CFG_RDBPCK_MASK	GENMASK(9, 8)
#define UMA_CFG_DBPCK_MASK	GENMASK(7, 6)
#define UMA_CFG_WDBPCK_MASK	GENMASK(5, 4)
#define UMA_CFG_ADBPCK_MASK	GENMASK(3, 2)
#define UMA_CFG_CMBPCK_MASK	GENMASK(1, 0)
#define UMA_CFG_CMDSIZ_SHIFT	10

/* FIU UMA Control and Status Register (UMA_CTS) */
#define UMA_CTS_SW_CS		BIT(16)
#define UMA_CTS_EXEC_DONE	BIT(0)
#define UMA_CTS_RDYST		BIT(24)
#define UMA_CTS_DEV_NUM_MASK	GENMASK(9, 8)

/* Direct Write Configuration Register */
#define DWR_CFG_WBURST_MASK	GENMASK(25, 24)
#define DWR_CFG_ADDSIZ_MASK	GENMASK(17, 16)
#define DWR_CFG_ABPCK_MASK	GENMASK(11, 10)
#define DRW_CFG_DBPCK_MASK	GENMASK(9, 8)
#define DRW_CFG_WRCMD		2

enum {
	DWR_WBURST_1_BYTE,
	DWR_WBURST_16_BYTE = 3,
};

enum {
	DWR_ADDSIZ_24_BIT,
	DWR_ADDSIZ_32_BIT,
};

enum {
	DWR_ABPCK_BIT_PER_CLK,
	DWR_ABPCK_2_BIT_PER_CLK,
	DWR_ABPCK_4_BIT_PER_CLK,
};

enum {
	DWR_DBPCK_BIT_PER_CLK,
	DWR_DBPCK_2_BIT_PER_CLK,
	DWR_DBPCK_4_BIT_PER_CLK,
};

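/* Memory-mapped FIU register file; fields follow the hardware register layout */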
struct npcm_fiu_regs {
	unsigned int    drd_cfg;
	unsigned int    dwr_cfg;
	unsigned int    uma_cfg;
	unsigned int    uma_cts;
	unsigned int    uma_cmd;
	unsigned int    uma_addr;
	unsigned int    prt_cfg;
	unsigned char	res1[4];
	unsigned int    uma_dw0;
	unsigned int    uma_dw1;
	unsigned int    uma_dw2;
	unsigned int    uma_dw3;
	unsigned int    uma_dr0;
	unsigned int    uma_dr1;
	unsigned int    uma_dr2;
	unsigned int    uma_dr3;
	unsigned int    prt_cmd0;
	unsigned int    prt_cmd1;
	unsigned int    prt_cmd2;
	unsigned int    prt_cmd3;
	unsigned int    prt_cmd4;
	unsigned int    prt_cmd5;
	unsigned int    prt_cmd6;
	unsigned int    prt_cmd7;
	unsigned int    prt_cmd8;
	unsigned int    prt_cmd9;
	unsigned int    stuff[4];
	unsigned int    fiu_cfg;
};

struct npcm_fiu_priv {
	struct npcm_fiu_regs *regs;
};

static int npcm_fiu_spi_set_speed(struct udevice *bus, uint speed)
{
	return 0;
}

static int npcm_fiu_spi_set_mode(struct udevice *bus, uint mode)
{
	return 0;
}

static inline void activate_cs(struct npcm_fiu_regs *regs, int cs)
{
	writel(FIELD_PREP(UMA_CTS_DEV_NUM_MASK, cs), &regs->uma_cts);
}

static inline void deactivate_cs(struct npcm_fiu_regs *regs, int cs)
{
	writel(FIELD_PREP(UMA_CTS_DEV_NUM_MASK, cs) | UMA_CTS_SW_CS, &regs->uma_cts);
}

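/*
 * Read up to CHUNK_SIZE bytes in a single UMA transaction: program the read
 * data size, trigger execution, wait for EXEC_DONE to clear and copy the
 * received bytes out of the UMA data-read registers.
 */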
static int fiu_uma_read(struct udevice *bus, u8 *buf, u32 size)
{
	struct npcm_fiu_priv *priv = dev_get_priv(bus);
	struct npcm_fiu_regs *regs = priv->regs;
	u32 data_reg[4];
	u32 val;
	int ret;

	/* Set data size */
	writel(FIELD_PREP(UMA_CFG_RDATSIZ_MASK, size), &regs->uma_cfg);

	/* Initiate the read */
	writel(readl(&regs->uma_cts) | UMA_CTS_EXEC_DONE, &regs->uma_cts);

	/* Wait for completion */
	ret = readl_poll_timeout(&regs->uma_cts, val,
				 !(val & UMA_CTS_EXEC_DONE), XFER_TIMEOUT);
	if (ret) {
		printf("npcm_fiu: read timeout\n");
		return ret;
	}

	/* Copy data from data registers */
	if (size)
		data_reg[0] = readl(&regs->uma_dr0);
	if (size > DW_SIZE)
		data_reg[1] = readl(&regs->uma_dr1);
	if (size > DW_SIZE * 2)
		data_reg[2] = readl(&regs->uma_dr2);
	if (size > DW_SIZE * 3)
		data_reg[3] = readl(&regs->uma_dr3);
	memcpy(buf, data_reg, size);

	return 0;
}

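/*
 * Write up to CHUNK_SIZE bytes in a single UMA transaction: program the write
 * data size, load the UMA data-write registers, trigger execution and wait
 * for EXEC_DONE to clear.
 */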
static int fiu_uma_write(struct udevice *bus, const u8 *buf, u32 size)
{
	struct npcm_fiu_priv *priv = dev_get_priv(bus);
	struct npcm_fiu_regs *regs = priv->regs;
	u32 data_reg[4];
	u32 val;
	int ret;

	/* Set data size */
	writel(FIELD_PREP(UMA_CFG_WDATSIZ_MASK, size), &regs->uma_cfg);

	/* Write data to data registers */
	memcpy(data_reg, buf, size);
	if (size)
		writel(data_reg[0], &regs->uma_dw0);
	if (size > DW_SIZE)
		writel(data_reg[1], &regs->uma_dw1);
	if (size > DW_SIZE * 2)
		writel(data_reg[2], &regs->uma_dw2);
	if (size > DW_SIZE * 3)
		writel(data_reg[3], &regs->uma_dw3);

	/* Initiate the transaction */
	writel(readl(&regs->uma_cts) | UMA_CTS_EXEC_DONE, &regs->uma_cts);

	/* Wait for completion */
	ret = readl_poll_timeout(&regs->uma_cts, val,
				 !(val & UMA_CTS_EXEC_DONE), XFER_TIMEOUT);
	if (ret)
		printf("npcm_fiu: write timeout\n");

	return ret;
}

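/*
 * Generic SPI transfer: CS is asserted by software and the data is moved in
 * CHUNK_SIZE pieces, since one UMA transaction carries at most 16 data bytes.
 */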
static int npcm_fiu_spi_xfer(struct udevice *dev, unsigned int bitlen,
			     const void *dout, void *din, unsigned long flags)
{
	struct udevice *bus = dev->parent;
	struct npcm_fiu_priv *priv = dev_get_priv(bus);
	struct npcm_fiu_regs *regs = priv->regs;
	struct dm_spi_slave_plat *slave_plat =
			dev_get_parent_plat(dev);
	const u8 *tx = dout;
	u8 *rx = din;
	int bytes = bitlen / 8;
	int ret = 0;
	int len;

	if (flags & SPI_XFER_BEGIN)
		activate_cs(regs, slave_plat->cs);

	while (bytes) {
		len = (bytes > CHUNK_SIZE) ? CHUNK_SIZE : bytes;
		if (tx) {
			ret = fiu_uma_write(bus, tx, len);
			if (ret)
				break;
			tx += len;
		} else {
			ret = fiu_uma_read(bus, rx, len);
			if (ret)
				break;
			rx += len;
		}
		bytes -= len;
	}

	if (flags & SPI_XFER_END)
		deactivate_cs(regs, slave_plat->cs);

	return ret;
}

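/*
 * Run one UMA transaction for a spi-mem operation: encode the command, address
 * and dummy phases (only at the start of a transfer) plus the data-phase bus
 * width and size into UMA_CFG, load any write data, execute, and for reads
 * copy the result out of the data-read registers.
 */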
static int npcm_fiu_uma_operation(struct npcm_fiu_priv *priv, const struct spi_mem_op *op,
				  u32 addr, const u8 *tx, u8 *rx, u32 nbytes, bool started)
{
	struct npcm_fiu_regs *regs = priv->regs;
	u32 uma_cfg = 0, val;
	u32 data_reg[4];
	int ret;

	debug("fiu_uma: opcode 0x%x, dir %d, addr 0x%x, %d bytes\n",
	      op->cmd.opcode, op->data.dir, addr, nbytes);
	debug("         buswidth cmd:%d, addr:%d, dummy:%d, data:%d\n",
	      op->cmd.buswidth, op->addr.buswidth, op->dummy.buswidth,
	      op->data.buswidth);
	debug("         size cmd:%d, addr:%d, dummy:%d, data:%d\n",
	      1, op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
	debug("         tx %p, rx %p\n", tx, rx);

	if (!started) {
		/* Send cmd/addr at the beginning of a transaction */
		writel(op->cmd.opcode, &regs->uma_cmd);

		uma_cfg |= FIELD_PREP(UMA_CFG_CMBPCK_MASK, ilog2(op->cmd.buswidth)) |
			   (1 << UMA_CFG_CMDSIZ_SHIFT);
		/* Configure addr bytes */
		if (op->addr.nbytes) {
			uma_cfg |= FIELD_PREP(UMA_CFG_ADBPCK_MASK, ilog2(op->addr.buswidth)) |
				   FIELD_PREP(UMA_CFG_ADDSIZ_MASK, op->addr.nbytes);
			writel(addr, &regs->uma_addr);
		}
		/* Configure dummy bytes */
		if (op->dummy.nbytes)
			uma_cfg |= FIELD_PREP(UMA_CFG_DBPCK_MASK, ilog2(op->dummy.buswidth)) |
				   FIELD_PREP(UMA_CFG_DBSIZ_MASK, op->dummy.nbytes);
	}
	/* Set data bus width and data size */
	if (op->data.dir == SPI_MEM_DATA_IN && nbytes)
		uma_cfg |= FIELD_PREP(UMA_CFG_RDBPCK_MASK, ilog2(op->data.buswidth)) |
			   FIELD_PREP(UMA_CFG_RDATSIZ_MASK, nbytes);
	else if (op->data.dir == SPI_MEM_DATA_OUT && nbytes)
		uma_cfg |= FIELD_PREP(UMA_CFG_WDBPCK_MASK, ilog2(op->data.buswidth)) |
			   FIELD_PREP(UMA_CFG_WDATSIZ_MASK, nbytes);
	writel(uma_cfg, &regs->uma_cfg);

	if (op->data.dir == SPI_MEM_DATA_OUT && nbytes) {
		memcpy(data_reg, tx, nbytes);

		if (nbytes)
			writel(data_reg[0], &regs->uma_dw0);
		if (nbytes > DW_SIZE)
			writel(data_reg[1], &regs->uma_dw1);
		if (nbytes > DW_SIZE * 2)
			writel(data_reg[2], &regs->uma_dw2);
		if (nbytes > DW_SIZE * 3)
			writel(data_reg[3], &regs->uma_dw3);
	}
	/* Initiate the transaction */
	writel(readl(&regs->uma_cts) | UMA_CTS_EXEC_DONE, &regs->uma_cts);

	/* Wait for completion */
	ret = readl_poll_timeout(&regs->uma_cts, val,
				 !(val & UMA_CTS_EXEC_DONE), XFER_TIMEOUT);
	if (ret) {
		printf("npcm_fiu: UMA op timeout\n");
		return ret;
	}

	if (op->data.dir == SPI_MEM_DATA_IN && nbytes) {
		if (nbytes)
			data_reg[0] = readl(&regs->uma_dr0);
		if (nbytes > DW_SIZE)
			data_reg[1] = readl(&regs->uma_dr1);
		if (nbytes > DW_SIZE * 2)
			data_reg[2] = readl(&regs->uma_dr2);
		if (nbytes > DW_SIZE * 3)
			data_reg[3] = readl(&regs->uma_dr3);

		memcpy(rx, data_reg, nbytes);
	}

	return 0;
}

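/*
 * Execute a spi-mem operation. Transfers larger than CHUNK_SIZE are split into
 * multiple UMA transactions; writes keep CS asserted under software control
 * across chunks, while reads let the hardware toggle CS and resend the
 * command/address for each chunk.
 */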
static int npcm_fiu_exec_op(struct spi_slave *slave,
			    const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct npcm_fiu_priv *priv = dev_get_priv(bus);
	struct npcm_fiu_regs *regs = priv->regs;
	struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(slave->dev);
	u32 bytes, len, addr;
	const u8 *tx;
	u8 *rx;
	bool started = false;
	int ret;

	bytes = op->data.nbytes;
	addr = (u32)op->addr.val;
	if (!bytes) {
		activate_cs(regs, slave_plat->cs);
		ret = npcm_fiu_uma_operation(priv, op, addr, NULL, NULL, 0, false);
		deactivate_cs(regs, slave_plat->cs);
		return ret;
	}

	tx = op->data.buf.out;
	rx = op->data.buf.in;
	/*
	 * Use software-controlled CS for writes to extend the transaction and
	 * keep the Write Enable state.
	 * Use hardware-controlled CS for reads to avoid clock and timing issues.
	 */
	if (op->data.dir == SPI_MEM_DATA_OUT)
		activate_cs(regs, slave_plat->cs);
	else
		writel(FIELD_PREP(UMA_CTS_DEV_NUM_MASK, slave_plat->cs) | UMA_CTS_SW_CS,
		       &regs->uma_cts);
	while (bytes) {
		len = (bytes > CHUNK_SIZE) ? CHUNK_SIZE : bytes;
		ret = npcm_fiu_uma_operation(priv, op, addr, tx, rx, len, started);
		if (ret)
			return ret;

		/* CS is kept low for UMA writes to extend the transaction */
		if (op->data.dir == SPI_MEM_DATA_OUT)
			started = true;

		bytes -= len;
		addr += len;
		if (tx)
			tx += len;
		if (rx)
			rx += len;
	}
	if (op->data.dir == SPI_MEM_DATA_OUT)
		deactivate_cs(regs, slave_plat->cs);

	return 0;
}

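/*
 * Map the controller registers and, when a "vqspi-supply" regulator together
 * with a "vqspi-microvolt" value is provided in the device tree, set the
 * flash IO voltage accordingly.
 */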
static int npcm_fiu_spi_probe(struct udevice *bus)
{
	struct npcm_fiu_priv *priv = dev_get_priv(bus);
	struct udevice *vqspi_supply;
	int vqspi_uv;
	int ret;

	priv->regs = (struct npcm_fiu_regs *)dev_read_addr_ptr(bus);

	if (IS_ENABLED(CONFIG_DM_REGULATOR)) {
		ret = device_get_supply_regulator(bus, "vqspi-supply", &vqspi_supply);
		vqspi_uv = dev_read_u32_default(bus, "vqspi-microvolt", 0);
		/* Set the IO voltage only if both the regulator and value are given */
		if (!ret && vqspi_uv)
			regulator_set_value(vqspi_supply, vqspi_uv);
	}

	return 0;
}

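/*
 * For a controller flagged as "nuvoton,spix-mode", program the direct-write
 * configuration at bind time: 16-byte write bursts, 24-bit addresses and
 * 4 bits per clock for the address and data phases.
 */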
static int npcm_fiu_spi_bind(struct udevice *bus)
{
	struct npcm_fiu_regs *regs;

	if (dev_read_bool(bus, "nuvoton,spix-mode")) {
		regs = dev_read_addr_ptr(bus);
		if (!regs)
			return -EINVAL;

		/* Set up the direct-write configuration for SPIX */
		writel(FIELD_PREP(DWR_CFG_WBURST_MASK, DWR_WBURST_16_BYTE) |
		       FIELD_PREP(DWR_CFG_ADDSIZ_MASK, DWR_ADDSIZ_24_BIT) |
		       FIELD_PREP(DWR_CFG_ABPCK_MASK, DWR_ABPCK_4_BIT_PER_CLK) |
		       FIELD_PREP(DRW_CFG_DBPCK_MASK, DWR_DBPCK_4_BIT_PER_CLK) |
		       DRW_CFG_WRCMD, &regs->dwr_cfg);
	}

	return 0;
}

static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
	.exec_op = npcm_fiu_exec_op,
};

static const struct dm_spi_ops npcm_fiu_spi_ops = {
	.xfer           = npcm_fiu_spi_xfer,
	.set_speed      = npcm_fiu_spi_set_speed,
	.set_mode       = npcm_fiu_spi_set_mode,
	.mem_ops        = &npcm_fiu_mem_ops,
};

static const struct udevice_id npcm_fiu_spi_ids[] = {
	{ .compatible = "nuvoton,npcm845-fiu" },
	{ .compatible = "nuvoton,npcm750-fiu" },
	{ }
};

U_BOOT_DRIVER(npcm_fiu_spi) = {
	.name   = "npcm_fiu_spi",
	.id     = UCLASS_SPI,
	.of_match = npcm_fiu_spi_ids,
	.ops    = &npcm_fiu_spi_ops,
	.priv_auto = sizeof(struct npcm_fiu_priv),
	.probe  = npcm_fiu_spi_probe,
	.bind = npcm_fiu_spi_bind,
};