// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <dm/of_access.h>
#include <malloc.h>
#include <memalign.h>
#include <nand.h>
#include <pci.h>
#include <pci_ids.h>
#include <time.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clock.h>
#include "octeontx_bch.h"

static LIST_HEAD(octeontx_bch_devices);
static unsigned int num_vfs = BCH_NR_VF;
static void *bch_pf;
static void *bch_vf;
static void *token;
static bool bch_pf_initialized;
static bool bch_vf_initialized;

static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
{
	int ret;

	ret = pci_sriov_init(dev, nr_virtfn);
	if (ret)
		printf("%s(%s): pci_sriov_init returned %d\n", __func__,
		       dev->name, ret);
	return ret;
}

void *octeontx_bch_getv(void)
{
	if (!bch_vf)
		return NULL;
	if (bch_vf_initialized && bch_pf_initialized)
		return bch_vf;
	else
		return NULL;
}

void octeontx_bch_putv(void *tok)
{
	bch_vf_initialized = !!tok;
	bch_vf = tok;
}

void *octeontx_bch_getp(void)
{
	return token;
}

void octeontx_bch_putp(void *tok)
{
	bch_pf = tok;
	bch_pf_initialized = !!tok;
}

static int do_bch_init(struct bch_device *bch)
{
	return 0;
}

static void bch_reset(struct bch_device *bch)
{
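	/*
	 * Writing 1 to BCH_CTL requests a reset of the BCH block; give the
	 * hardware 2 ms to settle before it is touched again.
	 */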
	writeq(1, bch->reg_base + BCH_CTL);
	mdelay(2);
}

static void bch_disable(struct bch_device *bch)
{
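	/*
	 * Disable all error interrupts (the W1C register clears enable bits
	 * on writing 1), acknowledge anything pending, then reset the block.
	 */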
	writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
	writeq(~0ull, bch->reg_base + BCH_ERR_INT);
	bch_reset(bch);
}

static u64 bch_check_bist_status(struct bch_device *bch)
{
	return readq(bch->reg_base + BCH_BIST_RESULT);
}

static int bch_device_init(struct bch_device *bch)
{
	u64 bist;
	int rc;

	debug("%s: Resetting...\n", __func__);
	/* Reset the PF when probed first */
	bch_reset(bch);

	debug("%s: Checking BIST...\n", __func__);
	/* Check BIST status */
	bist = bch_check_bist_status(bch);
	if (bist) {
		dev_err(bch->dev, "BCH BIST failed with code 0x%llx\n", bist);
		return -ENODEV;
	}

	/* Get max VQs/VFs supported by the device */
	bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
	debug("%s: %d vfs\n", __func__, bch->max_vfs);
	if (num_vfs > bch->max_vfs) {
		dev_warn(bch->dev,
			 "Num of VFs to enable %d is greater than max available. Enabling %d VFs.\n",
			 num_vfs, bch->max_vfs);
		num_vfs = bch->max_vfs;
	}
	bch->vfs_enabled = bch->max_vfs;
	/* Get number of VQs/VFs to be enabled */
	/* TODO: Get CLK frequency */
	/* Reset device parameters */

	debug("%s: Doing initialization\n", __func__);
	rc = do_bch_init(bch);

	return rc;
}

static int bch_sriov_configure(struct udevice *dev, int numvfs)
{
	struct bch_device *bch = dev_get_priv(dev);
	int ret = -EBUSY;

	debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__,
	      dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled);
	if (bch->vfs_in_use)
		goto exit;

	ret = 0;

	if (numvfs > 0) {
		debug("%s: Enabling sriov\n", __func__);
		ret = pci_enable_sriov(dev, numvfs);
		if (ret == 0) {
			bch->flags |= BCH_FLAG_SRIOV_ENABLED;
			ret = numvfs;
			bch->vfs_enabled = numvfs;
		}
	}

	debug("VFs enabled: %d\n", ret);
exit:
	debug("%s: Returning %d\n", __func__, ret);
	return ret;
}

static int octeontx_pci_bchpf_probe(struct udevice *dev)
{
	struct bch_device *bch;
	int ret;

	debug("%s(%s)\n", __func__, dev->name);
	bch = dev_get_priv(dev);
	if (!bch)
		return -ENOMEM;

	bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				       PCI_REGION_TYPE, PCI_REGION_MEM);
	bch->dev = dev;

	debug("%s: base address: %p\n", __func__, bch->reg_base);
	ret = bch_device_init(bch);
	if (ret) {
		printf("%s(%s): init returned %d\n", __func__, dev->name, ret);
		return ret;
	}
	INIT_LIST_HEAD(&bch->list);
	list_add(&bch->list, &octeontx_bch_devices);
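	/* Remember the PF udevice so octeontx_bch_getp() can hand it back out */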
	token = (void *)dev;

	debug("%s: Configuring SRIOV\n", __func__);
	bch_sriov_configure(dev, num_vfs);
	debug("%s: Done.\n", __func__);
	octeontx_bch_putp(bch);

	return 0;
}

static const struct pci_device_id octeontx_bchpf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) },
	{},
};

static const struct pci_device_id octeontx_bchvf_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF) },
	{},
};

/**
 * Given a data block, calculate the ECC data and fill in the response
 *
 * @param vf		BCH virtual function to submit the command to
 * @param[in] block	8-byte aligned pointer to data block to calculate ECC
 * @param block_size	Size of block in bytes, must be a multiple of two.
 * @param bch_level	Number of errors that must be corrected. The number of
 *			parity bytes is equal to ((15 * bch_level) + 7) / 8.
 *			Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] ecc	8-byte aligned pointer to where ECC data should go
 * @param[out] resp	pointer to where the response will be written
 *
 * Return: Zero on success, negative on failure.
 */
int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
			u8 bch_level, dma_addr_t ecc, dma_addr_t resp)
{
	union bch_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.s.cword.ecc_gen = eg_gen;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.size = block_size;

	cmd.s.oword.ptr = ecc;
	cmd.s.iword.ptr = block;
	cmd.s.rword.ptr = resp;
	rc = octeontx_cmd_queue_write(QID_BCH, 1,
				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
	if (rc)
		return -1;

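	/* Ring the doorbell once to tell the engine a command was queued */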
	octeontx_bch_write_doorbell(1, vf);

	return 0;
}

/**
 * Given a data block and ECC data, correct the data block
 *
 * @param vf		BCH virtual function to submit the command to
 * @param[in] block_ecc_in	8-byte aligned pointer to data block with ECC
 *				data concatenated to the end to correct
 * @param block_size	Size of block in bytes, must be a multiple of
 *			two.
 * @param bch_level	Number of errors that must be corrected. The
 *			number of parity bytes is equal to
 *			((15 * bch_level) + 7) / 8.
 *			Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
 * @param[out] block_out	8-byte aligned pointer to corrected data buffer.
 *				This should not be the same as block_ecc_in.
 * @param[out] resp	pointer to where the response will be written
 *
 * Return: Zero on success, negative on failure.
 */
int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
			u16 block_size, u8 bch_level,
			dma_addr_t block_out, dma_addr_t resp)
{
	union bch_cmd cmd;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.s.cword.ecc_gen = eg_correct;
	cmd.s.cword.ecc_level = bch_level;
	cmd.s.cword.size = block_size;

	cmd.s.oword.ptr = block_out;
	cmd.s.iword.ptr = block_ecc_in;
	cmd.s.rword.ptr = resp;
	rc = octeontx_cmd_queue_write(QID_BCH, 1,
				      sizeof(cmd) / sizeof(uint64_t), cmd.u);
	if (rc)
		return -1;

	octeontx_bch_write_doorbell(1, vf);
	return 0;
}
EXPORT_SYMBOL(octeontx_bch_decode);

int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
		      dma_addr_t handle)
{
	ulong start = get_timer(0);

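	/* Poll the DONE bit for up to 10 ms; get_timer() counts milliseconds */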
	__iormb(); /* HW is updating *resp */
	while (!resp->s.done && get_timer(start) < 10)
		__iormb(); /* HW is updating *resp */

	if (resp->s.done)
		return 0;

	return -ETIMEDOUT;
}
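
/*
 * Illustrative usage sketch (not part of the driver): callers pair
 * octeontx_bch_encode()/octeontx_bch_decode() with octeontx_bch_wait()
 * on the response word. The buffer names below are hypothetical and the
 * bch_resp field layout is assumed from octeontx_bch.h:
 *
 *	resp->s.done = 0;			// resp: CPU view of the response word
 *	__iowmb();				// flush done = 0 before submitting
 *	if (!octeontx_bch_encode(vf, block_dma, block_size, bch_level,
 *				 ecc_dma, resp_dma))
 *		err = octeontx_bch_wait(vf, resp, resp_dma);
 *	// 0: ECC bytes are now valid behind ecc_dma; -ETIMEDOUT otherwise
 */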

struct bch_q octeontx_bch_q[QID_MAX];

static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id,
					 int max_depth, int fpa_pool,
					 int pool_size)
{
	/* some params are for later merge with CPT or cn83xx */
	struct bch_q *q = &octeontx_bch_q[queue_id];
	unsigned long paddr;
	u64 *chunk_buffer;
	int chunk = max_depth + 1;
	int i, size;

	if ((unsigned int)queue_id >= QID_MAX)
		return -EINVAL;
	if (max_depth & chunk) /* must be 2^N - 1 */
		return -EINVAL;

	size = NQS * chunk * sizeof(u64);
	chunk_buffer = dma_alloc_coherent(size, &paddr);
	if (!chunk_buffer)
		return -ENOMEM;

	q->base_paddr = paddr;
	q->dev = dev;
	q->index = 0;
	q->max_depth = max_depth;
	q->pool_size_m1 = pool_size;
	q->base_vaddr = chunk_buffer;

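	/*
	 * Link the NQS chunks into a ring: the last u64 slot of chunk i
	 * holds the DMA address of the first slot of chunk (i + 1) % NQS,
	 * letting the hardware chain from one command chunk to the next.
	 */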
	for (i = 0; i < NQS; i++) {
		u64 *ixp;
		int inext = (i + 1) * chunk - 1;
		int j = (i + 1) % NQS;
		int jnext = j * chunk;
		dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64);

		ixp = &chunk_buffer[inext];
		*ixp = jbase;
	}

	return 0;
}

static int octeontx_pci_bchvf_probe(struct udevice *dev)
{
	struct bch_vf *vf;
	union bch_vqx_ctl ctl;
	union bch_vqx_cmd_buf cbuf;
	int err;

	debug("%s(%s)\n", __func__, dev->name);
	vf = dev_get_priv(dev);
	if (!vf)
		return -ENOMEM;

	vf->dev = dev;

	/* Map the VF's configuration registers */
	vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				      PCI_REGION_TYPE, PCI_REGION_MEM);
	debug("%s: reg base: %p\n", __func__, vf->reg_base);

	err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
					    sizeof(union bch_cmd) * QDEPTH);
	if (err) {
		dev_err(dev, "octeontx_cmd_queue_initialize() failed\n");
		goto release;
	}

	ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0));

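	/*
	 * Configure the VF command buffer: ldwb/dfb select the hardware's
	 * read/free behavior for command chunks and size sets the queue
	 * depth. Exact field semantics follow the bch_vqx_cmd_buf layout
	 * in octeontx_bch.h.
	 */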
	cbuf.u = 0;
	cbuf.s.ldwb = 1;
	cbuf.s.dfb = 1;
	cbuf.s.size = QDEPTH;
	writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0));

	writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0));

	writeq(octeontx_bch_q[QID_BCH].base_paddr,
	       vf->reg_base + BCH_VQX_CMD_PTR(0));

	octeontx_bch_putv(vf);

	debug("%s: bch vf initialization complete\n", __func__);

	if (octeontx_bch_getv())
		return octeontx_pci_nand_deferred_probe();

	return -1;

release:
	return err;
}

static int octeontx_pci_bchpf_remove(struct udevice *dev)
{
	struct bch_device *bch = dev_get_priv(dev);

	bch_disable(bch);
	return 0;
}

U_BOOT_DRIVER(octeontx_pci_bchpf) = {
	.name	= BCHPF_DRIVER_NAME,
	.id	= UCLASS_MISC,
	.probe	= octeontx_pci_bchpf_probe,
	.remove	= octeontx_pci_bchpf_remove,
	.priv_auto	= sizeof(struct bch_device),
	.flags	= DM_FLAG_OS_PREPARE,
};

U_BOOT_DRIVER(octeontx_pci_bchvf) = {
	.name	= BCHVF_DRIVER_NAME,
	.id	= UCLASS_MISC,
	.probe	= octeontx_pci_bchvf_probe,
	.priv_auto	= sizeof(struct bch_vf),
};

U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table);
U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table);