1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/aer.h>
3 #include <linux/delay.h>
4 #include <linux/firmware.h>
5 #include <linux/list.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/pci.h>
9 #include <linux/pci_ids.h>
10
11 #include "nitrox_dev.h"
12 #include "nitrox_common.h"
13 #include "nitrox_csr.h"
14 #include "nitrox_hal.h"
15 #include "nitrox_isr.h"
16 #include "nitrox_debugfs.h"
17
18 #define CNN55XX_DEV_ID 0x12
19 #define UCODE_HLEN 48
20 #define DEFAULT_SE_GROUP 0
21 #define DEFAULT_AE_GROUP 0
22
23 #define DRIVER_VERSION "1.2"
24 #define CNN55XX_UCD_BLOCK_SIZE 32768
25 #define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
26 #define FW_DIR "cavium/"
27 /* SE microcode */
28 #define SE_FW FW_DIR "cnn55xx_se.fw"
29 /* AE microcode */
30 #define AE_FW FW_DIR "cnn55xx_ae.fw"
31
32 static const char nitrox_driver_name[] = "CNN55XX";
33
34 static LIST_HEAD(ndevlist);
35 static DEFINE_MUTEX(devlist_lock);
36 static unsigned int num_devices;
37
38 /*
39 * nitrox_pci_tbl - PCI Device ID Table
40 */
41 static const struct pci_device_id nitrox_pci_tbl[] = {
42 {PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
43 /* required last entry */
44 {0, }
45 };
46 MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);
47
/* Per-queue command queue depth; writable at runtime via sysfs (0644).
 * NOTE(review): the description claims a default of 2048 — presumably the
 * value of DEFAULT_CMD_QLEN; confirm against nitrox_dev.h.
 */
static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
51
52 /**
53 * struct ucode - Firmware Header
54 * @id: microcode ID
55 * @version: firmware version
56 * @code_size: code section size
57 * @raz: alignment
58 * @code: code section
59 */
60 struct ucode {
61 u8 id;
62 char version[VERSION_LEN - 1];
63 __be32 code_size;
64 u8 raz[12];
65 u64 code[];
66 };
67
68 /*
69 * write_to_ucd_unit - Write Firmware to NITROX UCD unit
70 */
write_to_ucd_unit(struct nitrox_device * ndev,u32 ucode_size,u64 * ucode_data,int block_num)71 static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
72 u64 *ucode_data, int block_num)
73 {
74 u32 code_size;
75 u64 offset, data;
76 int i = 0;
77
78 /*
79 * UCD structure
80 *
81 * -------------
82 * | BLK 7 |
83 * -------------
84 * | BLK 6 |
85 * -------------
86 * | ... |
87 * -------------
88 * | BLK 0 |
89 * -------------
90 * Total of 8 blocks, each size 32KB
91 */
92
93 /* set the block number */
94 offset = UCD_UCODE_LOAD_BLOCK_NUM;
95 nitrox_write_csr(ndev, offset, block_num);
96
97 code_size = roundup(ucode_size, 16);
98 while (code_size) {
99 data = ucode_data[i];
100 /* write 8 bytes at a time */
101 offset = UCD_UCODE_LOAD_IDX_DATAX(i);
102 nitrox_write_csr(ndev, offset, data);
103 code_size -= 8;
104 i++;
105 }
106
107 usleep_range(300, 400);
108 }
109
nitrox_load_fw(struct nitrox_device * ndev)110 static int nitrox_load_fw(struct nitrox_device *ndev)
111 {
112 const struct firmware *fw;
113 const char *fw_name;
114 struct ucode *ucode;
115 u64 *ucode_data;
116 u64 offset;
117 union ucd_core_eid_ucode_block_num core_2_eid_val;
118 union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
119 union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
120 u32 ucode_size;
121 int ret, i = 0;
122
123 fw_name = SE_FW;
124 dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);
125
126 ret = request_firmware(&fw, fw_name, DEV(ndev));
127 if (ret < 0) {
128 dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
129 return ret;
130 }
131
132 ucode = (struct ucode *)fw->data;
133
134 ucode_size = be32_to_cpu(ucode->code_size) * 2;
135 if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
136 dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
137 ucode_size, fw_name);
138 release_firmware(fw);
139 return -EINVAL;
140 }
141 ucode_data = ucode->code;
142
143 /* copy the firmware version */
144 memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
145 ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';
146
147 /* Load SE Firmware on UCD Block 0 */
148 write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);
149
150 release_firmware(fw);
151
152 /* put all SE cores in DEFAULT_SE_GROUP */
153 offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
154 nitrox_write_csr(ndev, offset, (~0ULL));
155
156 /* write block number and firmware length
157 * bit:<2:0> block number
158 * bit:3 is set SE uses 32KB microcode
159 * bit:3 is clear SE uses 64KB microcode
160 */
161 core_2_eid_val.value = 0ULL;
162 core_2_eid_val.ucode_blk = 0;
163 if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
164 core_2_eid_val.ucode_len = 1;
165 else
166 core_2_eid_val.ucode_len = 0;
167
168 for (i = 0; i < ndev->hw.se_cores; i++) {
169 offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
170 nitrox_write_csr(ndev, offset, core_2_eid_val.value);
171 }
172
173
174 fw_name = AE_FW;
175 dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);
176
177 ret = request_firmware(&fw, fw_name, DEV(ndev));
178 if (ret < 0) {
179 dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
180 return ret;
181 }
182
183 ucode = (struct ucode *)fw->data;
184
185 ucode_size = be32_to_cpu(ucode->code_size) * 2;
186 if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
187 dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
188 ucode_size, fw_name);
189 release_firmware(fw);
190 return -EINVAL;
191 }
192 ucode_data = ucode->code;
193
194 /* copy the firmware version */
195 memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
196 ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';
197
198 /* Load AE Firmware on UCD Block 2 */
199 write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);
200
201 release_firmware(fw);
202
203 /* put all AE cores in DEFAULT_AE_GROUP */
204 offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
205 aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
206 nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
207 offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
208 aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
209 nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);
210
211 /* write block number and firmware length
212 * bit:<2:0> block number
213 * bit:3 is set AE uses 32KB microcode
214 * bit:3 is clear AE uses 64KB microcode
215 */
216 core_2_eid_val.value = 0ULL;
217 core_2_eid_val.ucode_blk = 2;
218 if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
219 core_2_eid_val.ucode_len = 1;
220 else
221 core_2_eid_val.ucode_len = 0;
222
223 for (i = 0; i < ndev->hw.ae_cores; i++) {
224 offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
225 nitrox_write_csr(ndev, offset, core_2_eid_val.value);
226 }
227
228 return 0;
229 }
230
231 /**
232 * nitrox_add_to_devlist - add NITROX device to global device list
233 * @ndev: NITROX device
234 */
nitrox_add_to_devlist(struct nitrox_device * ndev)235 static int nitrox_add_to_devlist(struct nitrox_device *ndev)
236 {
237 struct nitrox_device *dev;
238 int ret = 0;
239
240 INIT_LIST_HEAD(&ndev->list);
241 refcount_set(&ndev->refcnt, 1);
242
243 mutex_lock(&devlist_lock);
244 list_for_each_entry(dev, &ndevlist, list) {
245 if (dev == ndev) {
246 ret = -EEXIST;
247 goto unlock;
248 }
249 }
250 ndev->idx = num_devices++;
251 list_add_tail(&ndev->list, &ndevlist);
252 unlock:
253 mutex_unlock(&devlist_lock);
254 return ret;
255 }
256
257 /**
258 * nitrox_remove_from_devlist - remove NITROX device from
259 * global device list
260 * @ndev: NITROX device
261 */
nitrox_remove_from_devlist(struct nitrox_device * ndev)262 static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
263 {
264 mutex_lock(&devlist_lock);
265 list_del(&ndev->list);
266 num_devices--;
267 mutex_unlock(&devlist_lock);
268 }
269
nitrox_get_first_device(void)270 struct nitrox_device *nitrox_get_first_device(void)
271 {
272 struct nitrox_device *ndev = NULL, *iter;
273
274 mutex_lock(&devlist_lock);
275 list_for_each_entry(iter, &ndevlist, list) {
276 if (nitrox_ready(iter)) {
277 ndev = iter;
278 break;
279 }
280 }
281 mutex_unlock(&devlist_lock);
282 if (!ndev)
283 return NULL;
284
285 refcount_inc(&ndev->refcnt);
286 /* barrier to sync with other cpus */
287 smp_mb__after_atomic();
288 return ndev;
289 }
290
nitrox_put_device(struct nitrox_device * ndev)291 void nitrox_put_device(struct nitrox_device *ndev)
292 {
293 if (!ndev)
294 return;
295
296 refcount_dec(&ndev->refcnt);
297 /* barrier to sync with other cpus */
298 smp_mb__after_atomic();
299 }
300
nitrox_device_flr(struct pci_dev * pdev)301 static int nitrox_device_flr(struct pci_dev *pdev)
302 {
303 int pos = 0;
304
305 pos = pci_save_state(pdev);
306 if (pos) {
307 dev_err(&pdev->dev, "Failed to save pci state\n");
308 return -ENOMEM;
309 }
310
311 pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
312
313 pci_restore_state(pdev);
314
315 return 0;
316 }
317
/*
 * nitrox_pf_sw_init - set up PF software state and interrupts.
 * On interrupt registration failure the common software state is torn
 * back down, so the caller sees an all-or-nothing result.
 */
static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int ret = nitrox_common_sw_init(ndev);

	if (ret)
		return ret;

	ret = nitrox_register_interrupts(ndev);
	if (ret)
		nitrox_common_sw_cleanup(ndev);

	return ret;
}
332
/* Tear down PF software state: interrupts first, then common state —
 * the reverse of nitrox_pf_sw_init().
 */
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}
338
339 /**
340 * nitrox_bist_check - Check NITROX BIST registers status
341 * @ndev: NITROX device
342 */
nitrox_bist_check(struct nitrox_device * ndev)343 static int nitrox_bist_check(struct nitrox_device *ndev)
344 {
345 u64 value = 0;
346 int i;
347
348 for (i = 0; i < NR_CLUSTERS; i++) {
349 value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
350 value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
351 }
352 value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
353 value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
354 value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
355 value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
356 value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
357 value += nitrox_read_csr(ndev, POM_BIST_REG);
358 value += nitrox_read_csr(ndev, BMI_BIST_REG);
359 value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
360 value += nitrox_read_csr(ndev, BMO_BIST_REG);
361 value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
362 value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
363 if (value)
364 return -EIO;
365 return 0;
366 }
367
/* nitrox_pf_hw_init - bring up the PF hardware units.
 *
 * Runs the BIST sanity check, reads core/cluster information, configures
 * each functional unit, loads SE/AE microcode, and finally enables the
 * engine (EMU) unit.  The unit-configuration order below follows the
 * original bring-up sequence — presumably hardware-mandated; do not
 * reorder without consulting the HRM.
 * Returns 0 on success, negative errno on failure.
 */
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}
	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_core_unit(ndev);
	nitrox_config_aqm_unit(ndev);
	nitrox_config_nps_pkt_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on cores */
	err = nitrox_load_fw(ndev);
	if (err)
		return err;

	/* EMU enabled last, after microcode is in place */
	nitrox_config_emu_unit(ndev);

	return 0;
}
401
402 /**
403 * nitrox_probe - NITROX Initialization function.
404 * @pdev: PCI device information struct
405 * @id: entry in nitrox_pci_tbl
406 *
407 * Return: 0, if the driver is bound to the device, or
408 * a negative error if there is failure.
409 */
nitrox_probe(struct pci_dev * pdev,const struct pci_device_id * id)410 static int nitrox_probe(struct pci_dev *pdev,
411 const struct pci_device_id *id)
412 {
413 struct nitrox_device *ndev;
414 int err;
415
416 dev_info_once(&pdev->dev, "%s driver version %s\n",
417 nitrox_driver_name, DRIVER_VERSION);
418
419 err = pci_enable_device_mem(pdev);
420 if (err)
421 return err;
422
423 /* do FLR */
424 err = nitrox_device_flr(pdev);
425 if (err) {
426 dev_err(&pdev->dev, "FLR failed\n");
427 goto flr_fail;
428 }
429
430 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
431 dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
432 } else {
433 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
434 if (err) {
435 dev_err(&pdev->dev, "DMA configuration failed\n");
436 goto flr_fail;
437 }
438 }
439
440 err = pci_request_mem_regions(pdev, nitrox_driver_name);
441 if (err)
442 goto flr_fail;
443 pci_set_master(pdev);
444
445 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
446 if (!ndev) {
447 err = -ENOMEM;
448 goto ndev_fail;
449 }
450
451 pci_set_drvdata(pdev, ndev);
452 ndev->pdev = pdev;
453
454 /* add to device list */
455 nitrox_add_to_devlist(ndev);
456
457 ndev->hw.vendor_id = pdev->vendor;
458 ndev->hw.device_id = pdev->device;
459 ndev->hw.revision_id = pdev->revision;
460 /* command timeout in jiffies */
461 ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
462 ndev->node = dev_to_node(&pdev->dev);
463 if (ndev->node == NUMA_NO_NODE)
464 ndev->node = 0;
465
466 ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
467 pci_resource_len(pdev, 0));
468 if (!ndev->bar_addr) {
469 err = -EIO;
470 goto ioremap_err;
471 }
472 /* allocate command queus based on cpus, max queues are 64 */
473 ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
474 ndev->qlen = qlen;
475
476 err = nitrox_pf_sw_init(ndev);
477 if (err)
478 goto pf_sw_fail;
479
480 err = nitrox_pf_hw_init(ndev);
481 if (err)
482 goto pf_hw_fail;
483
484 nitrox_debugfs_init(ndev);
485
486 /* clear the statistics */
487 atomic64_set(&ndev->stats.posted, 0);
488 atomic64_set(&ndev->stats.completed, 0);
489 atomic64_set(&ndev->stats.dropped, 0);
490
491 atomic_set(&ndev->state, __NDEV_READY);
492 /* barrier to sync with other cpus */
493 smp_mb__after_atomic();
494
495 err = nitrox_crypto_register();
496 if (err)
497 goto crypto_fail;
498
499 return 0;
500
501 crypto_fail:
502 nitrox_debugfs_exit(ndev);
503 atomic_set(&ndev->state, __NDEV_NOT_READY);
504 /* barrier to sync with other cpus */
505 smp_mb__after_atomic();
506 pf_hw_fail:
507 nitrox_pf_sw_cleanup(ndev);
508 pf_sw_fail:
509 iounmap(ndev->bar_addr);
510 ioremap_err:
511 nitrox_remove_from_devlist(ndev);
512 kfree(ndev);
513 pci_set_drvdata(pdev, NULL);
514 ndev_fail:
515 pci_release_mem_regions(pdev);
516 flr_fail:
517 pci_disable_device(pdev);
518 return err;
519 }
520
521 /**
522 * nitrox_remove - Unbind the driver from the device.
523 * @pdev: PCI device information struct
524 */
nitrox_remove(struct pci_dev * pdev)525 static void nitrox_remove(struct pci_dev *pdev)
526 {
527 struct nitrox_device *ndev = pci_get_drvdata(pdev);
528
529 if (!ndev)
530 return;
531
532 if (!refcount_dec_and_test(&ndev->refcnt)) {
533 dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
534 refcount_read(&ndev->refcnt));
535 return;
536 }
537
538 dev_info(DEV(ndev), "Removing Device %x:%x\n",
539 ndev->hw.vendor_id, ndev->hw.device_id);
540
541 atomic_set(&ndev->state, __NDEV_NOT_READY);
542 /* barrier to sync with other cpus */
543 smp_mb__after_atomic();
544
545 nitrox_remove_from_devlist(ndev);
546
547 /* disable SR-IOV */
548 nitrox_sriov_configure(pdev, 0);
549 nitrox_crypto_unregister();
550 nitrox_debugfs_exit(ndev);
551 nitrox_pf_sw_cleanup(ndev);
552
553 iounmap(ndev->bar_addr);
554 kfree(ndev);
555
556 pci_set_drvdata(pdev, NULL);
557 pci_release_mem_regions(pdev);
558 pci_disable_device(pdev);
559 }
560
/* Minimal shutdown/kexec path: release PCI resources only.
 * NOTE(review): ndev and its BAR mapping are intentionally not freed here
 * (the system is going down) — confirm this matches the original intent.
 */
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
567
568 static struct pci_driver nitrox_driver = {
569 .name = nitrox_driver_name,
570 .id_table = nitrox_pci_tbl,
571 .probe = nitrox_probe,
572 .remove = nitrox_remove,
573 .shutdown = nitrox_shutdown,
574 .sriov_configure = nitrox_sriov_configure,
575 };
576
577 module_pci_driver(nitrox_driver);
578
579 MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
580 MODULE_DESCRIPTION("Cavium CNN55XX PF Driver" DRIVER_VERSION " ");
581 MODULE_LICENSE("GPL");
582 MODULE_VERSION(DRIVER_VERSION);
583 MODULE_FIRMWARE(SE_FW);
584