Lines matching references to rf (irdma RDMA driver, drivers/infiniband/hw/irdma/hw.c)

74 static void irdma_puda_ce_handler(struct irdma_pci_f *rf,  in irdma_puda_ce_handler()  argument
77 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_puda_ce_handler()
104 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq) in irdma_process_ceq() argument
106 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_process_ceq()
126 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); in irdma_process_ceq()
129 irdma_puda_ce_handler(rf, cq); in irdma_process_ceq()
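
Note on lines 104-129 (irdma_process_ceq): the loop drains the CEQ and routes each completed CQ by type. A reconstructed, abridged sketch of that dispatch; treat the exact shape and the cq_type names as an approximation of the upstream function, not a verbatim copy:

    do {
        cq = irdma_sc_process_ceq(dev, sc_ceq);   /* next CQ with pending work */
        if (!cq)
            break;

        if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
            /* CQP completions are heavyweight: defer them to a workqueue */
            queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
        else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
                 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
            irdma_puda_ce_handler(rf, cq);        /* privileged ILQ/IEQ CQs */
    } while (1);
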
202 static void irdma_process_aeq(struct irdma_pci_f *rf) in irdma_process_aeq() argument
204 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_process_aeq()
205 struct irdma_aeq *aeq = &rf->aeq; in irdma_process_aeq()
215 struct irdma_device *iwdev = rf->iwdev; in irdma_process_aeq()
236 spin_lock_irqsave(&rf->qptable_lock, flags); in irdma_process_aeq()
237 iwqp = rf->qp_table[info->qp_cq_id]; in irdma_process_aeq()
239 spin_unlock_irqrestore(&rf->qptable_lock, in irdma_process_aeq()
251 spin_unlock_irqrestore(&rf->qptable_lock, flags); in irdma_process_aeq()
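
Note on lines 236-251 (irdma_process_aeq): the QP lookup follows the usual lock-then-pin pattern, so a destroy racing with the async event cannot free the QP under the handler. Abridged shape, mirroring the calls visible above:

    spin_lock_irqsave(&rf->qptable_lock, flags);
    iwqp = rf->qp_table[info->qp_cq_id];
    if (!iwqp) {
        /* QP already destroyed; drop the lock and skip the event */
        spin_unlock_irqrestore(&rf->qptable_lock, flags);
        continue;
    }
    irdma_qp_add_ref(&iwqp->ibqp);   /* pin the QP before dropping the lock */
    spin_unlock_irqrestore(&rf->qptable_lock, flags);
    /* ... handle the event, then irdma_qp_rem_ref(&iwqp->ibqp) ... */
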
431 struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); in irdma_dpc() local
433 if (rf->msix_shared) in irdma_dpc()
434 irdma_process_ceq(rf, rf->ceqlist); in irdma_dpc()
435 irdma_process_aeq(rf); in irdma_dpc()
436 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx); in irdma_dpc()
446 struct irdma_pci_f *rf = iwceq->rf; in irdma_ceq_dpc() local
448 irdma_process_ceq(rf, iwceq); in irdma_ceq_dpc()
449 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx); in irdma_ceq_dpc()
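
Note on lines 431-449 (irdma_dpc/irdma_ceq_dpc): the hard-IRQ handler only schedules a tasklet; the bottom half drains the queues and re-arms the vector last. A minimal compilable sketch of the pattern with stand-in types (demo_* names are hypothetical):

    #include <linux/interrupt.h>

    struct demo_dev {                        /* stand-in for irdma_pci_f */
        struct tasklet_struct dpc_tasklet;
        bool msix_shared;
    };

    static void demo_dpc(struct tasklet_struct *t)
    {
        /* from_tasklet() is container_of() keyed on the tasklet member */
        struct demo_dev *dev = from_tasklet(dev, t, dpc_tasklet);

        if (dev->msix_shared)
            ; /* drain CEQ 0 here */
        /* drain the AEQ here; re-enable the interrupt last so events
         * arriving during processing still raise a fresh interrupt */
    }

    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
        struct demo_dev *dev = data;

        tasklet_schedule(&dev->dpc_tasklet);  /* defer out of hard-IRQ */
        return IRQ_HANDLED;
    }
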
459 static enum irdma_status_code irdma_save_msix_info(struct irdma_pci_f *rf) in irdma_save_msix_info() argument
468 if (!rf->msix_count) in irdma_save_msix_info()
471 size = sizeof(struct irdma_msix_vector) * rf->msix_count; in irdma_save_msix_info()
472 size += struct_size(iw_qvlist, qv_info, rf->msix_count); in irdma_save_msix_info()
473 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); in irdma_save_msix_info()
474 if (!rf->iw_msixtbl) in irdma_save_msix_info()
477 rf->iw_qvlist = (struct irdma_qvlist_info *) in irdma_save_msix_info()
478 (&rf->iw_msixtbl[rf->msix_count]); in irdma_save_msix_info()
479 iw_qvlist = rf->iw_qvlist; in irdma_save_msix_info()
481 iw_qvlist->num_vectors = rf->msix_count; in irdma_save_msix_info()
482 if (rf->msix_count <= num_online_cpus()) in irdma_save_msix_info()
483 rf->msix_shared = true; in irdma_save_msix_info()
485 pmsix = rf->msix_entries; in irdma_save_msix_info()
486 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { in irdma_save_msix_info()
487 rf->iw_msixtbl[i].idx = pmsix->entry; in irdma_save_msix_info()
488 rf->iw_msixtbl[i].irq = pmsix->vector; in irdma_save_msix_info()
489 rf->iw_msixtbl[i].cpu_affinity = ceq_idx; in irdma_save_msix_info()
492 if (rf->msix_shared) in irdma_save_msix_info()
501 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; in irdma_save_msix_info()
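
Note on lines 459-501 (irdma_save_msix_info): one kzalloc() holds the MSI-X vector table followed by the qvlist with its flexible qv_info array, and the interior pointer is fixed up afterwards. A runnable plain-C illustration of that single-allocation layout; all types here are simplified stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    struct msix_vector { int idx, irq, cpu_affinity; };
    struct qv_info     { int v_idx, ceq_idx, aeq_idx; };
    struct qvlist_info {
        unsigned int num_vectors;
        struct qv_info qv_info[];            /* flexible array member */
    };

    int main(void)
    {
        unsigned int n = 4;                  /* msix_count stand-in */
        /* kernel: size  = sizeof(struct irdma_msix_vector) * n;
         *         size += struct_size(iw_qvlist, qv_info, n);     */
        size_t size = sizeof(struct msix_vector) * n +
                      sizeof(struct qvlist_info) +
                      sizeof(struct qv_info) * n;

        struct msix_vector *tbl = calloc(1, size);
        if (!tbl)
            return 1;

        /* qvlist lives immediately after the n-entry vector table */
        struct qvlist_info *qvlist = (struct qvlist_info *)&tbl[n];
        qvlist->num_vectors = n;

        printf("one %zu-byte allocation, qvlist at offset %zu\n",
               size, (size_t)((char *)qvlist - (char *)tbl));
        free(tbl);
        return 0;
    }
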
515 struct irdma_pci_f *rf = data; in irdma_irq_handler() local
517 tasklet_schedule(&rf->dpc_tasklet); in irdma_irq_handler()
532 ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n", in irdma_ceq_handler()
547 static void irdma_destroy_irq(struct irdma_pci_f *rf, in irdma_destroy_irq() argument
550 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_irq()
565 static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp) in irdma_destroy_cqp() argument
568 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_cqp()
569 struct irdma_cqp *cqp = &rf->cqp; in irdma_destroy_cqp()
571 if (rf->cqp_cmpl_wq) in irdma_destroy_cqp()
572 destroy_workqueue(rf->cqp_cmpl_wq); in irdma_destroy_cqp()
578 irdma_cleanup_pending_cqp_op(rf); in irdma_destroy_cqp()
588 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf) in irdma_destroy_virt_aeq() argument
590 struct irdma_aeq *aeq = &rf->aeq; in irdma_destroy_virt_aeq()
594 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); in irdma_destroy_virt_aeq()
595 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); in irdma_destroy_virt_aeq()
607 static void irdma_destroy_aeq(struct irdma_pci_f *rf) in irdma_destroy_aeq() argument
610 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_aeq()
611 struct irdma_aeq *aeq = &rf->aeq; in irdma_destroy_aeq()
613 if (!rf->msix_shared) { in irdma_destroy_aeq()
614 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false); in irdma_destroy_aeq()
615 irdma_destroy_irq(rf, rf->iw_msixtbl, rf); in irdma_destroy_aeq()
617 if (rf->reset) in irdma_destroy_aeq()
627 irdma_destroy_virt_aeq(rf); in irdma_destroy_aeq()
643 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq) in irdma_destroy_ceq() argument
646 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_ceq()
648 if (rf->reset) in irdma_destroy_ceq()
673 static void irdma_del_ceq_0(struct irdma_pci_f *rf) in irdma_del_ceq_0() argument
675 struct irdma_ceq *iwceq = rf->ceqlist; in irdma_del_ceq_0()
678 if (rf->msix_shared) { in irdma_del_ceq_0()
679 msix_vec = &rf->iw_msixtbl[0]; in irdma_del_ceq_0()
680 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, in irdma_del_ceq_0()
683 irdma_destroy_irq(rf, msix_vec, rf); in irdma_del_ceq_0()
685 msix_vec = &rf->iw_msixtbl[1]; in irdma_del_ceq_0()
686 irdma_destroy_irq(rf, msix_vec, iwceq); in irdma_del_ceq_0()
689 irdma_destroy_ceq(rf, iwceq); in irdma_del_ceq_0()
690 rf->sc_dev.ceq_valid = false; in irdma_del_ceq_0()
691 rf->ceqs_count = 0; in irdma_del_ceq_0()
701 static void irdma_del_ceqs(struct irdma_pci_f *rf) in irdma_del_ceqs() argument
703 struct irdma_ceq *iwceq = &rf->ceqlist[1]; in irdma_del_ceqs()
707 if (rf->msix_shared) in irdma_del_ceqs()
708 msix_vec = &rf->iw_msixtbl[1]; in irdma_del_ceqs()
710 msix_vec = &rf->iw_msixtbl[2]; in irdma_del_ceqs()
712 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { in irdma_del_ceqs()
713 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id, in irdma_del_ceqs()
715 irdma_destroy_irq(rf, msix_vec, iwceq); in irdma_del_ceqs()
716 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, in irdma_del_ceqs()
718 dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size, in irdma_del_ceqs()
722 rf->ceqs_count = 1; in irdma_del_ceqs()
732 static void irdma_destroy_ccq(struct irdma_pci_f *rf) in irdma_destroy_ccq() argument
734 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_ccq()
735 struct irdma_ccq *ccq = &rf->ccq; in irdma_destroy_ccq()
738 if (!rf->reset) in irdma_destroy_ccq()
816 irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, enum irdma_vers vers) in irdma_create_hmc_objs() argument
818 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_hmc_objs()
825 info.entry_type = rf->sd_type; in irdma_create_hmc_objs()
872 irdma_obj_aligned_mem(struct irdma_pci_f *rf, struct irdma_dma_mem *memptr, in irdma_obj_aligned_mem() argument
878 va = (unsigned long)rf->obj_next.va; in irdma_obj_aligned_mem()
884 memptr->pa = rf->obj_next.pa + extra; in irdma_obj_aligned_mem()
886 if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size)) in irdma_obj_aligned_mem()
889 rf->obj_next.va = (u8 *)memptr->va + size; in irdma_obj_aligned_mem()
890 rf->obj_next.pa = memptr->pa + size; in irdma_obj_aligned_mem()
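
Note on lines 872-890 (irdma_obj_aligned_mem): this is a bump allocator over one pre-allocated DMA buffer. Round the cursor up to the requested alignment, hand out the slice, bounds-check against the end of the pool, then advance va and pa in lockstep. A runnable plain-C sketch under those assumptions (stand-in types, no real DMA):

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    struct dma_mem { uint8_t *va; uint64_t pa; size_t size; };

    /* mask is an alignment mask as in the driver: alignment = mask + 1 */
    static int obj_aligned_mem(struct dma_mem *pool, struct dma_mem *next,
                               struct dma_mem *out, size_t size, uint32_t mask)
    {
        uintptr_t va = (uintptr_t)next->va;
        uintptr_t newva = mask ? ALIGN_UP(va, (uintptr_t)mask + 1) : va;
        size_t extra = newva - va;           /* padding to reach alignment */

        out->va = next->va + extra;
        out->pa = next->pa + extra;          /* va and pa advance together */
        out->size = size;
        if (out->va + size > pool->va + pool->size)
            return -1;                       /* pool exhausted */

        next->va = out->va + size;           /* bump the cursor */
        next->pa = out->pa + size;
        return 0;
    }

    int main(void)
    {
        static uint8_t buf[8192];
        struct dma_mem pool = { buf, 0x1000, sizeof(buf) };
        struct dma_mem next = pool, obj;

        if (!obj_aligned_mem(&pool, &next, &obj, 100, 0xff)) /* 256B aligned */
            printf("carved 100 bytes at offset %zu\n",
                   (size_t)(obj.va - buf));
        return 0;
    }
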
902 static enum irdma_status_code irdma_create_cqp(struct irdma_pci_f *rf) in irdma_create_cqp() argument
907 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_cqp()
909 struct irdma_cqp *cqp = &rf->cqp; in irdma_create_cqp()
935 status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx), in irdma_create_cqp()
949 cqp_init_info.hmc_profile = rf->rsrc_profile; in irdma_create_cqp()
951 cqp_init_info.protocol_used = rf->protocol_used; in irdma_create_cqp()
953 switch (rf->rdma_ver) { in irdma_create_cqp()
990 irdma_destroy_cqp(rf, false); in irdma_create_cqp()
1002 static enum irdma_status_code irdma_create_ccq(struct irdma_pci_f *rf) in irdma_create_ccq() argument
1004 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_ccq()
1007 struct irdma_ccq *ccq = &rf->ccq; in irdma_create_ccq()
1020 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area, in irdma_create_ccq()
1036 info.vsi = &rf->default_vsi; in irdma_create_ccq()
1061 status = irdma_alloc_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1064 status = irdma_add_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1068 irdma_del_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1086 irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, in irdma_cfg_ceq_vector() argument
1091 if (rf->msix_shared && !ceq_id) { in irdma_cfg_ceq_vector()
1092 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); in irdma_cfg_ceq_vector()
1094 "AEQCEQ", rf); in irdma_cfg_ceq_vector()
1105 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n"); in irdma_cfg_ceq_vector()
1110 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true); in irdma_cfg_ceq_vector()
1122 static enum irdma_status_code irdma_cfg_aeq_vector(struct irdma_pci_f *rf) in irdma_cfg_aeq_vector() argument
1124 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl; in irdma_cfg_aeq_vector()
1127 if (!rf->msix_shared) { in irdma_cfg_aeq_vector()
1128 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); in irdma_cfg_aeq_vector()
1130 "irdma", rf); in irdma_cfg_aeq_vector()
1133 ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n"); in irdma_cfg_aeq_vector()
1137 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true); in irdma_cfg_aeq_vector()
1152 static enum irdma_status_code irdma_create_ceq(struct irdma_pci_f *rf, in irdma_create_ceq() argument
1159 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_ceq()
1164 iwceq->rf = rf; in irdma_create_ceq()
1165 ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, in irdma_create_ceq()
1181 scratch = (uintptr_t)&rf->cqp.sc_cqp; in irdma_create_ceq()
1185 status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, in irdma_create_ceq()
1208 static enum irdma_status_code irdma_setup_ceq_0(struct irdma_pci_f *rf) in irdma_setup_ceq_0() argument
1216 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); in irdma_setup_ceq_0()
1217 rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); in irdma_setup_ceq_0()
1218 if (!rf->ceqlist) { in irdma_setup_ceq_0()
1223 iwceq = &rf->ceqlist[0]; in irdma_setup_ceq_0()
1224 status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi); in irdma_setup_ceq_0()
1226 ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n", in irdma_setup_ceq_0()
1232 i = rf->msix_shared ? 0 : 1; in irdma_setup_ceq_0()
1233 msix_vec = &rf->iw_msixtbl[i]; in irdma_setup_ceq_0()
1236 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec); in irdma_setup_ceq_0()
1238 irdma_destroy_ceq(rf, iwceq); in irdma_setup_ceq_0()
1242 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); in irdma_setup_ceq_0()
1243 rf->ceqs_count++; in irdma_setup_ceq_0()
1246 if (status && !rf->ceqs_count) { in irdma_setup_ceq_0()
1247 kfree(rf->ceqlist); in irdma_setup_ceq_0()
1248 rf->ceqlist = NULL; in irdma_setup_ceq_0()
1251 rf->sc_dev.ceq_valid = true; in irdma_setup_ceq_0()
1265 static enum irdma_status_code irdma_setup_ceqs(struct irdma_pci_f *rf, in irdma_setup_ceqs() argument
1275 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); in irdma_setup_ceqs()
1276 i = (rf->msix_shared) ? 1 : 2; in irdma_setup_ceqs()
1278 iwceq = &rf->ceqlist[ceq_id]; in irdma_setup_ceqs()
1279 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi); in irdma_setup_ceqs()
1281 ibdev_dbg(&rf->iwdev->ibdev, in irdma_setup_ceqs()
1286 msix_vec = &rf->iw_msixtbl[i]; in irdma_setup_ceqs()
1289 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); in irdma_setup_ceqs()
1291 irdma_destroy_ceq(rf, iwceq); in irdma_setup_ceqs()
1294 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); in irdma_setup_ceqs()
1295 rf->ceqs_count++; in irdma_setup_ceqs()
1301 irdma_del_ceqs(rf); in irdma_setup_ceqs()
1306 static enum irdma_status_code irdma_create_virt_aeq(struct irdma_pci_f *rf, in irdma_create_virt_aeq() argument
1310 struct irdma_aeq *aeq = &rf->aeq; in irdma_create_virt_aeq()
1314 if (rf->rdma_ver < IRDMA_GEN_2) in irdma_create_virt_aeq()
1324 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); in irdma_create_virt_aeq()
1331 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); in irdma_create_virt_aeq()
1333 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); in irdma_create_virt_aeq()
1348 static enum irdma_status_code irdma_create_aeq(struct irdma_pci_f *rf) in irdma_create_aeq() argument
1352 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_aeq()
1353 struct irdma_aeq *aeq = &rf->aeq; in irdma_create_aeq()
1354 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info; in irdma_create_aeq()
1356 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1; in irdma_create_aeq()
1371 status = irdma_create_virt_aeq(rf, aeq_size); in irdma_create_aeq()
1385 info.msix_idx = rf->iw_msixtbl->idx; in irdma_create_aeq()
1398 irdma_destroy_virt_aeq(rf); in irdma_create_aeq()
1415 static enum irdma_status_code irdma_setup_aeq(struct irdma_pci_f *rf) in irdma_setup_aeq() argument
1417 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_setup_aeq()
1420 status = irdma_create_aeq(rf); in irdma_setup_aeq()
1424 status = irdma_cfg_aeq_vector(rf); in irdma_setup_aeq()
1426 irdma_destroy_aeq(rf); in irdma_setup_aeq()
1430 if (!rf->msix_shared) in irdma_setup_aeq()
1431 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx); in irdma_setup_aeq()
1453 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); in irdma_initialize_ilq()
1483 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); in irdma_initialize_ieq()
1501 struct irdma_pci_f *rf = iwdev->rf; in irdma_reinitialize_ieq() local
1505 iwdev->rf->reset = true; in irdma_reinitialize_ieq()
1506 rf->gen_ops.request_reset(rf); in irdma_reinitialize_ieq()
1518 static enum irdma_status_code irdma_hmc_setup(struct irdma_pci_f *rf) in irdma_hmc_setup() argument
1523 if (rf->rdma_ver == IRDMA_GEN_1) in irdma_hmc_setup()
1524 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2; in irdma_hmc_setup()
1526 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; in irdma_hmc_setup()
1528 rf->sd_type = IRDMA_SD_TYPE_DIRECT; in irdma_hmc_setup()
1529 status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt); in irdma_hmc_setup()
1533 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver); in irdma_hmc_setup()
1542 static void irdma_del_init_mem(struct irdma_pci_f *rf) in irdma_del_init_mem() argument
1544 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_del_init_mem()
1548 kfree(rf->mem_rsrc); in irdma_del_init_mem()
1549 rf->mem_rsrc = NULL; in irdma_del_init_mem()
1550 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, in irdma_del_init_mem()
1551 rf->obj_mem.pa); in irdma_del_init_mem()
1552 rf->obj_mem.va = NULL; in irdma_del_init_mem()
1553 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_del_init_mem()
1554 kfree(rf->allocated_ws_nodes); in irdma_del_init_mem()
1555 rf->allocated_ws_nodes = NULL; in irdma_del_init_mem()
1557 kfree(rf->ceqlist); in irdma_del_init_mem()
1558 rf->ceqlist = NULL; in irdma_del_init_mem()
1559 kfree(rf->iw_msixtbl); in irdma_del_init_mem()
1560 rf->iw_msixtbl = NULL; in irdma_del_init_mem()
1561 kfree(rf->hmc_info_mem); in irdma_del_init_mem()
1562 rf->hmc_info_mem = NULL; in irdma_del_init_mem()
1573 static enum irdma_status_code irdma_initialize_dev(struct irdma_pci_f *rf) in irdma_initialize_dev() argument
1576 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_initialize_dev()
1585 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); in irdma_initialize_dev()
1586 if (!rf->hmc_info_mem) in irdma_initialize_dev()
1589 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; in irdma_initialize_dev()
1590 dev->hmc_info = &rf->hw.hmc; in irdma_initialize_dev()
1592 (rf->pble_rsrc + 1); in irdma_initialize_dev()
1594 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE, in irdma_initialize_dev()
1602 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE, in irdma_initialize_dev()
1610 info.bar0 = rf->hw.hw_addr; in irdma_initialize_dev()
1611 info.hmc_fn_id = PCI_FUNC(rf->pcidev->devfn); in irdma_initialize_dev()
1612 info.hw = &rf->hw; in irdma_initialize_dev()
1613 status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); in irdma_initialize_dev()
1619 kfree(rf->hmc_info_mem); in irdma_initialize_dev()
1620 rf->hmc_info_mem = NULL; in irdma_initialize_dev()
1638 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) in irdma_rt_deinit_hw()
1639 irdma_del_local_mac_entry(iwdev->rf, in irdma_rt_deinit_hw()
1648 iwdev->rf->reset); in irdma_rt_deinit_hw()
1654 iwdev->rf->reset); in irdma_rt_deinit_hw()
1670 static enum irdma_status_code irdma_setup_init_state(struct irdma_pci_f *rf) in irdma_setup_init_state() argument
1674 status = irdma_save_msix_info(rf); in irdma_setup_init_state()
1678 rf->hw.device = &rf->pcidev->dev; in irdma_setup_init_state()
1679 rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE); in irdma_setup_init_state()
1680 rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size, in irdma_setup_init_state()
1681 &rf->obj_mem.pa, GFP_KERNEL); in irdma_setup_init_state()
1682 if (!rf->obj_mem.va) { in irdma_setup_init_state()
1687 rf->obj_next = rf->obj_mem; in irdma_setup_init_state()
1688 status = irdma_initialize_dev(rf); in irdma_setup_init_state()
1695 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, in irdma_setup_init_state()
1696 rf->obj_mem.pa); in irdma_setup_init_state()
1697 rf->obj_mem.va = NULL; in irdma_setup_init_state()
1699 kfree(rf->iw_msixtbl); in irdma_setup_init_state()
1700 rf->iw_msixtbl = NULL; in irdma_setup_init_state()
1712 iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds, in irdma_get_used_rsrc()
1713 iwdev->rf->max_pd, 0); in irdma_get_used_rsrc()
1714 iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps, in irdma_get_used_rsrc()
1715 iwdev->rf->max_qp, 0); in irdma_get_used_rsrc()
1716 iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs, in irdma_get_used_rsrc()
1717 iwdev->rf->max_cq, 0); in irdma_get_used_rsrc()
1718 iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs, in irdma_get_used_rsrc()
1719 iwdev->rf->max_mr, 0); in irdma_get_used_rsrc()
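
Note on lines 1712-1719 (irdma_get_used_rsrc): each "used" counter is simply the index of the first clear bit in the corresponding allocation bitmap, which is exact only while IDs are handed out densely from 0. A tiny runnable illustration with a naive stand-in for find_next_zero_bit():

    #include <stdio.h>

    static unsigned int first_zero_bit(const unsigned long *map, unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++)
            if (!(map[i / (8 * sizeof(long))] &
                  (1UL << (i % (8 * sizeof(long))))))
                return i;
        return n;
    }

    int main(void)
    {
        unsigned long allocated_qps[1] = { 0x0F };  /* QPs 0-3 in use */

        printf("used_qps = %u\n", first_zero_bit(allocated_qps, 64));
        return 0;                                   /* prints used_qps = 4 */
    }
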
1722 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) in irdma_ctrl_deinit_hw() argument
1724 enum init_completion_state state = rf->init_state; in irdma_ctrl_deinit_hw()
1726 rf->init_state = INVALID_STATE; in irdma_ctrl_deinit_hw()
1727 if (rf->rsrc_created) { in irdma_ctrl_deinit_hw()
1728 irdma_destroy_aeq(rf); in irdma_ctrl_deinit_hw()
1729 irdma_destroy_pble_prm(rf->pble_rsrc); in irdma_ctrl_deinit_hw()
1730 irdma_del_ceqs(rf); in irdma_ctrl_deinit_hw()
1731 rf->rsrc_created = false; in irdma_ctrl_deinit_hw()
1735 irdma_del_ceq_0(rf); in irdma_ctrl_deinit_hw()
1738 irdma_destroy_ccq(rf); in irdma_ctrl_deinit_hw()
1742 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true, in irdma_ctrl_deinit_hw()
1743 rf->reset, rf->rdma_ver); in irdma_ctrl_deinit_hw()
1746 irdma_destroy_cqp(rf, true); in irdma_ctrl_deinit_hw()
1749 irdma_del_init_mem(rf); in irdma_ctrl_deinit_hw()
1753 ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state); in irdma_ctrl_deinit_hw()
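
Note on lines 1722-1753 (irdma_ctrl_deinit_hw): teardown is driven by how far init got, with switch cases that fall through so every completed step is undone in reverse order. A runnable sketch of that ladder; the grouping of cases is inferred from the deinit calls above and may differ in detail from the upstream function:

    #include <stdio.h>

    enum init_state { INVALID_STATE, INITIAL_STATE, CQP_CREATED,
                      HMC_OBJS_CREATED, HW_RSRC_INITIALIZED,
                      CCQ_CREATED, CEQ0_CREATED };

    static void ctrl_deinit(enum init_state state)
    {
        switch (state) {
        case CEQ0_CREATED:
            puts("del ceq 0");
            /* fallthrough */
        case CCQ_CREATED:
            puts("destroy ccq");
            /* fallthrough */
        case HW_RSRC_INITIALIZED:
        case HMC_OBJS_CREATED:
            puts("del hmc objects");
            /* fallthrough */
        case CQP_CREATED:
            puts("destroy cqp");
            /* fallthrough */
        case INITIAL_STATE:
            puts("del init mem");
            break;
        default:
            puts("bad init_state");
            break;
        }
    }

    int main(void) { ctrl_deinit(CCQ_CREATED); return 0; }
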
1769 struct irdma_pci_f *rf = iwdev->rf; in irdma_rt_init_hw() local
1770 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_rt_init_hw()
1779 vsi_info.register_qset = rf->gen_ops.register_qset; in irdma_rt_init_hw()
1780 vsi_info.unregister_qset = rf->gen_ops.unregister_qset; in irdma_rt_init_hw()
1784 status = irdma_setup_cm_core(iwdev, rf->rdma_ver); in irdma_rt_init_hw()
1812 if (!rf->rsrc_created) { in irdma_rt_init_hw()
1813 status = irdma_setup_ceqs(rf, &iwdev->vsi); in irdma_rt_init_hw()
1819 status = irdma_hmc_init_pble(&rf->sc_dev, in irdma_rt_init_hw()
1820 rf->pble_rsrc); in irdma_rt_init_hw()
1822 irdma_del_ceqs(rf); in irdma_rt_init_hw()
1828 status = irdma_setup_aeq(rf); in irdma_rt_init_hw()
1830 irdma_destroy_pble_prm(rf->pble_rsrc); in irdma_rt_init_hw()
1831 irdma_del_ceqs(rf); in irdma_rt_init_hw()
1835 rf->rsrc_created = true; in irdma_rt_init_hw()
1842 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) in irdma_rt_init_hw()
1860 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n", in irdma_rt_init_hw()
1873 enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf) in irdma_ctrl_init_hw() argument
1875 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_ctrl_init_hw()
1878 status = irdma_setup_init_state(rf); in irdma_ctrl_init_hw()
1881 rf->init_state = INITIAL_STATE; in irdma_ctrl_init_hw()
1883 status = irdma_create_cqp(rf); in irdma_ctrl_init_hw()
1886 rf->init_state = CQP_CREATED; in irdma_ctrl_init_hw()
1888 status = irdma_hmc_setup(rf); in irdma_ctrl_init_hw()
1891 rf->init_state = HMC_OBJS_CREATED; in irdma_ctrl_init_hw()
1893 status = irdma_initialize_hw_rsrc(rf); in irdma_ctrl_init_hw()
1896 rf->init_state = HW_RSRC_INITIALIZED; in irdma_ctrl_init_hw()
1898 status = irdma_create_ccq(rf); in irdma_ctrl_init_hw()
1901 rf->init_state = CCQ_CREATED; in irdma_ctrl_init_hw()
1904 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_ctrl_init_hw()
1910 status = irdma_setup_ceq_0(rf); in irdma_ctrl_init_hw()
1913 rf->init_state = CEQ0_CREATED; in irdma_ctrl_init_hw()
1915 rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq", in irdma_ctrl_init_hw()
1917 if (!rf->cqp_cmpl_wq) { in irdma_ctrl_init_hw()
1921 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); in irdma_ctrl_init_hw()
1926 dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n", in irdma_ctrl_init_hw()
1927 rf->init_state, status); in irdma_ctrl_init_hw()
1928 irdma_ctrl_deinit_hw(rf); in irdma_ctrl_init_hw()
1936 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf) in irdma_set_hw_rsrc() argument
1938 rf->allocated_qps = (void *)(rf->mem_rsrc + in irdma_set_hw_rsrc()
1939 (sizeof(struct irdma_arp_entry) * rf->arp_table_size)); in irdma_set_hw_rsrc()
1940 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; in irdma_set_hw_rsrc()
1941 rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; in irdma_set_hw_rsrc()
1942 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; in irdma_set_hw_rsrc()
1943 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; in irdma_set_hw_rsrc()
1944 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; in irdma_set_hw_rsrc()
1945 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; in irdma_set_hw_rsrc()
1946 rf->qp_table = (struct irdma_qp **) in irdma_set_hw_rsrc()
1947 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]); in irdma_set_hw_rsrc()
1949 spin_lock_init(&rf->rsrc_lock); in irdma_set_hw_rsrc()
1950 spin_lock_init(&rf->arp_lock); in irdma_set_hw_rsrc()
1951 spin_lock_init(&rf->qptable_lock); in irdma_set_hw_rsrc()
1952 spin_lock_init(&rf->qh_list_lock); in irdma_set_hw_rsrc()
1959 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf) in irdma_calc_mem_rsrc_size() argument
1963 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size; in irdma_calc_mem_rsrc_size()
1964 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); in irdma_calc_mem_rsrc_size()
1965 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); in irdma_calc_mem_rsrc_size()
1966 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); in irdma_calc_mem_rsrc_size()
1967 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); in irdma_calc_mem_rsrc_size()
1968 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); in irdma_calc_mem_rsrc_size()
1969 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); in irdma_calc_mem_rsrc_size()
1970 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); in irdma_calc_mem_rsrc_size()
1971 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp; in irdma_calc_mem_rsrc_size()
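
Note on lines 1936-1952 and 1959-1971: irdma_calc_mem_rsrc_size() and irdma_set_hw_rsrc() must stay in lockstep; one accumulates the size of a single allocation, the other carves it into the ARP table, the allocation bitmaps, and the QP pointer table in the same order. A compact runnable sketch of the carve with simplified stand-in sizes:

    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_TO_LONGS(n) (((n) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

    struct arp_entry { unsigned char mac[6]; };     /* stand-in */

    int main(void)
    {
        unsigned int max_qp = 1024, max_cq = 2048, arp_sz = 512;

        size_t size = sizeof(struct arp_entry) * arp_sz;   /* ARP table */
        size += sizeof(long) * BITS_TO_LONGS(max_qp);      /* QP bitmap */
        size += sizeof(long) * BITS_TO_LONGS(max_cq);      /* CQ bitmap */
        size += sizeof(void *) * max_qp;                   /* qp_table  */

        unsigned char *mem = calloc(1, size);
        if (!mem)
            return 1;

        /* carve in exactly the order the size was accumulated */
        struct arp_entry *arp_table = (struct arp_entry *)mem;
        unsigned long *allocated_qps =
            (unsigned long *)(mem + sizeof(struct arp_entry) * arp_sz);
        unsigned long *allocated_cqs = allocated_qps + BITS_TO_LONGS(max_qp);
        void **qp_table = (void **)(allocated_cqs + BITS_TO_LONGS(max_cq));

        printf("%zu bytes total; qp_table at offset %zu\n",
               size, (size_t)((unsigned char *)qp_table - mem));
        free(mem);
        (void)arp_table;
        return 0;
    }
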
1980 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) in irdma_initialize_hw_rsrc() argument
1986 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_initialize_hw_rsrc()
1987 rf->allocated_ws_nodes = in irdma_initialize_hw_rsrc()
1990 if (!rf->allocated_ws_nodes) in irdma_initialize_hw_rsrc()
1993 set_bit(0, rf->allocated_ws_nodes); in irdma_initialize_hw_rsrc()
1994 rf->max_ws_node_id = IRDMA_MAX_WS_NODES; in irdma_initialize_hw_rsrc()
1996 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; in irdma_initialize_hw_rsrc()
1997 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt; in irdma_initialize_hw_rsrc()
1998 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt; in irdma_initialize_hw_rsrc()
1999 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; in irdma_initialize_hw_rsrc()
2000 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; in irdma_initialize_hw_rsrc()
2001 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt; in irdma_initialize_hw_rsrc()
2002 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt; in irdma_initialize_hw_rsrc()
2003 rf->max_mcg = rf->max_qp; in irdma_initialize_hw_rsrc()
2005 rsrc_size = irdma_calc_mem_rsrc_size(rf); in irdma_initialize_hw_rsrc()
2006 rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL); in irdma_initialize_hw_rsrc()
2007 if (!rf->mem_rsrc) { in irdma_initialize_hw_rsrc()
2012 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc; in irdma_initialize_hw_rsrc()
2014 irdma_set_hw_rsrc(rf); in irdma_initialize_hw_rsrc()
2016 set_bit(0, rf->allocated_mrs); in irdma_initialize_hw_rsrc()
2017 set_bit(0, rf->allocated_qps); in irdma_initialize_hw_rsrc()
2018 set_bit(0, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2019 set_bit(0, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2020 set_bit(0, rf->allocated_arps); in irdma_initialize_hw_rsrc()
2021 set_bit(0, rf->allocated_ahs); in irdma_initialize_hw_rsrc()
2022 set_bit(0, rf->allocated_mcgs); in irdma_initialize_hw_rsrc()
2023 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */ in irdma_initialize_hw_rsrc()
2024 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */ in irdma_initialize_hw_rsrc()
2025 set_bit(1, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2026 set_bit(1, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2027 set_bit(2, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2028 set_bit(2, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2030 INIT_LIST_HEAD(&rf->mc_qht_list.list); in irdma_initialize_hw_rsrc()
2032 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); in irdma_initialize_hw_rsrc()
2033 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); in irdma_initialize_hw_rsrc()
2038 kfree(rf->allocated_ws_nodes); in irdma_initialize_hw_rsrc()
2039 rf->allocated_ws_nodes = NULL; in irdma_initialize_hw_rsrc()
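
Note on lines 2032-2033: the MR STag mask reserves mrdrvbits = 24 - max(get_count_order(max_mr), 14) top bits of the 32-bit STag for the driver and masks them out of consumer-visible keys. A runnable check of the arithmetic (the max_mr value is illustrative):

    #include <stdio.h>

    /* get_count_order(n): log2 rounded up, as in the kernel helper */
    static int get_count_order(unsigned int n)
    {
        int order = 0;

        while ((1U << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned int max_mr = 1 << 21;            /* 2M MRs, illustrative */
        int bits = get_count_order(max_mr);
        int mrdrvbits = 24 - (bits > 14 ? bits : 14);
        unsigned int stagmask =
            ~(((1U << mrdrvbits) - 1) << (32 - mrdrvbits));

        printf("mrdrvbits=%d mr_stagmask=0x%08x\n", mrdrvbits, stagmask);
        /* -> mrdrvbits=3 mr_stagmask=0x1fffffff: the top 3 STag bits
         *    stay reserved for the driver's own index space */
        return 0;
    }
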
2049 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) in irdma_cqp_ce_handler() argument
2052 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_cqp_ce_handler()
2060 spin_lock_irqsave(&rf->cqp.compl_lock, flags); in irdma_cqp_ce_handler()
2062 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags); in irdma_cqp_ce_handler()
2071 ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", in irdma_cqp_ce_handler()
2082 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_ce_handler()
2086 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_ce_handler()
2105 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f, in cqp_compl_worker() local
2107 struct irdma_sc_cq *cq = &rf->ccq.sc_cq; in cqp_compl_worker()
2109 irdma_cqp_ce_handler(rf, cq); in cqp_compl_worker()
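
Note on lines 1915-1921 and 2105-2109: CQP completions are deferred to an ordered workqueue, and the worker recovers its device with container_of(). Minimal kernel-style sketch with a stand-in type (demo_* names are hypothetical; workqueue flags elided):

    #include <linux/workqueue.h>

    struct demo_rf {                          /* stand-in for irdma_pci_f */
        struct workqueue_struct *cqp_cmpl_wq;
        struct work_struct cqp_cmpl_work;
    };

    static void demo_cqp_compl_worker(struct work_struct *work)
    {
        struct demo_rf *rf = container_of(work, struct demo_rf,
                                          cqp_cmpl_work);
        /* drain CQP completions here (irdma_cqp_ce_handler() upstream) */
        (void)rf;
    }

    /* setup, as in irdma_ctrl_init_hw():
     *     rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq", 0);
     *     INIT_WORK(&rf->cqp_cmpl_work, demo_cqp_compl_worker);
     * after which the CEQ path only needs:
     *     queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
     */
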
2170 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx) in irdma_del_local_mac_entry() argument
2172 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_del_local_mac_entry()
2188 irdma_handle_cqp_op(rf, cqp_request); in irdma_del_local_mac_entry()
2199 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx) in irdma_add_local_mac_entry() argument
2202 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_add_local_mac_entry()
2221 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_add_local_mac_entry()
2236 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) in irdma_alloc_local_mac_entry() argument
2238 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_alloc_local_mac_entry()
2252 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_alloc_local_mac_entry()
2276 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port); in irdma_cqp_manage_apbvt_cmd()
2287 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_cqp_manage_apbvt_cmd()
2292 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_cqp_manage_apbvt_cmd()
2293 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_cqp_manage_apbvt_cmd()
2370 void irdma_manage_arp_cache(struct irdma_pci_f *rf, in irdma_manage_arp_cache() argument
2379 arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action); in irdma_manage_arp_cache()
2383 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); in irdma_manage_arp_cache()
2397 cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp; in irdma_manage_arp_cache()
2402 cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp; in irdma_manage_arp_cache()
2407 irdma_handle_cqp_op(rf, cqp_request); in irdma_manage_arp_cache()
2408 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_manage_arp_cache()
2440 struct irdma_cqp *iwcqp = &iwdev->rf->cqp; in irdma_manage_qhash()
2504 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_manage_qhash()
2508 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_manage_qhash()
2561 enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf, in irdma_hw_flush_wqes() argument
2572 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_hw_flush_wqes()
2585 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_hw_flush_wqes()
2589 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_flush_wqes()
2620 new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_hw_flush_wqes()
2633 status = irdma_handle_cqp_op(rf, new_req); in irdma_hw_flush_wqes()
2642 irdma_put_cqp_request(&rf->cqp, new_req); in irdma_hw_flush_wqes()
2653 ibdev_dbg(&rf->iwdev->ibdev, in irdma_hw_flush_wqes()
2655 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state, in irdma_hw_flush_wqes()
2660 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_flush_wqes()
2672 void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, in irdma_gen_ae() argument
2679 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_gen_ae()
2691 irdma_handle_cqp_op(rf, cqp_request); in irdma_gen_ae()
2692 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_gen_ae()
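
Note on lines 2170-2692: every CQP command above follows one lifecycle: allocate-and-get a request, fill cqp_info with the opcode and the in.u.<op> parameters, post it with irdma_handle_cqp_op() (which blocks when the request was created waiting), then drop the reference. Abridged shape, mirroring the calls visible in this listing:

    cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
    if (!cqp_request)
        return IRDMA_ERR_NO_MEMORY;

    cqp_info = &cqp_request->info;
    /* ... set cqp_info->cqp_cmd, cqp_info->post_sq, in.u.<op> fields ... */

    status = irdma_handle_cqp_op(rf, cqp_request);  /* post; wait if asked */
    irdma_put_cqp_request(&rf->cqp, cqp_request);   /* drop our reference */
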
2698 struct irdma_pci_f *rf = iwqp->iwdev->rf; in irdma_flush_wqes() local
2729 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info, in irdma_flush_wqes()