| /linux/drivers/infiniband/hw/cxgb4/ |
| device.c |
    796: if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {    in c4iw_rdev_open()
    798: pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,    in c4iw_rdev_open()
    802: if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||    in c4iw_rdev_open()
    803: rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {    in c4iw_rdev_open()
    805: pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,    in c4iw_rdev_open()
    806: rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,    in c4iw_rdev_open()
    824: pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,    in c4iw_rdev_open()
    827: rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,    in c4iw_rdev_open()
    836: rdev->lldi.db_reg, rdev->lldi.gts_reg,    in c4iw_rdev_open()
    1091: ctx->lldi.nchan, ctx->lldi.nrxq,    in c4iw_uld_add()
    [all …]
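These c4iw_rdev_open()/c4iw_uld_add() hits all read fields that cxgb4 publishes to its upper-layer drivers through struct cxgb4_lld_info. As orientation, here is a minimal sketch of how a ULD obtains that structure via cxgb4_register_uld() and an .add callback; the my_uld_* names and the private context struct are hypothetical, and error handling is trimmed:

    #include <linux/pci.h>
    #include <linux/slab.h>
    #include "cxgb4_uld.h"    /* struct cxgb4_lld_info, struct cxgb4_uld_info */

    struct my_uld_ctx {
        struct cxgb4_lld_info lldi;    /* private copy, as iw_cxgb4/cxgbit keep */
    };

    /* .add callback: cxgb4 calls this once per adapter with its lld_info. */
    static void *my_uld_add(const struct cxgb4_lld_info *infop)
    {
        struct my_uld_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
            return ERR_PTR(-ENOMEM);

        ctx->lldi = *infop;    /* queue counts, port netdevs, vr windows, ... */
        pr_info("%s: %u ports, %u channels, %u rxqs\n",
                pci_name(ctx->lldi.pdev), ctx->lldi.nports,
                ctx->lldi.nchan, ctx->lldi.nrxq);
        return ctx;
    }

    static struct cxgb4_uld_info my_uld_info = {
        .name = "my_uld",
        .add  = my_uld_add,
        /* .rx_handler, .state_change, ... omitted in this sketch */
    };

    /* e.g. from module init: cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info); */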
|
| provider.c |
    268: dev->rdev.lldi.ports[0]->dev_addr);    in c4iw_query_device()
    270: props->fw_ver = dev->rdev.lldi.fw_vers;    in c4iw_query_device()
    279: props->max_qp = dev->rdev.lldi.vr->qp.size / 2;    in c4iw_query_device()
    280: props->max_srq = dev->rdev.lldi.vr->srq.size;    in c4iw_query_device()
    291: props->max_cq = dev->rdev.lldi.vr->qp.size;    in c4iw_query_device()
    357: c4iw_dev->rdev.lldi.pdev->device);    in board_id_show()
    517: for (i = 0; i < rdev->lldi.nports; i++) {    in set_netdevs()
    534: dev->rdev.lldi.ports[0]->dev_addr);    in c4iw_register_device()
    539: dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;    in c4iw_register_device()
    552: &dev->rdev.lldi.pdev->dev);    in c4iw_register_device()
    [all …]
|
| cm.c |
    401: ep->com.dev->rdev.lldi.ports[0],    in _c4iw_free_ep()
    2113: step = cdev->rdev.lldi.ntxq /    in import_ep()
    2114: cdev->rdev.lldi.nchan;    in import_ep()
    2116: step = cdev->rdev.lldi.nrxq /    in import_ep()
    2117: cdev->rdev.lldi.nchan;    in import_ep()
    2131: step = cdev->rdev.lldi.ntxq /    in import_ep()
    2132: cdev->rdev.lldi.nchan;    in import_ep()
    2135: step = cdev->rdev.lldi.nrxq /    in import_ep()
    2136: cdev->rdev.lldi.nchan;    in import_ep()
    4178: step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;    in rx_pkt()
    [all …]
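The repeated ntxq/nchan and nrxq/nchan divisions in import_ep() and rx_pkt() implement per-port queue selection: cxgb4 hands the ULD its tx queues and rx queue ids in equal blocks, one block per channel/port. A hedged sketch of that arithmetic; pick_conn_queues() is an illustrative name, while cxgb4_port_idx() is the real cxgb4 helper these drivers use:

    /*
     * lldi.ntxq tx queues and lldi.nrxq rx queue ids are laid out as
     * lldi.nchan equal blocks, one per port.
     */
    static void pick_conn_queues(const struct cxgb4_lld_info *lldi,
                                 struct net_device *egress_port,
                                 u16 *txq_idx, u16 *rss_qid)
    {
        u32 port = cxgb4_port_idx(egress_port);    /* 0 .. lldi->nchan - 1 */
        u32 tx_step = lldi->ntxq / lldi->nchan;
        u32 rx_step = lldi->nrxq / lldi->nchan;

        *txq_idx = port * tx_step;                    /* first txq of this port's block */
        *rss_qid = lldi->rxq_ids[port * rx_step];     /* hw rss qid for rx steering */
    }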
|
| resource.c |
    43: rdev->lldi.vr->qp.start,    in c4iw_init_qid_table()
    44: rdev->lldi.vr->qp.size,    in c4iw_init_qid_table()
    45: rdev->lldi.vr->qp.size, 0))    in c4iw_init_qid_table()
    48: for (i = rdev->lldi.vr->qp.start;    in c4iw_init_qid_table()
    49: i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)    in c4iw_init_qid_table()
    308: pbl_start = rdev->lldi.vr->pbl.start;    in c4iw_pblpool_create()
    309: pbl_chunk = rdev->lldi.vr->pbl.size;    in c4iw_pblpool_create()
    350: pci_name(rdev->lldi.pdev));    in c4iw_rqtpool_alloc()
    395: if (rdev->lldi.vr->srq.size)    in c4iw_rqtpool_create()
    489: start = rdev->lldi.vr->ocq.start;    in c4iw_ocqp_pool_create()
    [all …]
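c4iw_init_qid_table(), c4iw_pblpool_create() and the other pool constructors carve allocators out of the adapter-memory windows described by lldi.vr (start/size pairs). A rough sketch of that pattern over one such window using the kernel genalloc API, which is what the cxgb4 PBL/RQT pools are built on; the chunk order and helper name here are illustrative:

    #include <linux/genalloc.h>

    #define MIN_CHUNK_SHIFT 5    /* illustrative minimum allocation order */

    /* Build an allocator over one cxgb4_range from lldi.vr (e.g. vr->pbl). */
    static struct gen_pool *vr_pool_create(u32 start, u32 size)
    {
        struct gen_pool *pool = gen_pool_create(MIN_CHUNK_SHIFT, -1);

        if (!pool)
            return NULL;

        /* the "addresses" are adapter-memory offsets, not host pointers */
        if (gen_pool_add(pool, start, size, -1)) {
            gen_pool_destroy(pool);
            return NULL;
        }
        return pool;
    }

    /* usage:
     *   pbl_pool = vr_pool_create(rdev->lldi.vr->pbl.start, rdev->lldi.vr->pbl.size);
     *   addr     = gen_pool_alloc(pbl_pool, nbytes);
     */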
|
| mem.c |
    56: return (is_t4(dev->rdev.lldi.adapter_type) ||    in mr_exceeds_hw_limits()
    57: is_t5(dev->rdev.lldi.adapter_type)) &&    in mr_exceeds_hw_limits()
    92: T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));    in _c4iw_write_mem_dma_aligned()
    120: if (is_t4(rdev->lldi.adapter_type))    in _c4iw_write_mem_inline()
    201: if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))    in _c4iw_write_mem_dma()
    240: if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {    in write_adapter_mem()
    255: pci_name(rdev->lldi.pdev));    in write_adapter_mem()
    333: (rdev->lldi.vr->stag.start >> 5),    in write_tpt_entry()
    352: pbl_addr, rdev->lldi.vr->pbl.start,    in write_pbl()
    668: dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,    in c4iw_alloc_mr()
    [all …]
|
| iw_cxgb4.h |
    180: struct cxgb4_lld_info lldi;    member
    205: return (int)(rdev->lldi.vr->stag.size >> 5);    in c4iw_num_stags()
    274: func, pci_name(rdev->lldi.pdev), hwtid, qpid);    in c4iw_wait_for_reply()
    281: pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);    in c4iw_wait_for_reply()
    333: struct cxgb4_lld_info lldi;    member
    577: if (addr >= pci_resource_start(rdev->lldi.pdev, 0) &&    in insert_flag_to_mmap()
    578: (addr < (pci_resource_start(rdev->lldi.pdev, 0) +    in insert_flag_to_mmap()
    579: pci_resource_len(rdev->lldi.pdev, 0))))    in insert_flag_to_mmap()
    582: (addr < (pci_resource_start(rdev->lldi.pdev, 2) +    in insert_flag_to_mmap()
    583: pci_resource_len(rdev->lldi.pdev, 2)))) {    in insert_flag_to_mmap()
    [all …]
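Two small patterns above are worth spelling out: c4iw_num_stags() derives the STAG count from the stag window because each hardware TPT entry is 32 bytes (hence the >> 5), and insert_flag_to_mmap() classifies an address by testing it against the adapter's PCI BARs. Hedged sketches of both; the helper names here are illustrative, not the driver's own:

    /* Number of STAGs: the vr->stag window holds 32-byte TPT entries. */
    static inline int num_stags(const struct cxgb4_lld_info *lldi)
    {
        return (int)(lldi->vr->stag.size >> 5);    /* size / 32 */
    }

    /* True if addr falls inside the given PCI BAR of the adapter. */
    static inline bool addr_in_bar(struct pci_dev *pdev, int bar, u64 addr)
    {
        return addr >= pci_resource_start(pdev, bar) &&
               addr < pci_resource_start(pdev, bar) + pci_resource_len(pdev, bar);
    }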
|
| qp.c |
    74: dev_warn(&dev->rdev.lldi.pdev->dev,    in alloc_ird()
    122: rdev->lldi.vr->ocq.start;    in alloc_oc_sq()
    124: rdev->lldi.vr->ocq.start);    in alloc_oc_sq()
    162: dma_free_coherent(&rdev->lldi.pdev->dev,    in destroy_qp()
    193: if (is_t4(rdev->lldi.adapter_type))    in c4iw_bar2_addrs()
    280: wq->db = rdev->lldi.db_reg;    in create_qp()
    1237: rhp->rdev.lldi.ports[0]);    in c4iw_post_send()
    1312: qhp->rhp->rdev.lldi.ports[0]);    in c4iw_post_receive()
    1810: rhp->rdev.lldi.vr->rq.start);    in rdma_init()
    2631: rdev->lldi.vr->rq.start);    in alloc_srq_queue()
    [all …]
|
| ev.c |
    43: ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,    in print_tpte()
    46: dev_err(&dev->rdev.lldi.pdev->dev,    in print_tpte()
    66: dev_err(&dev->rdev.lldi.pdev->dev,    in dump_err_cqe()
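print_tpte() uses the cxgb4 export cxgb4_read_tpte() to pull the TPT entry behind a suspect STAG out of adapter memory when an error CQE is dumped. A minimal hedged sketch of that call; the buffer sizing, hex-dump formatting and error strings are illustrative:

    /* Dump the hardware TPT entry behind a STAG referenced by an error CQE. */
    static void dump_tpte(struct c4iw_dev *dev, u32 stag)
    {
        __be32 tpte[8];    /* one raw 32-byte TPT entry */
        int ret;

        ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, tpte);
        if (ret) {
            dev_err(&dev->rdev.lldi.pdev->dev,
                    "%s cxgb4_read_tpte err %d\n", __func__, ret);
            return;
        }
        print_hex_dump(KERN_ERR, "tpte: ", DUMP_PREFIX_OFFSET, 16, 4,
                       tpte, sizeof(tpte), false);
    }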
|
| cq.c |
    64: dma_free_coherent(&(rdev->lldi.pdev->dev),    in destroy_cq()
    98: cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,    in create_cq()
    142: rdev->lldi.ciq_ids[cq->vector]));    in create_cq()
    160: cq->gts = rdev->lldi.gts_reg;    in create_cq()
    168: pci_name(rdev->lldi.pdev), cq->cqid);    in create_cq()
    174: dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,    in create_cq()
    1021: if (vector >= rhp->rdev.lldi.nciq)    in c4iw_create_cq()
|
| /linux/drivers/scsi/cxgbi/cxgb4i/ |
| cxgb4i.c |
    1863: step = lldi->ntxq / lldi->nchan;    in init_act_open()
    1865: step = lldi->nrxq / lldi->nchan;    in init_act_open()
    2173: lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);    in cxgb4i_ddp_init()
    2176: lldi->vr->iscsi.size, lldi->iscsi_llimit,    in cxgb4i_ddp_init()
    2222: cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);    in t4_uld_add()
    2228: cdev, lldi->adapter_type, lldi->nports,    in t4_uld_add()
    2229: lldi->ports[0]->name, lldi->nchan, lldi->ntxq,    in t4_uld_add()
    2230: lldi->nrxq, lldi->wr_cred);    in t4_uld_add()
    2236: memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));    in t4_uld_add()
    2238: cdev->pdev = lldi->pdev;    in t4_uld_add()
    [all …]
|
| /linux/drivers/target/iscsi/cxgbit/ |
| cxgbit_ddp.c |
    298: struct cxgb4_lld_info *lldi = &cdev->lldi;    in cxgbit_ddp_init() local
    299: struct net_device *ndev = cdev->lldi.ports[0];    in cxgbit_ddp_init()
    303: if (!lldi->vr->iscsi.size) {    in cxgbit_ddp_init()
    310: tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))    in cxgbit_ddp_init()
    312: cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);    in cxgbit_ddp_init()
    314: ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],    in cxgbit_ddp_init()
    315: cdev->lldi.pdev, &cdev->lldi, &tformat,    in cxgbit_ddp_init()
    316: lldi->vr->iscsi.size, lldi->iscsi_llimit,    in cxgbit_ddp_init()
    317: lldi->vr->iscsi.start, 2,    in cxgbit_ddp_init()
    318: lldi->vr->ppod_edram.start,    in cxgbit_ddp_init()
    [all …]
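cxgbit_ddp_init() (like cxgb4i_ddp_init() above) feeds the adapter's iSCSI DDP parameters from lldi into the cxgbi page-pod manager. The line worth unpacking is 310: lldi->iscsi_pgsz_order packs one supported page-size order per byte. A hedged sketch of that decode; DDP_PGIDX_MAX and struct cxgbi_tag_format come from the libcxgb/cxgbi headers, and the 0xf mask is an assumption since the snippet is cut off:

    /* iscsi_pgsz_order packs one page-size order per byte, lowest byte first. */
    static void fill_pgsz_orders(const struct cxgb4_lld_info *lldi,
                                 struct cxgbi_tag_format *tformat)
    {
        int i;

        for (i = 0; i < DDP_PGIDX_MAX; i++)
            tformat->pgsz_order[i] =
                (lldi->iscsi_pgsz_order >> (i << 3)) & 0xf;
    }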
|
| cxgbit_cm.c |
    234: struct cxgb4_lld_info *lldi = &cdev->lldi;    in cxgbit_find_device() local
    941: step = cdev->lldi.ntxq /    in cxgbit_offload_init()
    942: cdev->lldi.nchan;    in cxgbit_offload_init()
    944: step = cdev->lldi.nrxq /    in cxgbit_offload_init()
    945: cdev->lldi.nchan;    in cxgbit_offload_init()
    978: cdev->lldi.nports;    in cxgbit_offload_init()
    983: cdev->lldi.nports;    in cxgbit_offload_init()
    1135: struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;    in cxgbit_pass_accept_rpl() local
    1902: struct cxgb4_lld_info *lldi = &cdev->lldi;    in cxgbit_set_tcb_rpl() local
    1923: struct cxgb4_lld_info *lldi = &cdev->lldi;    in cxgbit_rx_data() local
    [all …]
|
| cxgbit_main.c |
    33: struct cxgb4_lld_info *lldi = &cdev->lldi;    in cxgbit_set_mdsl() local
    38: if (is_t5(lldi->adapter_type)) {    in cxgbit_set_mdsl()
    42: mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN;    in cxgbit_set_mdsl()
    57: if (is_t4(lldi->adapter_type))    in cxgbit_uld_add()
    67: cdev->lldi = *lldi;    in cxgbit_uld_add()
    78: pci_name(lldi->pdev));    in cxgbit_uld_add()
    80: if (lldi->fw_vers >= 0x10d2b00)    in cxgbit_uld_add()
    91: pci_name(lldi->pdev));    in cxgbit_uld_add()
    166: pci_name(cdev->lldi.pdev), state);    in cxgbit_uld_state_change()
    439: struct cxgb4_lld_info *lldi = &cdev->lldi;    in cxgbit_uld_lro_rx_handler() local
    [all …]
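cxgbit_set_mdsl() derives the maximum iSCSI data-segment length from lldi->iscsi_iolen, the largest offloaded PDU the adapter accepts, minus the fixed non-payload overhead, with a tighter bound applied on T5 parts. A hedged sketch of that calculation; T5_PDU_CAP is a hypothetical placeholder for the driver's own T5 limit:

    /* Max data-segment length: adapter I/O size minus per-PDU overhead. */
    static u32 set_mdsl_sketch(const struct cxgb4_lld_info *lldi)
    {
        u32 mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN;

        if (is_t5(lldi->adapter_type))
            mdsl = min_t(u32, mdsl, T5_PDU_CAP);    /* hypothetical T5 cap */

        return mdsl;
    }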
|
| cxgbit.h |
    133: struct cxgb4_lld_info lldi;    member
    349: return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);    in cdev2ppm()
|
| cxgbit_target.c |
    163: const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;    in cxgbit_tx_data_wr() local
    169: u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :    in cxgbit_tx_data_wr()
    719: struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;    in cxgbit_set_params()
    750: if (is_t5(cdev->lldi.adapter_type))    in cxgbit_set_params()
    1580: struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;    in cxgbit_rx_skb() local
    1584: if (is_t5(lldi->adapter_type))    in cxgbit_rx_skb()
|
| /linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/ |
| chtls_main.c |
    170: kfree(cdev->lldi);    in chtls_free_uld()
    245: lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);    in chtls_uld_add()
    246: if (!lldi)    in chtls_uld_add()
    252: *lldi = *info;    in chtls_uld_add()
    253: cdev->lldi = lldi;    in chtls_uld_add()
    254: cdev->pdev = lldi->pdev;    in chtls_uld_add()
    255: cdev->tids = lldi->tids;    in chtls_uld_add()
    257: cdev->mtus = lldi->mtus;    in chtls_uld_add()
    258: cdev->tids = lldi->tids;    in chtls_uld_add()
    280: if (lldi->vr->key.size)    in chtls_uld_add()
    [all …]
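Unlike iw_cxgb4 and cxgbit, which embed the lld_info by value, chtls_uld_add() keeps a heap copy and then caches the fields it touches most (pdev, tids, mtus) on its own device structure. A hedged sketch of that shape, with the rest of the setup (ports, key map, lists) omitted:

    static void *chtls_style_uld_add(const struct cxgb4_lld_info *info)
    {
        struct cxgb4_lld_info *lldi;
        struct chtls_dev *cdev;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
            return NULL;

        lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
        if (!lldi) {
            kfree(cdev);
            return NULL;
        }

        *lldi = *info;              /* private, long-lived copy */
        cdev->lldi = lldi;
        cdev->pdev = lldi->pdev;
        cdev->tids = lldi->tids;    /* TID table shared with cxgb4 */
        cdev->mtus = lldi->mtus;    /* adapter MTU table */
        return cdev;
    }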
|
| chtls_cm.c |
    209: cxgb4_ofld_send(cdev->lldi->ports[0], skb);    in abort_arp_failure()
    691: cdev->lldi->rxq_ids[0]);    in chtls_listen_start()
    707: cdev->lldi->rxq_ids[0]);    in chtls_listen_start()
    742: cxgb4_remove_server(cdev->lldi->ports[0], stid,    in chtls_listen_stop()
    1011: struct cxgb4_lld_info *lldi;    in chtls_pass_accept_rpl() local
    1024: lldi = csk->cdev->lldi;    in chtls_pass_accept_rpl()
    1048: if (!is_t5(lldi->adapter_type))    in chtls_pass_accept_rpl()
    1229: step = cdev->lldi->nrxq / cdev->lldi->nchan;    in chtls_recv_sock()
    1232: csk->rss_qid = cdev->lldi->rxq_ids[rxq_idx];    in chtls_recv_sock()
    1419: cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);    in chtls_pass_accept_request()
    [all …]
|
| chtls_hw.c |
    158: int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)    in chtls_init_kmap() argument
    163: num_key_ctx = (lldi->vr->key.size / TLS_KEY_CONTEXT_SZ);    in chtls_init_kmap()
    173: cdev->kmap.start = lldi->vr->key.start;    in chtls_init_kmap()
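chtls_init_kmap() sizes the inline-TLS key map from the "key" virtual-resource window: the context count is key.size / TLS_KEY_CONTEXT_SZ and key.start becomes the adapter-memory base of the region. A hedged sketch; only kmap.start appears in the snippet, so the addr/size/lock fields and the bitmap bookkeeping are assumptions:

    static int init_kmap_sketch(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
    {
        unsigned int num_key_ctx = lldi->vr->key.size / TLS_KEY_CONTEXT_SZ;

        if (!num_key_ctx)
            return -EINVAL;

        /* one allocation bit per key-context slot in adapter memory */
        cdev->kmap.addr = bitmap_zalloc(num_key_ctx, GFP_KERNEL);    /* assumed field */
        if (!cdev->kmap.addr)
            return -ENOMEM;

        cdev->kmap.start = lldi->vr->key.start;    /* adapter-memory base */
        cdev->kmap.size = num_key_ctx;             /* assumed field */
        spin_lock_init(&cdev->kmap.lock);          /* assumed field */
        return 0;
    }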
|
| chtls.h |
    213: struct cxgb4_lld_info *lldi;    member
    561: int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi);
|
| /linux/drivers/net/ethernet/chelsio/libcxgb/ |
| libcxgb_cm.c |
    78: cxgb_our_interface(struct cxgb4_lld_info *lldi,    in cxgb_our_interface() argument
    85: for (i = 0; i < lldi->nports; i++)    in cxgb_our_interface()
    86: if (lldi->ports[i] == egress_dev)    in cxgb_our_interface()
    92: cxgb_find_route(struct cxgb4_lld_info *lldi,    in cxgb_find_route() argument
    109: if (!cxgb_our_interface(lldi, get_real_dev, n->dev) &&    in cxgb_find_route()
    121: cxgb_find_route6(struct cxgb4_lld_info *lldi,    in cxgb_find_route6() argument
    138: (!cxgb_our_interface(lldi, get_real_dev,    in cxgb_find_route6()
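cxgb_our_interface() is the libcxgb helper the route lookups above use to decide whether a neighbour's egress device is one of this adapter's ports. A reconstruction from the matched lines, hedged because the surrounding context is truncated; get_real_dev unwraps VLAN or bonding upper devices:

    static bool
    our_interface(struct cxgb4_lld_info *lldi,
                  struct net_device *(*get_real_dev)(struct net_device *),
                  struct net_device *egress_dev)
    {
        int i;

        egress_dev = get_real_dev(egress_dev);    /* e.g. strip a VLAN upper dev */
        for (i = 0; i < lldi->nports; i++)
            if (lldi->ports[i] == egress_dev)
                return true;
        return false;
    }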
|
| /linux/drivers/crypto/chelsio/ |
| chcr_algo.c |
    771: fid = u_ctx->lldi.rxq_ids[0];    in create_wreq()
    1218: skb->dev = u_ctx->lldi.ports[0];    in chcr_handle_cipher_resp()
    1392: skb->dev = u_ctx->lldi.ports[0];    in chcr_aes_encrypt()
    1435: skb->dev = u_ctx->lldi.ports[0];    in chcr_aes_decrypt()
    1454: ntxq = u_ctx->lldi.ntxq;    in chcr_device_init()
    1455: rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;    in chcr_device_init()
    1458: ctx->nrxq = u_ctx->lldi.nrxq;    in chcr_device_init()
    1737: skb->dev = u_ctx->lldi.ports[0];    in chcr_ahash_update()
    1819: skb->dev = u_ctx->lldi.ports[0];    in chcr_ahash_final()
    1912: skb->dev = u_ctx->lldi.ports[0];    in chcr_ahash_finup()
    [all …]
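chcr_device_init() applies the same nrxq/nchan split as the connection-offload drivers, but to spread crypto contexts across per-channel response queues, while requests are sent out via lldi.ports[0]. A small hedged sketch of one plausible queue choice; the real driver's exact assignment differs in detail, and struct uld_ctx is chcr's lldi holder from chcr_core.h:

    /* Choose a response (rx) queue id for a new crypto context. */
    static u32 pick_chcr_rxq(const struct uld_ctx *u_ctx, u32 chan, u32 seq)
    {
        u32 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;

        /* round-robin within this channel's block of queues */
        return u_ctx->lldi.rxq_ids[chan * rxq_perchan + (seq % rxq_perchan)];
    }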
|
| chcr_core.h |
    107: struct cxgb4_lld_info lldi;    member
    127: return pci_get_drvdata(u_ctx->lldi.pdev);    in padap()
|
| /linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/ |
| chcr_ktls.h |
    76: struct cxgb4_lld_info lldi;    member
    117: return u_ctx->lldi.rxq_ids[0];    in chcr_get_first_rx_qid()
|
| /linux/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/ |
| chcr_ipsec.h |
    22: struct cxgb4_lld_info lldi;    member
|
| chcr_ipsec.c |
    111: u_ctx->lldi = *infop;    in ch_ipsec_uld_add()
    123: pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));    in ch_ipsec_uld_state_change()
    131: pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));    in ch_ipsec_uld_state_change()
    811: adap = pci_get_drvdata(u_ctx->lldi.pdev);    in ch_ipsec_exit()
|