| /linux/drivers/crypto/cavium/zip/ |
| zip_device.c |
    59   return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) *   in zip_cmd_queue_consumed()
    128  zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail;              in zip_load_instr()
    139  zip_dev->iq[queue].pend_cnt++;                                        in zip_load_instr()
    151  zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,               in zip_load_instr()
    152  zip_dev->iq[queue].hw_tail);                                          in zip_load_instr()
    155  zip_dev->iq[queue].pend_cnt);                                         in zip_load_instr()
    187  zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head;              in zip_update_cmd_bufs()
    193  zip_dev->iq[queue].done_cnt++;                                        in zip_update_cmd_bufs()
    194  zip_dev->iq[queue].pend_cnt--;                                        in zip_update_cmd_bufs()
    197  zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail,               in zip_update_cmd_bufs()
    [all …]
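The zip_device.c hits show a software-managed command ring: consumed space is the head-tail distance, while pend_cnt and done_cnt count commands in flight and retired. Below is a minimal userspace model of that occupancy arithmetic; the struct, field names, power-of-two depth, and 8-byte command size are illustrative assumptions, not the driver's actual definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of a command queue tracked by software
 * head/tail indices, in the spirit of zip_cmd_queue_consumed().
 * Layout and sizes are illustrative only. */
struct cmd_queue {
	uint32_t sw_head;   /* next slot the host will fill */
	uint32_t sw_tail;   /* oldest slot not yet consumed */
	uint32_t depth;     /* total slots; a power of two here */
};

/* Bytes consumed so far, assuming 8-byte command words. */
static uint32_t queue_consumed(const struct cmd_queue *q)
{
	return ((q->sw_head - q->sw_tail) & (q->depth - 1)) * 8;
}

int main(void)
{
	struct cmd_queue q = { .sw_head = 5, .sw_tail = 2, .depth = 64 };
	printf("consumed: %u bytes\n", queue_consumed(&q));
	return 0;
}
```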
|
| zip_mem.c |
    59   zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA),   in zip_cmd_qbuf_alloc()
    62   if (!zip->iq[q].sw_head)                                               in zip_cmd_qbuf_alloc()
    65   memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE);                      in zip_cmd_qbuf_alloc()
    67   zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head);   in zip_cmd_qbuf_alloc()
    78   zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail);               in zip_cmd_qbuf_free()
    80   free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE));     in zip_cmd_qbuf_free()
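The allocation pattern above grabs whole pages from the DMA-capable zone, zeroes them, and later releases them with the matching order. A kernel-context sketch of the same pattern, with a hypothetical QBUF_SIZE standing in for ZIP_CMD_QBUF_SIZE:

```c
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/types.h>

#define QBUF_SIZE 16384  /* illustrative; the driver uses ZIP_CMD_QBUF_SIZE */

/* Allocate a zeroed, page-backed queue buffer from the DMA zone. */
static u64 *qbuf_alloc(void)
{
	u64 *buf;

	buf = (u64 *)__get_free_pages(GFP_KERNEL | GFP_DMA,
				      get_order(QBUF_SIZE));
	if (!buf)
		return NULL;
	memset(buf, 0, QBUF_SIZE);
	return buf;
}

/* Free it with the same order it was allocated with. */
static void qbuf_free(u64 *buf)
{
	free_pages((unsigned long)buf, get_order(QBUF_SIZE));
}
```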
|
| zip_main.c |
    167  memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));    in zip_init_hw()
    169  spin_lock_init(&zip->iq[q].lock);                   in zip_init_hw()
    180  zip->iq[q].sw_tail = zip->iq[q].sw_head;            in zip_init_hw()
    181  zip->iq[q].hw_tail = zip->iq[q].sw_head;            in zip_init_hw()
    185  que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>  in zip_init_hw()
    198  zip->iq[q].sw_head, zip->iq[q].sw_tail,             in zip_init_hw()
    199  zip->iq[q].hw_tail);                                in zip_init_hw()
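zip_init_hw() resets each queue: clear the bookkeeping, re-initialize the lock, collapse all cursors onto the start of the buffer, and hand the hardware the shifted physical address of the ring. A hedged sketch of that sequence with a simplified state struct; the shift parameter and return value are stand-ins for the driver's que_sbuf_addr register encoding:

```c
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/page.h>

/* Illustrative stand-in for struct zip_iq, trimmed to the fields
 * the reset sequence touches. */
struct iq_state {
	spinlock_t lock;
	u64 *sw_head, *sw_tail, *hw_tail;
};

/* Reset the queue and return the page-granular physical pointer
 * that would be programmed into the hardware's sbuf-addr field. */
static u64 iq_reset(struct iq_state *iq, u64 *qbuf, unsigned int shift)
{
	memset(iq, 0, sizeof(*iq));
	spin_lock_init(&iq->lock);
	iq->sw_head = qbuf;
	iq->sw_tail = qbuf;    /* nothing pending ... */
	iq->hw_tail = qbuf;    /* ... and hardware agrees */
	return __pa(iq->sw_head) >> shift;
}
```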
|
| /linux/drivers/net/ethernet/cavium/liquidio/ |
| request_manager.c |
    86   iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);   in octeon_init_instr_queue()
    103  lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);      in octeon_init_instr_queue()
    110  iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);         in octeon_init_instr_queue()
    149  lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);      in octeon_init_instr_queue()
    275  writel(iq->fill_cnt, iq->doorbell_reg);                           in ring_doorbell()
    301  iqptr = iq->base_addr + (cmdsize * iq->host_write_index);         in __copy_cmd_into_iq()
    329  iq->host_write_index = incr_index(iq->host_write_index, 1,        in __post_command2()
    457  iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);      in octeon_flush_iq()
    461  if (iq->flush_index == iq->octeon_read_index)                     in octeon_flush_iq()
    506  if (!iq)                                                          in __check_db_timeout()
    [all …]
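Posting a command, per the hits above, means copying it into the slot at host_write_index, advancing the index with wraparound, and eventually writing the accumulated fill count to a doorbell register. A userspace model of that flow, with incr_index() reimplemented as modular arithmetic and the doorbell noted in a comment; field names only loosely mirror struct octeon_instr_queue:

```c
#include <stdint.h>
#include <string.h>

/* Illustrative instruction-queue state. */
struct instr_queue {
	uint8_t *base_addr;          /* ring of max_count fixed-size commands */
	uint32_t host_write_index;   /* next free slot */
	uint32_t fill_cnt;           /* commands queued since last doorbell */
	uint32_t max_count;
	uint32_t cmdsize;
};

/* Advance an index around the ring, as incr_index() does. */
static uint32_t incr_index(uint32_t idx, uint32_t cnt, uint32_t max)
{
	return (idx + cnt) % max;
}

static void post_command(struct instr_queue *iq, const void *cmd)
{
	void *slot = iq->base_addr +
		     (size_t)iq->cmdsize * iq->host_write_index;

	memcpy(slot, cmd, iq->cmdsize);               /* __copy_cmd_into_iq() */
	iq->host_write_index =
		incr_index(iq->host_write_index, 1,
			   iq->max_count);            /* __post_command2() */
	iq->fill_cnt++;
	/* In the driver, ring_doorbell() then does
	 *   writel(iq->fill_cnt, iq->doorbell_reg);
	 * and resets fill_cnt. */
}
```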
|
| cn23xx_vf_regs.h |
    70   #define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \
    71       (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
    73   #define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \
    74       (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
    76   #define CN23XX_VF_SLI_IQ_SIZE(iq) \
    77       (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
    79   #define CN23XX_VF_SLI_IQ_DOORBELL(iq) \
    80       (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
    82   #define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \
    83       (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
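Every macro in this header (and in the sibling cn66xx/cn23xx PF headers below) is the same pattern: a per-queue register lives at a fixed start offset plus the queue index times a constant stride. A compilable illustration with made-up offsets; the real CN23XX values differ:

```c
#include <stdint.h>
#include <stdio.h>

/* Offsets below are invented for illustration, not the CN23XX's
 * real register map. The pattern is base + queue * stride. */
#define IQ_OFFSET            0x20000ULL
#define IQ_DOORBELL_START    0x10020ULL
#define SLI_IQ_DOORBELL(iq)  (IQ_DOORBELL_START + ((iq) * IQ_OFFSET))

int main(void)
{
	for (unsigned int q = 0; q < 4; q++)
		printf("queue %u doorbell at 0x%llx\n", q,
		       (unsigned long long)SLI_IQ_DOORBELL(q));
	return 0;
}
```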
|
| cn66xx_regs.h |
    143  #define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \
    144      (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
    146  #define CN6XXX_SLI_IQ_SIZE(iq) \
    147      (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
    149  #define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \
    152  #define CN6XXX_SLI_IQ_DOORBELL(iq) \
    153      (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
    155  #define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \
    156      (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
    158  #define CN66XX_SLI_IQ_BP64(iq) \
    [all …]
|
| cn23xx_vf_device.c |
    104  struct octeon_instr_queue *iq;                 in cn23xx_vf_setup_global_input_regs()
    116  iq = oct->instr_queue[q_no];                   in cn23xx_vf_setup_global_input_regs()
    118  if (iq)                                        in cn23xx_vf_setup_global_input_regs()
    219  iq->base_addr_dma);                            in cn23xx_setup_vf_iq_regs()
    225  iq->doorbell_reg =                             in cn23xx_setup_vf_iq_regs()
    227  iq->inst_cnt_reg =                             in cn23xx_setup_vf_iq_regs()
    230  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);    in cn23xx_setup_vf_iq_regs()
    240  iq->inst_cnt_reg);                             in cn23xx_setup_vf_iq_regs()
    242  iq->reset_instr_cnt = 0;                       in cn23xx_setup_vf_iq_regs()
    531  iq->pkt_in_done = pkt_in_done;                 in cn23xx_update_read_index()
    [all …]
|
| cn23xx_pf_regs.h |
    170  #define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
    171      (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
    173  #define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
    174      (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
    176  #define CN23XX_SLI_IQ_SIZE(iq) \
    177      (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
    179  #define CN23XX_SLI_IQ_DOORBELL(iq) \
    180      (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
    182  #define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
    183      (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
|
| cn66xx_device.c |
    272  iq->base_addr_dma);                             in lio_cn6xxx_setup_iq_regs()
    282  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);     in lio_cn6xxx_setup_iq_regs()
    287  iq->reset_instr_cnt = readl(iq->inst_cnt_reg);  in lio_cn6xxx_setup_iq_regs()
    339  mask |= oct->io_qmask.iq;                       in lio_cn6xxx_enable_io_queues()
    357  mask ^= oct->io_qmask.iq;                       in lio_cn6xxx_disable_io_queues()
    361  mask = (u32)oct->io_qmask.iq;                   in lio_cn6xxx_disable_io_queues()
    370  if (!(oct->io_qmask.iq & BIT_ULL(i)))           in lio_cn6xxx_disable_io_queues()
    451  u32 new_idx = readl(iq->inst_cnt_reg);          in lio_cn6xxx_update_read_index()
    457  if (iq->reset_instr_cnt < new_idx)              in lio_cn6xxx_update_read_index()
    458  new_idx -= iq->reset_instr_cnt;                 in lio_cn6xxx_update_read_index()
    [all …]
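lio_cn6xxx_update_read_index() reads a free-running hardware instruction counter and subtracts the value captured at setup time (reset_instr_cnt), so counts accumulated before the queue was (re)initialized don't skew the read index. A small model of that baseline subtraction; the struct and the final fold into the ring range are illustrative:

```c
#include <stdint.h>

/* Illustrative queue state: only the baseline counter matters here. */
struct iq_model {
	uint32_t reset_instr_cnt;  /* counter value captured at setup */
};

/* Convert a raw hardware count into a ring index, stripping the
 * portion accumulated before setup. hw_count models what the driver
 * gets from readl(iq->inst_cnt_reg). */
static uint32_t effective_read_index(const struct iq_model *iq,
				     uint32_t hw_count, uint32_t ring_size)
{
	uint32_t new_idx = hw_count;

	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;
	return new_idx % ring_size;
}
```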
|
| octeon_config.h |
    121  #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
    122  #define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs)
    123  #define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size)
    124  #define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
    125  #define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
    126  #define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
    128  #define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt)
    129  #define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
    410  struct octeon_iq_config iq;
|
| octeon_device.c |
    41    .iq = {
    150   .iq = {
    316   .iq = {
    419   .iq = {
    1441  if (iq) {                                      in lio_enable_irq()
    1442  spin_lock_bh(&iq->lock);                       in lio_enable_irq()
    1443  writel(iq->pkts_processed, iq->inst_cnt_reg);  in lio_enable_irq()
    1444  iq->pkt_in_done -= iq->pkts_processed;         in lio_enable_irq()
    1445  iq->pkts_processed = 0;                        in lio_enable_irq()
    1448  oct = iq->oct_dev;                             in lio_enable_irq()
    [all …]
|
| lio_vf_main.c |
    126  iq = oct->instr_queue[i];                      in pcierror_quiesce_device()
    129  spin_lock_bh(&iq->lock);                       in pcierror_quiesce_device()
    130  iq->fill_cnt = 0;                              in pcierror_quiesce_device()
    131  iq->octeon_read_index = iq->host_write_index;  in pcierror_quiesce_device()
    135  spin_unlock_bh(&iq->lock);                     in pcierror_quiesce_device()
    488  iq = oct->instr_queue[i];                      in octeon_destroy_resources()
    491  spin_lock_bh(&iq->lock);                       in octeon_destroy_resources()
    492  iq->fill_cnt = 0;                              in octeon_destroy_resources()
    493  iq->octeon_read_index = iq->host_write_index;  in octeon_destroy_resources()
    829  int i, frags, iq;                              in free_netsgbuf()
    [all …]
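The quiesce path above (repeated in lio_main.c further down) takes the queue lock, discards commands staged but never rung in (fill_cnt = 0), and declares everything already posted as consumed by snapping the read index to the write index. A kernel-style sketch with a queue struct trimmed to the fields this path touches:

```c
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative subset of struct octeon_instr_queue. */
struct iq_min {
	spinlock_t lock;
	u32 fill_cnt;
	u32 octeon_read_index;
	u32 host_write_index;
};

/* Quiesce a queue on PCI error: forget unrung commands and mark
 * everything posted as consumed so buffers can be reclaimed. */
static void iq_quiesce(struct iq_min *iq)
{
	spin_lock_bh(&iq->lock);
	iq->fill_cnt = 0;
	iq->octeon_read_index = iq->host_write_index;
	spin_unlock_bh(&iq->lock);
}
```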
|
| cn23xx_pf_device.c |
    403   struct octeon_instr_queue *iq;               in cn23xx_pf_setup_global_input_regs()
    446   iq = oct->instr_queue[q_no];                 in cn23xx_pf_setup_global_input_regs()
    447   if (iq)                                      in cn23xx_pf_setup_global_input_regs()
    596   iq->base_addr_dma);                          in cn23xx_setup_iq_regs()
    602   iq->doorbell_reg =                           in cn23xx_setup_iq_regs()
    604   iq->inst_cnt_reg =                           in cn23xx_setup_iq_regs()
    607   iq_no, iq->doorbell_reg, iq->inst_cnt_reg);  in cn23xx_setup_iq_regs()
    617   iq->inst_cnt_reg);                           in cn23xx_setup_iq_regs()
    625   iq->reset_instr_cnt = 0;                     in cn23xx_setup_iq_regs()
    1090  iq->pkt_in_done = pkt_in_done;               in cn23xx_update_read_index()
    [all …]
|
| cn68xx_regs.h |
    32   #define CN68XX_SLI_IQ_PORT_PKIND(iq) \
    33       (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
|
| lio_main.c |
    266   iq = oct->instr_queue[i];                      in pcierror_quiesce_device()
    269   spin_lock_bh(&iq->lock);                       in pcierror_quiesce_device()
    270   iq->fill_cnt = 0;                              in pcierror_quiesce_device()
    271   iq->octeon_read_index = iq->host_write_index;  in pcierror_quiesce_device()
    275   spin_unlock_bh(&iq->lock);                     in pcierror_quiesce_device()
    472   int q, iq;                                     in check_txq_status()
    476   iq = lio->linfo.txpciq[q %                     in check_txq_status()
    1012  iq->fill_cnt = 0;                              in octeon_destroy_resources()
    1013  iq->octeon_read_index = iq->host_write_index;  in octeon_destroy_resources()
    1445  int i, frags, iq;                              in free_netsgbuf()
    [all …]
|
| lio_core.c |
    502  struct octeon_instr_queue *iq = oct->instr_queue[iq_num];  in lio_update_txq_status()
    506  netdev = oct->props[iq->ifidx].netdev;                     in lio_update_txq_status()
    515  if (__netif_subqueue_stopped(netdev, iq->q_index) &&       in lio_update_txq_status()
    518  netif_wake_subqueue(netdev, iq->q_index);                  in lio_update_txq_status()
    743  struct octeon_instr_queue *iq;                             in liquidio_napi_poll()
    757  iq = oct->instr_queue[iq_no];                              in liquidio_napi_poll()
    758  if (iq) {                                                  in liquidio_napi_poll()
    762  if (atomic_read(&iq->instr_pending))                       in liquidio_napi_poll()
    764  tx_done = octeon_flush_iq(oct, iq, budget);                in liquidio_napi_poll()
    780  (iq && iq->pkt_in_done >= MAX_REG_CNT) ||                  in liquidio_napi_poll()
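lio_update_txq_status() restarts a stopped transmit subqueue once completions have freed ring space. A hedged kernel-style sketch of that check; the watermark constant and the free_count parameter are hypothetical stand-ins for the driver's own accounting:

```c
#include <linux/netdevice.h>

#define WAKE_THRESHOLD 64  /* illustrative; the driver uses its own watermark */

/* After completions reclaim descriptors, restart the matching
 * subqueue if the stack had stopped it and the ring has room again.
 * free_count is a hypothetical stand-in for the driver's count of
 * free descriptors. */
static void maybe_wake_txq(struct net_device *netdev, u16 q_index,
			   u32 free_count)
{
	if (__netif_subqueue_stopped(netdev, q_index) &&
	    free_count >= WAKE_THRESHOLD)
		netif_wake_subqueue(netdev, q_index);
}
```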
|
| octeon_iq.h |
    375  struct octeon_instr_queue *iq, u32 napi_budget);
    397  octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
|
| /linux/drivers/crypto/marvell/octeontx2/ |
| otx2_cptlf.h |
    119  if (iq->real_vaddr)                                               in otx2_cpt_free_instruction_queues()
    121  iq->size,                                                         in otx2_cpt_free_instruction_queues()
    122  iq->real_vaddr,                                                   in otx2_cpt_free_instruction_queues()
    124  iq->real_vaddr = NULL;                                            in otx2_cpt_free_instruction_queues()
    125  iq->vaddr = NULL;                                                 in otx2_cpt_free_instruction_queues()
    144  iq->real_vaddr = dma_alloc_coherent(&lfs->pdev->dev, iq->size,    in otx2_cpt_alloc_instruction_queues()
    146  if (!iq->real_vaddr) {                                            in otx2_cpt_alloc_instruction_queues()
    150  iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;        in otx2_cpt_alloc_instruction_queues()
    151  iq->dma_addr = iq->real_dma_addr + OTX2_CPT_INST_GRP_QLEN_BYTES;  in otx2_cpt_alloc_instruction_queues()
    154  iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);      in otx2_cpt_alloc_instruction_queues()
    [all …]
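The otx2 allocator over-allocates a coherent buffer, then derives the usable queue pointer by skipping a group-length header region and rounding up to the required alignment. A sketch under those assumptions; the alignment constant, header-size parameter, and struct are illustrative, and the elided real code presumably keeps dma_addr in step with the aligned vaddr:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#define INST_Q_ALIGN 128  /* illustrative; the driver's constant differs */

/* Illustrative subset of the driver's instruction-queue struct. */
struct cpt_iq_sketch {
	size_t size;
	void *real_vaddr;          /* as returned by the allocator */
	dma_addr_t real_dma_addr;
	u8 *vaddr;                 /* aligned pointer actually used */
	dma_addr_t dma_addr;
};

/* Allocate a coherent buffer, then offset past a header region
 * (hdr_bytes, hypothetical here) and align the working pointer. */
static int iq_alloc(struct device *dev, struct cpt_iq_sketch *iq,
		    size_t hdr_bytes)
{
	iq->real_vaddr = dma_alloc_coherent(dev, iq->size,
					    &iq->real_dma_addr, GFP_KERNEL);
	if (!iq->real_vaddr)
		return -ENOMEM;
	iq->vaddr = (u8 *)iq->real_vaddr + hdr_bytes;
	iq->dma_addr = iq->real_dma_addr + hdr_bytes;
	iq->vaddr = PTR_ALIGN(iq->vaddr, INST_Q_ALIGN);
	return 0;
}
```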
|
| /linux/drivers/scsi/csiostor/ |
| csio_isr.c |
    212  csio_scsi_isr_handler(struct csio_q *iq)                         in csio_scsi_isr_handler()
    214  struct csio_hw *hw = (struct csio_hw *)iq->owner;                in csio_scsi_isr_handler()
    223  if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,  in csio_scsi_isr_handler()
    258  struct csio_q *iq = (struct csio_q *) dev_id;                    in csio_scsi_isr()
    261  if (unlikely(!iq))                                               in csio_scsi_isr()
    264  hw = (struct csio_hw *)iq->owner;                                in csio_scsi_isr()
    271  csio_scsi_isr_handler(iq);                                       in csio_scsi_isr()
    288  struct csio_q *iq = priv;                                        in csio_scsi_intx_handler()
    290  csio_scsi_isr_handler(iq);                                       in csio_scsi_intx_handler()
|
| csio_wr.h |
    410  struct csio_iq iq;
    463  #define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
    465      ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
    467      ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
    473  #define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
    476      csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx)
|
| csio_wr.c |
    255   q->un.iq.genbit = 1;                         in csio_wr_alloc_q()
    278   q->un.iq.flq_idx = flq_idx;                  in csio_wr_alloc_q()
    280   flq = wrm->q_arr[q->un.iq.flq_idx];          in csio_wr_alloc_q()
    306   q->un.iq.flq_idx = -1;                       in csio_wr_alloc_q()
    310   q->un.iq.iq_intx_handler = iq_intx_handler;  in csio_wr_alloc_q()
    771   q->un.iq.genbit = 1;                         in csio_wr_cleanup_iq_ftr()
    1141  wrm->q_arr[q->un.iq.flq_idx] : NULL;         in csio_wr_process_iq()
    1185  q_completed->un.iq.iq_intx_handler);         in csio_wr_process_iq()
    1211  q->un.iq.genbit ^= 0x1;                      in csio_wr_process_iq()
    1252  INGRESSQID_V(q->un.iq.physiqid) |            in csio_wr_process_iq()
    [all …]
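The genbit manipulation in csio_wr.c suggests the usual generation-bit ownership protocol: software starts with genbit = 1 and flips it on each ring wrap, and an entry is valid only while its generation matches. A userspace model of that convention; the entry layout is assumed, not csiostor's actual ingress-queue format:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical ingress-queue entry: hardware writes gen last. */
struct iq_entry {
	uint8_t gen;
};

struct ingress_q {
	struct iq_entry *ring;
	uint32_t cidx, size;
	uint8_t genbit;            /* set to 1 at alloc/cleanup time */
};

/* Consume the next entry if hardware has produced it. */
static bool iq_next_entry(struct ingress_q *q, struct iq_entry **out)
{
	struct iq_entry *e = &q->ring[q->cidx];

	if (e->gen != q->genbit)
		return false;       /* slot not yet written this pass */
	*out = e;
	if (++q->cidx == q->size) {
		q->cidx = 0;        /* wrap: valid entries now carry the */
		q->genbit ^= 0x1;   /* flipped generation value */
	}
	return true;
}
```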
|
| /linux/drivers/media/tuners/ |
| r820t.c |
    1603  if (iq[0].value > iq[i - 1].value)  in r820t_compre_cor()
    1604  swap(iq[0], iq[i - 1]);             in r820t_compre_cor()
    1622  tmp.phase_y = iq[0].phase_y;        in r820t_compre_step()
    1623  tmp.gain_x = iq[0].gain_x;          in r820t_compre_step()
    1646  iq[0].gain_x = tmp.gain_x;          in r820t_compre_step()
    1648  iq[0].value = tmp.value;            in r820t_compre_step()
    1688  iq[i].value = rc;                   in r820t_iq_tree()
    1691  iq[i].gain_x = fix_val;             in r820t_iq_tree()
    1692  iq[i].phase_y = var_val;            in r820t_iq_tree()
    1694  iq[i].phase_y = fix_val;            in r820t_iq_tree()
    [all …]
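Here `iq` is the tuner's I/Q imbalance calibration state rather than an instruction queue: candidate gain/phase settings are measured, and the best (lowest) correlation value is bubbled into slot 0 by swapping. A standalone model of that selection step, only loosely following the driver's struct r820t_sect_type:

```c
#include <stdint.h>

/* Illustrative calibration candidate: a gain/phase setting plus
 * the measured correlation value for it (lower is better). */
struct sect {
	uint8_t gain_x, phase_y;
	uint16_t value;
};

static void swap_sect(struct sect *a, struct sect *b)
{
	struct sect t = *a;
	*a = *b;
	*b = t;
}

/* Bubble the smallest measured value into iq[0], as
 * r820t_compre_cor() does over its fixed-size candidate array. */
static void compare_cor(struct sect *iq, int n)
{
	for (int i = n; i > 1; i--)
		if (iq[0].value > iq[i - 1].value)
			swap_sect(&iq[0], &iq[i - 1]);
}
```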
|
| /linux/drivers/net/ethernet/chelsio/cxgb4/ |
| cxgb4_filter.c |
    330   int iq;         in get_filter_steerq()
    338   if (fs->iq)     in get_filter_steerq()
    340   iq = 0;         in get_filter_steerq()
    351   iq = fs->iq;    in get_filter_steerq()
    354   return iq;      in get_filter_steerq()
    1388  if (iq < 0)     in cxgb4_set_hash_filter()
    1389  return iq;      in cxgb4_set_hash_filter()
    1398  f->fs.iq = iq;  in cxgb4_set_hash_filter()
    1559  if (iq < 0)     in __cxgb4_set_filter()
    1560  return iq;      in __cxgb4_set_filter()
    [all …]
|
| sge.c |
    4383  iq->size = roundup(iq->size, 16);                                in t4_sge_alloc_rxq()
    4385  iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,  in t4_sge_alloc_rxq()
    4388  if (!iq->desc)                                                   in t4_sge_alloc_rxq()
    4469  iq->cur_desc = iq->desc;                                         in t4_sge_alloc_rxq()
    4470  iq->cidx = 0;                                                    in t4_sge_alloc_rxq()
    4471  iq->gen = 1;                                                     in t4_sge_alloc_rxq()
    4472  iq->next_intr_params = iq->intr_params;                          in t4_sge_alloc_rxq()
    4490  adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;    in t4_sge_alloc_rxq()
    4549  if (iq->desc) {                                                  in t4_sge_alloc_rxq()
    4550  dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,        in t4_sge_alloc_rxq()
    [all …]
|
| /linux/scripts/ |
| tags.sh |
    251  if $1 --version 2>&1 | grep -iq universal; then
    288  if $1 --version 2>&1 | grep -iq exuberant; then
    290  elif $1 --version 2>&1 | grep -iq emacs; then
|