
Searched refs:qid (Results 1 – 25 of 324) sorted by relevance


/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
htb.c
17 u16 qid; member
81 node->qid = qid; in mlx5e_htb_node_create_leaf()
150 u16 qid; in mlx5e_htb_get_txq_by_classid() local
160 qid = READ_ONCE(node->qid); in mlx5e_htb_get_txq_by_classid()
292 int qid; in mlx5e_htb_leaf_alloc_queue() local
390 qid = node->qid; in mlx5e_htb_leaf_to_inner()
437 if (node->qid == qid) in mlx5e_htb_node_find_by_qid()
460 qid = node->qid; in mlx5e_htb_leaf_del()
518 WRITE_ONCE(node->qid, qid); in mlx5e_htb_leaf_del()
570 qid = node->qid; in mlx5e_htb_leaf_del_last()
[all …]
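
The htb.c hits pair WRITE_ONCE() stores with READ_ONCE() loads on node->qid, so the transmit path can translate a classid to a txq without holding the HTB lock. A minimal user-space sketch of that single-writer/lockless-reader idiom (GNU C; the volatile-cast macros approximate the kernel's, and the htb_node layout is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* User-space stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
    #define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))
    #define READ_ONCE(x)       (*(volatile typeof(x) *)&(x))

    /* Hypothetical node, loosely modeled on the mlx5e HTB leaf. */
    struct htb_node {
        uint32_t classid;
        uint16_t qid;    /* published by the config path, read by the tx path */
    };

    static void node_publish_qid(struct htb_node *node, uint16_t qid)
    {
        WRITE_ONCE(node->qid, qid);          /* single writer */
    }

    static uint16_t node_sample_qid(struct htb_node *node)
    {
        return READ_ONCE(node->qid);         /* lockless reader */
    }

    int main(void)
    {
        struct htb_node n = { .classid = 0x10001 };

        node_publish_qid(&n, 7);
        printf("classid %#x -> qid %u\n", n.classid,
               (unsigned)node_sample_qid(&n));
        return 0;
    }
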
qos.c
61 ix = qid % params->num_channels; in mlx5e_get_qos_sq()
62 qid /= params->num_channels; in mlx5e_get_qos_sq()
76 int txq_ix, ix, qid, err = 0; in mlx5e_open_qos_sq() local
161 u16 qid; in mlx5e_activate_qos_sq() local
172 priv->txq2sq[qid] = sq; in mlx5e_activate_qos_sq()
173 priv->txq2sq_stats[qid] = sq->stats; in mlx5e_activate_qos_sq()
192 sq = mlx5e_get_qos_sq(priv, qid); in mlx5e_deactivate_qos_sq()
221 ix = qid % params->num_channels; in mlx5e_close_qos_sq()
222 qid /= params->num_channels; in mlx5e_close_qos_sq()
428 htb_qopt->qid = res; in mlx5e_htb_setup_tc()
[all …]
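
qos.c shows the other half of the mlx5e scheme: a flat QoS qid decomposes into a channel index (qid % num_channels) and a per-channel SQ slot (qid / num_channels). A self-contained sketch of the mapping and its inverse, with an arbitrary channel count:

    #include <stdio.h>

    struct qos_loc {
        unsigned int ix;   /* channel index: qid % num_channels */
        unsigned int slot; /* SQ slot within the channel: qid / num_channels */
    };

    static struct qos_loc qid_to_loc(unsigned int qid, unsigned int num_channels)
    {
        struct qos_loc loc = {
            .ix = qid % num_channels,
            .slot = qid / num_channels,
        };
        return loc;
    }

    static unsigned int loc_to_qid(struct qos_loc loc, unsigned int num_channels)
    {
        return loc.slot * num_channels + loc.ix;
    }

    int main(void)
    {
        unsigned int num_channels = 4;   /* arbitrary for the demo */

        for (unsigned int qid = 0; qid < 8; qid++) {
            struct qos_loc loc = qid_to_loc(qid, num_channels);
            printf("qid %u -> channel %u, slot %u (back to %u)\n",
                   qid, loc.ix, loc.slot, loc_to_qid(loc, num_channels));
        }
        return 0;
    }
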
/linux/drivers/vdpa/pds/
vdpa_dev.c
95 iowrite16(qid, pdsv->vqs[qid].notify); in pds_vdpa_kick_vq()
122 free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]); in pds_vdpa_release_irq()
135 __func__, qid, pdsv->vqs[qid].ready, ready); in pds_vdpa_set_vq_ready()
148 err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]); in pds_vdpa_set_vq_ready()
155 err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]); in pds_vdpa_set_vq_ready()
396 for (qid = 0; qid < pdsv->num_vqs; ++qid) { in pds_vdpa_request_irqs()
399 snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name), in pds_vdpa_request_irqs()
419 while (qid--) in pds_vdpa_request_irqs()
433 int qid; in pds_vdpa_release_irqs() local
444 for (qid = 0; qid < pdsv->num_vqs; qid++) in pds_vdpa_release_irqs()
[all …]
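
pds_vdpa_request_irqs() walks qids forward and, on failure, unwinds with `while (qid--)`, which tears down queues qid-1 down to 0 and skips the one that failed. A compact sketch of that unwind idiom (setup_vq()/teardown_vq() are stand-ins, rigged so queue 2 fails):

    #include <stdio.h>

    #define NUM_VQS 4

    /* Pretend per-queue setup that fails for queue 2, to exercise unwind. */
    static int setup_vq(int qid)     { return qid == 2 ? -1 : 0; }
    static void teardown_vq(int qid) { printf("teardown vq %d\n", qid); }

    static int setup_all(void)
    {
        int qid;

        for (qid = 0; qid < NUM_VQS; qid++) {
            if (setup_vq(qid) < 0)
                goto err_unwind;
        }
        return 0;

    err_unwind:
        /* Visits qid-1 .. 0, skipping the queue that failed. */
        while (qid--)
            teardown_vq(qid);
        return -1;
    }

    int main(void)
    {
        return setup_all() ? 1 : 0;
    }
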
cmds.c
125 int pds_vdpa_cmd_init_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx, in pds_vdpa_cmd_init_vq() argument
134 .vdpa_vq_init.qid = cpu_to_le16(qid), in pds_vdpa_cmd_init_vq()
139 .vdpa_vq_init.intr_index = cpu_to_le16(qid), in pds_vdpa_cmd_init_vq()
147 __func__, qid, ilog2(vq_info->q_len), in pds_vdpa_cmd_init_vq()
154 qid, comp.status, ERR_PTR(err)); in pds_vdpa_cmd_init_vq()
159 int pds_vdpa_cmd_reset_vq(struct pds_vdpa_device *pdsv, u16 qid, u16 invert_idx, in pds_vdpa_cmd_reset_vq() argument
168 .vdpa_vq_reset.qid = cpu_to_le16(qid), in pds_vdpa_cmd_reset_vq()
177 qid, comp.status, ERR_PTR(err)); in pds_vdpa_cmd_reset_vq()
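
Both cmds.c helpers convert the qid with cpu_to_le16() exactly once, at the point where it enters the device-visible command. A hedged user-space sketch using glibc's htole16() as the analog, with an invented wire struct:

    #include <endian.h>   /* htole16(): user-space analog of cpu_to_le16() */
    #include <stdint.h>
    #include <stdio.h>

    /* Invented wire layout; the device expects little-endian fields. */
    struct vq_init_cmd {
        uint16_t qid;
        uint16_t intr_index;
    };

    static struct vq_init_cmd build_init_cmd(uint16_t qid)
    {
        struct vq_init_cmd cmd = {
            .qid        = htole16(qid),   /* convert once, at the boundary */
            .intr_index = htole16(qid),   /* one interrupt vector per queue */
        };
        return cmd;
    }

    int main(void)
    {
        struct vq_init_cmd cmd = build_init_cmd(0x0102);
        uint8_t *b = (uint8_t *)&cmd;

        printf("qid on the wire: %02x %02x\n", b[0], b[1]);   /* 02 01 */
        return 0;
    }
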
/linux/arch/s390/include/asm/
ap.h
135 static inline struct ap_queue_status ap_tapq(ap_qid_t qid, in ap_tapq() argument
148 : [qid] "d" (qid) in ap_tapq()
167 qid |= 1UL << 23; /* set T bit*/ in ap_test_queue()
168 return ap_tapq(qid, info); in ap_test_queue()
180 unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */ in ap_rapq()
303 static inline struct ap_queue_status ap_aqic(ap_qid_t qid, in ap_aqic() argument
383 static inline struct ap_queue_status ap_bapq(ap_qid_t qid) in ap_bapq() argument
385 unsigned long reg0 = qid | (7UL << 24); /* fc 7 is BAPQ */ in ap_bapq()
411 unsigned long reg0 = qid | (8UL << 24); /* fc 8 is AAPQ */ in ap_aapq()
439 static inline struct ap_queue_status ap_nqap(ap_qid_t qid, in ap_nqap() argument
[all …]
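
The ap.h accessors all build the PQAP register image the same way: the AP queue id occupies the low bits of reg0, the function code sits at bit 24 and up, and ap_test_queue() additionally sets a T bit at bit 23. A host-side sketch of that packing (computation only; the kernel feeds the value to a privileged s390 instruction):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t ap_qid_t;

    static uint64_t ap_reg0(ap_qid_t qid, unsigned long fc, int t_bit)
    {
        uint64_t reg0 = qid | (fc << 24);   /* fc 1 = RAPQ, 7 = BAPQ, 8 = AAPQ */

        if (t_bit)
            reg0 |= 1UL << 23;              /* T bit, per ap_test_queue() */
        return reg0;
    }

    int main(void)
    {
        printf("RAPQ reg0(qid=0x0102):    %#llx\n",
               (unsigned long long)ap_reg0(0x0102, 1, 0));
        printf("TAPQ(T) reg0(qid=0x0102): %#llx\n",
               (unsigned long long)ap_reg0(0x0102, 0, 1));
        return 0;
    }
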
/linux/drivers/net/ethernet/marvell/prestera/
prestera_pci.c
244 return fw->evt_queue[qid].len; in prestera_fw_evtq_len()
256 u8 qid, u32 idx) in prestera_fw_evtq_rd_set() argument
265 return fw->evt_queue[qid].addr; in prestera_fw_evtq_buf()
298 int qid; in prestera_fw_evtq_pick() local
300 for (qid = 0; qid < fw->evt_qnum; qid++) { in prestera_fw_evtq_pick()
302 return qid; in prestera_fw_evtq_pick()
321 u8 qid; in prestera_fw_evt_work_fn() local
373 return fw->cmd_queue[qid].len; in prestera_fw_cmdq_len()
455 u8 qid; in prestera_fw_init() local
478 for (qid = 0; qid < fw->cmd_qnum; qid++) { in prestera_fw_init()
[all …]
/linux/drivers/infiniband/hw/cxgb4/
resource.c
111 u32 qid; in c4iw_get_cqid() local
119 qid = entry->qid; in c4iw_get_cqid()
143 entry->qid = qid; in c4iw_get_cqid()
157 if (rdev->stats.qid.cur > rdev->stats.qid.max) in c4iw_get_cqid()
158 rdev->stats.qid.max = rdev->stats.qid.cur; in c4iw_get_cqid()
172 entry->qid = qid; in c4iw_put_cqid()
189 qid = entry->qid; in c4iw_get_qpid()
217 entry->qid = qid; in c4iw_get_qpid()
231 if (rdev->stats.qid.cur > rdev->stats.qid.max) in c4iw_get_qpid()
232 rdev->stats.qid.max = rdev->stats.qid.cur; in c4iw_get_qpid()
[all …]
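
resource.c draws qids from a free-list cache and maintains a cur/max high-water mark in rdev->stats.qid. A toy restatement of that bookkeeping, with an invented fixed-size stack standing in for the cached qid range:

    #include <stdio.h>

    struct qid_stats { int cur, max; };

    struct qid_pool {
        int free[32];             /* cached qids */
        int nfree;
        struct qid_stats stats;
    };

    static int qid_alloc(struct qid_pool *p)
    {
        if (!p->nfree)
            return -1;            /* the driver would refill from hw here */

        p->stats.cur++;
        if (p->stats.cur > p->stats.max)
            p->stats.max = p->stats.cur;   /* high-water mark */
        return p->free[--p->nfree];
    }

    static void qid_free(struct qid_pool *p, int qid)
    {
        p->free[p->nfree++] = qid;
        p->stats.cur--;
    }

    int main(void)
    {
        struct qid_pool p = { .free = { 40, 41, 42 }, .nfree = 3 };
        int a = qid_alloc(&p), b = qid_alloc(&p);

        qid_free(&p, a);
        printf("got %d,%d; cur=%d max=%d\n", a, b, p.stats.cur, p.stats.max);
        return 0;
    }
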
/linux/drivers/s390/crypto/
ap_queue.c
169 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_recv()
180 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid), in ap_sm_recv()
224 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_read()
249 ap_qid_t qid = aq->qid; in ap_sm_write() local
298 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_write()
338 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_reset()
379 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_reset_wait()
707 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_functions_show()
863 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in se_bind_show()
991 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in se_associate_show()
[all …]
zcrypt_api.c
723 qid = pref_zq->queue->qid; in zcrypt_rsa_modexpo()
737 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); in zcrypt_rsa_modexpo()
827 qid = pref_zq->queue->qid; in zcrypt_rsa_crt()
841 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); in zcrypt_rsa_crt()
950 qid = pref_zq->queue->qid; in _zcrypt_send_cprb()
971 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); in _zcrypt_send_cprb()
1153 qid = pref_zq->queue->qid; in _zcrypt_send_ep11_cprb()
1253 qid = pref_zq->queue->qid; in zcrypt_rng()
1287 stat->qid = zq->queue->qid; in zcrypt_device_status_mask()
1309 stat->qid = zq->queue->qid; in zcrypt_device_status_mask_ext()
[all …]
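
Nearly every hit in ap_queue.c and zcrypt_api.c splits a qid into its card and queue (domain) halves for logging. To the best of my reading of the kernel headers, an AP qid packs the card index into bits 8-15 and the queue index into bits 0-7; the macros below restate that layout so the sketch is self-contained:

    #include <stdio.h>

    /* Named after the kernel's macros, redefined here for the sketch. */
    #define AP_MKQID(card, queue) ((((card) & 0xff) << 8) | ((queue) & 0xff))
    #define AP_QID_CARD(qid)      (((qid) >> 8) & 0xff)
    #define AP_QID_QUEUE(qid)     ((qid) & 0xff)

    int main(void)
    {
        int qid = AP_MKQID(0x12, 0x06);

        printf("qid %#06x -> card %02x, queue %02x\n",
               (unsigned)qid, AP_QID_CARD(qid), AP_QID_QUEUE(qid));
        return 0;
    }
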
/linux/drivers/vdpa/ifcvf/
ifcvf_base.c
17 vp_iowrite16(qid, &cfg->queue_select); in ifcvf_set_vq_vector()
76 if (qid >= hw->nr_vring) in ifcvf_get_vq_size()
87 u16 queue_size, max_size, qid; in ifcvf_get_max_vq_size() local
90 for (qid = 1; qid < hw->nr_vring; qid++) { in ifcvf_get_max_vq_size()
91 queue_size = ifcvf_get_vq_size(hw, qid); in ifcvf_get_max_vq_size()
351 vp_iowrite16(qid, &cfg->queue_select); in ifcvf_set_vq_num()
392 u16 qid; in ifcvf_reset_vring() local
394 for (qid = 0; qid < hw->nr_vring; qid++) { in ifcvf_reset_vring()
395 hw->vring[qid].cb.callback = NULL; in ifcvf_reset_vring()
396 hw->vring[qid].cb.private = NULL; in ifcvf_reset_vring()
[all …]
ifcvf_base.h
108 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
119 u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
120 int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
122 u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector);
124 void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num);
125 int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
127 bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid);
128 void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
132 u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid);
/linux/drivers/nvme/host/
auth.c
28 int qid; member
65 if (qid != 0) { in nvme_auth_submit()
83 qid == 0 ? NVME_QID_ANY : qid, flags); in nvme_auth_submit()
245 chap->qid); in nvme_auth_process_dhchap_challenge()
266 chap->qid); in nvme_auth_process_dhchap_challenge()
358 if (chap->qid == 0) in nvme_auth_process_dhchap_success1()
376 chap->qid); in nvme_auth_process_dhchap_success1()
382 if (chap->qid == 0) in nvme_auth_process_dhchap_success1()
445 chap->qid, ret); in nvme_auth_dhchap_setup_host_response()
523 chap->qid, ret); in nvme_auth_dhchap_setup_ctrl_response()
[all …]
trace.h
26 #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ argument
29 ((qid) ? \
53 __field(int, qid)
64 __entry->qid = nvme_req_qid(req);
77 __entry->qid, __entry->cid, __entry->nsid,
91 __field(int, qid)
100 __entry->qid = nvme_req_qid(req);
110 __entry->qid, __entry->cid, __entry->result,
147 __field(int, qid)
154 __entry->qid = nvme_req_qid(req);
[all …]
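
parse_nvme_cmd() branches on qid because queue 0 is the admin queue: the same opcode byte decodes differently on admin and I/O queues. A trimmed sketch of that dispatch with a few real NVMe opcodes filled in (the kernel's tables are far larger):

    #include <stdio.h>

    static const char *admin_opcode_name(unsigned char op)
    {
        switch (op) {
        case 0x06: return "identify";
        case 0x09: return "set-features";
        default:   return "admin-other";
        }
    }

    static const char *io_opcode_name(unsigned char op)
    {
        switch (op) {
        case 0x01: return "write";
        case 0x02: return "read";
        default:   return "io-other";
        }
    }

    /* qid 0 is the admin queue; anything else is an I/O queue. */
    static const char *opcode_name(int qid, unsigned char op)
    {
        return qid ? io_opcode_name(op) : admin_opcode_name(op);
    }

    int main(void)
    {
        printf("qid 0, opcode 0x06: %s\n", opcode_name(0, 0x06));
        printf("qid 3, opcode 0x02: %s\n", opcode_name(3, 0x02));
        return 0;
    }
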
/linux/drivers/scsi/lpfc/
lpfc_debugfs.h
583 if (phba->sli4_hba.els_wq->queue_id == qid) { in lpfc_debug_dump_wq_by_id()
584 pr_err("ELS WQ[Qid:%d]\n", qid); in lpfc_debug_dump_wq_by_id()
590 pr_err("NVME LS WQ[Qid:%d]\n", qid); in lpfc_debug_dump_wq_by_id()
606 if (phba->sli4_hba.mbx_wq->queue_id == qid) { in lpfc_debug_dump_mq_by_id()
607 printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid); in lpfc_debug_dump_mq_by_id()
623 if (phba->sli4_hba.hdr_rq->queue_id == qid) { in lpfc_debug_dump_rq_by_id()
624 printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid); in lpfc_debug_dump_rq_by_id()
629 printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid); in lpfc_debug_dump_rq_by_id()
658 pr_err("ELS CQ[Qid:%d]\n", qid); in lpfc_debug_dump_cq_by_id()
664 pr_err("NVME LS CQ[Qid:%d]\n", qid); in lpfc_debug_dump_cq_by_id()
[all …]
/linux/drivers/nvme/target/
fabrics-cmd-auth.c
41 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
74 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
107 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
120 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_reply()
137 ctrl->cntlid, req->sq->qid); in nvmet_auth_reply()
143 ctrl->cntlid, req->sq->qid); in nvmet_auth_reply()
150 ctrl->cntlid, req->sq->qid); in nvmet_auth_reply()
243 if (!req->sq->qid) { in nvmet_execute_auth_send()
330 ctrl->cntlid, req->sq->qid, in nvmet_execute_auth_send()
408 ctrl->cntlid, req->sq->qid); in nvmet_auth_success1()
[all …]
trace.h
28 #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ argument
31 (qid ? \
73 __field(int, qid)
86 __entry->qid = req->sq->qid;
100 __entry->qid, __entry->cid, __entry->nsid,
102 show_opcode_name(__entry->qid, __entry->opcode,
104 parse_nvme_cmd(__entry->qid, __entry->opcode,
114 __field(int, qid)
121 __entry->qid = req->cq->qid;
130 __entry->qid, __entry->cid, __entry->result, __entry->status)
fabrics-cmd.c
140 u16 qid = le16_to_cpu(c->qid); in nvmet_install_queue() local
154 if (ctrl->sqs[qid] != NULL) { in nvmet_install_queue()
161 if (qid && sqsize > mqes) { in nvmet_install_queue()
177 nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); in nvmet_install_queue()
178 nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); in nvmet_install_queue()
189 qid, ctrl->cntlid, ret); in nvmet_install_queue()
190 ctrl->sqs[qid] = NULL; in nvmet_install_queue()
289 u16 qid = le16_to_cpu(c->qid); in nvmet_execute_io_connect() local
321 if (unlikely(qid > ctrl->subsys->max_qid)) { in nvmet_execute_io_connect()
322 pr_warn("invalid queue id (%d)\n", qid); in nvmet_execute_io_connect()
[all …]
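
nvmet_install_queue() applies three checks visible in the snippets: the qid must be within the subsystem's max_qid, the slot must not already hold a queue, and an I/O queue (qid != 0) may not ask for more entries than MQES allows. Restated as a standalone validator with invented limits and errno-style returns in place of the fabrics status codes:

    #include <stdio.h>

    #define MAX_QID 8     /* stand-in for ctrl->subsys->max_qid */
    #define MQES    127   /* stand-in for the controller's max queue entries */

    static void *sqs[MAX_QID + 1];   /* qid 0 is the admin queue */

    static int install_queue_checks(unsigned int qid, unsigned int sqsize)
    {
        if (qid > MAX_QID)
            return -1;               /* invalid queue id */
        if (sqs[qid] != NULL)
            return -2;               /* queue already installed */
        if (qid && sqsize > MQES)
            return -3;               /* I/O queue larger than MQES */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", install_queue_checks(1, 64));    /* 0: ok */
        printf("%d\n", install_queue_checks(9, 64));    /* -1 */
        printf("%d\n", install_queue_checks(1, 512));   /* -3 */
        return 0;
    }
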
/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
15 int qid = ring->ring_idx; in ixgbe_xsk_pool() local
25 u16 qid) in ixgbe_xsk_pool_enable() argument
31 if (qid >= adapter->num_rx_queues) in ixgbe_xsk_pool_enable()
34 if (qid >= netdev->real_num_rx_queues || in ixgbe_xsk_pool_enable()
35 qid >= netdev->real_num_tx_queues) in ixgbe_xsk_pool_enable()
46 ixgbe_txrx_ring_disable(adapter, qid); in ixgbe_xsk_pool_enable()
48 set_bit(qid, adapter->af_xdp_zc_qps); in ixgbe_xsk_pool_enable()
51 ixgbe_txrx_ring_enable(adapter, qid); in ixgbe_xsk_pool_enable()
84 ixgbe_txrx_ring_enable(adapter, qid); in ixgbe_xsk_pool_disable()
91 u16 qid) in ixgbe_xsk_pool_setup() argument
[all …]
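
ixgbe_xsk_pool_enable() first validates the qid against every queue count that must cover it, then quiesces the ring pair around flipping its zero-copy bit. A sketch preserving that ordering, with invented stand-ins for the adapter state and ring control helpers:

    #include <stdio.h>

    #define NUM_QUEUES 8

    static unsigned long af_xdp_zc_qps;              /* bitmap, one bit per qid */
    static int num_rx_queues = NUM_QUEUES;
    static int real_num_rx_queues = 4, real_num_tx_queues = 4;

    static void ring_disable(int qid) { printf("disable ring %d\n", qid); }
    static void ring_enable(int qid)  { printf("enable ring %d\n", qid); }

    static int xsk_pool_enable(int qid, int if_running)
    {
        if (qid >= num_rx_queues)
            return -1;
        if (qid >= real_num_rx_queues || qid >= real_num_tx_queues)
            return -1;

        if (if_running)
            ring_disable(qid);        /* quiesce before the switch */

        af_xdp_zc_qps |= 1UL << qid;  /* the kernel uses set_bit() here */

        if (if_running)
            ring_enable(qid);         /* restart in zero-copy mode */
        return 0;
    }

    int main(void)
    {
        return xsk_pool_enable(2, 1);
    }
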
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
qos.c
513 WRITE_ONCE(node->qid, qid); in otx2_qos_sw_create_leaf_node()
554 if (node->qid == qid) in otx2_sw_node_find_by_qid()
590 u16 qid; in otx2_get_txq_by_classid() local
598 qid = READ_ONCE(node->qid); in otx2_get_txq_by_classid()
1364 u16 qid; in otx2_qos_leaf_to_inner() local
1412 qid = node->qid; in otx2_qos_leaf_to_inner()
1455 WRITE_ONCE(node->qid, qid); in otx2_qos_leaf_to_inner()
1545 qid = node->qid; in otx2_qos_leaf_del()
1597 node->qid = qid; in otx2_qos_leaf_del()
1624 qid = node->qid; in otx2_qos_leaf_del_last()
[all …]
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_process_queue_manager.c
48 unsigned int qid) in assign_queue_slot_by_qid() argument
62 unsigned int *qid) in find_available_queue_slot() argument
78 *qid = found; in find_available_queue_slot()
305 unsigned int *qid, in pqm_create_queue() argument
350 *qid = q_data->q_id; in pqm_create_queue()
648 unsigned int qid) in pqm_get_kernel_queue() argument
660 unsigned int qid) in pqm_get_user_queue() argument
669 unsigned int qid, in pqm_get_wave_state() argument
679 qid); in pqm_get_wave_state()
788 unsigned int qid, in pqm_checkpoint_mqd() argument
[all …]
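
find_available_queue_slot() hands out the first unused qid from a per-process bitmap. A user-space sketch using a single unsigned long as the bitmap (the kernel uses find_first_zero_bit()/set_bit() over a larger map, so the slot limit here is just the word width):

    #include <stdio.h>

    static unsigned long queue_slot_bitmap;

    static int find_available_queue_slot(unsigned int *qid)
    {
        for (unsigned int bit = 0; bit < 8 * sizeof(queue_slot_bitmap); bit++) {
            if (!(queue_slot_bitmap & (1UL << bit))) {
                queue_slot_bitmap |= 1UL << bit;   /* claim the slot */
                *qid = bit;
                return 0;
            }
        }
        return -1;   /* no free qid */
    }

    int main(void)
    {
        unsigned int qid;

        for (int i = 0; i < 3; i++)
            if (find_available_queue_slot(&qid) == 0)
                printf("allocated qid %u\n", qid);
        return 0;
    }
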
/linux/drivers/vdpa/octeon_ep/
octep_vdpa.h
78 void octep_notify_queue(struct octep_hw *oct_hw, u16 qid);
80 int octep_set_vq_address(struct octep_hw *oct_hw, u16 qid, u64 desc_area, u64 driver_area,
82 void octep_set_vq_num(struct octep_hw *oct_hw, u16 qid, u32 num);
83 void octep_set_vq_ready(struct octep_hw *oct_hw, u16 qid, bool ready);
84 bool octep_get_vq_ready(struct octep_hw *oct_hw, u16 qid);
85 int octep_set_vq_state(struct octep_hw *oct_hw, u16 qid, const struct vdpa_vq_state *state);
86 int octep_get_vq_state(struct octep_hw *oct_hw, u16 qid, struct vdpa_vq_state *state);
/linux/include/linux/
quota.h
79 extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
80 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
81 extern bool qid_valid(struct kqid qid);
98 enum quota_type type, qid_t qid) in make_kqid() argument
105 kqid.uid = make_kuid(from, qid); in make_kqid()
108 kqid.gid = make_kgid(from, qid); in make_kqid()
111 kqid.projid = make_kprojid(from, qid); in make_kqid()
189 return from_kqid(ns, qid) != (qid_t) -1; in qid_has_mapping()
339 int (*get_next_id) (struct super_block *sb, struct kqid *qid);
511 extern void quota_send_warning(struct kqid qid, dev_t dev,
[all …]
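
The quota.h hits tie qids to user namespaces: make_kqid() turns a userspace qid_t into a kernel-internal id through the namespace's id map, and qid_has_mapping() reduces to from_kqid() != -1. A toy model with a namespace that maps ids [base, base+count) onto [0, count); struct user_ns and its fields are invented for the sketch:

    #include <stdio.h>

    typedef unsigned int qid_t;
    #define QID_INVALID ((qid_t)-1)

    struct user_ns { qid_t base, count; };   /* stands in for the id map */

    struct kqid { qid_t kid; };

    static struct kqid make_kqid(const struct user_ns *ns, qid_t qid)
    {
        struct kqid k = {
            .kid = qid < ns->count ? ns->base + qid : QID_INVALID,
        };
        return k;
    }

    static qid_t from_kqid(const struct user_ns *ns, struct kqid k)
    {
        if (k.kid < ns->base || k.kid >= ns->base + ns->count)
            return QID_INVALID;       /* no mapping in this namespace */
        return k.kid - ns->base;
    }

    static int qid_has_mapping(const struct user_ns *ns, struct kqid k)
    {
        return from_kqid(ns, k) != QID_INVALID;   /* as in the snippet */
    }

    int main(void)
    {
        struct user_ns ns = { .base = 100000, .count = 65536 };
        struct kqid k = make_kqid(&ns, 1000);

        printf("kid=%u mapped=%d back=%u\n",
               k.kid, qid_has_mapping(&ns, k), from_kqid(&ns, k));
        return 0;
    }
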
/linux/drivers/vdpa/alibaba/
eni_vdpa.c
261 return vp_legacy_get_queue_size(ldev, qid); in eni_vdpa_get_vq_size()
280 if (!vp_legacy_get_queue_enable(ldev, qid) in eni_vdpa_set_vq_state()
288 static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid, in eni_vdpa_set_vq_cb() argument
293 eni_vdpa->vring[qid].cb = *cb; in eni_vdpa_set_vq_cb()
306 vp_legacy_set_queue_address(ldev, qid, 0); in eni_vdpa_set_vq_ready()
313 return vp_legacy_get_queue_enable(ldev, qid); in eni_vdpa_get_vq_ready()
321 u16 n = vp_legacy_get_queue_size(ldev, qid); in eni_vdpa_set_vq_num()
330 qid, n, num); in eni_vdpa_set_vq_num()
340 vp_legacy_set_queue_address(ldev, qid, pfn); in eni_vdpa_set_vq_address()
345 static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid) in eni_vdpa_kick_vq() argument
[all …]
/linux/fs/xfs/
xfs_quotaops.c
225 struct kqid qid, in xfs_fs_get_dqblk() argument
234 id = from_kqid(&init_user_ns, qid); in xfs_fs_get_dqblk()
235 return xfs_qm_scall_getquota(mp, id, xfs_quota_type(qid.type), qdq); in xfs_fs_get_dqblk()
242 struct kqid *qid, in xfs_fs_get_nextdqblk() argument
252 id = from_kqid(&init_user_ns, *qid); in xfs_fs_get_nextdqblk()
253 ret = xfs_qm_scall_getquota_next(mp, &id, xfs_quota_type(qid->type), in xfs_fs_get_nextdqblk()
259 *qid = make_kqid(current_user_ns(), qid->type, id); in xfs_fs_get_nextdqblk()
266 struct kqid qid, in xfs_fs_set_dqblk() argument
276 return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), in xfs_fs_set_dqblk()
277 xfs_quota_type(qid.type), qdq); in xfs_fs_set_dqblk()
/linux/drivers/vdpa/virtio_pci/
vp_vdpa.c
302 !vp_modern_get_queue_enable(mdev, qid)) { in vp_vdpa_set_vq_state()
318 vp_vdpa->vring[qid].cb = *cb; in vp_vdpa_set_vq_cb()
322 u16 qid, bool ready) in vp_vdpa_set_vq_ready() argument
326 vp_modern_set_queue_enable(mdev, qid, ready); in vp_vdpa_set_vq_ready()
333 return vp_modern_get_queue_enable(mdev, qid); in vp_vdpa_get_vq_ready()
341 vp_modern_set_queue_size(mdev, qid, num); in vp_vdpa_set_vq_num()
348 return vp_modern_get_queue_size(mdev, qid); in vp_vdpa_get_vq_size()
357 vp_modern_queue_address(mdev, qid, desc_area, in vp_vdpa_set_vq_address()
363 static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid) in vp_vdpa_kick_vq() argument
367 vp_iowrite16(qid, vp_vdpa->vring[qid].notify); in vp_vdpa_kick_vq()
[all …]
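
Several of the vdpa drivers in these results (pds, ifcvf, vp_vdpa) kick a virtqueue the same way: write the qid itself into that queue's notify/doorbell address (iowrite16/vp_iowrite16 in the snippets). A sketch with an in-memory word standing in for the ioremapped register:

    #include <stdint.h>
    #include <stdio.h>

    struct vq {
        uint16_t qid;
        volatile uint16_t *notify;   /* would be an ioremapped BAR offset */
    };

    static void kick_vq(struct vq *vq)
    {
        *vq->notify = vq->qid;       /* device decodes which queue to poll */
    }

    int main(void)
    {
        volatile uint16_t doorbell;
        struct vq vq = { .qid = 5, .notify = &doorbell };

        kick_vq(&vq);
        printf("doorbell now holds qid %u\n", (unsigned)doorbell);
        return 0;
    }
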
