
Searched refs:qid (Results 1 – 25 of 271) sorted by relevance


/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
qos.c
59 u16 qid; member
77 node->qid = qid; in mlx5e_sw_node_create_leaf()
156 u16 qid; in mlx5e_get_txq_by_classid() local
166 qid = READ_ONCE(node->qid); in mlx5e_get_txq_by_classid()
237 qid = node->qid / params->num_channels; in mlx5e_open_qos_sq()
679 qid = node->qid; in mlx5e_htb_leaf_to_inner()
726 if (node->qid == qid) in mlx5e_sw_node_find_by_qid()
768 qid = node->qid; in mlx5e_htb_leaf_del()
826 WRITE_ONCE(node->qid, qid); in mlx5e_htb_leaf_del()
876 qid = node->qid; in mlx5e_htb_leaf_del_last()
[all …]
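
The mlx5 HTB code above maps a flat node qid onto a channel and a per-channel SQ slot with plain integer arithmetic (see the hit at line 237). A minimal sketch of one plausible reading of that split; the struct and field names are hypothetical, not the driver's real ones:

#include <stdio.h>

/* Hypothetical stand-in for the driver's channel parameters. */
struct params { unsigned int num_channels; };

/* One plausible reading of "qid = node->qid / params->num_channels":
 * the quotient picks the per-channel SQ slot, the remainder the channel. */
static void split_qid(unsigned int qid, const struct params *p,
		      unsigned int *sq_ix, unsigned int *channel)
{
	*sq_ix = qid / p->num_channels;
	*channel = qid % p->num_channels;
}

int main(void)
{
	struct params p = { .num_channels = 4 };
	unsigned int ix, ch;

	split_qid(10, &p, &ix, &ch);
	printf("qid 10 -> SQ slot %u on channel %u\n", ix, ch);
	return 0;
}
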
/linux/drivers/net/ethernet/marvell/prestera/
prestera_pci.c
233 return fw->evt_queue[qid].len; in prestera_fw_evtq_len()
245 u8 qid, u32 idx) in prestera_fw_evtq_rd_set() argument
254 return fw->evt_queue[qid].addr; in prestera_fw_evtq_buf()
287 int qid; in prestera_fw_evtq_pick() local
289 for (qid = 0; qid < fw->evt_qnum; qid++) { in prestera_fw_evtq_pick()
291 return qid; in prestera_fw_evtq_pick()
310 u8 qid; in prestera_fw_evt_work_fn() local
362 return fw->cmd_queue[qid].len; in prestera_fw_cmdq_len()
444 u8 qid; in prestera_fw_init() local
467 for (qid = 0; qid < fw->cmd_qnum; qid++) { in prestera_fw_init()
[all …]
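
prestera_fw_evtq_pick() above appears to scan the firmware event queues in order and return the first one with pending entries. A hedged model of that loop; the evtq struct is an assumption (the real driver reads queue state from a firmware register block):

struct evtq { unsigned int len; };

static int evtq_pick(const struct evtq *queues, int qnum)
{
	int qid;

	for (qid = 0; qid < qnum; qid++)
		if (queues[qid].len)	/* pending events on this queue */
			return qid;
	return -1;			/* nothing pending */
}
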
/linux/drivers/infiniband/hw/cxgb4/
resource.c
111 u32 qid; in c4iw_get_cqid() local
119 qid = entry->qid; in c4iw_get_cqid()
143 entry->qid = qid; in c4iw_get_cqid()
157 if (rdev->stats.qid.cur > rdev->stats.qid.max) in c4iw_get_cqid()
158 rdev->stats.qid.max = rdev->stats.qid.cur; in c4iw_get_cqid()
172 entry->qid = qid; in c4iw_put_cqid()
189 qid = entry->qid; in c4iw_get_qpid()
217 entry->qid = qid; in c4iw_get_qpid()
231 if (rdev->stats.qid.cur > rdev->stats.qid.max) in c4iw_get_qpid()
232 rdev->stats.qid.max = rdev->stats.qid.cur; in c4iw_get_qpid()
[all …]
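
Lines 157-158 and 231-232 show cxgb4 keeping a high watermark on qid usage: the current count is bumped on every allocation and the maximum is updated whenever it is exceeded. A standalone sketch of that bookkeeping (the struct name is illustrative):

struct qid_stats { unsigned long cur, max; };

static void qid_stats_alloc(struct qid_stats *s)
{
	if (++s->cur > s->max)
		s->max = s->cur;	/* record high watermark */
}

static void qid_stats_free(struct qid_stats *s)
{
	s->cur--;
}
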
/linux/arch/s390/include/asm/
ap.h
92 : [qid] "d" (qid) in ap_tapq()
112 qid |= 1UL << 23; /* set T bit*/ in ap_test_queue()
113 return ap_tapq(qid, info); in ap_test_queue()
122 static inline struct ap_queue_status ap_rapq(ap_qid_t qid) in ap_rapq() argument
124 unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */ in ap_rapq()
143 static inline struct ap_queue_status ap_zapq(ap_qid_t qid) in ap_zapq() argument
145 unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */ in ap_zapq()
231 static inline struct ap_queue_status ap_aqic(ap_qid_t qid, in ap_aqic() argument
235 unsigned long reg0 = qid | (3UL << 24); /* fc 3UL is AQIC */ in ap_aqic()
321 static inline struct ap_queue_status ap_nqap(ap_qid_t qid, in ap_nqap() argument
[all …]
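
The ap.h excerpt shows the PQAP function code being folded into general register 0 together with the queue id (fc 1 = RAPQ, fc 2 = ZAPQ, fc 3 = AQIC, and bit 23 as the TAPQ T bit). A userspace illustration of that encoding only, not the real inline assembly:

#include <stdio.h>

typedef unsigned long ap_qid_t;

/* Fold an AP function code into reg0 alongside the qid, mirroring
 * "reg0 = qid | (fc << 24)" from the hits above. */
static unsigned long pqap_reg0(ap_qid_t qid, unsigned long fc)
{
	return qid | (fc << 24);
}

int main(void)
{
	ap_qid_t qid = 0x1234;

	printf("RAPQ reg0   = 0x%lx\n", pqap_reg0(qid, 1UL));
	printf("TAPQ+T reg0 = 0x%lx\n", qid | (1UL << 23));
	return 0;
}
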
/linux/drivers/scsi/lpfc/
lpfc_debugfs.h
583 if (phba->sli4_hba.els_wq->queue_id == qid) { in lpfc_debug_dump_wq_by_id()
584 pr_err("ELS WQ[Qid:%d]\n", qid); in lpfc_debug_dump_wq_by_id()
590 pr_err("NVME LS WQ[Qid:%d]\n", qid); in lpfc_debug_dump_wq_by_id()
606 if (phba->sli4_hba.mbx_wq->queue_id == qid) { in lpfc_debug_dump_mq_by_id()
607 printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid); in lpfc_debug_dump_mq_by_id()
623 if (phba->sli4_hba.hdr_rq->queue_id == qid) { in lpfc_debug_dump_rq_by_id()
624 printk(KERN_ERR "HDR RQ[Qid:%d]\n", qid); in lpfc_debug_dump_rq_by_id()
629 printk(KERN_ERR "DAT RQ[Qid:%d]\n", qid); in lpfc_debug_dump_rq_by_id()
658 pr_err("ELS CQ[Qid:%d]\n", qid); in lpfc_debug_dump_cq_by_id()
664 pr_err("NVME LS CQ[Qid:%d]\n", qid); in lpfc_debug_dump_cq_by_id()
[all …]
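
The lpfc debug helpers above all follow the same pattern: compare each known queue's queue_id against the requested qid and print a tagged header on a match. A compressed sketch; the queue table is hypothetical:

#include <stdio.h>

struct queue { int queue_id; const char *tag; };

static void dump_q_by_id(const struct queue *qs, int n, int qid)
{
	for (int i = 0; i < n; i++)
		if (qs[i].queue_id == qid)
			printf("%s[Qid:%d]\n", qs[i].tag, qid);
}
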
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_process_queue_manager.c
46 unsigned int *qid) in find_available_queue_slot() argument
62 *qid = found; in find_available_queue_slot()
87 pqn = get_queue_by_qid(pqm, qid); in pqm_set_gws()
178 q_properties->queue_id = qid; in init_user_queue()
196 unsigned int *qid, in pqm_create_queue() argument
352 pqn = get_queue_by_qid(pqm, qid); in pqm_destroy_queue()
418 pqn = get_queue_by_qid(pqm, qid); in pqm_update_queue_properties()
459 unsigned int qid) in pqm_get_kernel_queue() argument
471 unsigned int qid) in pqm_get_user_queue() argument
480 unsigned int qid, in pqm_get_wave_state() argument
[all …]
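
find_available_queue_slot() above hands the first free queue id back through *qid. A plausible bitmap-based model; sizes and return conventions are assumptions, and the kernel uses its own bitmap helpers rather than this open-coded scan:

#include <limits.h>

#define MAX_QUEUES 128
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static int find_available_queue_slot(unsigned long *bitmap, unsigned int *qid)
{
	for (unsigned int i = 0; i < MAX_QUEUES; i++) {
		unsigned int word = i / BITS_PER_LONG;
		unsigned long bit = 1UL << (i % BITS_PER_LONG);

		if (!(bitmap[word] & bit)) {
			bitmap[word] |= bit;	/* claim the slot */
			*qid = i;
			return 0;
		}
	}
	return -1;	/* no free slot; the kernel would return an errno */
}
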
/linux/drivers/s390/crypto/
ap_queue.c
75 qid |= 0x400000UL; in __ap_send()
182 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_recv()
232 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_read()
247 ap_qid_t qid = aq->qid; in ap_sm_write() local
258 qid = 0xFF00; in ap_sm_write()
299 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_write()
337 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_reset()
380 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_reset_wait()
421 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in ap_sm_setirq_wait()
578 __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid)); in reset_store()
[all …]
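
Nearly every debug line in ap_queue.c splits the qid into a card index and a queue (domain) index via AP_QID_CARD()/AP_QID_QUEUE(). A self-contained sketch assuming one plausible packing (card in bits 8-15, domain in bits 0-7); the real field widths may differ:

#include <stdio.h>

#define AP_MKQID(card, dom)	((((card) & 0xff) << 8) | ((dom) & 0xff))
#define AP_QID_CARD(qid)	(((qid) >> 8) & 0xff)
#define AP_QID_QUEUE(qid)	((qid) & 0xff)

int main(void)
{
	unsigned int qid = AP_MKQID(0x12, 0x34);

	printf("card=%02x queue=%02x\n", AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return 0;
}
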
zcrypt_api.c
721 qid = pref_zq->queue->qid; in zcrypt_rsa_modexpo()
735 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); in zcrypt_rsa_modexpo()
829 qid = pref_zq->queue->qid; in zcrypt_rsa_crt()
843 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); in zcrypt_rsa_crt()
949 qid = pref_zq->queue->qid; in _zcrypt_send_cprb()
974 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); in _zcrypt_send_cprb()
1120 qid = pref_zq->queue->qid; in _zcrypt_send_ep11_cprb()
1196 qid = pref_zq->queue->qid; in zcrypt_rng()
1230 stat->qid = zq->queue->qid; in zcrypt_device_status_mask()
1255 stat->qid = zq->queue->qid; in zcrypt_device_status_mask_ext()
[all …]
zcrypt_msgtype6.c
654 AP_QID_QUEUE(zq->queue->qid), in convert_type86_ica()
660 AP_QID_CARD(zq->queue->qid), in convert_type86_ica()
661 AP_QID_QUEUE(zq->queue->qid), in convert_type86_ica()
806 AP_QID_CARD(zq->queue->qid), in convert_response_ica()
807 AP_QID_QUEUE(zq->queue->qid), in convert_response_ica()
841 AP_QID_CARD(zq->queue->qid), in convert_response_xcrb()
842 AP_QID_QUEUE(zq->queue->qid), in convert_response_xcrb()
871 AP_QID_CARD(zq->queue->qid), in convert_response_ep11_xcrb()
902 AP_QID_CARD(zq->queue->qid), in convert_response_rng()
1241 AP_QID_QUEUE(zq->queue->qid); in zcrypt_msgtype6_send_ep11_cprb()
[all …]
ap_bus.c
881 to_ap_queue(dev)->qid); in ap_device_probe()
933 if (aq->qid == qid) { in ap_get_qdev()
1445 __func__, AP_QID_CARD(qid), in ap_get_compatible_type()
1446 AP_QID_QUEUE(qid), rawtype); in ap_get_compatible_type()
1471 __func__, AP_QID_CARD(qid), in ap_get_compatible_type()
1475 __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid), in ap_get_compatible_type()
1528 ap_qid_t qid; in ap_scan_domains() local
1541 qid = AP_MKQID(ac->id, dom); in ap_scan_domains()
1543 (void *)(long) qid, in ap_scan_domains()
1650 ap_qid_t qid; in ap_scan_adapter() local
[all …]
/linux/fs/9p/
cache.c
86 if (buflen != sizeof(v9inode->qid.version)) in v9fs_cache_inode_check_aux()
89 if (memcmp(buffer, &v9inode->qid.version, in v9fs_cache_inode_check_aux()
90 sizeof(v9inode->qid.version))) in v9fs_cache_inode_check_aux()
117 &v9inode->qid.path, in v9fs_cache_inode_get_cookie()
118 sizeof(v9inode->qid.path), in v9fs_cache_inode_get_cookie()
119 &v9inode->qid.version, in v9fs_cache_inode_get_cookie()
120 sizeof(v9inode->qid.version), in v9fs_cache_inode_get_cookie()
190 &v9inode->qid.path, in v9fs_cache_inode_reset_cookie()
191 sizeof(v9inode->qid.path), in v9fs_cache_inode_reset_cookie()
192 &v9inode->qid.version, in v9fs_cache_inode_reset_cookie()
[all …]
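
The 9p cache hits use qid.path as the cache key and qid.version as the auxiliary coherency datum: a stored version that no longer matches the inode's marks the cached data stale. A minimal model of that check, with types pared down from the real ones:

#include <stdint.h>
#include <string.h>

struct p9_qid { uint8_t type; uint32_t version; uint64_t path; };

/* Returns nonzero when the stored aux data still matches qid->version. */
static int cache_aux_valid(const struct p9_qid *qid,
			   const void *buf, size_t buflen)
{
	if (buflen != sizeof(qid->version))
		return 0;
	return memcmp(buf, &qid->version, sizeof(qid->version)) == 0;
}
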
/linux/drivers/nvme/host/
trace.h
26 #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ argument
29 ((qid) ? \
53 __field(int, qid)
64 __entry->qid = nvme_req_qid(req);
77 __entry->qid, __entry->cid, __entry->nsid,
91 __field(int, qid)
100 __entry->qid = nvme_req_qid(req);
110 __entry->qid, __entry->cid, __entry->result,
150 __field(int, qid)
157 __entry->qid = nvme_req_qid(req);
[all …]
/linux/drivers/nvme/target/
fabrics-cmd.c
109 u16 qid = le16_to_cpu(c->qid); in nvmet_install_queue() local
123 if (ctrl->sqs[qid] != NULL) { in nvmet_install_queue()
124 pr_warn("qid %u has already been created\n", qid); in nvmet_install_queue()
145 nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); in nvmet_install_queue()
146 nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); in nvmet_install_queue()
157 qid, ctrl->cntlid, ret); in nvmet_install_queue()
158 ctrl->sqs[qid] = NULL; in nvmet_install_queue()
241 u16 qid = le16_to_cpu(c->qid); in nvmet_execute_io_connect() local
274 if (unlikely(qid > ctrl->subsys->max_qid)) { in nvmet_execute_io_connect()
275 pr_warn("invalid queue id (%d)\n", qid); in nvmet_execute_io_connect()
[all …]
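
nvmet_install_queue() and nvmet_execute_io_connect() above validate the qid twice: it must not exceed the subsystem's max_qid, and its slot in ctrl->sqs[] must still be empty. A pared-down model of those two checks:

#include <stddef.h>

struct ctrl { void **sqs; unsigned int max_qid; };

static int install_queue_checks(const struct ctrl *c, unsigned int qid)
{
	if (qid > c->max_qid)
		return -1;	/* "invalid queue id" */
	if (c->sqs[qid] != NULL)
		return -2;	/* "qid ... has already been created" */
	return 0;
}
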
trace.h
28 #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ argument
31 (qid ? \
68 __field(int, qid)
81 __entry->qid = req->sq->qid;
95 __entry->qid, __entry->cid, __entry->nsid,
97 show_opcode_name(__entry->qid, __entry->opcode,
99 parse_nvme_cmd(__entry->qid, __entry->opcode,
109 __field(int, qid)
116 __entry->qid = req->cq->qid;
125 __entry->qid, __entry->cid, __entry->result, __entry->status)
/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
15 int qid = ring->ring_idx; in ixgbe_xsk_pool() local
25 u16 qid) in ixgbe_xsk_pool_enable() argument
31 if (qid >= adapter->num_rx_queues) in ixgbe_xsk_pool_enable()
34 if (qid >= netdev->real_num_rx_queues || in ixgbe_xsk_pool_enable()
35 qid >= netdev->real_num_tx_queues) in ixgbe_xsk_pool_enable()
46 ixgbe_txrx_ring_disable(adapter, qid); in ixgbe_xsk_pool_enable()
48 set_bit(qid, adapter->af_xdp_zc_qps); in ixgbe_xsk_pool_enable()
51 ixgbe_txrx_ring_enable(adapter, qid); in ixgbe_xsk_pool_enable()
84 ixgbe_txrx_ring_enable(adapter, qid); in ixgbe_xsk_pool_disable()
91 u16 qid) in ixgbe_xsk_pool_setup() argument
[all …]
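
ixgbe_xsk_pool_enable() rejects a qid that falls outside either the adapter's ring count or the netdev's advertised real queue counts before attaching a zero-copy pool. A simplified stand-in for that validation, with field names trimmed from the real structs:

struct xsk_limits {
	unsigned int num_rx_queues;
	unsigned int real_num_rx_queues;
	unsigned int real_num_tx_queues;
};

static int xsk_pool_qid_ok(const struct xsk_limits *l, unsigned int qid)
{
	if (qid >= l->num_rx_queues)
		return 0;
	if (qid >= l->real_num_rx_queues || qid >= l->real_num_tx_queues)
		return 0;
	return 1;
}
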
/linux/include/linux/
quota.h
79 extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
80 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
81 extern bool qid_valid(struct kqid qid);
98 enum quota_type type, qid_t qid) in make_kqid() argument
105 kqid.uid = make_kuid(from, qid); in make_kqid()
108 kqid.gid = make_kgid(from, qid); in make_kqid()
111 kqid.projid = make_kprojid(from, qid); in make_kqid()
189 return from_kqid(ns, qid) != (qid_t) -1; in qid_has_mapping()
337 int (*get_next_id) (struct super_block *sb, struct kqid *qid);
509 extern void quota_send_warning(struct kqid qid, dev_t dev,
[all …]
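
make_kqid() in the excerpt dispatches on the quota type to turn a raw qid_t into a namespace-aware kuid, kgid, or kprojid. A compressed model of that dispatch with the namespace mapping elided; the real code calls make_kuid()/make_kgid()/make_kprojid():

typedef unsigned int qid_t;
enum quota_type { USRQUOTA, GRPQUOTA, PRJQUOTA };

struct kqid {
	enum quota_type type;
	union { qid_t uid, gid, projid; };
};

static struct kqid make_kqid_model(enum quota_type type, qid_t qid)
{
	struct kqid kqid = { .type = type };

	switch (type) {
	case USRQUOTA: kqid.uid = qid; break;
	case GRPQUOTA: kqid.gid = qid; break;
	case PRJQUOTA: kqid.projid = qid; break;
	}
	return kqid;
}
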
/linux/drivers/vdpa/virtio_pci/
vp_vdpa.c
269 !vp_modern_get_queue_enable(mdev, qid)) { in vp_vdpa_set_vq_state()
285 vp_vdpa->vring[qid].cb = *cb; in vp_vdpa_set_vq_cb()
289 u16 qid, bool ready) in vp_vdpa_set_vq_ready() argument
293 vp_modern_set_queue_enable(mdev, qid, ready); in vp_vdpa_set_vq_ready()
300 return vp_modern_get_queue_enable(mdev, qid); in vp_vdpa_get_vq_ready()
308 vp_modern_set_queue_size(mdev, qid, num); in vp_vdpa_set_vq_num()
317 vp_modern_queue_address(mdev, qid, desc_area, in vp_vdpa_set_vq_address()
323 static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid) in vp_vdpa_kick_vq() argument
327 vp_iowrite16(qid, vp_vdpa->vring[qid].notify); in vp_vdpa_kick_vq()
405 vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid) in vp_vdpa_get_vq_notification() argument
[all …]
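
vp_vdpa_kick_vq() notifies the device by writing the queue id into that queue's own notify register (line 327). A model with a plain volatile pointer standing in for the iomapped register:

#include <stdint.h>

struct vring_info { volatile uint16_t *notify; };

static void kick_vq(struct vring_info *vring, uint16_t qid)
{
	*vring[qid].notify = qid;	/* vp_iowrite16(qid, ...) in the driver */
}
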
/linux/fs/xfs/
xfs_quotaops.c
216 struct kqid qid, in xfs_fs_get_dqblk() argument
225 id = from_kqid(&init_user_ns, qid); in xfs_fs_get_dqblk()
226 return xfs_qm_scall_getquota(mp, id, xfs_quota_type(qid.type), qdq); in xfs_fs_get_dqblk()
233 struct kqid *qid, in xfs_fs_get_nextdqblk() argument
243 id = from_kqid(&init_user_ns, *qid); in xfs_fs_get_nextdqblk()
244 ret = xfs_qm_scall_getquota_next(mp, &id, xfs_quota_type(qid->type), in xfs_fs_get_nextdqblk()
250 *qid = make_kqid(current_user_ns(), qid->type, id); in xfs_fs_get_nextdqblk()
257 struct kqid qid, in xfs_fs_set_dqblk() argument
267 return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), in xfs_fs_set_dqblk()
268 xfs_quota_type(qid.type), qdq); in xfs_fs_set_dqblk()
/linux/drivers/vdpa/ifcvf/
ifcvf_main.c
294 vf->vring[qid].cb = *cb; in ifcvf_vdpa_set_vq_cb()
298 u16 qid, bool ready) in ifcvf_vdpa_set_vq_ready() argument
302 vf->vring[qid].ready = ready; in ifcvf_vdpa_set_vq_ready()
309 return vf->vring[qid].ready; in ifcvf_vdpa_get_vq_ready()
317 vf->vring[qid].size = num; in ifcvf_vdpa_set_vq_num()
326 vf->vring[qid].desc = desc_area; in ifcvf_vdpa_set_vq_address()
327 vf->vring[qid].avail = driver_area; in ifcvf_vdpa_set_vq_address()
328 vf->vring[qid].used = device_area; in ifcvf_vdpa_set_vq_address()
337 ifcvf_notify_queue(vf, qid); in ifcvf_vdpa_kick_vq()
419 u16 qid) in ifcvf_vdpa_get_vq_irq() argument
[all …]
/linux/drivers/vdpa/alibaba/
eni_vdpa.c
266 if (!vp_legacy_get_queue_enable(ldev, qid) in eni_vdpa_set_vq_state()
274 static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid, in eni_vdpa_set_vq_cb() argument
279 eni_vdpa->vring[qid].cb = *cb; in eni_vdpa_set_vq_cb()
292 vp_legacy_set_queue_address(ldev, qid, 0); in eni_vdpa_set_vq_ready()
299 return vp_legacy_get_queue_enable(ldev, qid); in eni_vdpa_get_vq_ready()
302 static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid, in eni_vdpa_set_vq_num() argument
307 u16 n = vp_legacy_get_queue_size(ldev, qid); in eni_vdpa_set_vq_num()
316 qid, n, num); in eni_vdpa_set_vq_num()
326 vp_legacy_set_queue_address(ldev, qid, pfn); in eni_vdpa_set_vq_address()
331 static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid) in eni_vdpa_kick_vq() argument
[all …]
/linux/drivers/net/wireless/mediatek/mt76/
sdio_txrx.c
81 mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, in mt76s_rx_run_queue() argument
84 struct mt76_queue *q = &dev->q_rx[qid]; in mt76s_rx_run_queue()
90 for (i = 0; i < intr->rx.num[qid]; i++) in mt76s_rx_run_queue()
91 len += round_up(intr->rx.len[qid][i] + 4, 4); in mt76s_rx_run_queue()
105 err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len); in mt76s_rx_run_queue()
112 for (i = 0; i < intr->rx.num[qid]; i++) { in mt76s_rx_run_queue()
231 qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid; in mt76s_tx_run_queue()
259 memcpy(sdio->xmit_buf[qid] + len, e->skb->data, in mt76s_tx_run_queue()
265 memcpy(sdio->xmit_buf[qid] + len, iter->data, in mt76s_tx_run_queue()
272 memset(sdio->xmit_buf[qid] + len, 0, pad); in mt76s_tx_run_queue()
[all …]
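
mt76s_rx_run_queue() sizes one sdio_readsb() transfer by summing each pending buffer's length plus four status bytes, padded to a 4-byte boundary (lines 90-91). The arithmetic in isolation:

static unsigned int round_up4(unsigned int x)
{
	return (x + 3) & ~3u;
}

static unsigned int rx_total_len(const unsigned int *lens, int num)
{
	unsigned int len = 0;

	for (int i = 0; i < num; i++)
		len += round_up4(lens[i] + 4);
	return len;
}
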
tx.c
281 struct mt76_queue *q = phy->q_tx[qid]; in __mt76_tx_queue_skb()
313 int qid = skb_get_queue_mapping(skb); in mt76_tx() local
321 if (WARN_ON(qid >= MT_TXQ_PSD)) { in mt76_tx()
322 qid = MT_TXQ_BE; in mt76_tx()
323 skb_set_queue_mapping(skb, qid); in mt76_tx()
330 qid = MT_TXQ_PSD; in mt76_tx()
331 skb_set_queue_mapping(skb, qid); in mt76_tx()
341 q = phy->q_tx[qid]; in mt76_tx()
501 struct mt76_queue *q = phy->q_tx[qid]; in mt76_txq_schedule_list()
519 txq = ieee80211_next_txq(phy->hw, qid); in mt76_txq_schedule_list()
[all …]
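
mt76_tx() takes the qid straight from skb_get_queue_mapping() but refuses to transmit normal traffic on the PSD queue or beyond, falling back to best effort (lines 321-323). A sketch with illustrative enum values; the driver's actual ordering may differ:

enum mt76_txq_id { MT_TXQ_VO, MT_TXQ_VI, MT_TXQ_BE, MT_TXQ_BK, MT_TXQ_PSD };

static int mt76_sanitize_txq(int qid)
{
	if (qid >= MT_TXQ_PSD)
		return MT_TXQ_BE;	/* driver WARNs, then falls back */
	return qid;
}
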
/linux/net/9p/
client.c
896 memset(&fid->qid, 0, sizeof(fid->qid)); in p9_fid_create()
1147 qid.type, qid.path, qid.version); in p9_client_attach()
1149 memmove(&fid->qid, &qid, sizeof(struct p9_qid)); in p9_client_attach()
1270 qid.path, qid.version, iounit); in p9_client_open()
1314 qid->type, qid->path, qid->version, iounit); in p9_client_create_dotl()
1358 qid.type, qid.path, qid.version, iounit); in p9_client_fcreate()
1396 qid->type, qid->path, qid->version); in p9_client_symlink()
1786 ret->qid.type, ret->qid.path, ret->qid.version, in p9_client_getattr_dotl()
1853 wst->qid.path, wst->qid.version, in p9_client_wstat()
2164 qid->type, qid->path, qid->version); in p9_client_mknod_dotl()
[all …]
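
Almost every 9p client hit prints or copies the qid triple (type, path, version) the server returned for a fid. A minimal struct plus a debug-print helper mirroring those debug lines:

#include <stdio.h>
#include <stdint.h>

struct p9_qid { uint8_t type; uint32_t version; uint64_t path; };

static void p9_print_qid(const char *tag, const struct p9_qid *qid)
{
	printf("%s qid: type %u path %llu version %u\n", tag,
	       (unsigned int)qid->type, (unsigned long long)qid->path,
	       (unsigned int)qid->version);
}
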
/linux/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
43 u16 qid) in i40e_xsk_pool_enable() argument
52 if (qid >= vsi->num_queue_pairs) in i40e_xsk_pool_enable()
55 if (qid >= netdev->real_num_rx_queues || in i40e_xsk_pool_enable()
56 qid >= netdev->real_num_tx_queues) in i40e_xsk_pool_enable()
63 set_bit(qid, vsi->af_xdp_zc_qps); in i40e_xsk_pool_enable()
68 err = i40e_queue_pair_disable(vsi, qid); in i40e_xsk_pool_enable()
72 err = i40e_queue_pair_enable(vsi, qid); in i40e_xsk_pool_enable()
112 clear_bit(qid, vsi->af_xdp_zc_qps); in i40e_xsk_pool_disable()
116 err = i40e_queue_pair_enable(vsi, qid); in i40e_xsk_pool_disable()
136 u16 qid) in i40e_xsk_pool_setup() argument
[all …]
/linux/fs/quota/
kqid.c
120 bool qid_valid(struct kqid qid) in qid_valid() argument
122 switch (qid.type) { in qid_valid()
124 return uid_valid(qid.uid); in qid_valid()
126 return gid_valid(qid.gid); in qid_valid()
128 return projid_valid(qid.projid); in qid_valid()

