| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| A D | en_tx.c |
    52 mlx5e_dma_get(sq, --sq->dma_fifo_pc); in mlx5e_dma_unmap_wqe_err()
    348 if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) { in mlx5e_tx_check_stop()
    363 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5e_tx_flush()
    370 wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); in mlx5e_tx_flush()
    371 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); in mlx5e_tx_flush()
    423 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); in mlx5e_txwqe_complete()
    582 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5e_tx_mpwqe_session_complete()
    634 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); in mlx5e_sq_xmit_mpwqe()
    639 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); in mlx5e_sq_xmit_mpwqe()
    782 mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && in mlx5e_txqsq_wake()
    [all …]
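The en_tx.c matches above all touch the same cyclic send-queue bookkeeping: a producer counter (sq->pc) advanced as WQEs are posted, a consumer counter (sq->cc) advanced on completion, a room check before posting, and a doorbell via mlx5e_notify_hw() afterwards. As a rough, hypothetical sketch of that occupancy check only, not the driver's actual mlx5e_wqc_has_room_for() implementation:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical sketch of a cyclic work-queue room check. The counters are
     * free-running and wrap naturally in 16 bits; occupancy is their difference.
     * "needed" models a reserved stop_room for the largest WQE plus a NOP.
     */
    static bool sq_has_room(uint16_t cc, uint16_t pc, uint16_t wq_size, uint16_t needed)
    {
            uint16_t in_use = (uint16_t)(pc - cc); /* posted but not yet completed */

            return (uint16_t)(wq_size - in_use) >= needed; /* enough free slots? */
    }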
|
| A D | wc.c |
    201 sq->sqn, err); in create_wc_sq()
    230 err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl); in mlx5_wc_create_sq()
    254 mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn); in mlx5_wc_destroy_sq()
    265 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5_wc_post_nop()
    283 sq->pc++; in mlx5_wc_post_nop()
    284 sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc); in mlx5_wc_post_nop()
    291 __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe, in mlx5_wc_post_nop()
    328 sq->cc++; in mlx5_wc_poll_cq()
    342 sq = kzalloc(sizeof(*sq), GFP_KERNEL); in mlx5_core_test_wc()
    343 if (!sq) in mlx5_core_test_wc()
    [all …]
|
| A D | en_txrx.c |
    58 net_dim(sq->dim, dim_sample); in mlx5e_handle_tx_dim()
    75 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_trigger_irq()
    77 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); in mlx5e_trigger_irq()
    84 nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_trigger_irq()
    85 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); in mlx5e_trigger_irq()
    159 if (sq) in mlx5e_napi_poll()
    220 mlx5e_handle_tx_dim(&c->sq[i]); in mlx5e_napi_poll()
    221 mlx5e_cq_arm(&c->sq[i].cq); in mlx5e_napi_poll()
    227 if (sq) { in mlx5e_napi_poll()
    228 mlx5e_handle_tx_dim(sq); in mlx5e_napi_poll()
    [all …]
|
| A D | en_dim.c |
    58 struct mlx5e_txqsq *sq = dim->priv; in mlx5e_tx_dim_work() local
    62 mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq); in mlx5e_tx_dim_work()
    124 int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable) in mlx5e_dim_tx_change() argument
    126 if (enable == !!sq->dim) in mlx5e_dim_tx_change()
    130 struct mlx5e_channel *c = sq->channel; in mlx5e_dim_tx_change()
    134 c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq); in mlx5e_dim_tx_change()
    138 sq->dim = dim; in mlx5e_dim_tx_change()
    140 __set_bit(MLX5E_SQ_STATE_DIM, &sq->state); in mlx5e_dim_tx_change()
    142 __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state); in mlx5e_dim_tx_change()
    144 mlx5e_dim_disable(sq->dim); in mlx5e_dim_tx_change()
    [all …]
|
| /linux/drivers/nvme/target/ |
| A D | fabrics-cmd-auth.c |
    21 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid); in nvmet_auth_expired_work()
    23 sq->dhchap_tid = -1; in nvmet_auth_expired_work()
    30 sq->authenticated = false; in nvmet_auth_sq_init()
    165 req->sq->dhchap_c2); in nvmet_auth_reply()
    243 if (!req->sq->qid) { in nvmet_execute_auth_send()
    281 req->sq->dhchap_step = in nvmet_execute_auth_send()
    331 req->sq->dhchap_status, req->sq->dhchap_step); in nvmet_execute_auth_send()
    388 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, in nvmet_auth_challenge()
    475 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step); in nvmet_execute_auth_receive()
    503 ctrl->cntlid, req->sq->qid, req->sq->dhchap_status); in nvmet_execute_auth_receive()
    [all …]
|
| A D | auth.c |
    236 kfree(sq->dhchap_c1); in nvmet_auth_sq_free()
    237 sq->dhchap_c1 = NULL; in nvmet_auth_sq_free()
    238 kfree(sq->dhchap_c2); in nvmet_auth_sq_free()
    239 sq->dhchap_c2 = NULL; in nvmet_auth_sq_free()
    240 kfree(sq->dhchap_skey); in nvmet_auth_sq_free()
    241 sq->dhchap_skey = NULL; in nvmet_auth_sq_free()
    334 ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, in nvmet_auth_host_hash()
    335 req->sq->dhchap_tid); in nvmet_auth_host_hash()
    529 req->sq->dhchap_skey = kzalloc(req->sq->dhchap_skey_len, GFP_KERNEL); in nvmet_auth_ctrl_sesskey()
    530 if (!req->sq->dhchap_skey) in nvmet_auth_ctrl_sesskey()
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/ |
| A D | xdp.c |
    368 mlx5e_post_nop(wq, sq->sqn, &sq->pc); in mlx5e_xdpsq_get_next_pi()
    385 pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs); in mlx5e_xdp_mpwqe_session_start()
    431 if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, in mlx5e_xmit_xdp_frame_check_mpwqe()
    513 if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) { in mlx5e_xmit_xdp_frame_check_stop_room()
    648 sq->pc++; in mlx5e_xmit_xdp_frame()
    771 sqcc = sq->cc; in mlx5e_poll_xdpsq_cq()
    797 mlx5e_dump_error_cqe(&sq->cq, sq->sqn, in mlx5e_poll_xdpsq_cq()
    815 sq->cc = sqcc; in mlx5e_poll_xdpsq_cq()
    828 while (sq->cc != sq->pc) { in mlx5e_free_xdpsq_descs()
    832 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); in mlx5e_free_xdpsq_descs()
    [all …]
|
| A D | reporter_tx.c |
    30 if (sq->cc == sq->pc) in mlx5e_wait_for_sq_flush()
    38 sq->sqn, sq->cc, sq->pc); in mlx5e_wait_for_sq_flush()
    45 WARN_ONCE(sq->cc != sq->pc, in mlx5e_reset_txqsq_cc_pc()
    47 sq->sqn, sq->cc, sq->pc); in mlx5e_reset_txqsq_cc_pc()
    48 sq->cc = 0; in mlx5e_reset_txqsq_cc_pc()
    76 sq = ctx; in mlx5e_tx_reporter_err_cqe_recover()
    140 sq = to_ctx->sq; in mlx5e_tx_reporter_timeout_recover()
    343 struct mlx5e_txqsq *sq = &c->sq[tc]; in mlx5e_tx_reporter_diagnose() local
    436 struct mlx5e_txqsq *sq = &c->sq[tc]; in mlx5e_tx_reporter_dump_all_sqs() local
    493 to_ctx.sq = sq; in mlx5e_reporter_tx_timeout()
    [all …]
|
| A D | xdp.h |
    108 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
    110 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
    111 void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
    158 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) in mlx5e_xmit_xdp_doorbell() argument
    160 if (sq->doorbell_cseg) { in mlx5e_xmit_xdp_doorbell()
    161 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); in mlx5e_xmit_xdp_doorbell()
    162 sq->doorbell_cseg = NULL; in mlx5e_xmit_xdp_doorbell()
    171 u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc; in mlx5e_xdp_get_inline_state()
    200 mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, in mlx5e_xdp_mpwqe_add_dseg() argument
    204 struct mlx5e_tx_mpwqe *session = &sq->mpwqe; in mlx5e_xdp_mpwqe_add_dseg()
    [all …]
|
| A D | qos.c |
    79 struct mlx5e_txqsq *sq; in mlx5e_open_qos_sq() local
    117 sq = kzalloc(sizeof(*sq), GFP_KERNEL); in mlx5e_open_qos_sq()
    119 if (!sq) in mlx5e_open_qos_sq()
    146 kfree(sq); in mlx5e_open_qos_sq()
    160 struct mlx5e_txqsq *sq; in mlx5e_activate_qos_sq() local
    172 priv->txq2sq[qid] = sq; in mlx5e_activate_qos_sq()
    189 struct mlx5e_txqsq *sq; in mlx5e_deactivate_qos_sq() local
    216 struct mlx5e_txqsq *sq; in mlx5e_close_qos_sq() local
    231 mlx5e_close_txqsq(sq); in mlx5e_close_qos_sq()
    233 kfree(sq); in mlx5e_close_qos_sq()
    [all …]
|
| /linux/tools/include/io_uring/ |
| A D | mini_liburing.h |
    94 munmap(sq->khead, sq->ring_sz); in io_uring_mmap()
    154 struct io_uring_sq *sq = &ring->sq; in io_uring_get_sqe() local
    156 if (sq->sqe_tail + 1 - sq->sqe_head > *sq->kring_entries) in io_uring_get_sqe()
    158 return &sq->sqes[sq->sqe_tail++ & *sq->kring_mask]; in io_uring_get_sqe()
    187 struct io_uring_sq *sq = &ring->sq; in io_uring_submit() local
    193 if (*sq->khead != *sq->ktail) { in io_uring_submit()
    197 if (sq->sqe_head == sq->sqe_tail) in io_uring_submit()
    201 to_submit = sq->sqe_tail - sq->sqe_head; in io_uring_submit()
    204 sq->array[ktail++ & mask] = sq->sqe_head++ & mask; in io_uring_submit()
    222 struct io_uring_sq *sq = &ring->sq; in io_uring_queue_exit() local
    [all …]
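The mini_liburing.h hits show the usual split between SQEs staged by the application (sqe_head/sqe_tail) and the kernel-visible submission ring (khead/ktail/kring_mask plus the indirection array). A simplified, hypothetical rendering of that flush step, with the struct reduced to the fields used here and the memory barriers of the real helper omitted:

    /* Simplified model of the SQ ring state; hypothetical sketch, not the
     * header's actual definitions.
     */
    struct sketch_sq {
            unsigned sqe_head, sqe_tail;            /* application-side cursors */
            unsigned *khead, *ktail, *kring_mask;   /* kernel-shared ring state */
            unsigned *array;                        /* ring of SQE indices */
    };

    /* Publish locally prepared SQEs to the kernel-visible ring, as hinted at by
     * the io_uring_submit() matches above: each pending SQE's index is written
     * into the ring array and the shared tail is advanced.
     */
    static int sketch_flush_sq(struct sketch_sq *sq)
    {
            unsigned mask = *sq->kring_mask;
            unsigned ktail = *sq->ktail;
            int to_submit = sq->sqe_tail - sq->sqe_head;

            while (sq->sqe_head != sq->sqe_tail)
                    sq->array[ktail++ & mask] = sq->sqe_head++ & mask;

            *sq->ktail = ktail; /* real code pairs this store with a release barrier */
            return to_submit;
    }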
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
| A D | tx.c |
    49 u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); in mlx5e_xsk_tx_post_err()
    56 nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); in mlx5e_xsk_tx_post_err()
    59 mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, in mlx5e_xsk_tx_post_err()
    61 sq->doorbell_cseg = &nopwqe->ctrl; in mlx5e_xsk_tx_post_err()
    78 sq); in mlx5e_xsk_tx()
    108 if (sq->mpwqe.wqe) in mlx5e_xsk_tx()
    109 mlx5e_xdp_mpwqe_complete(sq); in mlx5e_xsk_tx()
    111 mlx5e_xsk_tx_post_err(sq, &xdpi); in mlx5e_xsk_tx()
    130 if (sq->mpwqe.wqe) in mlx5e_xsk_tx()
    131 mlx5e_xdp_mpwqe_complete(sq); in mlx5e_xsk_tx()
    [all …]
|
| /linux/sound/oss/dmasound/ |
| A D | dmasound_core.c |
    480 sq->max_count = sq->numBufs ; in sq_setup()
    481 sq->max_active = sq->numBufs ; in sq_setup()
    482 sq->block_size = sq->bufSize; in sq_setup()
    484 sq->user_frags = sq->numBufs ; in sq_setup()
    485 sq->user_frag_size = sq->bufSize ; in sq_setup()
    506 if ( sq->block_size <= 0 || sq->block_size > sq->bufSize) { in sq_setup()
    510 sq->block_size = sq->bufSize ; in sq_setup()
    513 sq->max_count = sq->user_frags ; in sq_setup()
    515 sq->max_active = (sq->max_active <= sq->max_count) ? in sq_setup()
    522 sq->max_active = sq->numBufs ; in sq_setup()
    [all …]
|
| /linux/drivers/net/ethernet/cavium/thunder/ |
| A D | nicvf_queues.c |
    514 sq->desc = sq->dmem.base; in nicvf_init_snd_queue()
    587 sq->tso_hdrs, sq->tso_hdrs_phys); in nicvf_free_snd_queue()
    593 while (sq->head != sq->tail) { in nicvf_free_snd_queue()
    594 skb = (struct sk_buff *)sq->skbuff[sq->head]; in nicvf_free_snd_queue()
    613 nicvf_unmap_sndq_buffers(nic, sq, sq->head, in nicvf_free_snd_queue()
    620 sq->head &= (sq->dmem.q_len - 1); in nicvf_free_snd_queue()
    866 sq = &qs->sq[qidx]; in nicvf_snd_queue_config()
    885 mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; in nicvf_snd_queue_config()
    1129 sq->tail &= (sq->dmem.q_len - 1); in nicvf_get_sq_desc()
    1150 sq->head &= (sq->dmem.q_len - 1); in nicvf_put_sq_desc()
    [all …]
|
| /linux/drivers/net/ethernet/marvell/octeontx2/nic/ |
| A D | qos_sq.c |
    76 sq = &qset->sq[qidx]; in otx2_qos_sq_aura_pool_init()
    77 sq->sqb_count = 0; in otx2_qos_sq_aura_pool_init()
    78 sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); in otx2_qos_sq_aura_pool_init()
    89 sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; in otx2_qos_sq_aura_pool_init()
    106 sq->sqb_count = 0; in otx2_qos_sq_aura_pool_init()
    124 sq = &qset->sq[qidx]; in otx2_qos_sq_free_sqbs()
    125 if (!sq->sqb_ptrs) in otx2_qos_sq_free_sqbs()
    140 sq = &qset->sq[qidx]; in otx2_qos_sq_free_sqbs()
    143 kfree(sq->sg); in otx2_qos_sq_free_sqbs()
    147 memset((void *)sq, 0, sizeof(*sq)); in otx2_qos_sq_free_sqbs()
    [all …]
|
| A D | otx2_txrx.c |
    466 sq = &pfvf->qset.sq[qidx]; in otx2_tx_napi_handler()
    612 sq->head &= (sq->sqe_cnt - 1); in otx2_sqe_flush()
    625 sq->sg[sq->head].num_segs = 0; in otx2_sqe_add_sg()
    654 sq->sg[sq->head].num_segs++; in otx2_sqe_add_sg()
    657 sq->sg[sq->head].skb = (u64)skb; in otx2_sqe_add_sg()
    1155 free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); in otx2_sq_append_skb()
    1266 sq = &pfvf->qset.sq[qidx]; in otx2_cleanup_tx_cqes()
    1331 sq = &pfvf->qset.sq[sq_idx]; in otx2_free_pending_sqe()
    1369 sq->sg[sq->head].size[0] = len; in otx2_xdp_sqe_add_sg()
    1370 sq->sg[sq->head].num_segs = 1; in otx2_xdp_sqe_add_sg()
    [all …]
|
| /linux/drivers/infiniband/hw/erdma/ |
| A D | erdma_cmdq.c |
    91 struct erdma_cmdq_sq *sq = &cmdq->sq; in erdma_cmdq_sq_init() local
    94 sq->depth = cmdq->max_outstandings * sq->wqebb_cnt; in erdma_cmdq_sq_init()
    98 if (!sq->qbuf) in erdma_cmdq_sq_init()
    102 if (!sq->dbrec) in erdma_cmdq_sq_init()
    118 sq->qbuf, sq->qbuf_dma_addr); in erdma_cmdq_sq_init()
    217 cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); in erdma_cmdq_init()
    240 cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); in erdma_cmdq_destroy()
    270 wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth, in push_cmdq_sqe()
    274 cmdq->sq.pi += cmdq->sq.wqebb_cnt; in push_cmdq_sqe()
    302 sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth, in erdma_poll_single_cmd_completion()
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ |
| A D | ktls_rx.c |
    149 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, in post_static_params()
    160 sq->pc += num_wqebbs; in post_static_params()
    180 mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, in post_progress_params()
    191 sq->pc += num_wqebbs; in post_progress_params()
    205 sq = &c->async_icosq; in post_rx_param_wqes()
    215 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); in post_rx_param_wqes()
    312 sq->pc++; in resync_post_get_progress_params()
    313 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); in resync_post_get_progress_params()
    346 sq = &c->async_icosq; in resync_handle_work()
    370 sq = &c->async_icosq; in resync_handle_seq_match()
    [all …]
|
| /linux/drivers/net/ethernet/intel/ice/ |
| A D | ice_controlq.c |
    77 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
    78 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | in ice_check_sq_alive()
    226 if (!cq->sq.dma_head) in ice_alloc_sq_bufs()
    228 cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; in ice_alloc_sq_bufs()
    249 cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa); in ice_alloc_sq_bufs()
    859 struct ice_ctl_q_ring *sq = &cq->sq; in ice_clean_sq() local
    1056 desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use); in ice_sq_send_cmd()
    1063 dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; in ice_sq_send_cmd()
    1083 if (cq->sq.next_to_use == cq->sq.count) in ice_sq_send_cmd()
    1085 wr32(hw, cq->sq.tail, cq->sq.next_to_use); in ice_sq_send_cmd()
    [all …]
|
| /linux/drivers/soc/qcom/ |
| A D | qmi_interface.c |
    168 struct sockaddr_qrtr sq; in qmi_send_new_lookup() local
    178 sq.sq_family = qmi->sq.sq_family; in qmi_send_new_lookup()
    179 sq.sq_node = qmi->sq.sq_node; in qmi_send_new_lookup()
    182 msg.msg_name = &sq; in qmi_send_new_lookup()
    243 sq.sq_family = qmi->sq.sq_family; in qmi_send_new_server()
    244 sq.sq_node = qmi->sq.sq_node; in qmi_send_new_server()
    247 msg.msg_name = &sq; in qmi_send_new_server()
    462 qmi->sq = sq; in qmi_handle_net_reset()
    528 struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) }; in qmi_data_ready_work()
    558 if (sq.sq_node == qmi->sq.sq_node && in qmi_data_ready_work()
    [all …]
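The qmi_interface.c matches show the common kernel pattern of addressing a QRTR control message: fill a struct sockaddr_qrtr, attach it as msg_name of a msghdr, and send. A minimal hedged sketch of that pattern only, under the assumption that the message is aimed at the name-service control port (QRTR_PORT_CTRL); it is illustrative and not the file's actual helpers:

    #include <linux/net.h>
    #include <linux/qrtr.h>
    #include <linux/socket.h>
    #include <linux/types.h>
    #include <linux/uio.h>

    /* Hypothetical helper: send an already-built control packet to the QRTR
     * name service on a given node via kernel_sendmsg().
     */
    static int sketch_qrtr_send_ctrl(struct socket *sock, u32 node,
                                     void *pkt, size_t len)
    {
            struct sockaddr_qrtr sq = {
                    .sq_family = AF_QIPCRTR,
                    .sq_node   = node,
                    .sq_port   = QRTR_PORT_CTRL,
            };
            struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };
            struct kvec iv = { .iov_base = pkt, .iov_len = len };

            return kernel_sendmsg(sock, &msg, &iv, 1, len);
    }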
|
| /linux/net/qrtr/ |
| A D | ns.c |
    53 struct sockaddr_qrtr sq; member
    327 ret = say_hello(sq); in ctrl_cmd_hello()
    339 struct sockaddr_qrtr sq; in ctrl_cmd_bye() local
    368 sq.sq_node = srv->node; in ctrl_cmd_bye()
    369 sq.sq_port = srv->port; in ctrl_cmd_bye()
    391 struct sockaddr_qrtr sq; in ctrl_cmd_del_client() local
    442 sq.sq_node = srv->node; in ctrl_cmd_del_client()
    541 lookup->sq = *from; in ctrl_cmd_new_lookup()
    627 sq.sq_node, sq.sq_port); in qrtr_ns_worker()
    674 sq.sq_node, sq.sq_port); in qrtr_ns_worker()
    [all …]
|
| /linux/drivers/net/ethernet/huawei/hinic/ |
| A D | hinic_hw_qp.c |
    59 #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) argument
    61 #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) argument
    100 wq = sq->wq; in hinic_sq_prepare_ctxt()
    226 if (!sq->saved_skb) in alloc_sq_skb_arr()
    238 vfree(sq->saved_skb); in free_sq_skb_arr()
    286 sq->hwif = hwif; in hinic_init_sq()
    288 sq->wq = wq; in hinic_init_sq()
    290 sq->irq = entry->vector; in hinic_init_sq()
    307 free_sq_skb_arr(sq); in hinic_clean_sq()
    617 struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); in sq_prepare_db()
    [all …]
|
| A D | hinic_tx.c |
    503 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_lb_xmit_frame()
    564 qp = container_of(txq->sq, struct hinic_qp, sq); in hinic_xmit_frame()
    668 struct hinic_sq *sq = txq->sq; in free_all_tx_skbs() local
    700 struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); in free_tx_poll()
    703 struct hinic_sq *sq = txq->sq; in free_tx_poll() local
    745 hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) { in free_tx_poll()
    804 struct hinic_sq *sq = txq->sq; in tx_request_irq() local
    808 qp = container_of(sq, struct hinic_qp, sq); in tx_request_irq()
    844 struct hinic_sq *sq = txq->sq; in tx_free_irq() local
    861 struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); in hinic_init_txq()
    [all …]
|
| /linux/block/ |
| A D | blk-throttle.c |
    68 if (sq && sq->parent_sq) in sq_to_tg()
    461 if (!sq->nr_pending) in throtl_schedule_next_dispatch()
    468 throtl_schedule_pending_timer(sq, sq->first_pending_disptime); in throtl_schedule_next_dispatch()
    988 if (sq->nr_queued[READ] || sq->nr_queued[WRITE]) in throtl_select_dispatch()
    1017 struct throtl_service_queue *sq = from_timer(sq, t, pending_timer); in throtl_pending_timer_fn() local
    1042 sq->nr_queued[READ] + sq->nr_queued[WRITE], in throtl_pending_timer_fn()
    1043 sq->nr_queued[READ], sq->nr_queued[WRITE]); in throtl_pending_timer_fn()
    1069 sq = parent_sq; in throtl_pending_timer_fn()
    1654 sq = sq->parent_sq; in __blk_throtl_bio()
    1655 tg = sq_to_tg(sq); in __blk_throtl_bio()
    [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/ |
| A D | mlx5hws_send.c |
    106 *sq->wq.db = cpu_to_be32(sq->cur_post); in hws_send_engine_post_ring()
    144 idx = sq->cur_post & sq->buf_mask; in mlx5hws_send_engine_post_end()
    145 sq->last_idx = idx; in mlx5hws_send_engine_post_end()
    155 sq->sqn << 8); in mlx5hws_send_engine_post_end()
    301 wqe_ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, sq->last_idx); in mlx5hws_send_engine_flush_queue()
    570 sq->mdev = mdev; in hws_send_ring_alloc_sq()
    581 if (!sq->dep_wqe) { in hws_send_ring_alloc_sq()
    586 sq->wr_priv = kzalloc(sizeof(*sq->wr_priv) * buf_sz, GFP_KERNEL); in hws_send_ring_alloc_sq()
    587 if (!sq->wr_priv) { in hws_send_ring_alloc_sq()
    605 if (!sq) in hws_send_ring_free_sq()
    [all …]
|