Lines Matching refs:cq

36 struct ice_ctl_q_info *cq = &hw->adminq; in ice_adminq_init_regs() local
38 ICE_CQ_INIT_REGS(cq, PF_FW); in ice_adminq_init_regs()
49 struct ice_ctl_q_info *cq = &hw->mailboxq; in ice_mailbox_init_regs() local
51 ICE_CQ_INIT_REGS(cq, PF_MBX); in ice_mailbox_init_regs()
62 struct ice_ctl_q_info *cq = &hw->sbq; in ice_sb_init_regs() local
64 ICE_CQ_INIT_REGS(cq, PF_SB); in ice_sb_init_regs()
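
The three *_init_regs() helpers above differ only in which register family they hand to ICE_CQ_INIT_REGS: PF_FW for the firmware admin queue, PF_MBX for the PF/VF mailbox, and PF_SB for the sideband queue. The macro body is not shown here, but the invocation pattern suggests token pasting over a register-name prefix; a minimal sketch with invented PF_DEMO_* offsets (not the real ice register map):

    #include <stdint.h>
    #include <stdio.h>

    #define PF_DEMO_ATQH 0x100u   /* send-queue head (hypothetical offset) */
    #define PF_DEMO_ATQT 0x104u   /* send-queue tail (hypothetical offset) */
    #define PF_DEMO_ARQH 0x200u   /* receive-queue head (hypothetical offset) */
    #define PF_DEMO_ARQT 0x204u   /* receive-queue tail (hypothetical offset) */

    struct ring_regs { uint32_t head, tail; };
    struct ctlq { struct ring_regs sq, rq; };

    /* One macro stamps out both rings' register offsets per queue family. */
    #define CQ_INIT_REGS(q, prefix) do {        \
            (q)->sq.head = prefix##_ATQH;       \
            (q)->sq.tail = prefix##_ATQT;       \
            (q)->rq.head = prefix##_ARQH;       \
            (q)->rq.tail = prefix##_ARQT;       \
    } while (0)

    int main(void)
    {
            struct ctlq q;

            CQ_INIT_REGS(&q, PF_DEMO);
            printf("sq head register: 0x%x\n", (unsigned)q.sq.head);
            return 0;
    }
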
74 bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_check_sq_alive() argument
77 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
78 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | in ice_check_sq_alive()
79 cq->sq.len_ena_mask)) == in ice_check_sq_alive()
80 (cq->num_sq_entries | cq->sq.len_ena_mask); in ice_check_sq_alive()
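
ice_check_sq_alive() treats the send queue as alive only if the LEN register still holds both the entry count the driver programmed and the queue-enable bit; firmware zeroes that register on reset, so the comparison fails afterwards. A user-space sketch of the predicate, with made-up mask values standing in for the real ones:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LEN_MASK     0x3FFu        /* hypothetical LEN field mask */
    #define LEN_ENA_MASK 0x80000000u   /* hypothetical queue-enable bit */

    static bool sq_alive(uint32_t len_reg, uint32_t num_sq_entries)
    {
            /* Alive iff the register still holds the programmed entry
             * count AND the enable bit; both vanish on firmware reset. */
            return (len_reg & (LEN_MASK | LEN_ENA_MASK)) ==
                   (num_sq_entries | LEN_ENA_MASK);
    }

    int main(void)
    {
            printf("%d\n", sq_alive(64u | LEN_ENA_MASK, 64));  /* 1: alive */
            printf("%d\n", sq_alive(0, 64));                   /* 0: reset */
            return 0;
    }
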
91 ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_alloc_ctrlq_sq_ring() argument
93 size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); in ice_alloc_ctrlq_sq_ring()
95 cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size, in ice_alloc_ctrlq_sq_ring()
96 &cq->sq.desc_buf.pa, in ice_alloc_ctrlq_sq_ring()
98 if (!cq->sq.desc_buf.va) in ice_alloc_ctrlq_sq_ring()
100 cq->sq.desc_buf.size = size; in ice_alloc_ctrlq_sq_ring()
102 cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, in ice_alloc_ctrlq_sq_ring()
104 if (!cq->sq.cmd_buf) { in ice_alloc_ctrlq_sq_ring()
105 dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, in ice_alloc_ctrlq_sq_ring()
106 cq->sq.desc_buf.va, cq->sq.desc_buf.pa); in ice_alloc_ctrlq_sq_ring()
107 cq->sq.desc_buf.va = NULL; in ice_alloc_ctrlq_sq_ring()
108 cq->sq.desc_buf.pa = 0; in ice_alloc_ctrlq_sq_ring()
109 cq->sq.desc_buf.size = 0; in ice_alloc_ctrlq_sq_ring()
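
ice_alloc_ctrlq_sq_ring() pairs a DMA-coherent descriptor ring (dmam_alloc_coherent) with a plain tracking array (devm_kcalloc); when the second allocation fails, it frees the first by hand and zeroes the bookkeeping fields so a later shutdown sees an unambiguous "not allocated" state. The same pattern, modeled in user space with calloc standing in for both allocators:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ring {
            void *desc;     /* stands in for desc_buf.va */
            size_t size;    /* stands in for desc_buf.size */
            void **cmd;     /* stands in for cmd_buf */
    };

    static int ring_alloc(struct ring *r, size_t entries, size_t desc_sz)
    {
            r->size = entries * desc_sz;
            r->desc = calloc(1, r->size);
            if (!r->desc)
                    return -1;

            r->cmd = calloc(entries, sizeof(*r->cmd));
            if (!r->cmd) {
                    /* Unwind: free the ring and zero every field so that
                     * teardown code can't double-free or misread size. */
                    free(r->desc);
                    memset(r, 0, sizeof(*r));
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct ring r;

            printf("%d\n", ring_alloc(&r, 64, 32));   /* 0 on success */
            free(r.cmd);
            free(r.desc);
            return 0;
    }

ice_alloc_ctrlq_rq_ring() below is the receive-side twin, sized by num_rq_entries instead.
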
122 ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_alloc_ctrlq_rq_ring() argument
124 size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); in ice_alloc_ctrlq_rq_ring()
126 cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size, in ice_alloc_ctrlq_rq_ring()
127 &cq->rq.desc_buf.pa, in ice_alloc_ctrlq_rq_ring()
129 if (!cq->rq.desc_buf.va) in ice_alloc_ctrlq_rq_ring()
131 cq->rq.desc_buf.size = size; in ice_alloc_ctrlq_rq_ring()
158 ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_alloc_rq_bufs() argument
165 cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries, in ice_alloc_rq_bufs()
166 sizeof(cq->rq.desc_buf), GFP_KERNEL); in ice_alloc_rq_bufs()
167 if (!cq->rq.dma_head) in ice_alloc_rq_bufs()
169 cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; in ice_alloc_rq_bufs()
172 for (i = 0; i < cq->num_rq_entries; i++) { in ice_alloc_rq_bufs()
176 bi = &cq->rq.r.rq_bi[i]; in ice_alloc_rq_bufs()
178 cq->rq_buf_size, &bi->pa, in ice_alloc_rq_bufs()
182 bi->size = cq->rq_buf_size; in ice_alloc_rq_bufs()
185 desc = ICE_CTL_Q_DESC(cq->rq, i); in ice_alloc_rq_bufs()
188 if (cq->rq_buf_size > ICE_AQ_LG_BUF) in ice_alloc_rq_bufs()
211 dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size, in ice_alloc_rq_bufs()
212 cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); in ice_alloc_rq_bufs()
213 cq->rq.r.rq_bi[i].va = NULL; in ice_alloc_rq_bufs()
214 cq->rq.r.rq_bi[i].pa = 0; in ice_alloc_rq_bufs()
215 cq->rq.r.rq_bi[i].size = 0; in ice_alloc_rq_bufs()
217 cq->rq.r.rq_bi = NULL; in ice_alloc_rq_bufs()
218 devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); in ice_alloc_rq_bufs()
219 cq->rq.dma_head = NULL; in ice_alloc_rq_bufs()
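
ice_alloc_rq_bufs() pre-posts the receive side: every slot gets a data buffer up front, with its size and DMA address written straight into the descriptor, plus a "large buffer" flag when rq_buf_size exceeds ICE_AQ_LG_BUF. On a mid-loop failure, the unwind frees only the buffers already allocated. A sketch with illustrative flag values and a fake descriptor layout (not the real struct ice_aq_desc):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define AQ_LG_BUF 512      /* hypothetical large-buffer threshold */
    #define F_BUF (1u << 12)   /* hypothetical "has buffer" flag */
    #define F_LB  (1u << 9)    /* hypothetical "large buffer" flag */

    struct desc { uint16_t flags; uint16_t datalen; uint64_t addr; };

    static int post_rq_bufs(struct desc *ring, void **bufs, int n,
                            size_t buf_sz)
    {
            int i;

            for (i = 0; i < n; i++) {
                    bufs[i] = malloc(buf_sz);
                    if (!bufs[i])
                            goto unwind;
                    /* Hand the buffer to "firmware" via the descriptor. */
                    ring[i].flags = F_BUF | (buf_sz > AQ_LG_BUF ? F_LB : 0);
                    ring[i].datalen = (uint16_t)buf_sz;
                    ring[i].addr = (uint64_t)(uintptr_t)bufs[i];
            }
            return 0;

    unwind:
            while (i--)        /* free only what this call allocated */
                    free(bufs[i]);
            return -1;
    }

    int main(void)
    {
            struct desc ring[4];
            void *bufs[4];

            printf("%d\n", post_rq_bufs(ring, bufs, 4, 1024));  /* 0 */
            for (int i = 0; i < 4; i++)
                    free(bufs[i]);
            return 0;
    }

This pre-posting is also why ice_cfg_rq_regs() below writes the RQ tail to num_rq_entries - 1: a single doorbell hands every pre-posted buffer to firmware.
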
230 ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_alloc_sq_bufs() argument
235 cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, in ice_alloc_sq_bufs()
236 sizeof(cq->sq.desc_buf), GFP_KERNEL); in ice_alloc_sq_bufs()
237 if (!cq->sq.dma_head) in ice_alloc_sq_bufs()
239 cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; in ice_alloc_sq_bufs()
242 for (i = 0; i < cq->num_sq_entries; i++) { in ice_alloc_sq_bufs()
245 bi = &cq->sq.r.sq_bi[i]; in ice_alloc_sq_bufs()
247 cq->sq_buf_size, &bi->pa, in ice_alloc_sq_bufs()
251 bi->size = cq->sq_buf_size; in ice_alloc_sq_bufs()
259 dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size, in ice_alloc_sq_bufs()
260 cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa); in ice_alloc_sq_bufs()
261 cq->sq.r.sq_bi[i].va = NULL; in ice_alloc_sq_bufs()
262 cq->sq.r.sq_bi[i].pa = 0; in ice_alloc_sq_bufs()
263 cq->sq.r.sq_bi[i].size = 0; in ice_alloc_sq_bufs()
265 cq->sq.r.sq_bi = NULL; in ice_alloc_sq_bufs()
266 devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); in ice_alloc_sq_bufs()
267 cq->sq.dma_head = NULL; in ice_alloc_sq_bufs()
298 static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_cfg_sq_regs() argument
300 return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); in ice_cfg_sq_regs()
310 static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_cfg_rq_regs() argument
314 status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); in ice_cfg_rq_regs()
319 wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); in ice_cfg_rq_regs()
362 static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_init_sq() argument
366 if (cq->sq.count > 0) { in ice_init_sq()
373 if (!cq->num_sq_entries || !cq->sq_buf_size) { in ice_init_sq()
378 cq->sq.next_to_use = 0; in ice_init_sq()
379 cq->sq.next_to_clean = 0; in ice_init_sq()
382 ret_code = ice_alloc_ctrlq_sq_ring(hw, cq); in ice_init_sq()
387 ret_code = ice_alloc_sq_bufs(hw, cq); in ice_init_sq()
392 ret_code = ice_cfg_sq_regs(hw, cq); in ice_init_sq()
397 cq->sq.count = cq->num_sq_entries; in ice_init_sq()
401 ICE_FREE_CQ_BUFS(hw, cq, sq); in ice_init_sq()
402 ice_free_cq_ring(hw, &cq->sq); in ice_init_sq()
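
ice_init_sq() shows the init discipline shared by both queues: refuse a double init (sq.count > 0), validate the configured sizes, reset the ring indices, then allocate the ring, allocate the buffers, and program the registers, unwinding in reverse on failure. sq.count is assigned last because it doubles as the "initialized" flag that ice_shutdown_sq() checks. A compressed user-space sketch with stubbed helpers (not the driver API):

    #include <stdio.h>

    struct sq { unsigned int count, next_to_use, next_to_clean; };

    static int alloc_ring(void) { return 0; }   /* stub: pretend success */
    static int alloc_bufs(void) { return 0; }
    static int cfg_regs(void)   { return 0; }
    static void free_bufs(void) { }
    static void free_ring(void) { }

    static int init_sq(struct sq *sq, unsigned int num_entries)
    {
            if (sq->count)              /* already initialized */
                    return -1;
            if (!num_entries)           /* invalid configuration */
                    return -1;

            sq->next_to_use = 0;
            sq->next_to_clean = 0;

            if (alloc_ring())
                    return -1;
            if (alloc_bufs())
                    goto err_ring;
            if (cfg_regs())
                    goto err_bufs;

            sq->count = num_entries;    /* publish last: queue is live */
            return 0;

    err_bufs:
            free_bufs();
    err_ring:
            free_ring();
            return -1;
    }

    int main(void)
    {
            struct sq sq = { 0 };
            int ret = init_sq(&sq, 1024);

            printf("init=%d count=%u\n", ret, sq.count);
            return 0;
    }

ice_init_rq() below follows the identical sequence for the receive queue.
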
422 static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_init_rq() argument
426 if (cq->rq.count > 0) { in ice_init_rq()
433 if (!cq->num_rq_entries || !cq->rq_buf_size) { in ice_init_rq()
438 cq->rq.next_to_use = 0; in ice_init_rq()
439 cq->rq.next_to_clean = 0; in ice_init_rq()
442 ret_code = ice_alloc_ctrlq_rq_ring(hw, cq); in ice_init_rq()
447 ret_code = ice_alloc_rq_bufs(hw, cq); in ice_init_rq()
452 ret_code = ice_cfg_rq_regs(hw, cq); in ice_init_rq()
457 cq->rq.count = cq->num_rq_entries; in ice_init_rq()
461 ICE_FREE_CQ_BUFS(hw, cq, rq); in ice_init_rq()
462 ice_free_cq_ring(hw, &cq->rq); in ice_init_rq()
475 static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_shutdown_sq() argument
479 mutex_lock(&cq->sq_lock); in ice_shutdown_sq()
481 if (!cq->sq.count) { in ice_shutdown_sq()
487 wr32(hw, cq->sq.head, 0); in ice_shutdown_sq()
488 wr32(hw, cq->sq.tail, 0); in ice_shutdown_sq()
489 wr32(hw, cq->sq.len, 0); in ice_shutdown_sq()
490 wr32(hw, cq->sq.bal, 0); in ice_shutdown_sq()
491 wr32(hw, cq->sq.bah, 0); in ice_shutdown_sq()
493 cq->sq.count = 0; /* to indicate uninitialized queue */ in ice_shutdown_sq()
496 ICE_FREE_CQ_BUFS(hw, cq, sq); in ice_shutdown_sq()
497 ice_free_cq_ring(hw, &cq->sq); in ice_shutdown_sq()
500 mutex_unlock(&cq->sq_lock); in ice_shutdown_sq()
541 static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_shutdown_rq() argument
545 mutex_lock(&cq->rq_lock); in ice_shutdown_rq()
547 if (!cq->rq.count) { in ice_shutdown_rq()
553 wr32(hw, cq->rq.head, 0); in ice_shutdown_rq()
554 wr32(hw, cq->rq.tail, 0); in ice_shutdown_rq()
555 wr32(hw, cq->rq.len, 0); in ice_shutdown_rq()
556 wr32(hw, cq->rq.bal, 0); in ice_shutdown_rq()
557 wr32(hw, cq->rq.bah, 0); in ice_shutdown_rq()
560 cq->rq.count = 0; in ice_shutdown_rq()
563 ICE_FREE_CQ_BUFS(hw, cq, rq); in ice_shutdown_rq()
564 ice_free_cq_ring(hw, &cq->rq); in ice_shutdown_rq()
567 mutex_unlock(&cq->rq_lock); in ice_shutdown_rq()
577 struct ice_ctl_q_info *cq = &hw->adminq; in ice_init_check_adminq() local
592 ice_shutdown_rq(hw, cq); in ice_init_check_adminq()
593 ice_shutdown_sq(hw, cq); in ice_init_check_adminq()
613 struct ice_ctl_q_info *cq; in ice_init_ctrlq() local
619 cq = &hw->adminq; in ice_init_ctrlq()
623 cq = &hw->sbq; in ice_init_ctrlq()
627 cq = &hw->mailboxq; in ice_init_ctrlq()
632 cq->qtype = q_type; in ice_init_ctrlq()
635 if (!cq->num_rq_entries || !cq->num_sq_entries || in ice_init_ctrlq()
636 !cq->rq_buf_size || !cq->sq_buf_size) { in ice_init_ctrlq()
641 cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; in ice_init_ctrlq()
644 ret_code = ice_init_sq(hw, cq); in ice_init_ctrlq()
649 ret_code = ice_init_rq(hw, cq); in ice_init_ctrlq()
657 ice_shutdown_sq(hw, cq); in ice_init_ctrlq()
696 struct ice_ctl_q_info *cq; in ice_shutdown_ctrlq() local
700 cq = &hw->adminq; in ice_shutdown_ctrlq()
701 if (ice_check_sq_alive(hw, cq)) in ice_shutdown_ctrlq()
705 cq = &hw->sbq; in ice_shutdown_ctrlq()
708 cq = &hw->mailboxq; in ice_shutdown_ctrlq()
714 ice_shutdown_sq(hw, cq); in ice_shutdown_ctrlq()
715 ice_shutdown_rq(hw, cq); in ice_shutdown_ctrlq()
791 static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) in ice_init_ctrlq_locks() argument
793 mutex_init(&cq->sq_lock); in ice_init_ctrlq_locks()
794 mutex_init(&cq->rq_lock); in ice_init_ctrlq_locks()
829 static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) in ice_destroy_ctrlq_locks() argument
831 mutex_destroy(&cq->sq_lock); in ice_destroy_ctrlq_locks()
832 mutex_destroy(&cq->rq_lock); in ice_destroy_ctrlq_locks()
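
Note that ice_init_ctrlq_locks()/ice_destroy_ctrlq_locks() are deliberately separate from queue init and shutdown: the queues are torn down and rebuilt across reset flows, but the mutexes must outlive those cycles, so they are created and destroyed exactly once. A sketch of that lifetime split, with pthreads standing in for the kernel mutex API:

    #include <pthread.h>
    #include <stdio.h>

    struct ctlq {
            pthread_mutex_t sq_lock, rq_lock;
            unsigned int sq_count;
    };

    static void ctlq_create(struct ctlq *q)    /* once, at probe */
    {
            pthread_mutex_init(&q->sq_lock, NULL);
            pthread_mutex_init(&q->rq_lock, NULL);
    }

    static void ctlq_reinit(struct ctlq *q)    /* many times, across resets */
    {
            q->sq_count = 0;   /* queue state is rebuilt from scratch... */
            /* ...but the locks are deliberately left untouched. */
    }

    static void ctlq_destroy(struct ctlq *q)   /* once, at remove */
    {
            pthread_mutex_destroy(&q->sq_lock);
            pthread_mutex_destroy(&q->rq_lock);
    }

    int main(void)
    {
            struct ctlq q;

            ctlq_create(&q);
            ctlq_reinit(&q);   /* simulate one reset cycle */
            ctlq_destroy(&q);
            printf("ok\n");
            return 0;
    }
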
862 static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_clean_sq() argument
864 struct ice_ctl_q_ring *sq = &cq->sq; in ice_clean_sq()
872 while (rd32(hw, cq->sq.head) != ntc) { in ice_clean_sq()
873 ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); in ice_clean_sq()
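
ice_clean_sq() recycles completed send descriptors by chasing the hardware head pointer: every slot between next_to_clean and head has been consumed by firmware, and the walk wraps at the ring size. A simulated version of the arithmetic:

    #include <stdio.h>

    /* Walk next_to_clean (*ntc) up to the hardware head, wrapping at
     * count; returns how many descriptors were reclaimed. */
    static unsigned int clean_sq(unsigned int hw_head, unsigned int *ntc,
                                 unsigned int count)
    {
            unsigned int cleaned = 0;

            while (hw_head != *ntc) {
                    /* the real code zeroes the descriptor and its
                     * tracking entry here */
                    if (++*ntc == count)
                            *ntc = 0;   /* wrap around the ring */
                    cleaned++;
            }
            return cleaned;
    }

    int main(void)
    {
            unsigned int ntc = 6;

            /* head=2, ntc=6, count=8: reclaims slots 6,7,0,1 -> 4 */
            printf("cleaned=%u ntc=%u\n", clean_sq(2, &ntc, 8), ntc);
            return 0;
    }
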
941 static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) in ice_sq_done() argument
946 return rd32(hw, cq->sq.head) == cq->sq.next_to_use; in ice_sq_done()
962 ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, in ice_sq_send_cmd() argument
978 mutex_lock(&cq->sq_lock); in ice_sq_send_cmd()
980 cq->sq_last_status = ICE_AQ_RC_OK; in ice_sq_send_cmd()
982 if (!cq->sq.count) { in ice_sq_send_cmd()
994 if (buf_size > cq->sq_buf_size) { in ice_sq_send_cmd()
1006 val = rd32(hw, cq->sq.head); in ice_sq_send_cmd()
1007 if (val >= cq->num_sq_entries) { in ice_sq_send_cmd()
1014 details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); in ice_sq_send_cmd()
1025 if (ice_clean_sq(hw, cq) == 0) { in ice_sq_send_cmd()
1032 desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use); in ice_sq_send_cmd()
1039 dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; in ice_sq_send_cmd()
1058 (cq->sq.next_to_use)++; in ice_sq_send_cmd()
1059 if (cq->sq.next_to_use == cq->sq.count) in ice_sq_send_cmd()
1060 cq->sq.next_to_use = 0; in ice_sq_send_cmd()
1061 wr32(hw, cq->sq.tail, cq->sq.next_to_use); in ice_sq_send_cmd()
1064 if (ice_sq_done(hw, cq)) in ice_sq_send_cmd()
1069 } while (total_delay < cq->sq_cmd_timeout); in ice_sq_send_cmd()
1072 if (ice_sq_done(hw, cq)) { in ice_sq_send_cmd()
1098 cq->sq_last_status = (enum ice_aq_err)retval; in ice_sq_send_cmd()
1112 if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || in ice_sq_send_cmd()
1113 rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { in ice_sq_send_cmd()
1123 mutex_unlock(&cq->sq_lock); in ice_sq_send_cmd()
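
ice_sq_send_cmd() ties the pieces together under sq_lock: validate the queue and buffer size, reclaim finished slots with ice_clean_sq(), copy the descriptor onto the ring (attaching the DMA buffer for indirect commands), advance next_to_use with wrap, ring the tail doorbell, then poll the ice_sq_done() condition — head catching up to next_to_use — until the command completes or sq_cmd_timeout expires. A self-contained simulation of the submit-and-poll core, with a fake firmware that consumes one descriptor per poll:

    #include <stdbool.h>
    #include <stdio.h>

    struct sim { unsigned int head, tail, count; };

    static void hw_tick(struct sim *s)   /* fake firmware progress */
    {
            if (s->head != s->tail)
                    s->head = (s->head + 1) % s->count;
    }

    static bool send_cmd(struct sim *s, unsigned int timeout)
    {
            /* place one descriptor and ring the doorbell (tail write) */
            unsigned int ntu = (s->tail + 1) % s->count;

            s->tail = ntu;

            for (unsigned int d = 0; d < timeout; d++) {
                    hw_tick(s);
                    if (s->head == ntu)   /* the ice_sq_done() condition */
                            return true;  /* firmware consumed the command */
            }
            return false;                 /* timed out; the command may
                                           * still complete later */
    }

    int main(void)
    {
            struct sim s = { .head = 0, .tail = 0, .count = 8 };

            printf("sent: %d\n", send_cmd(&s, 100));   /* 1 */
            return 0;
    }

The rd32() checks on the two LEN registers near the end of the function add a final wrinkle: the len_crit_mask bits flag a critical firmware error, so a fault is noticed even when the completion never arrives.
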
1154 ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, in ice_clean_rq_elem() argument
1157 u16 ntc = cq->rq.next_to_clean; in ice_clean_rq_elem()
1171 mutex_lock(&cq->rq_lock); in ice_clean_rq_elem()
1173 if (!cq->rq.count) { in ice_clean_rq_elem()
1180 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); in ice_clean_rq_elem()
1189 desc = ICE_CTL_Q_DESC(cq->rq, ntc); in ice_clean_rq_elem()
1203 memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len); in ice_clean_rq_elem()
1207 ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); in ice_clean_rq_elem()
1212 bi = &cq->rq.r.rq_bi[ntc]; in ice_clean_rq_elem()
1216 if (cq->rq_buf_size > ICE_AQ_LG_BUF) in ice_clean_rq_elem()
1223 wr32(hw, cq->rq.tail, ntc); in ice_clean_rq_elem()
1226 if (ntc == cq->num_rq_entries) in ice_clean_rq_elem()
1228 cq->rq.next_to_clean = ntc; in ice_clean_rq_elem()
1229 cq->rq.next_to_use = ntu; in ice_clean_rq_elem()
1235 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); in ice_clean_rq_elem()
1236 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); in ice_clean_rq_elem()
1239 mutex_unlock(&cq->rq_lock); in ice_clean_rq_elem()
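
ice_clean_rq_elem() pops one firmware event: it reads the hardware head to find ntu, copies the message out of the pre-posted buffer, re-arms the descriptor with that same buffer, returns the slot via a tail write, and finally reports how many events remain. The pending computation on the last lines above is wrap-aware; a worked example:

    #include <stdint.h>
    #include <stdio.h>

    /* ntu is where firmware will write next, ntc is where the driver
     * will read next; when ntc has wrapped past ntu the difference is
     * biased by the ring size, exactly as in the listing above. */
    static uint16_t pending(uint16_t ntc, uint16_t ntu, uint16_t count)
    {
            return (uint16_t)((ntc > ntu ? count : 0) + (ntu - ntc));
    }

    int main(void)
    {
            /* no wrap: driver at 3, firmware at 7 -> 4 events waiting */
            printf("%u\n", (unsigned)pending(3, 7, 1024));
            /* wrapped: driver at 1020, firmware at 2 -> 6 events waiting */
            printf("%u\n", (unsigned)pending(1020, 2, 1024));
            return 0;
    }
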