Lines Matching refs:trans
(Each entry below gives the line number in the source file, the matching code line, and the enclosing function; an "argument" or "local" tag marks lines where trans is declared as a function parameter or a local variable, while untagged lines are plain uses.)

19 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,  in iwl_pcie_gen2_update_byte_tbl()  argument
43 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_pcie_gen2_update_byte_tbl()
47 WARN_ON(trans->txqs.bc_table_dword); in iwl_pcie_gen2_update_byte_tbl()
55 WARN_ON(!trans->txqs.bc_table_dword); in iwl_pcie_gen2_update_byte_tbl()
66 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_inc_wr_ptr() argument
70 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); in iwl_txq_inc_wr_ptr()
76 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); in iwl_txq_inc_wr_ptr()
79 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, in iwl_txq_gen2_get_num_tbs() argument
85 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, in iwl_txq_gen2_tfd_unmap() argument
91 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); in iwl_txq_gen2_tfd_unmap()
93 if (num_tbs > trans->txqs.tfd.max_tbs) { in iwl_txq_gen2_tfd_unmap()
94 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); in iwl_txq_gen2_tfd_unmap()
101 dma_unmap_page(trans->dev, in iwl_txq_gen2_tfd_unmap()
106 dma_unmap_single(trans->dev, in iwl_txq_gen2_tfd_unmap()
115 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_gen2_free_tfd() argument
128 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_gen2_free_tfd()
129 iwl_txq_get_tfd(trans, txq, idx)); in iwl_txq_gen2_free_tfd()
138 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_gen2_free_tfd()
143 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, in iwl_txq_gen2_set_tb() argument
146 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); in iwl_txq_gen2_set_tb()
165 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { in iwl_txq_gen2_set_tb()
166 IWL_ERR(trans, "Error can not send more than %d chunks\n", in iwl_txq_gen2_set_tb()
167 trans->txqs.tfd.max_tbs); in iwl_txq_gen2_set_tb()
179 static struct page *get_workaround_page(struct iwl_trans *trans, in get_workaround_page() argument
185 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in get_workaround_page()
204 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, in iwl_txq_gen2_set_tb_with_wa() argument
214 if (unlikely(dma_mapping_error(trans->dev, phys))) in iwl_txq_gen2_set_tb_with_wa()
218 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); in iwl_txq_gen2_set_tb_with_wa()
244 page = get_workaround_page(trans, skb); in iwl_txq_gen2_set_tb_with_wa()
252 phys = dma_map_single(trans->dev, page_address(page), len, in iwl_txq_gen2_set_tb_with_wa()
254 if (unlikely(dma_mapping_error(trans->dev, phys))) in iwl_txq_gen2_set_tb_with_wa()
256 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); in iwl_txq_gen2_set_tb_with_wa()
263 IWL_WARN(trans, in iwl_txq_gen2_set_tb_with_wa()
270 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); in iwl_txq_gen2_set_tb_with_wa()
272 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); in iwl_txq_gen2_set_tb_with_wa()
274 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); in iwl_txq_gen2_set_tb_with_wa()
280 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, in get_page_hdr() argument
283 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); in get_page_hdr()
286 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in get_page_hdr()
325 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, in iwl_txq_gen2_build_amsdu() argument
341 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), in iwl_txq_gen2_build_amsdu()
354 hdr_page = get_page_hdr(trans, hdr_room, skb); in iwl_txq_gen2_build_amsdu()
406 tb_phys = dma_map_single(trans->dev, start_hdr, in iwl_txq_gen2_build_amsdu()
408 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_txq_gen2_build_amsdu()
415 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); in iwl_txq_gen2_build_amsdu()
416 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, in iwl_txq_gen2_build_amsdu()
429 tb_phys = dma_map_single(trans->dev, tso.data, in iwl_txq_gen2_build_amsdu()
431 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, in iwl_txq_gen2_build_amsdu()
453 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, in iwl_txq_gen2_build_tx_amsdu() argument
462 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx_amsdu()
474 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx_amsdu()
489 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); in iwl_txq_gen2_build_tx_amsdu()
490 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_txq_gen2_build_tx_amsdu()
496 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); in iwl_txq_gen2_build_tx_amsdu()
498 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, in iwl_txq_gen2_build_tx_amsdu()
507 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); in iwl_txq_gen2_build_tx_amsdu()
511 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, in iwl_txq_gen2_tx_add_frags() argument
527 tb_phys = skb_frag_dma_map(trans->dev, frag, 0, in iwl_txq_gen2_tx_add_frags()
529 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, in iwl_txq_gen2_tx_add_frags()
540 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, in iwl_txq_gen2_build_tx() argument
550 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tx()
566 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); in iwl_txq_gen2_build_tx()
584 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); in iwl_txq_gen2_build_tx()
585 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_txq_gen2_build_tx()
591 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); in iwl_txq_gen2_build_tx()
592 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, in iwl_txq_gen2_build_tx()
601 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, in iwl_txq_gen2_build_tx()
603 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, in iwl_txq_gen2_build_tx()
610 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) in iwl_txq_gen2_build_tx()
616 tb_phys = dma_map_single(trans->dev, frag->data, in iwl_txq_gen2_build_tx()
618 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, in iwl_txq_gen2_build_tx()
623 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) in iwl_txq_gen2_build_tx()
630 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); in iwl_txq_gen2_build_tx()
635 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, in iwl_txq_gen2_build_tfd() argument
643 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); in iwl_txq_gen2_build_tfd()
652 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) in iwl_txq_gen2_build_tfd()
669 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, in iwl_txq_gen2_build_tfd()
671 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, in iwl_txq_gen2_build_tfd()
675 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) in iwl_txq_space() argument
686 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) in iwl_txq_space()
689 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; in iwl_txq_space()
696 (trans->trans_cfg->base_params->max_tfd_queue_size - 1); in iwl_txq_space()
704 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, in iwl_txq_gen2_tx() argument
708 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_tx()
717 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), in iwl_txq_gen2_tx()
722 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && in iwl_txq_gen2_tx()
728 if (iwl_txq_space(trans, txq) < txq->high_mark) { in iwl_txq_gen2_tx()
729 iwl_txq_stop(trans, txq); in iwl_txq_gen2_tx()
732 if (unlikely(iwl_txq_space(trans, txq) < 3)) { in iwl_txq_gen2_tx()
736 trans->txqs.dev_cmd_offs); in iwl_txq_gen2_tx()
759 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); in iwl_txq_gen2_tx()
765 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { in iwl_txq_gen2_tx()
778 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, in iwl_txq_gen2_tx()
779 iwl_txq_gen2_get_num_tbs(trans, tfd)); in iwl_txq_gen2_tx()
786 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); in iwl_txq_gen2_tx()
787 iwl_txq_inc_wr_ptr(trans, txq); in iwl_txq_gen2_tx()
801 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) in iwl_txq_gen2_unmap() argument
803 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_unmap()
807 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", in iwl_txq_gen2_unmap()
810 if (txq_id != trans->txqs.cmd.q_id) { in iwl_txq_gen2_unmap()
815 iwl_txq_free_tso_page(trans, skb); in iwl_txq_gen2_unmap()
817 iwl_txq_gen2_free_tfd(trans, txq); in iwl_txq_gen2_unmap()
818 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); in iwl_txq_gen2_unmap()
824 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_gen2_unmap()
830 iwl_wake_queue(trans, txq); in iwl_txq_gen2_unmap()
833 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans, in iwl_txq_gen2_free_memory() argument
836 struct device *dev = trans->dev; in iwl_txq_gen2_free_memory()
841 trans->txqs.tfd.size * txq->n_window, in iwl_txq_gen2_free_memory()
850 dma_pool_free(trans->txqs.bc_pool, in iwl_txq_gen2_free_memory()
863 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) in iwl_txq_gen2_free() argument
872 txq = trans->txqs.txq[txq_id]; in iwl_txq_gen2_free()
877 iwl_txq_gen2_unmap(trans, txq_id); in iwl_txq_gen2_free()
880 if (txq_id == trans->txqs.cmd.q_id) in iwl_txq_gen2_free()
887 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_gen2_free()
889 trans->txqs.txq[txq_id] = NULL; in iwl_txq_gen2_free()
891 clear_bit(txq_id, trans->txqs.queue_used); in iwl_txq_gen2_free()
920 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, in iwl_txq_init() argument
925 trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_txq_init()
954 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) in iwl_txq_free_tso_page() argument
959 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); in iwl_txq_free_tso_page()
972 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_log_scd_error() argument
979 if (trans->trans_cfg->use_tfh) { in iwl_txq_log_scd_error()
980 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, in iwl_txq_log_scd_error()
986 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); in iwl_txq_log_scd_error()
990 IWL_ERR(trans, in iwl_txq_log_scd_error()
995 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & in iwl_txq_log_scd_error()
996 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
997 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & in iwl_txq_log_scd_error()
998 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
999 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); in iwl_txq_log_scd_error()
1005 struct iwl_trans *trans = txq->trans; in iwl_txq_stuck_timer() local
1015 iwl_txq_log_scd_error(trans, txq); in iwl_txq_stuck_timer()
1017 iwl_force_nmi(trans); in iwl_txq_stuck_timer()
1020 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, in iwl_txq_alloc() argument
1023 size_t tfd_sz = trans->txqs.tfd.size * in iwl_txq_alloc()
1024 trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_txq_alloc()
1031 if (trans->trans_cfg->use_tfh) in iwl_txq_alloc()
1032 tfd_sz = trans->txqs.tfd.size * slots_num; in iwl_txq_alloc()
1035 txq->trans = trans; in iwl_txq_alloc()
1057 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, in iwl_txq_alloc()
1066 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, in iwl_txq_alloc()
1074 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); in iwl_txq_alloc()
1085 static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, in iwl_txq_dyn_alloc_dma() argument
1093 WARN_ON(!trans->txqs.bc_tbl_size); in iwl_txq_dyn_alloc_dma()
1095 bc_tbl_size = trans->txqs.bc_tbl_size; in iwl_txq_dyn_alloc_dma()
1105 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, in iwl_txq_dyn_alloc_dma()
1108 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); in iwl_txq_dyn_alloc_dma()
1113 ret = iwl_txq_alloc(trans, txq, size, false); in iwl_txq_dyn_alloc_dma()
1115 IWL_ERR(trans, "Tx queue alloc failed\n"); in iwl_txq_dyn_alloc_dma()
1118 ret = iwl_txq_init(trans, txq, size, false); in iwl_txq_dyn_alloc_dma()
1120 IWL_ERR(trans, "Tx queue init failed\n"); in iwl_txq_dyn_alloc_dma()
1130 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_dyn_alloc_dma()
1134 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_txq_alloc_response() argument
1151 if (qid >= ARRAY_SIZE(trans->txqs.txq)) { in iwl_txq_alloc_response()
1157 if (test_and_set_bit(qid, trans->txqs.queue_used)) { in iwl_txq_alloc_response()
1163 if (WARN_ONCE(trans->txqs.txq[qid], in iwl_txq_alloc_response()
1170 trans->txqs.txq[qid] = txq; in iwl_txq_alloc_response()
1171 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); in iwl_txq_alloc_response()
1177 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); in iwl_txq_alloc_response()
1184 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_alloc_response()
1188 int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid, in iwl_txq_dyn_alloc() argument
1205 ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout); in iwl_txq_dyn_alloc()
1213 ret = iwl_trans_send_cmd(trans, &hcmd); in iwl_txq_dyn_alloc()
1217 return iwl_txq_alloc_response(trans, txq, &hcmd); in iwl_txq_dyn_alloc()
1220 iwl_txq_gen2_free_memory(trans, txq); in iwl_txq_dyn_alloc()
1224 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) in iwl_txq_dyn_free() argument
1236 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { in iwl_txq_dyn_free()
1237 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), in iwl_txq_dyn_free()
1242 iwl_txq_gen2_free(trans, queue); in iwl_txq_dyn_free()
1244 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); in iwl_txq_dyn_free()
1247 void iwl_txq_gen2_tx_free(struct iwl_trans *trans) in iwl_txq_gen2_tx_free() argument
1251 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); in iwl_txq_gen2_tx_free()
1254 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { in iwl_txq_gen2_tx_free()
1255 if (!trans->txqs.txq[i]) in iwl_txq_gen2_tx_free()
1258 iwl_txq_gen2_free(trans, i); in iwl_txq_gen2_tx_free()
1262 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) in iwl_txq_gen2_init() argument
1268 if (!trans->txqs.txq[txq_id]) { in iwl_txq_gen2_init()
1271 IWL_ERR(trans, "Not enough memory for tx queue\n"); in iwl_txq_gen2_init()
1274 trans->txqs.txq[txq_id] = queue; in iwl_txq_gen2_init()
1275 ret = iwl_txq_alloc(trans, queue, queue_size, true); in iwl_txq_gen2_init()
1277 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); in iwl_txq_gen2_init()
1281 queue = trans->txqs.txq[txq_id]; in iwl_txq_gen2_init()
1284 ret = iwl_txq_init(trans, queue, queue_size, in iwl_txq_gen2_init()
1285 (txq_id == trans->txqs.cmd.q_id)); in iwl_txq_gen2_init()
1287 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); in iwl_txq_gen2_init()
1290 trans->txqs.txq[txq_id]->id = txq_id; in iwl_txq_gen2_init()
1291 set_bit(txq_id, trans->txqs.queue_used); in iwl_txq_gen2_init()
1296 iwl_txq_gen2_tx_free(trans); in iwl_txq_gen2_init()
1300 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, in iwl_txq_gen1_tfd_tb_get_addr() argument
1308 if (trans->trans_cfg->use_tfh) { in iwl_txq_gen1_tfd_tb_get_addr()
1332 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, in iwl_txq_gen1_tfd_unmap() argument
1337 void *tfd = iwl_txq_get_tfd(trans, txq, index); in iwl_txq_gen1_tfd_unmap()
1340 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); in iwl_txq_gen1_tfd_unmap()
1342 if (num_tbs > trans->txqs.tfd.max_tbs) { in iwl_txq_gen1_tfd_unmap()
1343 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); in iwl_txq_gen1_tfd_unmap()
1352 dma_unmap_page(trans->dev, in iwl_txq_gen1_tfd_unmap()
1353 iwl_txq_gen1_tfd_tb_get_addr(trans, in iwl_txq_gen1_tfd_unmap()
1355 iwl_txq_gen1_tfd_tb_get_len(trans, in iwl_txq_gen1_tfd_unmap()
1359 dma_unmap_single(trans->dev, in iwl_txq_gen1_tfd_unmap()
1360 iwl_txq_gen1_tfd_tb_get_addr(trans, in iwl_txq_gen1_tfd_unmap()
1362 iwl_txq_gen1_tfd_tb_get_len(trans, in iwl_txq_gen1_tfd_unmap()
1369 if (trans->trans_cfg->use_tfh) { in iwl_txq_gen1_tfd_unmap()
1386 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, in iwl_txq_gen1_update_byte_cnt_tbl() argument
1400 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; in iwl_txq_gen1_update_byte_cnt_tbl()
1415 if (trans->txqs.bc_table_dword) in iwl_txq_gen1_update_byte_cnt_tbl()
1430 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, in iwl_txq_gen1_inval_byte_cnt_tbl() argument
1433 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; in iwl_txq_gen1_inval_byte_cnt_tbl()
1443 if (txq_id != trans->txqs.cmd.q_id) in iwl_txq_gen1_inval_byte_cnt_tbl()
1464 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_free_tfd() argument
1481 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); in iwl_txq_free_tfd()
1491 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_free_tfd()
1521 void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, in iwl_txq_reclaim() argument
1524 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_reclaim()
1530 if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) in iwl_txq_reclaim()
1535 if (!test_bit(txq_id, trans->txqs.queue_used)) { in iwl_txq_reclaim()
1536 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", in iwl_txq_reclaim()
1544 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", in iwl_txq_reclaim()
1549 last_to_free = iwl_txq_dec_wrap(trans, tfd_num); in iwl_txq_reclaim()
1552 IWL_ERR(trans, in iwl_txq_reclaim()
1555 trans->trans_cfg->base_params->max_tfd_queue_size, in iwl_txq_reclaim()
1558 iwl_op_mode_time_point(trans->op_mode, in iwl_txq_reclaim()
1569 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), in iwl_txq_reclaim()
1576 iwl_txq_free_tso_page(trans, skb); in iwl_txq_reclaim()
1582 if (!trans->trans_cfg->use_tfh) in iwl_txq_reclaim()
1583 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); in iwl_txq_reclaim()
1585 iwl_txq_free_tfd(trans, txq); in iwl_txq_reclaim()
1590 if (iwl_txq_space(trans, txq) > txq->low_mark && in iwl_txq_reclaim()
1591 test_bit(txq_id, trans->txqs.queue_stopped)) { in iwl_txq_reclaim()
1620 trans->txqs.dev_cmd_offs); in iwl_txq_reclaim()
1627 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); in iwl_txq_reclaim()
1630 if (iwl_txq_space(trans, txq) > txq->low_mark) in iwl_txq_reclaim()
1631 iwl_wake_queue(trans, txq); in iwl_txq_reclaim()
1642 void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) in iwl_txq_set_q_ptrs() argument
1644 struct iwl_txq *txq = trans->txqs.txq[txq_id]; in iwl_txq_set_q_ptrs()
1654 void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, in iwl_trans_txq_freeze_timer() argument
1660 struct iwl_txq *txq = trans->txqs.txq[queue]; in iwl_trans_txq_freeze_timer()
1670 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", in iwl_trans_txq_freeze_timer()
1708 static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans, in iwl_trans_txq_send_hcmd_sync() argument
1711 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); in iwl_trans_txq_send_hcmd_sync()
1712 struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; in iwl_trans_txq_send_hcmd_sync()
1716 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); in iwl_trans_txq_send_hcmd_sync()
1719 &trans->status), in iwl_trans_txq_send_hcmd_sync()
1723 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); in iwl_trans_txq_send_hcmd_sync()
1725 cmd_idx = trans->ops->send_cmd(trans, cmd); in iwl_trans_txq_send_hcmd_sync()
1728 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_trans_txq_send_hcmd_sync()
1729 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", in iwl_trans_txq_send_hcmd_sync()
1734 ret = wait_event_timeout(trans->wait_command_queue, in iwl_trans_txq_send_hcmd_sync()
1736 &trans->status), in iwl_trans_txq_send_hcmd_sync()
1739 IWL_ERR(trans, "Error sending %s: time out after %dms.\n", in iwl_trans_txq_send_hcmd_sync()
1742 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", in iwl_trans_txq_send_hcmd_sync()
1745 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_trans_txq_send_hcmd_sync()
1746 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", in iwl_trans_txq_send_hcmd_sync()
1750 iwl_trans_sync_nmi(trans); in iwl_trans_txq_send_hcmd_sync()
1754 if (test_bit(STATUS_FW_ERROR, &trans->status)) { in iwl_trans_txq_send_hcmd_sync()
1755 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); in iwl_trans_txq_send_hcmd_sync()
1762 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { in iwl_trans_txq_send_hcmd_sync()
1763 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); in iwl_trans_txq_send_hcmd_sync()
1769 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); in iwl_trans_txq_send_hcmd_sync()
1795 int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, in iwl_trans_txq_send_hcmd() argument
1799 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) in iwl_trans_txq_send_hcmd()
1803 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { in iwl_trans_txq_send_hcmd()
1804 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", in iwl_trans_txq_send_hcmd()
1809 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && in iwl_trans_txq_send_hcmd()
1811 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); in iwl_trans_txq_send_hcmd()
1822 ret = trans->ops->send_cmd(trans, cmd); in iwl_trans_txq_send_hcmd()
1824 IWL_ERR(trans, in iwl_trans_txq_send_hcmd()
1826 iwl_get_cmd_string(trans, cmd->id), ret); in iwl_trans_txq_send_hcmd()
1832 return iwl_trans_txq_send_hcmd_sync(trans, cmd); in iwl_trans_txq_send_hcmd()
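
The iwl_txq_space() and iwl_txq_inc_wrap() references above all revolve around the same circular-ring bookkeeping: the write and read pointers live in a power-of-two ring, and one slot is kept unused so an empty queue can be told apart from a full one. The sketch below is a rough, standalone illustration only, not driver code: the helper names ring_space() and ring_inc_wrap() and the sample numbers are made up here, and the real iwl_txq_space() additionally caps the usable window at the queue's n_window.

/*
 * Minimal sketch of the free-space arithmetic behind iwl_txq_space().
 * Illustrative only; max_size must be a power of two so the mask
 * below acts as a modulo.
 */
#include <stdio.h>

static unsigned int ring_space(unsigned int write_ptr, unsigned int read_ptr,
                               unsigned int max_size)
{
        /* one slot stays empty so write_ptr == read_ptr means "empty" */
        unsigned int max = max_size - 1;
        unsigned int used = (write_ptr - read_ptr) & (max_size - 1);

        return max - used;
}

/* advancing a pointer wraps with the same mask (cf. iwl_txq_inc_wrap()) */
static unsigned int ring_inc_wrap(unsigned int index, unsigned int max_size)
{
        return (index + 1) & (max_size - 1);
}

int main(void)
{
        /* a 256-entry ring with 10 descriptors currently in flight */
        printf("free slots: %u\n", ring_space(30, 20, 256)); /* prints 245 */
        printf("next index: %u\n", ring_inc_wrap(255, 256)); /* prints 0 */
        return 0;
}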