/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "queue/tx.h"

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer - driver-side bookkeeping for one Rx buffer (RB)
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @list: list entry for the membuffer
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
	u32 offset;
	u16 vid;
	bool invalid;
};

/**
 * struct isr_statistics - interrupt statistics
 * @hw: number of hardware error interrupts
 * @sw: number of software (uCode) error interrupts
 * @err_code: error code reported with the last SW error interrupt
 * @sch: number of scheduler interrupts
 * @alive: number of ALIVE interrupts
 * @rfkill: number of RF-kill interrupts
 * @ctkill: number of critical-temperature (CT-kill) interrupts
 * @wakeup: number of wakeup interrupts
 * @rx: number of Rx interrupts
 * @tx: number of Tx interrupts
 * @unhandled: number of unhandled interrupts
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved: reserved
 */
struct iwl_rx_completion_desc_bz {
	__le16 rbid;
	u8 flags;
	u8 reserved[1];
} __packed;
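
/*
 * The used-BD (completion) entry format depends on the device family. The
 * helper below is an illustrative sketch only (it is not part of this
 * header); the 0x0FFF mask for older devices assumes a 12-bit VID:
 *
 *	static u32 rx_used_bd_get_vid(struct iwl_trans *trans,
 *				      void *used_bd, u32 i)
 *	{
 *		struct iwl_rx_completion_desc_bz *cd_bz = used_bd;
 *		struct iwl_rx_completion_desc *cd = used_bd;
 *
 *		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
 *			return le16_to_cpu(cd_bz[i].rbid);
 *		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
 *			return le16_to_cpu(cd[i].rbid);
 *		return le32_to_cpu(((__le32 *)used_bd)[i]) & 0x0FFF;
 *	}
 */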

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handled to allocator to use for allocation
 * @write_actual: last write pointer value actually written to the device
 *	(aligned down to a multiple of 8)
 * @queue_size: size of this queue, in entries
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue contents and the indices above
 * @napi: NAPI context for this queue
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	void *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
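
/*
 * Rough sketch of the request/claim flow between an Rx queue and the
 * allocator (accounting only, illustrative; the real implementation is in
 * rx.c). Once RX_CLAIM_REQ_ALLOC used RBDs have piled up on a queue, they
 * are handed to the allocator and a request is posted:
 *
 *	spin_lock(&rba->lock);
 *	list_splice_tail_init(&local_empty_rbds, &rba->rbd_empty);
 *	spin_unlock(&rba->lock);
 *	atomic_inc(&rba->req_pending);
 *	queue_work(rba->alloc_wq, &rba->rx_alloc);
 *
 * The rx_alloc work then allocates pages for RX_CLAIM_REQ_ALLOC RBDs, moves
 * them to rba->rbd_allocated, decrements req_pending and increments
 * req_ready so that the queue can claim the completed request later.
 */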

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}
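
/*
 * Example (illustrative sketch of how the Rx path consumes this value; the
 * 0x0FFF mask assumes the closed-RB index occupies the low 12 bits):
 *
 *	u32 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 *
 *	while (rxq->read != r) {
 *		struct iwl_rx_mem_buffer *rxb = rxq->queue[rxq->read];
 *
 *		... handle the buffer described by rxb ...
 *		rxq->read = (rxq->read + 1) & (rxq->queue_size - 1);
 *	}
 */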

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec - continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

enum iwl_pcie_fw_reset_state {
	FW_RESET_IDLE,
	FW_RESET_REQUESTED,
	FW_RESET_OK,
	FW_RESET_ERROR,
};

/**
 * enum iwl_pcie_imr_status - imr dma transfer state
 * @IMR_D2S_IDLE: default value of the dma transfer
 * @IMR_D2S_REQUESTED: dma transfer requested
 * @IMR_D2S_COMPLETED: dma transfer completed
 * @IMR_D2S_ERROR: dma transfer error
 */
enum iwl_pcie_imr_status {
	IMR_D2S_IDLE,
	IMR_D2S_REQUESTED,
	IMR_D2S_COMPLETED,
	IMR_D2S_ERROR,
};
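
/*
 * Example (illustrative): how the IMR state machine is typically driven by
 * iwl_trans_pcie_copy_imr(); the 5 second timeout is an assumption:
 *
 *	trans_pcie->imr_status = IMR_D2S_REQUESTED;
 *	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
 *	ret = wait_event_timeout(trans_pcie->imr_waitq,
 *				 trans_pcie->imr_status !=
 *					IMR_D2S_REQUESTED, 5 * HZ);
 *	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR)
 *		... treat the transfer as failed ...
 */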

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @iml: image loader image virtual address
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address
 * @pnvm_dram: DRAM area that contains the PNVM data
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 * @imr_status: imr dma state machine
 * @imr_waitq: imr wait queue for dma completion
 * @rf_name: name/version of the CRF, if any
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	void *iml;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;

	struct iwl_dram_data pnvm_dram;
	struct iwl_dram_data reduce_power_dram;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	u8 __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t sx_waitq;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;

	bool fw_reset_handshake;
	enum iwl_pcie_fw_reset_state fw_reset_state;
	wait_queue_head_t fw_reset_waitq;
	enum iwl_pcie_imr_status imr_status;
	wait_queue_head_t imr_waitq;
	char rf_name[32];
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}
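
/*
 * Example (illustrative): an MSI-X Rx interrupt handler would re-enable its
 * vector once it has scheduled the NAPI poll; using entry->entry as the
 * queue index is an assumption about how the vectors are laid out:
 *
 *	struct msix_entry *entry = dev_id;
 *
 *	... schedule NAPI for the queue mapped to this vector ...
 *	iwl_pcie_clear_irq(trans, entry->entry);
 */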

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
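
/*
 * The two helpers above are inverses of each other; an illustrative
 * round trip:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	struct iwl_trans *same_trans = iwl_trans_pcie_get_trans(trans_pcie);
 */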

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
#define _iwl_trans_pcie_grab_nic_access(trans)			\
	__cond_lock(nic_access_nobh,				\
		    likely(__iwl_trans_pcie_grab_nic_access(trans)))

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
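
/*
 * Example (illustrative): counting the sections destined for each CPU of the
 * firmware image; the local variable names are placeholders:
 *
 *	int first_cpu_sections = iwl_pcie_get_num_sections(fw, 0);
 *	int second_cpu_sections =
 *		iwl_pcie_get_num_sections(fw, first_cpu_sections + 1);
 */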

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its bit
		 * in the mask register is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ":shared_IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ":queue_%d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ":default_queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ":exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ":queue_%d", i);
}
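
/*
 * Example (illustrative): how the generated names would be used when
 * requesting the MSI-X vectors; the thread function and flags below are
 * placeholders, the real setup lives in the transport allocation code:
 *
 *	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
 *		ret = devm_request_threaded_irq(&pdev->dev,
 *				trans_pcie->msix_entries[i].vector,
 *				iwl_pcie_msix_isr, some_thread_fn,
 *				IRQF_SHARED,
 *				queue_name(&pdev->dev, trans_pcie, i),
 *				&trans_pcie->msix_entries[i]);
 *		if (ret)
 *			...
 *	}
 */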

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
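
/*
 * Example (illustrative): reading the rfkill state under the mutex and
 * reporting it to the op-mode, mirroring the start_hw/stop_device pattern:
 *
 *	mutex_lock(&trans_pcie->mutex);
 *	hw_rfkill = iwl_is_rfkill_set(trans);
 *	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 *	mutex_unlock(&trans_pcie->mutex);
 */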

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
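
/*
 * Example (illustrative): flipping a single bit of a register while leaving
 * the other bits untouched ("reg" and the bit index are placeholders):
 *
 *	__iwl_trans_pcie_set_bit(trans, reg, BIT(3));
 *	__iwl_trans_pcie_clear_bit(trans, reg, BIT(3));
 */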

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
				u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr, u32 byte_cnt);

#endif /* __iwl_trans_int_pcie_h__ */