/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE 0x1ae0
#endif

#define PCI_DEV_ID_GVNIC 0x0042

#define GVE_REGISTER_BAR 0
#define GVE_DOORBELL_BAR 2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC 4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD 20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u16 pad; /* adjustment for rx padding */
	u8 can_flip; /* tracks if the networking stack is using the page */
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
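
/* Illustrative sketch (not part of the driver): one way SW could consume
 * this queue under the generation-bit scheme described above. The
 * descriptor field name `generation` is an assumption for illustration.
 *
 *	struct gve_rx_compl_desc_dqo *desc = &complq->desc_ring[complq->head];
 *
 *	if (desc->generation == complq->cur_gen_bit)
 *		return;			(no new descriptor yet)
 *	...process desc...
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1;	(flip on ring wrap-around)
 */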

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
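
/* Illustrative sketch (not part of the driver): gve_index_list forms an
 * intrusive FIFO over an array whose elements carry an s16 `next` field
 * (e.g. the buf_states array below). Popping the head could look like
 * this, where `list` and `states` are assumptions for illustration:
 *
 *	s16 idx = list->head;
 *
 *	if (idx == -1)
 *		return NULL;		(list is empty)
 *	list->head = states[idx].next;
 *	if (list->head == -1)
 *		list->tail = -1;	(list became empty)
 *	return &states[idx];
 */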

/* A single received packet split across multiple buffers may be
 * reconstructed using the information in this structure.
 */
struct gve_rx_ctx {
	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
	u32 total_size;
	u8 frag_cnt;
	bool drop_pkt;
};

struct gve_rx_cnts {
	u32 ok_pkt_bytes;
	u16 ok_pkt_cnt;
	u16 total_pkt_cnt;
	u16 cont_pkt_cnt;
	u16 desc_err_pkt_cnt;
};

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
			u16 packet_buffer_size;

			u32 qpl_copy_pool_mask;
			u32 qpl_copy_pool_head;
			struct gve_rx_slot_page_info *qpl_copy_pool;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */

	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};
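
/* Sketch of the life cycle implied by these states and the comments in
 * gve_tx_pending_packet_dqo below (transition labels are descriptive,
 * not driver symbols):
 *
 *	UNALLOCATED --tx--> PENDING_DATA_COMPL
 *	PENDING_DATA_COMPL --data completion--> freed (UNALLOCATED)
 *	PENDING_DATA_COMPL --miss completion--> PENDING_REINJECT_COMPL
 *	PENDING_REINJECT_COMPL --re-injection completion--> freed
 *	PENDING_REINJECT_COMPL --timeout--> TIMED_OUT_COMPL --> freed
 */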

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
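
/* Illustrative sketch (not part of the driver): unmapping a completed
 * packet's buffers per the rule documented above; `pkt` and `dev` are
 * assumptions for illustration.
 *
 *	int i;
 *
 *	for (i = 0; i < pkt->num_bufs; i++) {
 *		if (i == 0)
 *			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
 *					 dma_unmap_len(pkt, len[i]),
 *					 DMA_TO_DEVICE);
 *		else
 *			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
 *				       dma_unmap_len(pkt, len[i]),
 *				       DMA_TO_DEVICE);
 *	}
 */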

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* Spinlock for when cleanup in progress */
			spinlock_t clean_lock;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 *irq_db_index; /* pointer to idx into Bar2 */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
};

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_irq_db {
	__be32 index;
} ____cacheline_aligned;

struct gve_ptype {
	u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED = 0x0,
	GVE_GQI_RDA_FORMAT = 0x1,
	GVE_GQI_QPL_FORMAT = 0x2,
	GVE_DQO_RDA_FORMAT = 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
	dma_addr_t irq_db_indices_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* tx buffer length */
	u16 rx_data_slot_cnt; /* rx buffer length */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable; /* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
	/* free-running count of per AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;
	u32 adminq_verify_driver_compatibility_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	u32 suspend_cnt; /* count of times suspended */
	u32 resume_cnt; /* count of times resumed */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;
	bool up_before_suspend; /* True if dev was up before suspend */

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;

	/* Interrupt coalescing settings */
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET = 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS = 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_block's irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
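
/* Illustrative sketch (not part of the driver): an interrupt handler or
 * napi poll routine might ack/mask its vector by writing this doorbell,
 * e.g.
 *
 *	iowrite32be(irq_ctrl_value, gve_irq_doorbell(priv, block));
 *
 * where `irq_ctrl_value` stands in for a device-defined control word.
 */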

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
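
/* e.g. with num_ntfy_blks == 8, tx queues 0-3 map to blocks 0-3 and
 * rx queues 0-3 map to blocks 4-7: the lower half of the blocks serves
 * tx and the upper half serves rx, which is why num_ntfy_blks must be
 * even.
 */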

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}
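
/* The qpl_id_map bitmap is thus partitioned by convention: ids
 * [0, num_tx_qpls) belong to tx queues and ids
 * [num_tx_qpls, num_tx_qpls + num_rx_qpls) belong to rx queues,
 * which is also what gve_qpl_dma_dir below relies on.
 */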

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */