/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EF4_NIC_H
#define EF4_NIC_H

#include <linux/net_tstamp.h>
#include <linux/i2c-algo-bit.h>
#include "net_driver.h"
#include "efx.h"

enum {
	EF4_REV_FALCON_A0 = 0,
	EF4_REV_FALCON_A1 = 1,
	EF4_REV_FALCON_B0 = 2,
};

static inline int ef4_nic_rev(struct ef4_nic *efx)
{
	return efx->type->revision;
}

u32 ef4_farch_fpga_ver(struct ef4_nic *efx);

/* NIC has two interlinked PCI functions for the same port. */
static inline bool ef4_nic_is_dual_func(struct ef4_nic *efx)
{
	return ef4_nic_rev(efx) < EF4_REV_FALCON_B0;
}

/* Read the current event from the event queue */
static inline ef4_qword_t *ef4_event(struct ef4_channel *channel,
				     unsigned int index)
{
	return ((ef4_qword_t *) (channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int ef4_event_present(ef4_qword_t *event)
{
	return !(EF4_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EF4_DWORD_IS_ALL_ONES(event->dword[1]));
}
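
/*
 * Illustration only (not from the original source): a minimal sketch of how
 * a caller might combine ef4_event() and ef4_event_present() to walk the
 * event queue. It assumes the channel's read pointer is tracked in
 * channel->eventq_read_ptr, as the event-processing code elsewhere in this
 * driver does; the handling step is elided.
 *
 *	unsigned int read_ptr = channel->eventq_read_ptr;
 *	ef4_qword_t *p_event = ef4_event(channel, read_ptr);
 *
 *	while (ef4_event_present(p_event)) {
 *		... decode and handle *p_event ...
 *		p_event = ef4_event(channel, ++read_ptr);
 *	}
 *	channel->eventq_read_ptr = read_ptr;
 */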

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline ef4_qword_t *
ef4_tx_desc(struct ef4_tx_queue *tx_queue, unsigned int index)
{
	return ((ef4_qword_t *) (tx_queue->txd.buf.addr)) + index;
}

/* Get partner of a TX queue, seen as part of the same net core queue */
static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_queue)
{
	if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
		return tx_queue - EF4_TXQ_TYPE_OFFLOAD;
	else
		return tx_queue + EF4_TXQ_TYPE_OFFLOAD;
}

/* Report whether this TX queue would be empty for the given write_count.
 * May return false negative.
 */
static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
					 unsigned int write_count)
{
	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	return ((empty_read_count ^ write_count) & ~EF4_EMPTY_COUNT_VALID) == 0;
}

/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell. This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless. Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 * We use the write_count used for the last doorbell push, to get the
 * NIC's view of the tx queue.
 */
static inline bool ef4_nic_may_push_tx_desc(struct ef4_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = __ef4_nic_tx_is_empty(tx_queue, write_count);

	tx_queue->empty_read_count = 0;
	return was_empty && tx_queue->write_count - write_count == 1;
}
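
/*
 * Illustration only (not from the original source): a hedged sketch of how a
 * TX write path might use ef4_nic_may_push_tx_desc(). write_count is
 * snapshotted before new descriptors are filled in, so it still reflects the
 * NIC's view of the queue at the last doorbell. push_one_descriptor() and
 * notify_descriptors() are hypothetical stand-ins for the hardware-specific
 * register writes.
 *
 *	unsigned int old_write_count = tx_queue->write_count;
 *
 *	... fill in descriptors, advancing tx_queue->write_count ...
 *
 *	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count))
 *		push_one_descriptor(tx_queue);	// descriptor + doorbell in one write
 *	else
 *		notify_descriptors(tx_queue);	// doorbell write only
 */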

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline ef4_qword_t *
ef4_rx_desc(struct ef4_rx_queue *rx_queue, unsigned int index)
{
	return ((ef4_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}

enum {
	PHY_TYPE_NONE = 0,
	PHY_TYPE_TXC43128 = 1,
	PHY_TYPE_88E1111 = 2,
	PHY_TYPE_SFX7101 = 3,
	PHY_TYPE_QT2022C2 = 4,
	PHY_TYPE_PM8358 = 6,
	PHY_TYPE_SFT9001A = 8,
	PHY_TYPE_QT2025C = 9,
	PHY_TYPE_SFT9001B = 10,
};

#define FALCON_XMAC_LOOPBACKS			\
	((1 << LOOPBACK_XGMII) |		\
	 (1 << LOOPBACK_XGXS) |			\
	 (1 << LOOPBACK_XAUI))

/* Alignment of PCIe DMA boundaries (4KB) */
#define EF4_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
#define EF4_BUF_SIZE EF4_PAGE_SIZE

/* NIC-generic software stats */
enum {
	GENERIC_STAT_rx_noskb_drops,
	GENERIC_STAT_rx_nodesc_trunc,
	GENERIC_STAT_COUNT
};

/**
 * struct falcon_board_type - board operations and type information
 * @id: Board type id, as found in NVRAM
 * @init: Allocate resources and initialise peripheral hardware
 * @init_phy: Do board-specific PHY initialisation
 * @fini: Shut down hardware and free resources
 * @set_id_led: Set state of identifying LED or revert to automatic function
 * @monitor: Board-specific health check function
 */
struct falcon_board_type {
	u8 id;
	int (*init) (struct ef4_nic *nic);
	void (*init_phy) (struct ef4_nic *efx);
	void (*fini) (struct ef4_nic *nic);
	void (*set_id_led) (struct ef4_nic *efx, enum ef4_led_mode mode);
	int (*monitor) (struct ef4_nic *nic);
};

/**
 * struct falcon_board - board information
 * @type: Type of board
 * @major: Major rev. ('A', 'B' ...)
 * @minor: Minor rev. (0, 1, ...)
 * @i2c_adap: I2C adapter for on-board peripherals
 * @i2c_data: Data for bit-banging algorithm
 * @hwmon_client: I2C client for hardware monitor
 * @ioexp_client: I2C client for power/port control
 */
struct falcon_board {
	const struct falcon_board_type *type;
	int major;
	int minor;
	struct i2c_adapter i2c_adap;
	struct i2c_algo_bit_data i2c_data;
	struct i2c_client *hwmon_client, *ioexp_client;
};

/**
 * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
 * @device_id: Controller's id for the device
 * @size: Size (in bytes)
 * @addr_len: Number of address bytes in read/write commands
 * @munge_address: Flag whether addresses should be munged.
 *	Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
 *	use bit 3 of the command byte as address bit A8, rather
 *	than having a two-byte address. If this flag is set, then
 *	commands should be munged in this way.
 * @erase_command: Erase command (or 0 if sector erase not needed).
 * @erase_size: Erase sector size (in bytes)
 *	Erase commands affect sectors with this size and alignment.
 *	This must be a power of two.
 * @block_size: Write block size (in bytes).
 *	Write commands are limited to blocks with this size and alignment.
 */
struct falcon_spi_device {
	int device_id;
	unsigned int size;
	unsigned int addr_len;
	unsigned int munge_address:1;
	u8 erase_command;
	unsigned int erase_size;
	unsigned int block_size;
};
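
/*
 * Illustration only (not from the original source): a minimal sketch of the
 * address munging described for @munge_address above. For a 9-bit-address
 * part, address bit A8 is folded into bit 3 of the command byte; for other
 * parts the command is left untouched. The helper name is hypothetical.
 *
 *	static inline u8 example_spi_munge_command(const struct falcon_spi_device *spi,
 *						   u8 command, unsigned int address)
 *	{
 *		return command | (((address >> 8) & spi->munge_address) << 3);
 *	}
 */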

static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
{
	return spi->size != 0;
}

enum {
	FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
	FALCON_STAT_tx_packets,
	FALCON_STAT_tx_pause,
	FALCON_STAT_tx_control,
	FALCON_STAT_tx_unicast,
	FALCON_STAT_tx_multicast,
	FALCON_STAT_tx_broadcast,
	FALCON_STAT_tx_lt64,
	FALCON_STAT_tx_64,
	FALCON_STAT_tx_65_to_127,
	FALCON_STAT_tx_128_to_255,
	FALCON_STAT_tx_256_to_511,
	FALCON_STAT_tx_512_to_1023,
	FALCON_STAT_tx_1024_to_15xx,
	FALCON_STAT_tx_15xx_to_jumbo,
	FALCON_STAT_tx_gtjumbo,
	FALCON_STAT_tx_non_tcpudp,
	FALCON_STAT_tx_mac_src_error,
	FALCON_STAT_tx_ip_src_error,
	FALCON_STAT_rx_bytes,
	FALCON_STAT_rx_good_bytes,
	FALCON_STAT_rx_bad_bytes,
	FALCON_STAT_rx_packets,
	FALCON_STAT_rx_good,
	FALCON_STAT_rx_bad,
	FALCON_STAT_rx_pause,
	FALCON_STAT_rx_control,
	FALCON_STAT_rx_unicast,
	FALCON_STAT_rx_multicast,
	FALCON_STAT_rx_broadcast,
	FALCON_STAT_rx_lt64,
	FALCON_STAT_rx_64,
	FALCON_STAT_rx_65_to_127,
	FALCON_STAT_rx_128_to_255,
	FALCON_STAT_rx_256_to_511,
	FALCON_STAT_rx_512_to_1023,
	FALCON_STAT_rx_1024_to_15xx,
	FALCON_STAT_rx_15xx_to_jumbo,
	FALCON_STAT_rx_gtjumbo,
	FALCON_STAT_rx_bad_lt64,
	FALCON_STAT_rx_bad_gtjumbo,
	FALCON_STAT_rx_overflow,
	FALCON_STAT_rx_symbol_error,
	FALCON_STAT_rx_align_error,
	FALCON_STAT_rx_length_error,
	FALCON_STAT_rx_internal_error,
	FALCON_STAT_rx_nodesc_drop_cnt,
	FALCON_STAT_COUNT
};

/**
 * struct falcon_nic_data - Falcon NIC state
 * @pci_dev2: Secondary function of Falcon A
 * @efx: ef4_nic pointer
 * @board: Board state and functions
 * @stats: Hardware statistics
 * @stats_disable_count: Nest count for disabling statistics fetches
 * @stats_pending: Is there a pending DMA of MAC statistics.
 * @stats_timer: A timer for regularly fetching MAC statistics.
 * @spi_flash: SPI flash device
 * @spi_eeprom: SPI EEPROM device
 * @spi_lock: SPI bus lock
 * @mdio_lock: MDIO bus lock
 * @xmac_poll_required: XMAC link state needs polling
 */
struct falcon_nic_data {
	struct pci_dev *pci_dev2;
	struct ef4_nic *efx;
	struct falcon_board board;
	u64 stats[FALCON_STAT_COUNT];
	unsigned int stats_disable_count;
	bool stats_pending;
	struct timer_list stats_timer;
	struct falcon_spi_device spi_flash;
	struct falcon_spi_device spi_eeprom;
	struct mutex spi_lock;
	struct mutex mdio_lock;
	bool xmac_poll_required;
};

static inline struct falcon_board *falcon_board(struct ef4_nic *efx)
{
	struct falcon_nic_data *data = efx->nic_data;
	return &data->board;
}

struct ethtool_ts_info;

extern const struct ef4_nic_type falcon_a1_nic_type;
extern const struct ef4_nic_type falcon_b0_nic_type;

/**************************************************************************
 *
 * Externs
 *
 **************************************************************************
 */

int falcon_probe_board(struct ef4_nic *efx, u16 revision_info);

/* TX data path */
static inline int ef4_nic_probe_tx(struct ef4_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}
static inline void ef4_nic_init_tx(struct ef4_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}
static inline void ef4_nic_remove_tx(struct ef4_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void ef4_nic_push_buffers(struct ef4_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}

/* RX data path */
static inline int ef4_nic_probe_rx(struct ef4_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}
static inline void ef4_nic_init_rx(struct ef4_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}
static inline void ef4_nic_remove_rx(struct ef4_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void ef4_nic_notify_rx_desc(struct ef4_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}
static inline void ef4_nic_generate_fill_event(struct ef4_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}

/* Event data path */
static inline int ef4_nic_probe_eventq(struct ef4_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}
static inline int ef4_nic_init_eventq(struct ef4_channel *channel)
{
	return channel->efx->type->ev_init(channel);
}
static inline void ef4_nic_fini_eventq(struct ef4_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}
static inline void ef4_nic_remove_eventq(struct ef4_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}
static inline int
ef4_nic_process_eventq(struct ef4_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}
static inline void ef4_nic_eventq_read_ack(struct ef4_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}
void ef4_nic_event_test_start(struct ef4_channel *channel);

/* queue operations */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue);
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue);
unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len);
int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue);
void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue);
int ef4_farch_ev_probe(struct ef4_channel *channel);
int ef4_farch_ev_init(struct ef4_channel *channel);
void ef4_farch_ev_fini(struct ef4_channel *channel);
void ef4_farch_ev_remove(struct ef4_channel *channel);
int ef4_farch_ev_process(struct ef4_channel *channel, int quota);
void ef4_farch_ev_read_ack(struct ef4_channel *channel);
void ef4_farch_ev_test_generate(struct ef4_channel *channel);

/* filter operations */
int ef4_farch_filter_table_probe(struct ef4_nic *efx);
void ef4_farch_filter_table_restore(struct ef4_nic *efx);
void ef4_farch_filter_table_remove(struct ef4_nic *efx);
void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx);
s32 ef4_farch_filter_insert(struct ef4_nic *efx, struct ef4_filter_spec *spec,
			    bool replace);
int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
				 enum ef4_filter_priority priority,
				 u32 filter_id);
int ef4_farch_filter_get_safe(struct ef4_nic *efx,
			      enum ef4_filter_priority priority, u32 filter_id,
			      struct ef4_filter_spec *);
int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
			      enum ef4_filter_priority priority);
u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
				   enum ef4_filter_priority priority);
u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx);
s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
				enum ef4_filter_priority priority, u32 *buf,
				u32 size);
#ifdef CONFIG_RFS_ACCEL
s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
				struct ef4_filter_spec *spec);
bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
				     unsigned int index);
#endif
void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx);

bool ef4_nic_event_present(struct ef4_channel *channel);

/* Some statistics are computed as A - B where A and B each increase
 * linearly with some hardware counter(s) and the counters are read
 * asynchronously. If the counters contributing to B are always read
 * after those contributing to A, the computed value may be lower than
 * the true value by some variable amount, and may decrease between
 * subsequent computations.
 *
 * We should never allow statistics to decrease or to exceed the true
 * value. Since the computed value will never be greater than the
 * true value, we can achieve this by only storing the computed value
 * when it increases.
 */
static inline void ef4_update_diff_stat(u64 *stat, u64 diff)
{
	if ((s64)(diff - *stat) > 0)
		*stat = diff;
}
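
/*
 * Illustration only (not from the original source): a short worked example of
 * the monotonic behaviour described above. A computed value that dips below
 * the stored statistic (because B was read after A had advanced) is ignored,
 * so the reported value never decreases.
 *
 *	u64 stat = 0;
 *
 *	ef4_update_diff_stat(&stat, 100);	// stored value becomes 100
 *	ef4_update_diff_stat(&stat, 97);	// transient dip ignored; stays 100
 *	ef4_update_diff_stat(&stat, 110);	// stored value becomes 110
 */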

/* Interrupts */
int ef4_nic_init_interrupt(struct ef4_nic *efx);
int ef4_nic_irq_test_start(struct ef4_nic *efx);
void ef4_nic_fini_interrupt(struct ef4_nic *efx);
void ef4_farch_irq_enable_master(struct ef4_nic *efx);
int ef4_farch_irq_test_generate(struct ef4_nic *efx);
void ef4_farch_irq_disable_master(struct ef4_nic *efx);
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id);
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id);
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);

static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
{
	return READ_ONCE(channel->event_test_cpu);
}
static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
{
	return READ_ONCE(efx->last_irq_cpu);
}

/* Global Resources */
int ef4_nic_flush_queues(struct ef4_nic *efx);
int ef4_farch_fini_dmaq(struct ef4_nic *efx);
void ef4_farch_finish_flr(struct ef4_nic *efx);
void falcon_start_nic_stats(struct ef4_nic *efx);
void falcon_stop_nic_stats(struct ef4_nic *efx);
int falcon_reset_xaui(struct ef4_nic *efx);
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw);
void ef4_farch_init_common(struct ef4_nic *efx);
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx);

int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags);
void ef4_nic_free_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer);

/* Tests */
struct ef4_farch_register_test {
	unsigned address;
	ef4_oword_t mask;
};
int ef4_farch_test_registers(struct ef4_nic *efx,
			     const struct ef4_farch_register_test *regs,
			     size_t n_regs);

size_t ef4_nic_get_regs_len(struct ef4_nic *efx);
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf);

size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names);
void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask, u64 *stats,
			  const void *dma_buf, bool accumulate);
void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *stat);

#define EF4_MAX_FLUSH_TIME 5000

void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
			      ef4_qword_t *event);

#endif /* EF4_NIC_H */