// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"
static void dwmac4_core_init(struct mac_device_info *hw,
                             struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONFIG);

        value |= GMAC_CORE_INIT;

        if (hw->ps) {
                value |= GMAC_CONFIG_TE;

                value &= hw->link.speed_mask;
                switch (hw->ps) {
                case SPEED_1000:
                        value |= hw->link.speed1000;
                        break;
                case SPEED_100:
                        value |= hw->link.speed100;
                        break;
                case SPEED_10:
                        value |= hw->link.speed10;
                        break;
                }
        }

        writel(value, ioaddr + GMAC_CONFIG);

        /* Enable GMAC interrupts */
        value = GMAC_INT_DEFAULT_ENABLE;

        if (hw->pcs)
                value |= GMAC_PCS_IRQ_DEFAULT;

        /* Enable FPE interrupt */
        if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
                value |= GMAC_INT_FPE_EN;

        writel(value, ioaddr + GMAC_INT_EN);

        if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
                init_waitqueue_head(&priv->tstamp_busy_wait);
}
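
/* Illustrative note for dwmac4_core_init() above: when hw->ps forces a speed,
 * the bits programmed into GMAC_CONFIG come from the per-speed encodings set
 * up in dwmac4_setup() at the end of this file, e.g. SPEED_100 selects
 * GMAC_CONFIG_FES | GMAC_CONFIG_PS while SPEED_1000 ORs in no extra bits.
 */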

static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
                                   u8 mode, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

        value &= GMAC_RX_QUEUE_CLEAR(queue);
        if (mode == MTL_QUEUE_AVB)
                value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
        else if (mode == MTL_QUEUE_DCB)
                value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

        writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
                                     u32 prio, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 base_register;
        u32 value;

        base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
        if (queue >= 4)
                queue -= 4;

        value = readl(ioaddr + base_register);

        value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
        value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
                 GMAC_RXQCTRL_PSRQX_MASK(queue);
        writel(value, ioaddr + base_register);
}
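
/* Illustrative note for dwmac4_rx_queue_priority() above: queues 0-3 map to
 * GMAC_RXQ_CTRL2 and queues 4-7 to GMAC_RXQ_CTRL3, with the queue index
 * rebased before the shift/mask; e.g. queue 5 programs field index 1 of
 * GMAC_RXQ_CTRL3. dwmac4_tx_queue_priority() below uses the same split for
 * the GMAC_TXQ_PRTY_MAP0/1 registers.
 */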

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
                                     u32 prio, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 base_register;
        u32 value;

        base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
        if (queue >= 4)
                queue -= 4;

        value = readl(ioaddr + base_register);

        value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
        value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
                 GMAC_TXQCTRL_PSTQX_MASK(queue);

        writel(value, ioaddr + base_register);
}

static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
                                    u8 packet, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        static const struct stmmac_rx_routing route_possibilities[] = {
                { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
                { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
                { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
                { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
                { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
        };

        value = readl(ioaddr + GMAC_RXQ_CTRL1);

        /* routing configuration */
        value &= ~route_possibilities[packet - 1].reg_mask;
        value |= (queue << route_possibilities[packet - 1].reg_shift) &
                 route_possibilities[packet - 1].reg_mask;

        /* some packets require extra ops */
        if (packet == PACKET_AVCPQ) {
                value &= ~GMAC_RXQCTRL_TACPQE;
                value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
        } else if (packet == PACKET_MCBCQ) {
                value &= ~GMAC_RXQCTRL_MCBCQEN;
                value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
        }

        writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
                                          u32 rx_alg)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + MTL_OPERATION_MODE);

        value &= ~MTL_OPERATION_RAA;
        switch (rx_alg) {
        case MTL_RX_ALGORITHM_SP:
                value |= MTL_OPERATION_RAA_SP;
                break;
        case MTL_RX_ALGORITHM_WSP:
                value |= MTL_OPERATION_RAA_WSP;
                break;
        default:
                break;
        }

        writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
                                          u32 tx_alg)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + MTL_OPERATION_MODE);

        value &= ~MTL_OPERATION_SCHALG_MASK;
        switch (tx_alg) {
        case MTL_TX_ALGORITHM_WRR:
                value |= MTL_OPERATION_SCHALG_WRR;
                break;
        case MTL_TX_ALGORITHM_WFQ:
                value |= MTL_OPERATION_SCHALG_WFQ;
                break;
        case MTL_TX_ALGORITHM_DWRR:
                value |= MTL_OPERATION_SCHALG_DWRR;
                break;
        case MTL_TX_ALGORITHM_SP:
                value |= MTL_OPERATION_SCHALG_SP;
                break;
        default:
                break;
        }

        writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
                                           u32 weight, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

        value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
        value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
        writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        if (queue < 4) {
                value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
                value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
                value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
                writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
        } else {
                value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
                value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
                value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
                writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
        }
}
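
/* Illustrative note for dwmac4_map_mtl_dma() above: the queue-to-DMA-channel
 * mapping is split the same way, queues 0-3 in MTL_RXQ_DMA_MAP0 and queues
 * 4-7 in MTL_RXQ_DMA_MAP1; e.g. mapping queue 5 to channel 2 updates field
 * index 1 of MAP1 via MTL_RXQ_DMA_QXMDMACH(2, 1).
 */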

static void dwmac4_config_cbs(struct mac_device_info *hw,
                              u32 send_slope, u32 idle_slope,
                              u32 high_credit, u32 low_credit, u32 queue)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
        pr_debug("\tsend_slope: 0x%08x\n", send_slope);
        pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
        pr_debug("\thigh_credit: 0x%08x\n", high_credit);
        pr_debug("\tlow_credit: 0x%08x\n", low_credit);

        /* enable AV algorithm */
        value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
        value |= MTL_ETS_CTRL_AVALG;
        value |= MTL_ETS_CTRL_CC;
        writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

        /* configure send slope */
        value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
        value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
        value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
        writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

        /* configure idle slope (same register as tx weight) */
        dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

        /* configure high credit */
        value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
        value &= ~MTL_HIGH_CRED_HC_MASK;
        value |= high_credit & MTL_HIGH_CRED_HC_MASK;
        writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

        /* configure low credit */
        value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
        value &= ~MTL_HIGH_CRED_LC_MASK;
        value |= low_credit & MTL_HIGH_CRED_LC_MASK;
        writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}
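
/* Note on dwmac4_config_cbs() above: it is reached through the .config_cbs
 * hook of the ops tables at the end of this file, and the caller is expected
 * to pass slope/credit values already encoded for the MTL_*CRED registers;
 * this function only masks and writes them (the idle slope sharing the tx
 * weight register, as noted in the comment above).
 */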

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
        void __iomem *ioaddr = hw->pcsr;
        int i;

        for (i = 0; i < GMAC_REG_NUM; i++)
                reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONFIG);

        if (hw->rx_csum)
                value |= GMAC_CONFIG_IPC;
        else
                value &= ~GMAC_CONFIG_IPC;

        writel(value, ioaddr + GMAC_CONFIG);

        value = readl(ioaddr + GMAC_CONFIG);

        return !!(value & GMAC_CONFIG_IPC);
}
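
/* Note on dwmac4_rx_ipc_enable() above: GMAC_CONFIG is read back after the
 * write, so the return value reports whether the IPC (RX checksum offload)
 * bit actually took effect; on cores synthesized without the checksum engine
 * the bit presumably reads back as zero, letting the caller drop the feature.
 */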

static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
        void __iomem *ioaddr = hw->pcsr;
        unsigned int pmt = 0;
        u32 config;

        if (mode & WAKE_MAGIC) {
                pr_debug("GMAC: WOL Magic frame\n");
                pmt |= power_down | magic_pkt_en;
        }
        if (mode & WAKE_UCAST) {
                pr_debug("GMAC: WOL on global unicast\n");
                pmt |= power_down | global_unicast | wake_up_frame_en;
        }

        if (pmt) {
                /* The receiver must be enabled for WOL before powering down */
                config = readl(ioaddr + GMAC_CONFIG);
                config |= GMAC_CONFIG_RE;
                writel(config, ioaddr + GMAC_CONFIG);
        }
        writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
                                 const unsigned char *addr, unsigned int reg_n)
{
        void __iomem *ioaddr = hw->pcsr;

        stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                                   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
                                 unsigned char *addr, unsigned int reg_n)
{
        void __iomem *ioaddr = hw->pcsr;

        stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                                   GMAC_ADDR_LOW(reg_n));
}
static void dwmac4_set_eee_mode(struct mac_device_info *hw,
                                bool en_tx_lpi_clockgating)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        /* Enable the link status receive on RGMII, SGMII or SMII
         * receive path and instruct the transmit to enter in LPI
         * state.
         */
        value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
        value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

        if (en_tx_lpi_clockgating)
                value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

        writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
        value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
        writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

        if (link)
                value |= GMAC4_LPI_CTRL_STATUS_PLS;
        else
                value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

        writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
{
        void __iomem *ioaddr = hw->pcsr;
        int value = et & STMMAC_ET_MAX;
        int regval;

        /* Program LPI entry timer value into register */
        writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);

        /* Enable/disable LPI entry timer */
        regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
        regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

        if (et)
                regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
        else
                regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;

        writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
        void __iomem *ioaddr = hw->pcsr;
        int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

        /* Program the timers in the LPI timer control register:
         * LS: minimum time (ms) for which the link
         *     status from PHY should be ok before transmitting
         *     the LPI pattern.
         * TW: minimum time (us) for which the core waits
         *     after it has stopped transmitting the LPI pattern.
         */
        writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}
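
/* Illustrative example for dwmac4_set_eee_timer() above: given the masking in
 * the expression, LS occupies bits 25:16 and TW bits 15:0 of
 * GMAC4_LPI_TIMER_CTRL, so ls = 1000 (ms) and tw = 30 (us) yield
 * (1000 << 16) | 30 = 0x03e8001e.
 */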

static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
{
        void __iomem *ioaddr = (void __iomem *)dev->base_addr;
        u32 val;

        val = readl(ioaddr + GMAC_VLAN_TAG);
        val &= ~GMAC_VLAN_TAG_VID;
        val |= GMAC_VLAN_TAG_ETV | vid;

        writel(val, ioaddr + GMAC_VLAN_TAG);
}

static int dwmac4_write_vlan_filter(struct net_device *dev,
                                    struct mac_device_info *hw,
                                    u8 index, u32 data)
{
        void __iomem *ioaddr = (void __iomem *)dev->base_addr;
        int i, timeout = 10;
        u32 val;

        if (index >= hw->num_vlan)
                return -EINVAL;

        writel(data, ioaddr + GMAC_VLAN_TAG_DATA);

        val = readl(ioaddr + GMAC_VLAN_TAG);
        val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
                 GMAC_VLAN_TAG_CTRL_CT |
                 GMAC_VLAN_TAG_CTRL_OB);
        val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;

        writel(val, ioaddr + GMAC_VLAN_TAG);

        for (i = 0; i < timeout; i++) {
                val = readl(ioaddr + GMAC_VLAN_TAG);
                if (!(val & GMAC_VLAN_TAG_CTRL_OB))
                        return 0;
                udelay(1);
        }

        netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");

        return -EBUSY;
}
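
/* Note on dwmac4_write_vlan_filter() above: this is an indirect register
 * write; the entry contents go into GMAC_VLAN_TAG_DATA, the entry offset is
 * placed in GMAC_VLAN_TAG together with the OB bit acting as a busy/strobe
 * flag, and the loop polls until hardware clears OB to signal completion.
 */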

static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
                                      struct mac_device_info *hw,
                                      __be16 proto, u16 vid)
{
        int index = -1;
        u32 val = 0;
        int i, ret;

        if (vid > 4095)
                return -EINVAL;

        if (hw->promisc) {
                netdev_err(dev,
                           "Adding VLAN in promisc mode not supported\n");
                return -EPERM;
        }

        /* Single Rx VLAN Filter */
        if (hw->num_vlan == 1) {
                /* For single VLAN filter, VID 0 means VLAN promiscuous */
                if (vid == 0) {
                        netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
                        return -EPERM;
                }

                if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
                        netdev_err(dev, "Only single VLAN ID supported\n");
                        return -EPERM;
                }

                hw->vlan_filter[0] = vid;
                dwmac4_write_single_vlan(dev, vid);

                return 0;
        }

        /* Extended Rx VLAN Filter Enable */
        val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;

        for (i = 0; i < hw->num_vlan; i++) {
                if (hw->vlan_filter[i] == val)
                        return 0;
                else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
                        index = i;
        }

        if (index == -1) {
                netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
                           hw->num_vlan);
                return -EPERM;
        }

        ret = dwmac4_write_vlan_filter(dev, hw, index, val);

        if (!ret)
                hw->vlan_filter[index] = val;

        return ret;
}

static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
                                      struct mac_device_info *hw,
                                      __be16 proto, u16 vid)
{
        int i, ret = 0;

        if (hw->promisc) {
                netdev_err(dev,
                           "Deleting VLAN in promisc mode not supported\n");
                return -EPERM;
        }

        /* Single Rx VLAN Filter */
        if (hw->num_vlan == 1) {
                if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
                        hw->vlan_filter[0] = 0;
                        dwmac4_write_single_vlan(dev, 0);
                }
                return 0;
        }

        /* Extended Rx VLAN Filter Enable */
        for (i = 0; i < hw->num_vlan; i++) {
                if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
                        ret = dwmac4_write_vlan_filter(dev, hw, i, 0);

                        if (!ret)
                                hw->vlan_filter[i] = 0;
                        else
                                return ret;
                }
        }

        return ret;
}

static void dwmac4_vlan_promisc_enable(struct net_device *dev,
                                       struct mac_device_info *hw)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;
        u32 hash;
        u32 val;
        int i;

        /* Single Rx VLAN Filter */
        if (hw->num_vlan == 1) {
                dwmac4_write_single_vlan(dev, 0);
                return;
        }

        /* Extended Rx VLAN Filter Enable */
        for (i = 0; i < hw->num_vlan; i++) {
                if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
                        val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
                        dwmac4_write_vlan_filter(dev, hw, i, val);
                }
        }

        hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
        if (hash & GMAC_VLAN_VLHT) {
                value = readl(ioaddr + GMAC_VLAN_TAG);
                if (value & GMAC_VLAN_VTHM) {
                        value &= ~GMAC_VLAN_VTHM;
                        writel(value, ioaddr + GMAC_VLAN_TAG);
                }
        }
}

static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
                                           struct mac_device_info *hw)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;
        u32 hash;
        u32 val;
        int i;

        /* Single Rx VLAN Filter */
        if (hw->num_vlan == 1) {
                dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
                return;
        }

        /* Extended Rx VLAN Filter Enable */
        for (i = 0; i < hw->num_vlan; i++) {
                if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
                        val = hw->vlan_filter[i];
                        dwmac4_write_vlan_filter(dev, hw, i, val);
                }
        }

        hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
        if (hash & GMAC_VLAN_VLHT) {
                value = readl(ioaddr + GMAC_VLAN_TAG);
                value |= GMAC_VLAN_VTHM;
                writel(value, ioaddr + GMAC_VLAN_TAG);
        }
}

static void dwmac4_set_filter(struct mac_device_info *hw,
                              struct net_device *dev)
{
        void __iomem *ioaddr = (void __iomem *)dev->base_addr;
        int numhashregs = (hw->multicast_filter_bins >> 5);
        int mcbitslog2 = hw->mcast_bits_log2;
        unsigned int value;
        u32 mc_filter[8];
        int i;

        memset(mc_filter, 0, sizeof(mc_filter));

        value = readl(ioaddr + GMAC_PACKET_FILTER);
        value &= ~GMAC_PACKET_FILTER_HMC;
        value &= ~GMAC_PACKET_FILTER_HPF;
        value &= ~GMAC_PACKET_FILTER_PCF;
        value &= ~GMAC_PACKET_FILTER_PM;
        value &= ~GMAC_PACKET_FILTER_PR;
        value &= ~GMAC_PACKET_FILTER_RA;
        if (dev->flags & IFF_PROMISC) {
                /* VLAN Tag Filter Fail Packets Queuing */
                if (hw->vlan_fail_q_en) {
                        value = readl(ioaddr + GMAC_RXQ_CTRL4);
                        value &= ~GMAC_RXQCTRL_VFFQ_MASK;
                        value |= GMAC_RXQCTRL_VFFQE |
                                 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
                        writel(value, ioaddr + GMAC_RXQ_CTRL4);
                        value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
                } else {
                        value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
                }

        } else if ((dev->flags & IFF_ALLMULTI) ||
                   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
                /* Pass all multi */
                value |= GMAC_PACKET_FILTER_PM;
                /* Set all the bits of the HASH tab */
                memset(mc_filter, 0xff, sizeof(mc_filter));
        } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
                struct netdev_hw_addr *ha;

                /* Hash filter for multicast */
                value |= GMAC_PACKET_FILTER_HMC;

                netdev_for_each_mc_addr(ha, dev) {
                        /* The upper n bits of the calculated CRC are used to
                         * index the contents of the hash table. The number of
                         * bits used depends on the hardware configuration
                         * selected at core configuration time.
                         */
                        u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
                                        ETH_ALEN)) >> (32 - mcbitslog2);
                        /* The most significant bit determines the register to
                         * use (H/L) while the other 5 bits determine the bit
                         * within the register.
                         */
                        mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
                }
        }
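
        /* Illustrative example: with a 64-bin hash filter (mcbitslog2 == 6),
         * bit_nr above falls in 0..63, so bit_nr >> 5 selects GMAC_HASH_TAB(0)
         * or GMAC_HASH_TAB(1) and bit_nr & 0x1f picks the bit inside it.
         */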

        for (i = 0; i < numhashregs; i++)
                writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

        value |= GMAC_PACKET_FILTER_HPF;

        /* Handle multiple unicast addresses */
        if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
                /* Switch to promiscuous mode if more than 128 addrs
                 * are required
                 */
                value |= GMAC_PACKET_FILTER_PR;
        } else {
                struct netdev_hw_addr *ha;
                int reg = 1;

                netdev_for_each_uc_addr(ha, dev) {
                        dwmac4_set_umac_addr(hw, ha->addr, reg);
                        reg++;
                }

                while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
                        writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
                        writel(0, ioaddr + GMAC_ADDR_LOW(reg));
                        reg++;
                }
        }

        /* VLAN filtering */
        if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                value |= GMAC_PACKET_FILTER_VTFE;

        writel(value, ioaddr + GMAC_PACKET_FILTER);

        if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
                if (!hw->promisc) {
                        hw->promisc = 1;
                        dwmac4_vlan_promisc_enable(dev, hw);
                }
        } else {
                if (hw->promisc) {
                        hw->promisc = 0;
                        dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
                }
        }
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
                             unsigned int fc, unsigned int pause_time,
                             u32 tx_cnt)
{
        void __iomem *ioaddr = hw->pcsr;
        unsigned int flow = 0;
        u32 queue = 0;

        pr_debug("GMAC Flow-Control:\n");
        if (fc & FLOW_RX) {
                pr_debug("\tReceive Flow-Control ON\n");
                flow |= GMAC_RX_FLOW_CTRL_RFE;
        } else {
                pr_debug("\tReceive Flow-Control OFF\n");
        }
        writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

        if (fc & FLOW_TX) {
                pr_debug("\tTransmit Flow-Control ON\n");

                if (duplex)
                        pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

                for (queue = 0; queue < tx_cnt; queue++) {
                        flow = GMAC_TX_FLOW_CTRL_TFE;

                        if (duplex)
                                flow |=
                                (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

                        writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
                }
        } else {
                for (queue = 0; queue < tx_cnt; queue++)
                        writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
        }
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
                            bool loopback)
{
        dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
        dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
        dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
        u32 status;

        status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
        x->irq_rgmii_n++;

        /* Check the link status */
        if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
                int speed_value;

                x->pcs_link = 1;

                speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
                               GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
                if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
                        x->pcs_speed = SPEED_1000;
                else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
                        x->pcs_speed = SPEED_100;
                else
                        x->pcs_speed = SPEED_10;

                x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

                pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
                        x->pcs_duplex ? "Full" : "Half");
        } else {
                x->pcs_link = 0;
                pr_info("Link is Down\n");
        }
}

static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 mtl_int_qx_status;
        int ret = 0;

        mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

        /* Check MTL Interrupt */
        if (mtl_int_qx_status & MTL_INT_QX(chan)) {
                /* read Queue x Interrupt status */
                u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

                if (status & MTL_RX_OVERFLOW_INT) {
                        /* clear Interrupt */
                        writel(status | MTL_RX_OVERFLOW_INT,
                               ioaddr + MTL_CHAN_INT_CTRL(chan));
                        ret = CORE_IRQ_MTL_RX_OVERFLOW;
                }
        }

        return ret;
}

static int dwmac4_irq_status(struct mac_device_info *hw,
                             struct stmmac_extra_stats *x)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
        u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
        int ret = 0;

        /* Discard disabled bits */
        intr_status &= intr_enable;

        /* Unused events (e.g. MMC interrupts) are not handled. */
        if ((intr_status & mmc_tx_irq))
                x->mmc_tx_irq_n++;
        if (unlikely(intr_status & mmc_rx_irq))
                x->mmc_rx_irq_n++;
        if (unlikely(intr_status & mmc_rx_csum_offload_irq))
                x->mmc_rx_csum_offload_irq_n++;
        /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
        if (unlikely(intr_status & pmt_irq)) {
                readl(ioaddr + GMAC_PMT);
                x->irq_receive_pmt_irq_n++;
        }

        /* MAC tx/rx EEE LPI entry/exit interrupts */
        if (intr_status & lpi_irq) {
                /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
                u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

                if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
                        ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
                        x->irq_tx_path_in_lpi_mode_n++;
                }
                if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
                        ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
                        x->irq_tx_path_exit_lpi_mode_n++;
                }
                if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
                        x->irq_rx_path_in_lpi_mode_n++;
                if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
                        x->irq_rx_path_exit_lpi_mode_n++;
        }

        dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
        if (intr_status & PCS_RGSMIIIS_IRQ)
                dwmac4_phystatus(ioaddr, x);

        return ret;
}

static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
                         u32 rx_queues, u32 tx_queues)
{
        u32 value;
        u32 queue;

        for (queue = 0; queue < tx_queues; queue++) {
                value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

                if (value & MTL_DEBUG_TXSTSFSTS)
                        x->mtl_tx_status_fifo_full++;
                if (value & MTL_DEBUG_TXFSTS)
                        x->mtl_tx_fifo_not_empty++;
                if (value & MTL_DEBUG_TWCSTS)
                        x->mmtl_fifo_ctrl++;
                if (value & MTL_DEBUG_TRCSTS_MASK) {
                        u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
                                     >> MTL_DEBUG_TRCSTS_SHIFT;
                        if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
                                x->mtl_tx_fifo_read_ctrl_write++;
                        else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
                                x->mtl_tx_fifo_read_ctrl_wait++;
                        else if (trcsts == MTL_DEBUG_TRCSTS_READ)
                                x->mtl_tx_fifo_read_ctrl_read++;
                        else
                                x->mtl_tx_fifo_read_ctrl_idle++;
                }
                if (value & MTL_DEBUG_TXPAUSED)
                        x->mac_tx_in_pause++;
        }

        for (queue = 0; queue < rx_queues; queue++) {
                value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

                if (value & MTL_DEBUG_RXFSTS_MASK) {
                        u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
                                     >> MTL_DEBUG_RRCSTS_SHIFT;

                        if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
                                x->mtl_rx_fifo_fill_level_full++;
                        else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
                                x->mtl_rx_fifo_fill_above_thresh++;
                        else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
                                x->mtl_rx_fifo_fill_below_thresh++;
                        else
                                x->mtl_rx_fifo_fill_level_empty++;
                }
                if (value & MTL_DEBUG_RRCSTS_MASK) {
                        u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
                                     MTL_DEBUG_RRCSTS_SHIFT;

                        if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
                                x->mtl_rx_fifo_read_ctrl_flush++;
                        else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
                                x->mtl_rx_fifo_read_ctrl_read_data++;
                        else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
                                x->mtl_rx_fifo_read_ctrl_status++;
                        else
                                x->mtl_rx_fifo_read_ctrl_idle++;
                }
                if (value & MTL_DEBUG_RWCSTS)
                        x->mtl_rx_fifo_ctrl_active++;
        }

        /* GMAC debug */
        value = readl(ioaddr + GMAC_DEBUG);

        if (value & GMAC_DEBUG_TFCSTS_MASK) {
                u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
                             >> GMAC_DEBUG_TFCSTS_SHIFT;

                if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
                        x->mac_tx_frame_ctrl_xfer++;
                else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
                        x->mac_tx_frame_ctrl_pause++;
                else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
                        x->mac_tx_frame_ctrl_wait++;
                else
                        x->mac_tx_frame_ctrl_idle++;
        }
        if (value & GMAC_DEBUG_TPESTS)
                x->mac_gmii_tx_proto_engine++;
        if (value & GMAC_DEBUG_RFCFCSTS_MASK)
                x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
                                            >> GMAC_DEBUG_RFCFCSTS_SHIFT;
        if (value & GMAC_DEBUG_RPESTS)
                x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
        u32 value = readl(ioaddr + GMAC_CONFIG);

        if (enable)
                value |= GMAC_CONFIG_LM;
        else
                value &= ~GMAC_CONFIG_LM;

        writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
                                    __le16 perfect_match, bool is_double)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

        value = readl(ioaddr + GMAC_VLAN_TAG);

        if (hash) {
                value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
                if (is_double) {
                        value |= GMAC_VLAN_EDVLP;
                        value |= GMAC_VLAN_ESVL;
                        value |= GMAC_VLAN_DOVLTC;
                }

                writel(value, ioaddr + GMAC_VLAN_TAG);
        } else if (perfect_match) {
                u32 value = GMAC_VLAN_ETV;

                if (is_double) {
                        value |= GMAC_VLAN_EDVLP;
                        value |= GMAC_VLAN_ESVL;
                        value |= GMAC_VLAN_DOVLTC;
                }

                writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
        } else {
                value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
                value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
                value &= ~GMAC_VLAN_DOVLTC;
                value &= ~GMAC_VLAN_VID;

                writel(value, ioaddr + GMAC_VLAN_TAG);
        }
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
        u32 value = readl(ioaddr + GMAC_CONFIG);

        value &= ~GMAC_CONFIG_SARC;
        value |= val << GMAC_CONFIG_SARC_SHIFT;

        writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + GMAC_VLAN_INCL);
        value |= GMAC_VLAN_VLTI;
        value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
        value &= ~GMAC_VLAN_VLC;
        value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
        writel(value, ioaddr + GMAC_VLAN_INCL);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
                                   u32 addr)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        writel(addr, ioaddr + GMAC_ARP_ADDR);

        value = readl(ioaddr + GMAC_CONFIG);
        if (en)
                value |= GMAC_CONFIG_ARPEN;
        else
                value &= ~GMAC_CONFIG_ARPEN;
        writel(value, ioaddr + GMAC_CONFIG);
}
static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
                                   bool en, bool ipv6, bool sa, bool inv,
                                   u32 match)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + GMAC_PACKET_FILTER);
        value |= GMAC_PACKET_FILTER_IPFE;
        writel(value, ioaddr + GMAC_PACKET_FILTER);

        value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

        /* For IPv6, the SA and DA filters cannot both be active */
        if (ipv6) {
                value |= GMAC_L3PEN0;
                value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
                value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
                if (sa) {
                        value |= GMAC_L3SAM0;
                        if (inv)
                                value |= GMAC_L3SAIM0;
                } else {
                        value |= GMAC_L3DAM0;
                        if (inv)
                                value |= GMAC_L3DAIM0;
                }
        } else {
                value &= ~GMAC_L3PEN0;
                if (sa) {
                        value |= GMAC_L3SAM0;
                        if (inv)
                                value |= GMAC_L3SAIM0;
                } else {
                        value |= GMAC_L3DAM0;
                        if (inv)
                                value |= GMAC_L3DAIM0;
                }
        }

        writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

        if (sa)
                writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
        else
                writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));

        if (!en)
                writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

        return 0;
}

static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
                                   bool en, bool udp, bool sa, bool inv,
                                   u32 match)
{
        void __iomem *ioaddr = hw->pcsr;
        u32 value;

        value = readl(ioaddr + GMAC_PACKET_FILTER);
        value |= GMAC_PACKET_FILTER_IPFE;
        writel(value, ioaddr + GMAC_PACKET_FILTER);

        value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
        if (udp)
                value |= GMAC_L4PEN0;
        else
                value &= ~GMAC_L4PEN0;

        value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
        value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
        if (sa) {
                value |= GMAC_L4SPM0;
                if (inv)
                        value |= GMAC_L4SPIM0;
        } else {
                value |= GMAC_L4DPM0;
                if (inv)
                        value |= GMAC_L4DPIM0;
        }

        writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

        if (sa)
                value = match & GMAC_L4SP0;
        else
                value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;

        writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

        if (!en)
                writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

        return 0;
}
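
/* Illustrative note for dwmac4_config_l4_filter() above: a source-port match
 * lands in the low half of GMAC_L4_ADDR (masked by GMAC_L4SP0), while a
 * destination-port match is shifted by GMAC_L4DP0_SHIFT into the GMAC_L4DP0
 * field; e.g. matching destination port 319 (the PTP event port) writes
 * (319 << GMAC_L4DP0_SHIFT) & GMAC_L4DP0.
 */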

const struct stmmac_ops dwmac4_ops = {
        .core_init = dwmac4_core_init,
        .set_mac = stmmac_set_mac,
        .rx_ipc = dwmac4_rx_ipc_enable,
        .rx_queue_enable = dwmac4_rx_queue_enable,
        .rx_queue_prio = dwmac4_rx_queue_priority,
        .tx_queue_prio = dwmac4_tx_queue_priority,
        .rx_queue_routing = dwmac4_rx_queue_routing,
        .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
        .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
        .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
        .map_mtl_to_dma = dwmac4_map_mtl_dma,
        .config_cbs = dwmac4_config_cbs,
        .dump_regs = dwmac4_dump_regs,
        .host_irq_status = dwmac4_irq_status,
        .host_mtl_irq_status = dwmac4_irq_mtl_status,
        .flow_ctrl = dwmac4_flow_ctrl,
        .pmt = dwmac4_pmt,
        .set_umac_addr = dwmac4_set_umac_addr,
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
        .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_rane = dwmac4_rane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
        .debug = dwmac4_debug,
        .set_filter = dwmac4_set_filter,
        .set_mac_loopback = dwmac4_set_mac_loopback,
        .update_vlan_hash = dwmac4_update_vlan_hash,
        .sarc_configure = dwmac4_sarc_configure,
        .enable_vlan = dwmac4_enable_vlan,
        .set_arp_offload = dwmac4_set_arp_offload,
        .config_l3_filter = dwmac4_config_l3_filter,
        .config_l4_filter = dwmac4_config_l4_filter,
        .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
        .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
        .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac410_ops = {
        .core_init = dwmac4_core_init,
        .set_mac = stmmac_dwmac4_set_mac,
        .rx_ipc = dwmac4_rx_ipc_enable,
        .rx_queue_enable = dwmac4_rx_queue_enable,
        .rx_queue_prio = dwmac4_rx_queue_priority,
        .tx_queue_prio = dwmac4_tx_queue_priority,
        .rx_queue_routing = dwmac4_rx_queue_routing,
        .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
        .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
        .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
        .map_mtl_to_dma = dwmac4_map_mtl_dma,
        .config_cbs = dwmac4_config_cbs,
        .dump_regs = dwmac4_dump_regs,
        .host_irq_status = dwmac4_irq_status,
        .host_mtl_irq_status = dwmac4_irq_mtl_status,
        .flow_ctrl = dwmac4_flow_ctrl,
        .pmt = dwmac4_pmt,
        .set_umac_addr = dwmac4_set_umac_addr,
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
        .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_rane = dwmac4_rane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
        .debug = dwmac4_debug,
        .set_filter = dwmac4_set_filter,
        .flex_pps_config = dwmac5_flex_pps_config,
        .set_mac_loopback = dwmac4_set_mac_loopback,
        .update_vlan_hash = dwmac4_update_vlan_hash,
        .sarc_configure = dwmac4_sarc_configure,
        .enable_vlan = dwmac4_enable_vlan,
        .set_arp_offload = dwmac4_set_arp_offload,
        .config_l3_filter = dwmac4_config_l3_filter,
        .config_l4_filter = dwmac4_config_l4_filter,
        .est_configure = dwmac5_est_configure,
        .est_irq_status = dwmac5_est_irq_status,
        .fpe_configure = dwmac5_fpe_configure,
        .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
        .fpe_irq_status = dwmac5_fpe_irq_status,
        .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
        .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
        .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac510_ops = {
        .core_init = dwmac4_core_init,
        .set_mac = stmmac_dwmac4_set_mac,
        .rx_ipc = dwmac4_rx_ipc_enable,
        .rx_queue_enable = dwmac4_rx_queue_enable,
        .rx_queue_prio = dwmac4_rx_queue_priority,
        .tx_queue_prio = dwmac4_tx_queue_priority,
        .rx_queue_routing = dwmac4_rx_queue_routing,
        .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
        .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
        .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
        .map_mtl_to_dma = dwmac4_map_mtl_dma,
        .config_cbs = dwmac4_config_cbs,
        .dump_regs = dwmac4_dump_regs,
        .host_irq_status = dwmac4_irq_status,
        .host_mtl_irq_status = dwmac4_irq_mtl_status,
        .flow_ctrl = dwmac4_flow_ctrl,
        .pmt = dwmac4_pmt,
        .set_umac_addr = dwmac4_set_umac_addr,
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
        .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_rane = dwmac4_rane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
        .debug = dwmac4_debug,
        .set_filter = dwmac4_set_filter,
        .safety_feat_config = dwmac5_safety_feat_config,
        .safety_feat_irq_status = dwmac5_safety_feat_irq_status,
        .safety_feat_dump = dwmac5_safety_feat_dump,
        .rxp_config = dwmac5_rxp_config,
        .flex_pps_config = dwmac5_flex_pps_config,
        .set_mac_loopback = dwmac4_set_mac_loopback,
        .update_vlan_hash = dwmac4_update_vlan_hash,
        .sarc_configure = dwmac4_sarc_configure,
        .enable_vlan = dwmac4_enable_vlan,
        .set_arp_offload = dwmac4_set_arp_offload,
        .config_l3_filter = dwmac4_config_l3_filter,
        .config_l4_filter = dwmac4_config_l4_filter,
        .est_configure = dwmac5_est_configure,
        .est_irq_status = dwmac5_est_irq_status,
        .fpe_configure = dwmac5_fpe_configure,
        .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
        .fpe_irq_status = dwmac5_fpe_irq_status,
        .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
        .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
        .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
{
        u32 val, num_vlan;

        val = readl(ioaddr + GMAC_HW_FEATURE3);
        switch (val & GMAC_HW_FEAT_NRVF) {
        case 0:
                num_vlan = 1;
                break;
        case 1:
                num_vlan = 4;
                break;
        case 2:
                num_vlan = 8;
                break;
        case 3:
                num_vlan = 16;
                break;
        case 4:
                num_vlan = 24;
                break;
        case 5:
                num_vlan = 32;
                break;
        default:
                num_vlan = 1;
        }

        return num_vlan;
}

int dwmac4_setup(struct stmmac_priv *priv)
{
        struct mac_device_info *mac = priv->hw;

        dev_info(priv->device, "\tDWMAC4/5\n");

        priv->dev->priv_flags |= IFF_UNICAST_FLT;
        mac->pcsr = priv->ioaddr;
        mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
        mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
        mac->mcast_bits_log2 = 0;

        if (mac->multicast_filter_bins)
                mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

        mac->link.duplex = GMAC_CONFIG_DM;
        mac->link.speed10 = GMAC_CONFIG_PS;
        mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
        mac->link.speed1000 = 0;
        mac->link.speed2500 = GMAC_CONFIG_FES;
        mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
        mac->mii.addr = GMAC_MDIO_ADDR;
        mac->mii.data = GMAC_MDIO_DATA;
        mac->mii.addr_shift = 21;
        mac->mii.addr_mask = GENMASK(25, 21);
        mac->mii.reg_shift = 16;
        mac->mii.reg_mask = GENMASK(20, 16);
        mac->mii.clk_csr_shift = 8;
        mac->mii.clk_csr_mask = GENMASK(11, 8);
        mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);

        return 0;
}