1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3 * Microsemi Ocelot Switch driver
4 *
5 * Copyright (c) 2017 Microsemi Corporation
6 */
7 #include <linux/dsa/ocelot.h>
8 #include <linux/if_bridge.h>
9 #include <linux/iopoll.h>
10 #include <soc/mscc/ocelot_vcap.h>
11 #include "ocelot.h"
12 #include "ocelot_vcap.h"
13
14 #define TABLE_UPDATE_SLEEP_US 10
15 #define TABLE_UPDATE_TIMEOUT_US 100000
16 #define MEM_INIT_SLEEP_US 1000
17 #define MEM_INIT_TIMEOUT_US 100000
18
19 #define OCELOT_RSV_VLAN_RANGE_START 4000
20
/* Software representation of one row of the hardware MAC table
 * (ANA_TABLES_MAC*): the (MAC, VID) key plus the entry type.
 */
struct ocelot_mact_entry {
	u8 mac[ETH_ALEN];
	u16 vid;
	enum macaccess_entry_type type;
};
26
/* Caller must hold &ocelot->mact_lock */
static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
{
	/* Raw read of the MAC table command/status register; used as the
	 * polled accessor in ocelot_mact_wait_for_completion().
	 */
	return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
}
32
/* Caller must hold &ocelot->mact_lock */
static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
{
	u32 val;

	/* Poll until the MAC table command field returns to IDLE, i.e. the
	 * previously issued LEARN/FORGET/READ command has finished.
	 * Returns 0 on success, -ETIMEDOUT after TABLE_UPDATE_TIMEOUT_US.
	 */
	return readx_poll_timeout(ocelot_mact_read_macaccess,
				  ocelot, val,
				  (val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
				  MACACCESS_CMD_IDLE,
				  TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
44
45 /* Caller must hold &ocelot->mact_lock */
ocelot_mact_select(struct ocelot * ocelot,const unsigned char mac[ETH_ALEN],unsigned int vid)46 static void ocelot_mact_select(struct ocelot *ocelot,
47 const unsigned char mac[ETH_ALEN],
48 unsigned int vid)
49 {
50 u32 macl = 0, mach = 0;
51
52 /* Set the MAC address to handle and the vlan associated in a format
53 * understood by the hardware.
54 */
55 mach |= vid << 16;
56 mach |= mac[0] << 8;
57 mach |= mac[1] << 0;
58 macl |= mac[2] << 24;
59 macl |= mac[3] << 16;
60 macl |= mac[4] << 8;
61 macl |= mac[5] << 0;
62
63 ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
64 ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
65
66 }
67
__ocelot_mact_learn(struct ocelot * ocelot,int port,const unsigned char mac[ETH_ALEN],unsigned int vid,enum macaccess_entry_type type)68 static int __ocelot_mact_learn(struct ocelot *ocelot, int port,
69 const unsigned char mac[ETH_ALEN],
70 unsigned int vid, enum macaccess_entry_type type)
71 {
72 u32 cmd = ANA_TABLES_MACACCESS_VALID |
73 ANA_TABLES_MACACCESS_DEST_IDX(port) |
74 ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
75 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
76 unsigned int mc_ports;
77 int err;
78
79 /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
80 if (type == ENTRYTYPE_MACv4)
81 mc_ports = (mac[1] << 8) | mac[2];
82 else if (type == ENTRYTYPE_MACv6)
83 mc_ports = (mac[0] << 8) | mac[1];
84 else
85 mc_ports = 0;
86
87 if (mc_ports & BIT(ocelot->num_phys_ports))
88 cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
89
90 ocelot_mact_select(ocelot, mac, vid);
91
92 /* Issue a write command */
93 ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
94
95 err = ocelot_mact_wait_for_completion(ocelot);
96
97 return err;
98 }
99
ocelot_mact_learn(struct ocelot * ocelot,int port,const unsigned char mac[ETH_ALEN],unsigned int vid,enum macaccess_entry_type type)100 int ocelot_mact_learn(struct ocelot *ocelot, int port,
101 const unsigned char mac[ETH_ALEN],
102 unsigned int vid, enum macaccess_entry_type type)
103 {
104 int ret;
105
106 mutex_lock(&ocelot->mact_lock);
107 ret = __ocelot_mact_learn(ocelot, port, mac, vid, type);
108 mutex_unlock(&ocelot->mact_lock);
109
110 return ret;
111 }
112 EXPORT_SYMBOL(ocelot_mact_learn);
113
ocelot_mact_forget(struct ocelot * ocelot,const unsigned char mac[ETH_ALEN],unsigned int vid)114 int ocelot_mact_forget(struct ocelot *ocelot,
115 const unsigned char mac[ETH_ALEN], unsigned int vid)
116 {
117 int err;
118
119 mutex_lock(&ocelot->mact_lock);
120
121 ocelot_mact_select(ocelot, mac, vid);
122
123 /* Issue a forget command */
124 ocelot_write(ocelot,
125 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
126 ANA_TABLES_MACACCESS);
127
128 err = ocelot_mact_wait_for_completion(ocelot);
129
130 mutex_unlock(&ocelot->mact_lock);
131
132 return err;
133 }
134 EXPORT_SYMBOL(ocelot_mact_forget);
135
ocelot_mact_lookup(struct ocelot * ocelot,int * dst_idx,const unsigned char mac[ETH_ALEN],unsigned int vid,enum macaccess_entry_type * type)136 int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
137 const unsigned char mac[ETH_ALEN],
138 unsigned int vid, enum macaccess_entry_type *type)
139 {
140 int val;
141
142 mutex_lock(&ocelot->mact_lock);
143
144 ocelot_mact_select(ocelot, mac, vid);
145
146 /* Issue a read command with MACACCESS_VALID=1. */
147 ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
148 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
149 ANA_TABLES_MACACCESS);
150
151 if (ocelot_mact_wait_for_completion(ocelot)) {
152 mutex_unlock(&ocelot->mact_lock);
153 return -ETIMEDOUT;
154 }
155
156 /* Read back the entry flags */
157 val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
158
159 mutex_unlock(&ocelot->mact_lock);
160
161 if (!(val & ANA_TABLES_MACACCESS_VALID))
162 return -ENOENT;
163
164 *dst_idx = ANA_TABLES_MACACCESS_DEST_IDX_X(val);
165 *type = ANA_TABLES_MACACCESS_ENTRYTYPE_X(val);
166
167 return 0;
168 }
169 EXPORT_SYMBOL(ocelot_mact_lookup);
170
int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
				 const unsigned char mac[ETH_ALEN],
				 unsigned int vid,
				 enum macaccess_entry_type type,
				 int sfid, int ssid)
{
	int ret;

	mutex_lock(&ocelot->mact_lock);

	/* Program the stream filter/gate IDs that the subsequent LEARN
	 * command will associate with this MAC table entry. A negative
	 * sfid/ssid means "none": the corresponding VALID bit is left
	 * clear, so the ID field is presumably ignored by hardware in
	 * that case — confirm against the datasheet.
	 */
	ocelot_write(ocelot,
		     (sfid < 0 ? 0 : ANA_TABLES_STREAMDATA_SFID_VALID) |
		     ANA_TABLES_STREAMDATA_SFID(sfid) |
		     (ssid < 0 ? 0 : ANA_TABLES_STREAMDATA_SSID_VALID) |
		     ANA_TABLES_STREAMDATA_SSID(ssid),
		     ANA_TABLES_STREAMDATA);

	ret = __ocelot_mact_learn(ocelot, dst_idx, mac, vid, type);

	mutex_unlock(&ocelot->mact_lock);

	return ret;
}
EXPORT_SYMBOL(ocelot_mact_learn_streamdata);
195
/* One-time MAC table setup at switch init */
static void ocelot_mact_init(struct ocelot *ocelot)
{
	/* Configure the learning mode entries attributes:
	 * - Do not copy the frame to the CPU extraction queues.
	 * - Use the vlan and mac_copy for dmac lookup.
	 */
	ocelot_rmw(ocelot, 0,
		   ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
		   | ANA_AGENCTRL_LEARN_FWD_KILL
		   | ANA_AGENCTRL_LEARN_IGNORE_VLAN,
		   ANA_AGENCTRL);

	/* Clear the MAC table. We are not concurrent with anyone, so
	 * holding &ocelot->mact_lock is pointless.
	 */
	ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
213
/* Enable the three VCAP lookups (S2, S1, ES0) on a port */
static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
{
	/* Enable the VCAP S2 lookup, with key selection 0xa for IPv6
	 * frames.
	 */
	ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
			 ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
			 ANA_PORT_VCAP_S2_CFG, port);

	/* Enable the VCAP S1 lookup */
	ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
			 ANA_PORT_VCAP_CFG, port);

	/* Enable the VCAP ES0 (egress) lookup in the rewriter */
	ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
		       REW_PORT_CFG_ES0_EN,
		       REW_PORT_CFG, port);
}
227
ocelot_single_vlan_aware_bridge(struct ocelot * ocelot,struct netlink_ext_ack * extack)228 static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot,
229 struct netlink_ext_ack *extack)
230 {
231 struct net_device *bridge = NULL;
232 int port;
233
234 for (port = 0; port < ocelot->num_phys_ports; port++) {
235 struct ocelot_port *ocelot_port = ocelot->ports[port];
236
237 if (!ocelot_port || !ocelot_port->bridge ||
238 !br_vlan_enabled(ocelot_port->bridge))
239 continue;
240
241 if (!bridge) {
242 bridge = ocelot_port->bridge;
243 continue;
244 }
245
246 if (bridge == ocelot_port->bridge)
247 continue;
248
249 NL_SET_ERR_MSG_MOD(extack,
250 "Only one VLAN-aware bridge is supported");
251 return -EBUSY;
252 }
253
254 return 0;
255 }
256
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
	/* Raw read of the VLAN table command/status register; used as the
	 * polled accessor in ocelot_vlant_wait_for_completion().
	 */
	return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
}
261
static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
{
	u32 val;

	/* Poll until the VLAN table command field returns to IDLE, i.e.
	 * the previously issued WRITE/INIT command has finished.
	 * Returns 0 on success, -ETIMEDOUT after TABLE_UPDATE_TIMEOUT_US.
	 */
	return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
				  ocelot,
				  val,
				  (val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
				  ANA_TABLES_VLANACCESS_CMD_IDLE,
				  TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
273
/* Program the port member mask of one VLAN into the hardware VLAN table */
static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
{
	/* Select the VID to configure */
	ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
		     ANA_TABLES_VLANTIDX);

	/* Set the vlan port members mask and issue a write command */
	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
		     ANA_TABLES_VLANACCESS_CMD_WRITE,
		     ANA_TABLES_VLANACCESS);

	return ocelot_vlant_wait_for_completion(ocelot);
}
286
ocelot_port_num_untagged_vlans(struct ocelot * ocelot,int port)287 static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
288 {
289 struct ocelot_bridge_vlan *vlan;
290 int num_untagged = 0;
291
292 list_for_each_entry(vlan, &ocelot->vlans, list) {
293 if (!(vlan->portmask & BIT(port)))
294 continue;
295
296 /* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
297 * because this is never active in hardware at the same time as
298 * the bridge VLANs, which only matter in VLAN-aware mode.
299 */
300 if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
301 continue;
302
303 if (vlan->untagged & BIT(port))
304 num_untagged++;
305 }
306
307 return num_untagged;
308 }
309
ocelot_port_num_tagged_vlans(struct ocelot * ocelot,int port)310 static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
311 {
312 struct ocelot_bridge_vlan *vlan;
313 int num_tagged = 0;
314
315 list_for_each_entry(vlan, &ocelot->vlans, list) {
316 if (!(vlan->portmask & BIT(port)))
317 continue;
318
319 if (!(vlan->untagged & BIT(port)))
320 num_tagged++;
321 }
322
323 return num_tagged;
324 }
325
326 /* We use native VLAN when we have to mix egress-tagged VLANs with exactly
327 * _one_ egress-untagged VLAN (_the_ native VLAN)
328 */
ocelot_port_uses_native_vlan(struct ocelot * ocelot,int port)329 static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
330 {
331 return ocelot_port_num_tagged_vlans(ocelot, port) &&
332 ocelot_port_num_untagged_vlans(ocelot, port) == 1;
333 }
334
335 static struct ocelot_bridge_vlan *
ocelot_port_find_native_vlan(struct ocelot * ocelot,int port)336 ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
337 {
338 struct ocelot_bridge_vlan *vlan;
339
340 list_for_each_entry(vlan, &ocelot->vlans, list)
341 if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
342 return vlan;
343
344 return NULL;
345 }
346
/* Keep in sync REW_TAG_CFG_TAG_CFG and, if applicable,
 * REW_PORT_VLAN_CFG_PORT_VID, with the bridge VLAN table and VLAN awareness
 * state of the port.
 */
static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	enum ocelot_port_tag_config tag_cfg;
	bool uses_native_vlan = false;

	if (ocelot_port->vlan_aware) {
		uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port);

		/* Pick the egress tagging mode based on the port's VLAN
		 * memberships: NATIVE when mixing tagged VLANs with one
		 * untagged VLAN, DISABLED when there are untagged VLANs
		 * only, TRUNK when everything is tagged.
		 */
		if (uses_native_vlan)
			tag_cfg = OCELOT_PORT_TAG_NATIVE;
		else if (ocelot_port_num_untagged_vlans(ocelot, port))
			tag_cfg = OCELOT_PORT_TAG_DISABLED;
		else
			tag_cfg = OCELOT_PORT_TAG_TRUNK;
	} else {
		/* VLAN-unaware ports never push a tag on egress */
		tag_cfg = OCELOT_PORT_TAG_DISABLED;
	}

	ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg),
		       REW_TAG_CFG_TAG_CFG_M,
		       REW_TAG_CFG, port);

	if (uses_native_vlan) {
		struct ocelot_bridge_vlan *native_vlan;

		/* Not having a native VLAN is impossible, because
		 * ocelot_port_num_untagged_vlans has returned 1.
		 * So there is no use in checking for NULL here.
		 */
		native_vlan = ocelot_port_find_native_vlan(ocelot, port);

		ocelot_rmw_gix(ocelot,
			       REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid),
			       REW_PORT_VLAN_CFG_PORT_VID_M,
			       REW_PORT_VLAN_CFG, port);
	}
}
389
ocelot_bridge_num_find(struct ocelot * ocelot,const struct net_device * bridge)390 int ocelot_bridge_num_find(struct ocelot *ocelot,
391 const struct net_device *bridge)
392 {
393 int port;
394
395 for (port = 0; port < ocelot->num_phys_ports; port++) {
396 struct ocelot_port *ocelot_port = ocelot->ports[port];
397
398 if (ocelot_port && ocelot_port->bridge == bridge)
399 return ocelot_port->bridge_num;
400 }
401
402 return -1;
403 }
404 EXPORT_SYMBOL_GPL(ocelot_bridge_num_find);
405
/* Compute the pvid used to classify traffic on a VLAN-unaware port:
 * 0 for standalone ports, or a per-bridge reserved VID otherwise.
 */
static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
				    const struct net_device *bridge)
{
	int bridge_num;

	/* Standalone ports use VID 0 */
	if (!bridge)
		return 0;

	bridge_num = ocelot_bridge_num_find(ocelot, bridge);
	if (WARN_ON(bridge_num < 0))
		return 0;

	/* VLAN-unaware bridges use a reserved VID going from 4095 downwards */
	return VLAN_N_VID - bridge_num - 1;
}
422
/* Default vlan to classify for untagged frames (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
				 const struct ocelot_bridge_vlan *pvid_vlan)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
	u32 val = 0;

	ocelot_port->pvid_vlan = pvid_vlan;

	/* In VLAN-aware mode the bridge-provided pvid wins; otherwise the
	 * VLAN-unaware pvid computed above is used.
	 */
	if (ocelot_port->vlan_aware && pvid_vlan)
		pvid = pvid_vlan->vid;

	ocelot_rmw_gix(ocelot,
		       ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
		       ANA_PORT_VLAN_CFG_VLAN_VID_M,
		       ANA_PORT_VLAN_CFG, port);

	/* If there's no pvid, we should drop not only untagged traffic (which
	 * happens automatically), but also 802.1p traffic which gets
	 * classified to VLAN 0, but that is always in our RX filter, so it
	 * would get accepted were it not for this setting.
	 */
	if (!pvid_vlan && ocelot_port->vlan_aware)
		val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
		      ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;

	ocelot_rmw_gix(ocelot, val,
		       ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
		       ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
		       ANA_PORT_DROP_CFG, port);
}
455
/* Linear scan of the software VLAN list for a matching VID */
static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
							  u16 vid)
{
	struct ocelot_bridge_vlan *vlan;

	list_for_each_entry(vlan, &ocelot->vlans, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}
467
/* Add @port to the member mask of @vid, creating the software bookkeeping
 * entry on first use. The hardware is updated before the software state so
 * that a failed write leaves both consistent.
 */
static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid,
				  bool untagged)
{
	struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
	unsigned long portmask;
	int err;

	if (vlan) {
		portmask = vlan->portmask | BIT(port);

		err = ocelot_vlant_set_mask(ocelot, vid, portmask);
		if (err)
			return err;

		vlan->portmask = portmask;
		/* Bridge VLANs can be overwritten with a different
		 * egress-tagging setting, so make sure to override an untagged
		 * with a tagged VID if that's going on.
		 */
		if (untagged)
			vlan->untagged |= BIT(port);
		else
			vlan->untagged &= ~BIT(port);

		return 0;
	}

	/* First port in this VLAN: allocate the bookkeeping entry */
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	portmask = BIT(port);

	err = ocelot_vlant_set_mask(ocelot, vid, portmask);
	if (err) {
		kfree(vlan);
		return err;
	}

	vlan->vid = vid;
	vlan->portmask = portmask;
	if (untagged)
		vlan->untagged = BIT(port);
	INIT_LIST_HEAD(&vlan->list);
	list_add_tail(&vlan->list, &ocelot->vlans);

	return 0;
}
516
/* Remove @port from the member mask of @vid, freeing the software
 * bookkeeping entry once the last member is gone.
 */
static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
{
	struct ocelot_bridge_vlan *vlan;
	unsigned long mask;
	int err;

	vlan = ocelot_bridge_vlan_find(ocelot, vid);
	if (!vlan)
		return 0;

	/* Update hardware first so the software state only changes if the
	 * write succeeded.
	 */
	mask = vlan->portmask & ~BIT(port);

	err = ocelot_vlant_set_mask(ocelot, vid, mask);
	if (err)
		return err;

	vlan->portmask = mask;

	if (!vlan->portmask) {
		list_del(&vlan->list);
		kfree(vlan);
	}

	return 0;
}
541
ocelot_add_vlan_unaware_pvid(struct ocelot * ocelot,int port,const struct net_device * bridge)542 static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port,
543 const struct net_device *bridge)
544 {
545 u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
546
547 return ocelot_vlan_member_add(ocelot, port, vid, true);
548 }
549
/* Remove the reserved per-bridge pvid membership again */
static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port,
					const struct net_device *bridge)
{
	return ocelot_vlan_member_del(ocelot, port,
				      ocelot_vlan_unaware_pvid(ocelot, bridge));
}
557
/* Switch a port between VLAN-aware and VLAN-unaware operation, updating
 * the reserved pvid memberships and the hardware classification/tagging
 * accordingly.
 */
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
			       bool vlan_aware, struct netlink_ext_ack *extack)
{
	struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct ocelot_vcap_filter *filter;
	int err = 0;
	u32 val;

	/* Refuse the change while VCAP IS1 rules that rewrite the VID are
	 * active on this port.
	 */
	list_for_each_entry(filter, &block->rules, list) {
		if (filter->ingress_port_mask & BIT(port) &&
		    filter->action.vid_replace_ena) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot change VLAN state with vlan modify rules active");
			return -EBUSY;
		}
	}

	err = ocelot_single_vlan_aware_bridge(ocelot, extack);
	if (err)
		return err;

	/* The reserved VLAN-unaware pvid is only kept installed while the
	 * port is a VLAN-unaware bridge member.
	 */
	if (vlan_aware)
		err = ocelot_del_vlan_unaware_pvid(ocelot, port,
						   ocelot_port->bridge);
	else if (ocelot_port->bridge)
		err = ocelot_add_vlan_unaware_pvid(ocelot, port,
						   ocelot_port->bridge);
	if (err)
		return err;

	ocelot_port->vlan_aware = vlan_aware;

	/* Enable VLAN classification and single-tag popping only in
	 * VLAN-aware mode.
	 */
	if (vlan_aware)
		val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
		      ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
	else
		val = 0;
	ocelot_rmw_gix(ocelot, val,
		       ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
		       ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
		       ANA_PORT_VLAN_CFG, port);

	/* Re-apply the pvid and egress tagging for the new awareness state */
	ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
	ocelot_port_manage_port_tag(ocelot, port);

	return 0;
}
EXPORT_SYMBOL(ocelot_port_vlan_filtering);
607
/* Validate a VLAN add request against the port's current egress-tagging
 * constraints before committing anything to hardware.
 */
int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
			bool untagged, struct netlink_ext_ack *extack)
{
	if (untagged) {
		/* We are adding an egress-untagged VLAN */
		if (ocelot_port_uses_native_vlan(ocelot, port)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
			return -EBUSY;
		}
	} else {
		/* We are adding an egress-tagged VLAN */
		if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
			return -EBUSY;
		}
	}

	/* NOTE(review): the other users of OCELOT_RSV_VLAN_RANGE_START in
	 * this file treat vid >= 4000 as reserved (and the message below
	 * says "4000-4095"), yet this check lets vid == 4000 through.
	 * Looks like an off-by-one — confirm before changing.
	 */
	if (vid > OCELOT_RSV_VLAN_RANGE_START) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VLAN range 4000-4095 reserved for VLAN-unaware bridging");
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL(ocelot_vlan_prepare);
636
/* Commit a (previously validated) VLAN membership to hardware and update
 * the port's pvid and egress tagging to match.
 */
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
		    bool untagged)
{
	int err;

	/* Ignore VID 0 added to our RX filter by the 8021q module, since
	 * that collides with OCELOT_STANDALONE_PVID and changes it from
	 * egress-untagged to egress-tagged.
	 */
	if (!vid)
		return 0;

	err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
	if (err)
		return err;

	/* Default ingress vlan classification */
	if (pvid)
		ocelot_port_set_pvid(ocelot, port,
				     ocelot_bridge_vlan_find(ocelot, vid));

	/* Untagged egress vlan classification */
	ocelot_port_manage_port_tag(ocelot, port);

	return 0;
}
EXPORT_SYMBOL(ocelot_vlan_add);
664
/* Remove a VLAN membership from a port, clearing the pvid if it pointed
 * at the deleted VLAN, and refreshing the egress tagging mode.
 */
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	bool del_pvid;
	int err;

	if (!vid)
		return 0;

	/* Decide this before ocelot_vlan_member_del(), which may free the
	 * VLAN object the pvid points to.
	 */
	del_pvid = ocelot_port->pvid_vlan &&
		   ocelot_port->pvid_vlan->vid == vid;

	err = ocelot_vlan_member_del(ocelot, port, vid);
	if (err)
		return err;

	/* Ingress */
	if (del_pvid)
		ocelot_port_set_pvid(ocelot, port, NULL);

	/* Egress */
	ocelot_port_manage_port_tag(ocelot, port);

	return 0;
}
EXPORT_SYMBOL(ocelot_vlan_del);
691
/* One-time VLAN table setup at switch init */
static void ocelot_vlan_init(struct ocelot *ocelot)
{
	unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0);
	u16 port, vid;

	/* Clear VLAN table, by default all ports are members of all VLANs */
	ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
		     ANA_TABLES_VLANACCESS);
	/* NOTE(review): the completion status is ignored here and for the
	 * per-VID writes below — presumably a timeout at init time is not
	 * treated as fatal; confirm.
	 */
	ocelot_vlant_wait_for_completion(ocelot);

	/* Configure the port VLAN memberships */
	for (vid = 1; vid < VLAN_N_VID; vid++)
		ocelot_vlant_set_mask(ocelot, vid, 0);

	/* We need VID 0 to get traffic on standalone ports.
	 * It is added automatically if the 8021q module is loaded, but we
	 * can't rely on that since it might not be.
	 */
	ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports);

	/* Set vlan ingress filter mask to all ports but the CPU port by
	 * default.
	 */
	ocelot_write(ocelot, all_ports, ANA_VLANMASK);

	/* Start with no rewriter VID and no egress tagging on any port */
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
		ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
	}
}
722
static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
{
	/* Per-port QSYS status word; polled to zero by ocelot_port_flush()
	 * to detect that the egress queues have drained.
	 */
	return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
}
727
/* Drain all frames queued for egress on @port, temporarily disabling flow
 * control so nothing can stall the flush. Returns 0, or a negative error
 * if the queues did not empty within the poll timeout.
 */
static int ocelot_port_flush(struct ocelot *ocelot, int port)
{
	unsigned int pause_ena;
	int err, val;

	/* Disable dequeuing from the egress queues */
	ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
		       QSYS_PORT_MODE_DEQUEUE_DIS,
		       QSYS_PORT_MODE, port);

	/* Disable flow control (saving the current setting for restore) */
	ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);

	/* Disable priority flow control */
	ocelot_fields_write(ocelot, port,
			    QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);

	/* Wait at least the time it takes to receive a frame of maximum length
	 * at the port.
	 * Worst-case delays for 10 kilobyte jumbo frames are:
	 * 8 ms on a 10M port
	 * 800 us on a 100M port
	 * 80 us on a 1G port
	 * 32 us on a 2.5G port
	 */
	usleep_range(8000, 10000);

	/* Disable half duplex backpressure. */
	ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
		       SYS_FRONT_PORT_MODE, port);

	/* Flush the queues associated with the port. */
	ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
		       REW_PORT_CFG, port);

	/* Enable dequeuing from the egress queues. */
	ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
		       port);

	/* Wait until flushing is complete. */
	err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
				100, 2000000, false, ocelot, port);

	/* Clear flushing again. */
	ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);

	/* Re-enable flow control */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);

	return err;
}
780
/* phylink mac_link_down handler: stop RX, flush the port's egress queues,
 * and put the MAC in reset (unless a quirk forbids it).
 */
void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
				  unsigned int link_an_mode,
				  phy_interface_t interface,
				  unsigned long quirks)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	int err;

	ocelot_port->speed = SPEED_UNKNOWN;

	/* Disable MAC reception; transmission stays on so queued frames
	 * can still be flushed below.
	 */
	ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
			 DEV_MAC_ENA_CFG);

	/* Let the switch-specific code recompute the cut-through forwarding
	 * state now that this port's speed is unknown.
	 */
	if (ocelot->ops->cut_through_fwd) {
		mutex_lock(&ocelot->fwd_domain_lock);
		ocelot->ops->cut_through_fwd(ocelot);
		mutex_unlock(&ocelot->fwd_domain_lock);
	}

	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);

	err = ocelot_port_flush(ocelot, port);
	if (err)
		dev_err(ocelot->dev, "failed to flush port %d: %d\n",
			port, err);

	/* Put the port in reset. */
	if (interface != PHY_INTERFACE_MODE_QSGMII ||
	    !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
		ocelot_port_rmwl(ocelot_port,
				 DEV_CLOCK_CFG_MAC_TX_RST |
				 DEV_CLOCK_CFG_MAC_RX_RST,
				 DEV_CLOCK_CFG_MAC_TX_RST |
				 DEV_CLOCK_CFG_MAC_RX_RST,
				 DEV_CLOCK_CFG);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
818
/* phylink mac_link_up handler: program MAC speed/duplex, flow control and
 * pause parameters, take the MAC out of reset and enable forwarding.
 */
void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
				struct phy_device *phydev,
				unsigned int link_an_mode,
				phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause,
				unsigned long quirks)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	int mac_speed, mode = 0;
	u32 mac_fc_cfg;

	ocelot_port->speed = speed;

	/* The MAC might be integrated in systems where the MAC speed is fixed
	 * and it's the PCS who is performing the rate adaptation, so we have
	 * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG
	 * (which is also its default value).
	 */
	if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) ||
	    speed == SPEED_1000) {
		mac_speed = OCELOT_SPEED_1000;
		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
	} else if (speed == SPEED_2500) {
		mac_speed = OCELOT_SPEED_2500;
		mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
	} else if (speed == SPEED_100) {
		mac_speed = OCELOT_SPEED_100;
	} else {
		mac_speed = OCELOT_SPEED_10;
	}

	if (duplex == DUPLEX_FULL)
		mode |= DEV_MAC_MODE_CFG_FDX_ENA;

	ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG);

	/* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
	 * PORT_RST bits in DEV_CLOCK_CFG.
	 */
	ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed),
			   DEV_CLOCK_CFG);

	switch (speed) {
	case SPEED_10:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10);
		break;
	case SPEED_100:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100);
		break;
	case SPEED_1000:
	case SPEED_2500:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000);
		break;
	default:
		dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
			port, speed);
		return;
	}

	if (rx_pause)
		mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;

	/* When transmitting pause frames, advertise the maximum pause time
	 * and allow zero-quanta (resume) frames.
	 */
	if (tx_pause)
		mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
			      SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
			      SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
			      SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;

	/* Flow control. Link speed is only used here to evaluate the time
	 * specification in incoming pause frames.
	 */
	ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);

	ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);

	/* Don't attempt to send PAUSE frames on the NPI port, it's broken */
	if (port != ocelot->npi)
		ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA,
				    tx_pause);

	/* Undo the effects of ocelot_phylink_mac_link_down:
	 * enable MAC module
	 */
	ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
			   DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);

	/* If the port supports cut-through forwarding, update the masks before
	 * enabling forwarding on the port.
	 */
	if (ocelot->ops->cut_through_fwd) {
		mutex_lock(&ocelot->fwd_domain_lock);
		ocelot->ops->cut_through_fwd(ocelot);
		mutex_unlock(&ocelot->fwd_domain_lock);
	}

	/* Core: Enable port for frame transfer */
	ocelot_fields_write(ocelot, port,
			    QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
920
/* Read one 32-bit word of a frame from extraction group @grp.
 * @ifh: true while still reading the extraction frame header.
 * Returns the number of valid bytes written to *rval (4 for a full word,
 * less at end of frame), or a negative error.
 */
static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
				u32 *rval)
{
	u32 bytes_valid, val;

	val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
	if (val == XTR_NOT_READY) {
		/* NOT_READY inside the IFH is treated as an error; in the
		 * frame payload we just busy-wait for more data.
		 */
		if (ifh)
			return -EIO;

		do {
			val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
		} while (val == XTR_NOT_READY);
	}

	switch (val) {
	case XTR_ABORT:
		return -EIO;
	case XTR_EOF_0:
	case XTR_EOF_1:
	case XTR_EOF_2:
	case XTR_EOF_3:
	case XTR_PRUNED:
		/* End-of-frame marker: the final (possibly partial) data
		 * word follows it, and may itself be escaped.
		 */
		bytes_valid = XTR_VALID_BYTES(val);
		val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
		if (val == XTR_ESCAPE)
			*rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
		else
			*rval = val;

		return bytes_valid;
	case XTR_ESCAPE:
		/* The next word is literal data that happens to collide
		 * with one of the control codes.
		 */
		*rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);

		return 4;
	default:
		*rval = val;

		return 4;
	}
}
962
/* Pull the OCELOT_TAG_LEN-byte extraction frame header, one 32-bit word
 * at a time. Every header word must be a full 4 valid bytes; anything
 * else (EOF, prune, not-ready) is an error.
 */
static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
{
	int i;

	for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
		int ret = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]);

		if (ret < 0)
			return ret;
		if (ret != 4)
			return -EIO;
	}

	return 0;
}
975
/* Attach a hardware RX timestamp to @skb. @timestamp carries only the low
 * 32 bits of the nanoseconds counter, so the full 64-bit value is
 * reconstructed from the current PTP time, compensating for the counter
 * having wrapped between the frame's arrival and this read.
 */
void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
			     u64 timestamp)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	u64 tod_in_ns, full_ts_in_ns;
	struct timespec64 ts;

	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);

	tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
	/* If the current low 32 bits are smaller than the captured ones,
	 * the counter wrapped after capture: borrow one from the upper
	 * half.
	 */
	if ((tod_in_ns & 0xffffffff) < timestamp)
		full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
				timestamp;
	else
		full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
				timestamp;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = full_ts_in_ns;
}
EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
998
/* Extract one complete frame from group @grp into a freshly allocated skb.
 * Returns 0 and sets *nskb on success, or a negative error (the skb is
 * freed internally on failure).
 */
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
{
	u64 timestamp, src_port, len;
	u32 xfh[OCELOT_TAG_LEN / 4];
	struct net_device *dev;
	struct sk_buff *skb;
	int sz, buf_len;
	u32 val, *buf;
	int err;

	/* Read and parse the extraction frame header first */
	err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
	if (err)
		return err;

	ocelot_xfh_get_src_port(xfh, &src_port);
	ocelot_xfh_get_len(xfh, &len);
	ocelot_xfh_get_rew_val(xfh, &timestamp);

	if (WARN_ON(src_port >= ocelot->num_phys_ports))
		return -EINVAL;

	dev = ocelot->ops->port_to_netdev(ocelot, src_port);
	if (!dev)
		return -EINVAL;

	skb = netdev_alloc_skb(dev, len);
	if (unlikely(!skb)) {
		netdev_err(dev, "Unable to allocate sk_buff\n");
		return -ENOMEM;
	}

	/* The reported length includes the FCS, which is read separately
	 * below.
	 */
	buf_len = len - ETH_FCS_LEN;
	buf = (u32 *)skb_put(skb, buf_len);

	/* Copy the payload word by word; reuse len as a byte counter */
	len = 0;
	do {
		sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
		if (sz < 0) {
			err = sz;
			goto out_free_skb;
		}
		*buf++ = val;
		len += sz;
	} while (len < buf_len);

	/* Read the FCS */
	sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
	if (sz < 0) {
		err = sz;
		goto out_free_skb;
	}

	/* Update the statistics if part of the FCS was read before */
	len -= ETH_FCS_LEN - sz;

	if (unlikely(dev->features & NETIF_F_RXFCS)) {
		buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
		*buf = val;
	}

	if (ocelot->ptp)
		ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);

	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded.
	 */
	if (ocelot->ports[src_port]->bridge)
		skb->offload_fwd_mark = 1;

	skb->protocol = eth_type_trans(skb, dev);

	*nskb = skb;

	return 0;

out_free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(ocelot_xtr_poll_frame);
1079
ocelot_can_inject(struct ocelot * ocelot,int grp)1080 bool ocelot_can_inject(struct ocelot *ocelot, int grp)
1081 {
1082 u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
1083
1084 if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
1085 return false;
1086 if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
1087 return false;
1088
1089 return true;
1090 }
1091 EXPORT_SYMBOL(ocelot_can_inject);
1092
/* Populate the injection frame header @ifh: force the destination to
 * @port (bypassing the analyzer), and optionally set a VLAN TCI and a
 * rewriter operation.
 */
void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
{
	ocelot_ifh_set_bypass(ifh, 1);
	ocelot_ifh_set_dest(ifh, BIT_ULL(port));
	ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
	if (vlan_tag)
		ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
	if (rew_op)
		ocelot_ifh_set_rew_op(ifh, rew_op);
}
EXPORT_SYMBOL(ocelot_ifh_port_set);
1104
/* Inject @skb towards switch port @port via CPU injection group @grp,
 * optionally with rewriter operation @rew_op. The caller is expected to
 * have checked ocelot_can_inject() first; the register write sequence
 * (SOF, header, payload, padding, EOF, dummy CRC) must be kept in this
 * exact order.
 */
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
			      u32 rew_op, struct sk_buff *skb)
{
	u32 ifh[OCELOT_TAG_LEN / 4] = {0};
	unsigned int i, count, last;

	/* Start of frame */
	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
			 QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);

	ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));

	/* Push the injection frame header first */
	for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
		ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);

	/* Then the payload, one 32-bit word at a time; @last is the number
	 * of valid bytes in the final (possibly partial) word.
	 */
	count = DIV_ROUND_UP(skb->len, 4);
	last = skb->len % 4;
	for (i = 0; i < count; i++)
		ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);

	/* Add padding */
	while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
		ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
		i++;
	}

	/* Indicate EOF and valid bytes in last word */
	ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
			 QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
			 QS_INJ_CTRL_EOF,
			 QS_INJ_CTRL, grp);

	/* Add dummy CRC */
	ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
	skb_tx_timestamp(skb);

	skb->dev->stats.tx_packets++;
	skb->dev->stats.tx_bytes += skb->len;
}
EXPORT_SYMBOL(ocelot_port_inject_frame);
1144
ocelot_drain_cpu_queue(struct ocelot * ocelot,int grp)1145 void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
1146 {
1147 while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
1148 ocelot_read_rix(ocelot, QS_XTR_RD, grp);
1149 }
1150 EXPORT_SYMBOL(ocelot_drain_cpu_queue);
1151
/* Install a static (locked) FDB entry for @addr/@vid towards @port.
 * A VID of 0 selects the VLAN-unaware PVID reserved for @bridge.
 */
int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
		   u16 vid, const struct net_device *bridge)
{
	if (!vid)
		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);

	return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
}
EXPORT_SYMBOL(ocelot_fdb_add);
1161
/* Remove the FDB entry for @addr/@vid. A VID of 0 selects the
 * VLAN-unaware PVID reserved for @bridge, mirroring ocelot_fdb_add().
 */
int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
		   u16 vid, const struct net_device *bridge)
{
	if (!vid)
		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);

	return ocelot_mact_forget(ocelot, addr, vid);
}
EXPORT_SYMBOL(ocelot_fdb_del);
1171
1172 /* Caller must hold &ocelot->mact_lock */
ocelot_mact_read(struct ocelot * ocelot,int port,int row,int col,struct ocelot_mact_entry * entry)1173 static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
1174 struct ocelot_mact_entry *entry)
1175 {
1176 u32 val, dst, macl, mach;
1177 char mac[ETH_ALEN];
1178
1179 /* Set row and column to read from */
1180 ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
1181 ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
1182
1183 /* Issue a read command */
1184 ocelot_write(ocelot,
1185 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
1186 ANA_TABLES_MACACCESS);
1187
1188 if (ocelot_mact_wait_for_completion(ocelot))
1189 return -ETIMEDOUT;
1190
1191 /* Read the entry flags */
1192 val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
1193 if (!(val & ANA_TABLES_MACACCESS_VALID))
1194 return -EINVAL;
1195
1196 /* If the entry read has another port configured as its destination,
1197 * do not report it.
1198 */
1199 dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
1200 if (dst != port)
1201 return -EINVAL;
1202
1203 /* Get the entry's MAC address and VLAN id */
1204 macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
1205 mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
1206
1207 mac[0] = (mach >> 8) & 0xff;
1208 mac[1] = (mach >> 0) & 0xff;
1209 mac[2] = (macl >> 24) & 0xff;
1210 mac[3] = (macl >> 16) & 0xff;
1211 mac[4] = (macl >> 8) & 0xff;
1212 mac[5] = (macl >> 0) & 0xff;
1213
1214 entry->vid = (mach >> 16) & 0xfff;
1215 ether_addr_copy(entry->mac, mac);
1216
1217 return 0;
1218 }
1219
ocelot_mact_flush(struct ocelot * ocelot,int port)1220 int ocelot_mact_flush(struct ocelot *ocelot, int port)
1221 {
1222 int err;
1223
1224 mutex_lock(&ocelot->mact_lock);
1225
1226 /* Program ageing filter for a single port */
1227 ocelot_write(ocelot, ANA_ANAGEFIL_PID_EN | ANA_ANAGEFIL_PID_VAL(port),
1228 ANA_ANAGEFIL);
1229
1230 /* Flushing dynamic FDB entries requires two successive age scans */
1231 ocelot_write(ocelot,
1232 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
1233 ANA_TABLES_MACACCESS);
1234
1235 err = ocelot_mact_wait_for_completion(ocelot);
1236 if (err) {
1237 mutex_unlock(&ocelot->mact_lock);
1238 return err;
1239 }
1240
1241 /* And second... */
1242 ocelot_write(ocelot,
1243 ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
1244 ANA_TABLES_MACACCESS);
1245
1246 err = ocelot_mact_wait_for_completion(ocelot);
1247
1248 /* Restore ageing filter */
1249 ocelot_write(ocelot, 0, ANA_ANAGEFIL);
1250
1251 mutex_unlock(&ocelot->mact_lock);
1252
1253 return err;
1254 }
1255 EXPORT_SYMBOL_GPL(ocelot_mact_flush);
1256
/* Walk every MAC table bucket and report the entries destined to @port
 * through @cb. Entries in the VLAN range reserved for VLAN-unaware
 * bridging are reported with VID 0.
 */
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
		    dsa_fdb_dump_cb_t *cb, void *data)
{
	int err = 0;
	int i, j;

	/* We could take the lock just around ocelot_mact_read, but doing so
	 * thousands of times in a row seems rather pointless and inefficient.
	 */
	mutex_lock(&ocelot->mact_lock);

	/* Loop through all the mac tables entries. */
	for (i = 0; i < ocelot->num_mact_rows; i++) {
		/* Four buckets (columns) per row */
		for (j = 0; j < 4; j++) {
			struct ocelot_mact_entry entry;
			bool is_static;

			err = ocelot_mact_read(ocelot, port, i, j, &entry);
			/* If the entry is invalid (wrong port, invalid...),
			 * skip it.
			 */
			if (err == -EINVAL)
				continue;
			else if (err)
				break;

			is_static = (entry.type == ENTRYTYPE_LOCKED);

			/* Hide the reserved VLANs used for
			 * VLAN-unaware bridging.
			 */
			if (entry.vid > OCELOT_RSV_VLAN_RANGE_START)
				entry.vid = 0;

			err = cb(entry.mac, entry.vid, is_static, data);
			if (err)
				break;
		}
	}

	mutex_unlock(&ocelot->mact_lock);

	return err;
}
EXPORT_SYMBOL(ocelot_fdb_dump);
1302
/* Install, or extend to @port, a VCAP IS2 trap rule identified by
 * @cookie. @populate fills in the key to match on; @take_ts requests a
 * timestamp for trapped packets. Trapped traffic is copied to the CPU
 * and denied forwarding (mask mode PERMIT_DENY with an empty port
 * mask).
 */
int ocelot_trap_add(struct ocelot *ocelot, int port,
		    unsigned long cookie, bool take_ts,
		    void (*populate)(struct ocelot_vcap_filter *f))
{
	struct ocelot_vcap_block *block_vcap_is2;
	struct ocelot_vcap_filter *trap;
	bool new = false;
	int err;

	block_vcap_is2 = &ocelot->block[VCAP_IS2];

	/* Reuse an existing trap for this cookie if one is already
	 * installed, otherwise allocate and configure a fresh one.
	 */
	trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
						   false);
	if (!trap) {
		trap = kzalloc(sizeof(*trap), GFP_KERNEL);
		if (!trap)
			return -ENOMEM;

		populate(trap);
		trap->prio = 1;
		trap->id.cookie = cookie;
		trap->id.tc_offload = false;
		trap->block_id = VCAP_IS2;
		trap->type = OCELOT_VCAP_FILTER_OFFLOAD;
		trap->lookup = 0;
		trap->action.cpu_copy_ena = true;
		trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		trap->action.port_mask = 0;
		trap->take_ts = take_ts;
		trap->is_trap = true;
		new = true;
	}

	trap->ingress_port_mask |= BIT(port);

	if (new)
		err = ocelot_vcap_filter_add(ocelot, trap, NULL);
	else
		err = ocelot_vcap_filter_replace(ocelot, trap);
	if (err) {
		/* Undo our port bit; free the filter when no port
		 * references it anymore.
		 */
		trap->ingress_port_mask &= ~BIT(port);
		if (!trap->ingress_port_mask)
			kfree(trap);
		return err;
	}

	return 0;
}
1351
ocelot_trap_del(struct ocelot * ocelot,int port,unsigned long cookie)1352 int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
1353 {
1354 struct ocelot_vcap_block *block_vcap_is2;
1355 struct ocelot_vcap_filter *trap;
1356
1357 block_vcap_is2 = &ocelot->block[VCAP_IS2];
1358
1359 trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
1360 false);
1361 if (!trap)
1362 return 0;
1363
1364 trap->ingress_port_mask &= ~BIT(port);
1365 if (!trap->ingress_port_mask)
1366 return ocelot_vcap_filter_del(ocelot, trap);
1367
1368 return ocelot_vcap_filter_replace(ocelot, trap);
1369 }
1370
ocelot_get_bond_mask(struct ocelot * ocelot,struct net_device * bond)1371 static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
1372 {
1373 u32 mask = 0;
1374 int port;
1375
1376 lockdep_assert_held(&ocelot->fwd_domain_lock);
1377
1378 for (port = 0; port < ocelot->num_phys_ports; port++) {
1379 struct ocelot_port *ocelot_port = ocelot->ports[port];
1380
1381 if (!ocelot_port)
1382 continue;
1383
1384 if (ocelot_port->bond == bond)
1385 mask |= BIT(port);
1386 }
1387
1388 return mask;
1389 }
1390
1391 /* The logical port number of a LAG is equal to the lowest numbered physical
1392 * port ID present in that LAG. It may change if that port ever leaves the LAG.
1393 */
ocelot_bond_get_id(struct ocelot * ocelot,struct net_device * bond)1394 int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
1395 {
1396 int bond_mask = ocelot_get_bond_mask(ocelot, bond);
1397
1398 if (!bond_mask)
1399 return -ENOENT;
1400
1401 return __ffs(bond_mask);
1402 }
1403 EXPORT_SYMBOL_GPL(ocelot_bond_get_id);
1404
1405 /* Returns the mask of user ports assigned to this DSA tag_8021q CPU port.
1406 * Note that when CPU ports are in a LAG, the user ports are assigned to the
1407 * 'primary' CPU port, the one whose physical port number gives the logical
1408 * port number of the LAG.
1409 *
1410 * We leave PGID_SRC poorly configured for the 'secondary' CPU port in the LAG
1411 * (to which no user port is assigned), but it appears that forwarding from
1412 * this secondary CPU port looks at the PGID_SRC associated with the logical
1413 * port ID that it's assigned to, which *is* configured properly.
1414 */
ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot * ocelot,struct ocelot_port * cpu)1415 static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
1416 struct ocelot_port *cpu)
1417 {
1418 u32 mask = 0;
1419 int port;
1420
1421 for (port = 0; port < ocelot->num_phys_ports; port++) {
1422 struct ocelot_port *ocelot_port = ocelot->ports[port];
1423
1424 if (!ocelot_port)
1425 continue;
1426
1427 if (ocelot_port->dsa_8021q_cpu == cpu)
1428 mask |= BIT(port);
1429 }
1430
1431 if (cpu->bond)
1432 mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond);
1433
1434 return mask;
1435 }
1436
1437 /* Returns the DSA tag_8021q CPU port that the given port is assigned to,
1438 * or the bit mask of CPU ports if said CPU port is in a LAG.
1439 */
ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot * ocelot,int port)1440 u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
1441 {
1442 struct ocelot_port *ocelot_port = ocelot->ports[port];
1443 struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu;
1444
1445 if (!cpu_port)
1446 return 0;
1447
1448 if (cpu_port->bond)
1449 return ocelot_get_bond_mask(ocelot, cpu_port->bond);
1450
1451 return BIT(cpu_port->index);
1452 }
1453 EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask);
1454
ocelot_get_bridge_fwd_mask(struct ocelot * ocelot,int src_port)1455 u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
1456 {
1457 struct ocelot_port *ocelot_port = ocelot->ports[src_port];
1458 const struct net_device *bridge;
1459 u32 mask = 0;
1460 int port;
1461
1462 if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING)
1463 return 0;
1464
1465 bridge = ocelot_port->bridge;
1466 if (!bridge)
1467 return 0;
1468
1469 for (port = 0; port < ocelot->num_phys_ports; port++) {
1470 ocelot_port = ocelot->ports[port];
1471
1472 if (!ocelot_port)
1473 continue;
1474
1475 if (ocelot_port->stp_state == BR_STATE_FORWARDING &&
1476 ocelot_port->bridge == bridge)
1477 mask |= BIT(port);
1478 }
1479
1480 return mask;
1481 }
1482 EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
1483
/* Recompute PGID_SRC for every port, i.e. the set of destinations each
 * source port may forward to: unused ports send nowhere, DSA tag_8021q
 * CPU ports forward to their assigned user ports, bridged ports to
 * their forwarding domain (minus themselves and their LAG peers), and
 * standalone ports only to their assigned CPU ports (if any).
 * @joining says whether the triggering event added a port to a
 * forwarding domain; it decides the ordering relative to the
 * cut-through configuration update (see comments below).
 * Caller must hold &ocelot->fwd_domain_lock.
 */
static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
{
	int port;

	lockdep_assert_held(&ocelot->fwd_domain_lock);

	/* If cut-through forwarding is supported, update the masks before a
	 * port joins the forwarding domain, to avoid potential underruns if it
	 * has the highest speed from the new domain.
	 */
	if (joining && ocelot->ops->cut_through_fwd)
		ocelot->ops->cut_through_fwd(ocelot);

	/* Apply FWD mask. The loop is needed to add/remove the current port as
	 * a source for the other ports.
	 */
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		struct ocelot_port *ocelot_port = ocelot->ports[port];
		unsigned long mask;

		if (!ocelot_port) {
			/* Unused ports can't send anywhere */
			mask = 0;
		} else if (ocelot_port->is_dsa_8021q_cpu) {
			/* The DSA tag_8021q CPU ports need to be able to
			 * forward packets to all ports assigned to them.
			 */
			mask = ocelot_dsa_8021q_cpu_assigned_ports(ocelot,
								   ocelot_port);
		} else if (ocelot_port->bridge) {
			struct net_device *bond = ocelot_port->bond;

			mask = ocelot_get_bridge_fwd_mask(ocelot, port);
			mask &= ~BIT(port);

			mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
									port);

			if (bond)
				mask &= ~ocelot_get_bond_mask(ocelot, bond);
		} else {
			/* Standalone ports forward only to DSA tag_8021q CPU
			 * ports (if those exist), or to the hardware CPU port
			 * module otherwise.
			 */
			mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
								       port);
		}

		ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
	}

	/* If cut-through forwarding is supported and a port is leaving, there
	 * is a chance that cut-through was disabled on the other ports due to
	 * the port which is leaving (it has a higher link speed). We need to
	 * update the cut-through masks of the remaining ports no earlier than
	 * after the port has left, to prevent underruns from happening between
	 * the cut-through update and the forwarding domain update.
	 */
	if (!joining && ocelot->ops->cut_through_fwd)
		ocelot->ops->cut_through_fwd(ocelot);
}
1546
1547 /* Update PGID_CPU which is the destination port mask used for whitelisting
1548 * unicast addresses filtered towards the host. In the normal and NPI modes,
1549 * this points to the analyzer entry for the CPU port module, while in DSA
1550 * tag_8021q mode, it is a bit mask of all active CPU ports.
1551 * PGID_SRC will take care of forwarding a packet from one user port to
1552 * no more than a single CPU port.
1553 */
ocelot_update_pgid_cpu(struct ocelot * ocelot)1554 static void ocelot_update_pgid_cpu(struct ocelot *ocelot)
1555 {
1556 int pgid_cpu = 0;
1557 int port;
1558
1559 for (port = 0; port < ocelot->num_phys_ports; port++) {
1560 struct ocelot_port *ocelot_port = ocelot->ports[port];
1561
1562 if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu)
1563 continue;
1564
1565 pgid_cpu |= BIT(port);
1566 }
1567
1568 if (!pgid_cpu)
1569 pgid_cpu = BIT(ocelot->num_phys_ports);
1570
1571 ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU);
1572 }
1573
/* Turn port @cpu into a DSA tag_8021q CPU port: mark it as such, join
 * it to every VLAN in the range reserved for VLAN-unaware bridging, and
 * refresh PGID_CPU accordingly.
 */
void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
{
	struct ocelot_port *cpu_port = ocelot->ports[cpu];
	u16 vid;

	mutex_lock(&ocelot->fwd_domain_lock);

	cpu_port->is_dsa_8021q_cpu = true;

	for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
		ocelot_vlan_member_add(ocelot, cpu, vid, true);

	ocelot_update_pgid_cpu(ocelot);

	mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_port_setup_dsa_8021q_cpu);
1591
/* Undo ocelot_port_setup_dsa_8021q_cpu(): clear the CPU port flag, drop
 * the reserved-range VLAN memberships and refresh PGID_CPU.
 */
void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
{
	struct ocelot_port *cpu_port = ocelot->ports[cpu];
	u16 vid;

	mutex_lock(&ocelot->fwd_domain_lock);

	cpu_port->is_dsa_8021q_cpu = false;

	for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
		ocelot_vlan_member_del(ocelot, cpu_port->index, vid);

	ocelot_update_pgid_cpu(ocelot);

	mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_port_teardown_dsa_8021q_cpu);
1609
/* Assign user port @port to the DSA tag_8021q CPU port @cpu and
 * recompute the forwarding masks.
 */
void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
				      int cpu)
{
	struct ocelot_port *cpu_port = ocelot->ports[cpu];

	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
1623
/* Detach user port @port from its DSA tag_8021q CPU port and recompute
 * the forwarding masks.
 */
void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot->ports[port]->dsa_8021q_cpu = NULL;
	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_port_unassign_dsa_8021q_cpu);
1634
/* Program the STP @state of @port: enable hardware address learning
 * only in the LEARNING/FORWARDING states (and only when not
 * administratively disabled via learn_ena), then recompute the
 * forwarding domain.
 */
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	u32 learn_ena = 0;

	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot_port->stp_state = state;

	if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
	    ocelot_port->learn_ena)
		learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA;

	ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
		       ANA_PORT_PORT_CFG, port);

	ocelot_apply_bridge_fwd_mask(ocelot, state == BR_STATE_FORWARDING);

	mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
1656
ocelot_set_ageing_time(struct ocelot * ocelot,unsigned int msecs)1657 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
1658 {
1659 unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
1660
1661 /* Setting AGE_PERIOD to zero effectively disables automatic aging,
1662 * which is clearly not what our intention is. So avoid that.
1663 */
1664 if (!age_period)
1665 age_period = 1;
1666
1667 ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
1668 }
1669 EXPORT_SYMBOL(ocelot_set_ageing_time);
1670
/* Look up the multicast entry for @addr/@vid in the software list, or
 * NULL when none exists.
 */
static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
						     const unsigned char *addr,
						     u16 vid)
{
	struct ocelot_multicast *mc;

	list_for_each_entry(mc, &ocelot->multicast, list)
		if (mc->vid == vid && ether_addr_equal(mc->addr, addr))
			return mc;

	return NULL;
}
1684
ocelot_classify_mdb(const unsigned char * addr)1685 static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
1686 {
1687 if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e)
1688 return ENTRYTYPE_MACv4;
1689 if (addr[0] == 0x33 && addr[1] == 0x33)
1690 return ENTRYTYPE_MACv6;
1691 return ENTRYTYPE_LOCKED;
1692 }
1693
ocelot_pgid_alloc(struct ocelot * ocelot,int index,unsigned long ports)1694 static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index,
1695 unsigned long ports)
1696 {
1697 struct ocelot_pgid *pgid;
1698
1699 pgid = kzalloc(sizeof(*pgid), GFP_KERNEL);
1700 if (!pgid)
1701 return ERR_PTR(-ENOMEM);
1702
1703 pgid->ports = ports;
1704 pgid->index = index;
1705 refcount_set(&pgid->refcount, 1);
1706 list_add_tail(&pgid->list, &ocelot->pgids);
1707
1708 return pgid;
1709 }
1710
ocelot_pgid_free(struct ocelot * ocelot,struct ocelot_pgid * pgid)1711 static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid)
1712 {
1713 if (!refcount_dec_and_test(&pgid->refcount))
1714 return;
1715
1716 list_del(&pgid->list);
1717 kfree(pgid);
1718 }
1719
/* Obtain a PGID for the multicast entry @mc: either the dummy index-0
 * PGID for MACv4/MACv6 entries, an existing PGID with the same port set
 * (refcounted), or a freshly allocated one from the nonreserved area.
 * Returns ERR_PTR(-ENOSPC) when no free PGID remains.
 */
static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot,
					       const struct ocelot_multicast *mc)
{
	struct ocelot_pgid *pgid;
	int index;

	/* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
	 * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
	 * destination mask table (PGID), the destination set is programmed as
	 * part of the entry MAC address.", and the DEST_IDX is set to 0.
	 */
	if (mc->entry_type == ENTRYTYPE_MACv4 ||
	    mc->entry_type == ENTRYTYPE_MACv6)
		return ocelot_pgid_alloc(ocelot, 0, mc->ports);

	list_for_each_entry(pgid, &ocelot->pgids, list) {
		/* When searching for a nonreserved multicast PGID, ignore the
		 * dummy PGID of zero that we have for MACv4/MACv6 entries
		 */
		if (pgid->index && pgid->ports == mc->ports) {
			refcount_inc(&pgid->refcount);
			return pgid;
		}
	}

	/* Search for a free index in the nonreserved multicast PGID area */
	for_each_nonreserved_multicast_dest_pgid(ocelot, index) {
		bool used = false;

		list_for_each_entry(pgid, &ocelot->pgids, list) {
			if (pgid->index == index) {
				used = true;
				break;
			}
		}

		if (!used)
			return ocelot_pgid_alloc(ocelot, index, mc->ports);
	}

	return ERR_PTR(-ENOSPC);
}
1762
ocelot_encode_ports_to_mdb(unsigned char * addr,struct ocelot_multicast * mc)1763 static void ocelot_encode_ports_to_mdb(unsigned char *addr,
1764 struct ocelot_multicast *mc)
1765 {
1766 ether_addr_copy(addr, mc->addr);
1767
1768 if (mc->entry_type == ENTRYTYPE_MACv4) {
1769 addr[0] = 0;
1770 addr[1] = mc->ports >> 8;
1771 addr[2] = mc->ports & 0xff;
1772 } else if (mc->entry_type == ENTRYTYPE_MACv6) {
1773 addr[0] = mc->ports >> 8;
1774 addr[1] = mc->ports & 0xff;
1775 }
1776 }
1777
ocelot_port_mdb_add(struct ocelot * ocelot,int port,const struct switchdev_obj_port_mdb * mdb,const struct net_device * bridge)1778 int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
1779 const struct switchdev_obj_port_mdb *mdb,
1780 const struct net_device *bridge)
1781 {
1782 unsigned char addr[ETH_ALEN];
1783 struct ocelot_multicast *mc;
1784 struct ocelot_pgid *pgid;
1785 u16 vid = mdb->vid;
1786
1787 if (!vid)
1788 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
1789
1790 mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
1791 if (!mc) {
1792 /* New entry */
1793 mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
1794 if (!mc)
1795 return -ENOMEM;
1796
1797 mc->entry_type = ocelot_classify_mdb(mdb->addr);
1798 ether_addr_copy(mc->addr, mdb->addr);
1799 mc->vid = vid;
1800
1801 list_add_tail(&mc->list, &ocelot->multicast);
1802 } else {
1803 /* Existing entry. Clean up the current port mask from
1804 * hardware now, because we'll be modifying it.
1805 */
1806 ocelot_pgid_free(ocelot, mc->pgid);
1807 ocelot_encode_ports_to_mdb(addr, mc);
1808 ocelot_mact_forget(ocelot, addr, vid);
1809 }
1810
1811 mc->ports |= BIT(port);
1812
1813 pgid = ocelot_mdb_get_pgid(ocelot, mc);
1814 if (IS_ERR(pgid)) {
1815 dev_err(ocelot->dev,
1816 "Cannot allocate PGID for mdb %pM vid %d\n",
1817 mc->addr, mc->vid);
1818 devm_kfree(ocelot->dev, mc);
1819 return PTR_ERR(pgid);
1820 }
1821 mc->pgid = pgid;
1822
1823 ocelot_encode_ports_to_mdb(addr, mc);
1824
1825 if (mc->entry_type != ENTRYTYPE_MACv4 &&
1826 mc->entry_type != ENTRYTYPE_MACv6)
1827 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
1828 pgid->index);
1829
1830 return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
1831 mc->entry_type);
1832 }
1833 EXPORT_SYMBOL(ocelot_port_mdb_add);
1834
/* Remove @port from the multicast group @mdb->addr/@mdb->vid, deleting
 * the entry entirely when no member port remains. A VID of 0 selects
 * the VLAN-unaware PVID reserved for @bridge.
 */
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
			const struct switchdev_obj_port_mdb *mdb,
			const struct net_device *bridge)
{
	unsigned char addr[ETH_ALEN];
	struct ocelot_multicast *mc;
	struct ocelot_pgid *pgid;
	u16 vid = mdb->vid;

	if (!vid)
		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);

	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
	if (!mc)
		return -ENOENT;

	/* Remove the old MAC table entry before reprogramming with the
	 * reduced port set.
	 */
	ocelot_encode_ports_to_mdb(addr, mc);
	ocelot_mact_forget(ocelot, addr, vid);

	ocelot_pgid_free(ocelot, mc->pgid);
	mc->ports &= ~BIT(port);
	if (!mc->ports) {
		list_del(&mc->list);
		devm_kfree(ocelot->dev, mc);
		return 0;
	}

	/* We have a PGID with fewer ports now */
	pgid = ocelot_mdb_get_pgid(ocelot, mc);
	/* NOTE(review): on failure here, mc stays on the list with a stale
	 * mc->pgid pointer — confirm whether this error path needs deeper
	 * unwinding.
	 */
	if (IS_ERR(pgid))
		return PTR_ERR(pgid);
	mc->pgid = pgid;

	ocelot_encode_ports_to_mdb(addr, mc);

	if (mc->entry_type != ENTRYTYPE_MACv4 &&
	    mc->entry_type != ENTRYTYPE_MACv6)
		ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
				 pgid->index);

	return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
				 mc->entry_type);
}
EXPORT_SYMBOL(ocelot_port_mdb_del);
1879
/* Offload @port as a member of @bridge (with software bridge number
 * @bridge_num) and recompute the forwarding domain. For a VLAN-unaware
 * bridge, also install the reserved PVID used for VLAN-unaware
 * bridging.
 */
int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
			    struct net_device *bridge, int bridge_num,
			    struct netlink_ext_ack *extack)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	int err;

	/* Rejects the configuration when it would create more than one
	 * VLAN-aware bridge on the switch (reported through extack).
	 */
	err = ocelot_single_vlan_aware_bridge(ocelot, extack);
	if (err)
		return err;

	mutex_lock(&ocelot->fwd_domain_lock);

	ocelot_port->bridge = bridge;
	ocelot_port->bridge_num = bridge_num;

	ocelot_apply_bridge_fwd_mask(ocelot, true);

	mutex_unlock(&ocelot->fwd_domain_lock);

	if (br_vlan_enabled(bridge))
		return 0;

	return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
1906
/* Undo ocelot_port_bridge_join(): drop the VLAN-unaware PVID if one was
 * installed, detach @port from @bridge, reset its PVID and tagging
 * setup, and recompute the forwarding domain.
 */
void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
			      struct net_device *bridge)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	mutex_lock(&ocelot->fwd_domain_lock);

	if (!br_vlan_enabled(bridge))
		ocelot_del_vlan_unaware_pvid(ocelot, port, bridge);

	ocelot_port->bridge = NULL;
	ocelot_port->bridge_num = -1;

	ocelot_port_set_pvid(ocelot, port, NULL);
	ocelot_port_manage_port_tag(ocelot, port);
	ocelot_apply_bridge_fwd_mask(ocelot, false);

	mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
1927
/* Recompute the destination and aggregation PGIDs so that traffic
 * towards a LAG is load-balanced across its active member ports:
 * destination PGIDs of LAG members are widened to the whole bond mask,
 * and each aggregation code is pointed at one active member in
 * round-robin fashion.
 */
static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
{
	unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0);
	int i, port, lag;

	/* Reset destination and aggregation PGIDS */
	for_each_unicast_dest_pgid(ocelot, port)
		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);

	for_each_aggr_pgid(ocelot, i)
		ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
				 ANA_PGID_PGID, i);

	/* The visited ports bitmask holds the list of ports offloading any
	 * bonding interface. Initially we mark all these ports as unvisited,
	 * then every time we visit a port in this bitmask, we know that it is
	 * the lowest numbered port, i.e. the one whose logical ID == physical
	 * port ID == LAG ID. So we mark as visited all further ports in the
	 * bitmask that are offloading the same bonding interface. This way,
	 * we set up the aggregation PGIDs only once per bonding interface.
	 */
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		struct ocelot_port *ocelot_port = ocelot->ports[port];

		if (!ocelot_port || !ocelot_port->bond)
			continue;

		visited &= ~BIT(port);
	}

	/* Now, set PGIDs for each active LAG */
	for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
		struct net_device *bond = ocelot->ports[lag]->bond;
		int num_active_ports = 0;
		unsigned long bond_mask;
		/* NOTE(review): fixed-size scratch list of active member
		 * ports — assumes at most 16 ports per LAG; confirm against
		 * supported SoCs.
		 */
		u8 aggr_idx[16];

		if (!bond || (visited & BIT(lag)))
			continue;

		bond_mask = ocelot_get_bond_mask(ocelot, bond);

		for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
			struct ocelot_port *ocelot_port = ocelot->ports[port];

			// Destination mask
			ocelot_write_rix(ocelot, bond_mask,
					 ANA_PGID_PGID, port);

			if (ocelot_port->lag_tx_active)
				aggr_idx[num_active_ports++] = port;
		}

		for_each_aggr_pgid(ocelot, i) {
			u32 ac;

			ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
			ac &= ~bond_mask;
			/* Don't do division by zero if there was no active
			 * port. Just make all aggregation codes zero.
			 */
			if (num_active_ports)
				ac |= BIT(aggr_idx[i % num_active_ports]);
			ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
		}

		/* Mark all ports in the same LAG as visited to avoid applying
		 * the same config again.
		 */
		for (port = lag; port < ocelot->num_phys_ports; port++) {
			struct ocelot_port *ocelot_port = ocelot->ports[port];

			if (!ocelot_port)
				continue;

			if (ocelot_port->bond == bond)
				visited |= BIT(port);
		}
	}
}
2008
2009 /* When offloading a bonding interface, the switch ports configured under the
2010 * same bond must have the same logical port ID, equal to the physical port ID
2011 * of the lowest numbered physical port in that bond. Otherwise, in standalone/
2012 * bridged mode, each port has a logical port ID equal to its physical port ID.
2013 */
ocelot_setup_logical_port_ids(struct ocelot * ocelot)2014 static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
2015 {
2016 int port;
2017
2018 for (port = 0; port < ocelot->num_phys_ports; port++) {
2019 struct ocelot_port *ocelot_port = ocelot->ports[port];
2020 struct net_device *bond;
2021
2022 if (!ocelot_port)
2023 continue;
2024
2025 bond = ocelot_port->bond;
2026 if (bond) {
2027 int lag = ocelot_bond_get_id(ocelot, bond);
2028
2029 ocelot_rmw_gix(ocelot,
2030 ANA_PORT_PORT_CFG_PORTID_VAL(lag),
2031 ANA_PORT_PORT_CFG_PORTID_VAL_M,
2032 ANA_PORT_PORT_CFG, port);
2033 } else {
2034 ocelot_rmw_gix(ocelot,
2035 ANA_PORT_PORT_CFG_PORTID_VAL(port),
2036 ANA_PORT_PORT_CFG_PORTID_VAL_M,
2037 ANA_PORT_PORT_CFG, port);
2038 }
2039 }
2040 }
2041
ocelot_migrate_mc(struct ocelot * ocelot,struct ocelot_multicast * mc,unsigned long from_mask,unsigned long to_mask)2042 static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc,
2043 unsigned long from_mask, unsigned long to_mask)
2044 {
2045 unsigned char addr[ETH_ALEN];
2046 struct ocelot_pgid *pgid;
2047 u16 vid = mc->vid;
2048
2049 dev_dbg(ocelot->dev,
2050 "Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n",
2051 mc->addr, mc->vid, from_mask, to_mask);
2052
2053 /* First clean up the current port mask from hardware, because
2054 * we'll be modifying it.
2055 */
2056 ocelot_pgid_free(ocelot, mc->pgid);
2057 ocelot_encode_ports_to_mdb(addr, mc);
2058 ocelot_mact_forget(ocelot, addr, vid);
2059
2060 mc->ports &= ~from_mask;
2061 mc->ports |= to_mask;
2062
2063 pgid = ocelot_mdb_get_pgid(ocelot, mc);
2064 if (IS_ERR(pgid)) {
2065 dev_err(ocelot->dev,
2066 "Cannot allocate PGID for mdb %pM vid %d\n",
2067 mc->addr, mc->vid);
2068 devm_kfree(ocelot->dev, mc);
2069 return PTR_ERR(pgid);
2070 }
2071 mc->pgid = pgid;
2072
2073 ocelot_encode_ports_to_mdb(addr, mc);
2074
2075 if (mc->entry_type != ENTRYTYPE_MACv4 &&
2076 mc->entry_type != ENTRYTYPE_MACv6)
2077 ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
2078 pgid->index);
2079
2080 return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
2081 mc->entry_type);
2082 }
2083
ocelot_migrate_mdbs(struct ocelot * ocelot,unsigned long from_mask,unsigned long to_mask)2084 int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask,
2085 unsigned long to_mask)
2086 {
2087 struct ocelot_multicast *mc;
2088 int err;
2089
2090 list_for_each_entry(mc, &ocelot->multicast, list) {
2091 if (!(mc->ports & from_mask))
2092 continue;
2093
2094 err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask);
2095 if (err)
2096 return err;
2097 }
2098
2099 return 0;
2100 }
2101 EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs);
2102
2103 /* Documentation for PORTID_VAL says:
2104 * Logical port number for front port. If port is not a member of a LLAG,
2105 * then PORTID must be set to the physical port number.
2106 * If port is a member of a LLAG, then PORTID must be set to the common
2107 * PORTID_VAL used for all member ports of the LLAG.
2108 * The value must not exceed the number of physical ports on the device.
2109 *
2110 * This means we have little choice but to migrate FDB entries pointing towards
2111 * a logical port when that changes.
2112 */
ocelot_migrate_lag_fdbs(struct ocelot * ocelot,struct net_device * bond,int lag)2113 static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
2114 struct net_device *bond,
2115 int lag)
2116 {
2117 struct ocelot_lag_fdb *fdb;
2118 int err;
2119
2120 lockdep_assert_held(&ocelot->fwd_domain_lock);
2121
2122 list_for_each_entry(fdb, &ocelot->lag_fdbs, list) {
2123 if (fdb->bond != bond)
2124 continue;
2125
2126 err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid);
2127 if (err) {
2128 dev_err(ocelot->dev,
2129 "failed to delete LAG %s FDB %pM vid %d: %pe\n",
2130 bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
2131 }
2132
2133 err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid,
2134 ENTRYTYPE_LOCKED);
2135 if (err) {
2136 dev_err(ocelot->dev,
2137 "failed to migrate LAG %s FDB %pM vid %d: %pe\n",
2138 bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
2139 }
2140 }
2141 }
2142
ocelot_port_lag_join(struct ocelot * ocelot,int port,struct net_device * bond,struct netdev_lag_upper_info * info,struct netlink_ext_ack * extack)2143 int ocelot_port_lag_join(struct ocelot *ocelot, int port,
2144 struct net_device *bond,
2145 struct netdev_lag_upper_info *info,
2146 struct netlink_ext_ack *extack)
2147 {
2148 if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
2149 NL_SET_ERR_MSG_MOD(extack,
2150 "Can only offload LAG using hash TX type");
2151 return -EOPNOTSUPP;
2152 }
2153
2154 mutex_lock(&ocelot->fwd_domain_lock);
2155
2156 ocelot->ports[port]->bond = bond;
2157
2158 ocelot_setup_logical_port_ids(ocelot);
2159 ocelot_apply_bridge_fwd_mask(ocelot, true);
2160 ocelot_set_aggr_pgids(ocelot);
2161
2162 mutex_unlock(&ocelot->fwd_domain_lock);
2163
2164 return 0;
2165 }
2166 EXPORT_SYMBOL(ocelot_port_lag_join);
2167
ocelot_port_lag_leave(struct ocelot * ocelot,int port,struct net_device * bond)2168 void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
2169 struct net_device *bond)
2170 {
2171 int old_lag_id, new_lag_id;
2172
2173 mutex_lock(&ocelot->fwd_domain_lock);
2174
2175 old_lag_id = ocelot_bond_get_id(ocelot, bond);
2176
2177 ocelot->ports[port]->bond = NULL;
2178
2179 ocelot_setup_logical_port_ids(ocelot);
2180 ocelot_apply_bridge_fwd_mask(ocelot, false);
2181 ocelot_set_aggr_pgids(ocelot);
2182
2183 new_lag_id = ocelot_bond_get_id(ocelot, bond);
2184
2185 if (new_lag_id >= 0 && old_lag_id != new_lag_id)
2186 ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id);
2187
2188 mutex_unlock(&ocelot->fwd_domain_lock);
2189 }
2190 EXPORT_SYMBOL(ocelot_port_lag_leave);
2191
/* React to a change in the TX-enable state of a LAG member port. */
void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
{
	mutex_lock(&ocelot->fwd_domain_lock);

	/* Record whether this port may transmit for its LAG */
	ocelot->ports[port]->lag_tx_active = lag_tx_active;

	/* Rebalance the LAGs */
	ocelot_set_aggr_pgids(ocelot);

	mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_change);
2206
/* Install a static (locked) FDB entry pointing at the logical port of @bond,
 * and track it in software so it can be migrated if the LAG ID changes.
 * A VID of 0 is mapped to the bridge's VLAN-unaware PVID.
 */
int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
		       const unsigned char *addr, u16 vid,
		       const struct net_device *bridge)
{
	struct ocelot_lag_fdb *fdb;
	int lag, err;

	fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
	if (!fdb)
		return -ENOMEM;

	mutex_lock(&ocelot->fwd_domain_lock);

	if (!vid)
		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);

	ether_addr_copy(fdb->addr, addr);
	fdb->vid = vid;
	fdb->bond = bond;

	lag = ocelot_bond_get_id(ocelot, bond);

	err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED);
	if (err)
		goto out_free;

	list_add_tail(&fdb->list, &ocelot->lag_fdbs);
	mutex_unlock(&ocelot->fwd_domain_lock);

	return 0;

out_free:
	mutex_unlock(&ocelot->fwd_domain_lock);
	kfree(fdb);
	return err;
}
EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add);
2242
/* Remove a static LAG FDB entry previously added with ocelot_lag_fdb_add().
 * Returns -ENOENT if no matching entry is tracked.
 */
int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
		       const unsigned char *addr, u16 vid,
		       const struct net_device *bridge)
{
	struct ocelot_lag_fdb *fdb, *tmp;
	int err = -ENOENT;

	mutex_lock(&ocelot->fwd_domain_lock);

	if (!vid)
		vid = ocelot_vlan_unaware_pvid(ocelot, bridge);

	list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) {
		if (fdb->bond != bond || fdb->vid != vid ||
		    !ether_addr_equal(fdb->addr, addr))
			continue;

		ocelot_mact_forget(ocelot, addr, vid);
		list_del(&fdb->list);
		kfree(fdb);
		err = 0;
		break;
	}

	mutex_unlock(&ocelot->fwd_domain_lock);

	return err;
}
EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del);
2272
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
 * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
 * In the special case that it's the NPI port that we're configuring, the
 * length of the tag and optional prefix needs to be accounted for privately,
 * in order to be able to sustain communication at the requested @sdu.
 */
void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	/* Full frame length = payload plus Ethernet header and FCS */
	int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
	int pause_start, pause_stop;
	int atop, atop_tot;

	if (port == ocelot->npi) {
		/* NPI frames carry the DSA tag and, optionally, an injection
		 * prefix, on top of the requested SDU.
		 */
		maxlen += OCELOT_TAG_LEN;

		if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
			maxlen += OCELOT_SHORT_PREFIX_LEN;
		else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
			maxlen += OCELOT_LONG_PREFIX_LEN;
	}

	ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);

	/* Set Pause watermark hysteresis, expressed in buffer cells:
	 * start pausing at 6 frames' worth, stop at 4.
	 */
	pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ;
	pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ;
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START,
			    pause_start);
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
			    pause_stop);

	/* Tail dropping watermarks (also in buffer cells): per-port cutoff at
	 * 9 frames' worth, total cutoff at what remains of the packet buffer.
	 */
	atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
		   OCELOT_BUFFER_CELL_SZ;
	atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
	ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
	ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG);
}
EXPORT_SYMBOL(ocelot_port_set_maxlen);
2313
ocelot_get_max_mtu(struct ocelot * ocelot,int port)2314 int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
2315 {
2316 int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
2317
2318 if (port == ocelot->npi) {
2319 max_mtu -= OCELOT_TAG_LEN;
2320
2321 if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
2322 max_mtu -= OCELOT_SHORT_PREFIX_LEN;
2323 else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
2324 max_mtu -= OCELOT_LONG_PREFIX_LEN;
2325 }
2326
2327 return max_mtu;
2328 }
2329 EXPORT_SYMBOL(ocelot_get_max_mtu);
2330
/* Enable or disable source MAC address learning on @port, and cache the new
 * state in software.
 */
static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
				     bool enabled)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	u32 val = enabled ? ANA_PORT_PORT_CFG_LEARN_ENA : 0;

	ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA,
		       ANA_PORT_PORT_CFG, port);

	ocelot_port->learn_ena = enabled;
}
2345
/* Include or exclude @port from the unknown-unicast flooding PGID. */
static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 val = enabled ? BIT(port) : 0;

	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC);
}
2356
/* Include or exclude @port from all three unknown-multicast flooding PGIDs
 * (non-IP, IPv4 and IPv6).
 */
static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 val = enabled ? BIT(port) : 0;

	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
}
2369
/* Include or exclude @port from the broadcast flooding PGID. */
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
					bool enabled)
{
	u32 val = enabled ? BIT(port) : 0;

	ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC);
}
2380
ocelot_port_pre_bridge_flags(struct ocelot * ocelot,int port,struct switchdev_brport_flags flags)2381 int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
2382 struct switchdev_brport_flags flags)
2383 {
2384 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
2385 BR_BCAST_FLOOD))
2386 return -EINVAL;
2387
2388 return 0;
2389 }
2390 EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
2391
ocelot_port_bridge_flags(struct ocelot * ocelot,int port,struct switchdev_brport_flags flags)2392 void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
2393 struct switchdev_brport_flags flags)
2394 {
2395 if (flags.mask & BR_LEARNING)
2396 ocelot_port_set_learning(ocelot, port,
2397 !!(flags.val & BR_LEARNING));
2398
2399 if (flags.mask & BR_FLOOD)
2400 ocelot_port_set_ucast_flood(ocelot, port,
2401 !!(flags.val & BR_FLOOD));
2402
2403 if (flags.mask & BR_MCAST_FLOOD)
2404 ocelot_port_set_mcast_flood(ocelot, port,
2405 !!(flags.val & BR_MCAST_FLOOD));
2406
2407 if (flags.mask & BR_BCAST_FLOOD)
2408 ocelot_port_set_bcast_flood(ocelot, port,
2409 !!(flags.val & BR_BCAST_FLOOD));
2410 }
2411 EXPORT_SYMBOL(ocelot_port_bridge_flags);
2412
ocelot_port_get_default_prio(struct ocelot * ocelot,int port)2413 int ocelot_port_get_default_prio(struct ocelot *ocelot, int port)
2414 {
2415 int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
2416
2417 return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
2418 }
2419 EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio);
2420
/* Set the port's default QoS class. Returns -ERANGE for priorities beyond
 * the number of traffic classes the hardware supports.
 */
int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
{
	if (prio >= OCELOT_NUM_TC)
		return -ERANGE;

	ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio),
		       ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M,
		       ANA_PORT_QOS_CFG, port);

	return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
2435
/* Resolve the QoS class that @port assigns to frames carrying @dscp.
 * Returns -EOPNOTSUPP when DSCP prioritization is disabled on the port or
 * the (possibly translated) DSCP value is not trusted.
 */
int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp)
{
	int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
	int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);

	/* DSCP prioritization must be enabled on the ingress port */
	if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA))
		return -EOPNOTSUPP;

	/* With translation enabled, the effective entry is the one of the
	 * translated DSCP code point.
	 */
	if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) {
		dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg);
		dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
	}

	/* If the DSCP value is not trusted, the QoS classification falls back
	 * to VLAN PCP or port-based default.
	 */
	if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA))
		return -EOPNOTSUPP;

	return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg);
}
EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio);
2460
/* Map @dscp to QoS class @prio on @port, enabling DSCP-based classification
 * as a side effect. Returns -ERANGE for out-of-range priorities.
 */
int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
{
	int mask;

	if (prio >= OCELOT_NUM_TC)
		return -ERANGE;

	/* There is at least one app table priority (this one), so we need to
	 * make sure DSCP prioritization is enabled on the port.
	 * Also make sure DSCP translation is disabled
	 * (dcbnl doesn't support it).
	 */
	mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
	       ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;

	ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask,
		       ANA_PORT_QOS_CFG, port);

	/* Trust this DSCP value and map it to the given QoS class */
	ocelot_write_rix(ocelot,
			 ANA_DSCP_CFG_DSCP_TRUST_ENA |
			 ANA_DSCP_CFG_QOS_DSCP_VAL(prio),
			 ANA_DSCP_CFG, dscp);

	return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio);
2487
/* Delete the @dscp -> @prio mapping on @port, and disable DSCP-based
 * classification altogether once no trusted DSCP value remains.
 * Always returns 0 (deleting a mapping that is not current is a no-op).
 */
int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
{
	int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
	int mask, i;

	/* During a "dcb app replace" command, the new app table entry will be
	 * added first, then the old one will be deleted. But the hardware only
	 * supports one QoS class per DSCP value (duh), so if we blindly delete
	 * the app table entry for this DSCP value, we end up deleting the
	 * entry with the new priority. Avoid that by checking whether user
	 * space wants to delete the priority which is currently configured, or
	 * something else which is no longer current.
	 */
	if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio)
		return 0;

	/* Untrust this DSCP value */
	ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp);

	/* Scan all 64 DSCP code points; use a distinct variable name so the
	 * outer dscp_cfg is not shadowed (-Wshadow).
	 */
	for (i = 0; i < 64; i++) {
		int cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i);

		/* There are still app table entries on the port, so we need to
		 * keep DSCP enabled, nothing to do.
		 */
		if (cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)
			return 0;
	}

	/* Disable DSCP QoS classification if there isn't any trusted
	 * DSCP value left.
	 */
	mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
	       ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;

	ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port);

	return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio);
2528
ocelot_mirror_get(struct ocelot * ocelot,int to,struct netlink_ext_ack * extack)2529 struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
2530 struct netlink_ext_ack *extack)
2531 {
2532 struct ocelot_mirror *m = ocelot->mirror;
2533
2534 if (m) {
2535 if (m->to != to) {
2536 NL_SET_ERR_MSG_MOD(extack,
2537 "Mirroring already configured towards different egress port");
2538 return ERR_PTR(-EBUSY);
2539 }
2540
2541 refcount_inc(&m->refcount);
2542 return m;
2543 }
2544
2545 m = kzalloc(sizeof(*m), GFP_KERNEL);
2546 if (!m)
2547 return ERR_PTR(-ENOMEM);
2548
2549 m->to = to;
2550 refcount_set(&m->refcount, 1);
2551 ocelot->mirror = m;
2552
2553 /* Program the mirror port to hardware */
2554 ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS);
2555
2556 return m;
2557 }
2558
ocelot_mirror_put(struct ocelot * ocelot)2559 void ocelot_mirror_put(struct ocelot *ocelot)
2560 {
2561 struct ocelot_mirror *m = ocelot->mirror;
2562
2563 if (!refcount_dec_and_test(&m->refcount))
2564 return;
2565
2566 ocelot_write(ocelot, 0, ANA_MIRRORPORTS);
2567 ocelot->mirror = NULL;
2568 kfree(m);
2569 }
2570
/* Start mirroring traffic of @from (ingress or egress direction) towards
 * port @to, taking a reference on the shared mirror session.
 */
int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
			   bool ingress, struct netlink_ext_ack *extack)
{
	struct ocelot_mirror *m;

	m = ocelot_mirror_get(ocelot, to, extack);
	if (IS_ERR(m))
		return PTR_ERR(m);

	if (!ingress) {
		/* Egress direction: add @from to the egress mirror mask */
		ocelot_rmw(ocelot, BIT(from), BIT(from), ANA_EMIRRORPORTS);
		return 0;
	}

	/* Ingress direction: enable source mirroring on @from */
	ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
		       ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
		       ANA_PORT_PORT_CFG, from);

	return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_mirror_add);
2591
/* Stop mirroring traffic of @from in the given direction and release the
 * reference on the shared mirror session.
 */
void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress)
{
	if (ingress)
		ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
			       ANA_PORT_PORT_CFG, from);
	else
		ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS);

	ocelot_mirror_put(ocelot);
}
EXPORT_SYMBOL_GPL(ocelot_port_mirror_del);
2604
/* Bring up one front port in its standalone (non-bridged) configuration:
 * MAC timing, maximum frame length, pause frames, VLAN tag handling,
 * learning disabled, and VCAP lookups enabled.
 */
void ocelot_init_port(struct ocelot *ocelot, int port)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	skb_queue_head_init(&ocelot_port->tx_skbs);

	/* Basic L2 initialization */

	/* Set MAC IFG Gaps
	 * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
	 * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
	 */
	ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
			   DEV_MAC_IFG_CFG);

	/* Load seed (0) and set MAC HDX late collision */
	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
			   DEV_MAC_HDX_CFG_SEED_LOAD,
			   DEV_MAC_HDX_CFG);
	/* NOTE(review): delay presumably lets the seed load latch before
	 * SEED_LOAD is deasserted below — confirm against datasheet.
	 */
	mdelay(1);
	ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
			   DEV_MAC_HDX_CFG);

	/* Set Max Length and maximum tags allowed */
	ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
	ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
			   DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
			   DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
			   DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
			   DEV_MAC_TAGS_CFG);

	/* Set SMAC of Pause frame (00:00:00:00:00:00) */
	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
	ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);

	/* Drop frames with multicast source address */
	ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
		       ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
		       ANA_PORT_DROP_CFG, port);

	/* Set default VLAN and tag type to 8021Q. */
	ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
		       REW_PORT_VLAN_CFG_PORT_TPID_M,
		       REW_PORT_VLAN_CFG, port);

	/* Disable source address learning for standalone mode */
	ocelot_port_set_learning(ocelot, port, false);

	/* Set the port's initial logical port ID value, enable receiving
	 * frames on it, and configure the MAC address learning type to
	 * automatic.
	 */
	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
			 ANA_PORT_PORT_CFG_RECV_ENA |
			 ANA_PORT_PORT_CFG_PORTID_VAL(port),
			 ANA_PORT_PORT_CFG, port);

	/* Enable vcap lookups */
	ocelot_vcap_enable(ocelot, port);
}
EXPORT_SYMBOL(ocelot_init_port);
2669
/* Configure and enable the CPU port module, which is a set of queues
 * accessible through register MMIO, frame DMA or Ethernet (in case
 * NPI mode is used).
 */
static void ocelot_cpu_port_init(struct ocelot *ocelot)
{
	/* The CPU port module sits right after the physical ports */
	int cpu = ocelot->num_phys_ports;

	/* The unicast destination PGID for the CPU port module is unused */
	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
	/* Instead set up a multicast destination PGID for traffic copied to
	 * the CPU. Whitelisted MAC addresses like the port netdevice MAC
	 * addresses will be copied to the CPU via this PGID.
	 */
	ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
	/* Enable frame reception and give the CPU port its own logical ID */
	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
			 ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
			 ANA_PORT_PORT_CFG, cpu);

	/* Enable CPU port module */
	ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
	/* CPU port Injection/Extraction configuration: no tag prefix on
	 * either direction by default.
	 */
	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_NONE);
	ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_NONE);

	/* Configure the CPU port to be VLAN aware */
	ocelot_write_gix(ocelot,
			 ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) |
			 ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
			 ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
			 ANA_PORT_VLAN_CFG, cpu);
}
2704
ocelot_detect_features(struct ocelot * ocelot)2705 static void ocelot_detect_features(struct ocelot *ocelot)
2706 {
2707 int mmgt, eq_ctrl;
2708
2709 /* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds
2710 * the number of 240-byte free memory words (aka 4-cell chunks) and not
2711 * 192 bytes as the documentation incorrectly says.
2712 */
2713 mmgt = ocelot_read(ocelot, SYS_MMGT);
2714 ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
2715
2716 eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
2717 ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
2718 }
2719
ocelot_mem_init_status(struct ocelot * ocelot)2720 static int ocelot_mem_init_status(struct ocelot *ocelot)
2721 {
2722 unsigned int val;
2723 int err;
2724
2725 err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
2726 &val);
2727
2728 return err ?: val;
2729 }
2730
/* Initialize the switch core memories and then enable the core.
 * Returns 0 on success or a negative error from regmap / the poll timeout.
 */
int ocelot_reset(struct ocelot *ocelot)
{
	int err;
	u32 val;

	/* Request initialization of the switch memories */
	err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
	if (err)
		return err;

	err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
	if (err)
		return err;

	/* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
	 * 100us) before enabling the switch core.
	 */
	err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
				 MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
	if (err)
		return err;

	/* NOTE(review): MEM_ENA is deliberately written a second time after
	 * MEM_INIT completes — presumably required by the hardware init
	 * sequence; confirm against the datasheet before "deduplicating".
	 */
	err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
	if (err)
		return err;

	return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
}
EXPORT_SYMBOL(ocelot_reset);
2759
/* One-time switch-wide initialization: locks, ordered workqueue, statistics,
 * MAC/VLAN/VCAP tables, CPU port module, counters, aggregation, ageing,
 * flooding PGIDs and CPU queue mapping.
 * Returns 0 on success or a negative error code; resources acquired here are
 * released again on the error paths.
 */
int ocelot_init(struct ocelot *ocelot)
{
	int i, ret;
	u32 port;

	/* Optional platform-specific reset hook */
	if (ocelot->ops->reset) {
		ret = ocelot->ops->reset(ocelot);
		if (ret) {
			dev_err(ocelot->dev, "Switch reset failed\n");
			return ret;
		}
	}

	mutex_init(&ocelot->ptp_lock);
	mutex_init(&ocelot->mact_lock);
	mutex_init(&ocelot->fwd_domain_lock);
	mutex_init(&ocelot->tas_lock);
	spin_lock_init(&ocelot->ptp_clock_lock);
	spin_lock_init(&ocelot->ts_id_lock);

	ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
	if (!ocelot->owq)
		return -ENOMEM;

	ret = ocelot_stats_init(ocelot);
	if (ret)
		goto err_stats_init;

	INIT_LIST_HEAD(&ocelot->multicast);
	INIT_LIST_HEAD(&ocelot->pgids);
	INIT_LIST_HEAD(&ocelot->vlans);
	INIT_LIST_HEAD(&ocelot->lag_fdbs);
	ocelot_detect_features(ocelot);
	ocelot_mact_init(ocelot);
	ocelot_vlan_init(ocelot);
	ocelot_vcap_init(ocelot);
	ocelot_cpu_port_init(ocelot);

	/* Optional PSFP (per-stream filtering and policing) hook */
	if (ocelot->ops->psfp_init)
		ocelot->ops->psfp_init(ocelot);

	if (ocelot->mm_supported) {
		ret = ocelot_mm_init(ocelot);
		if (ret)
			goto err_mm_init;
	}

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		/* Clear all counters (5 groups) */
		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
			     SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
			     SYS_STAT_CFG);
	}

	/* Only use S-Tag */
	ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);

	/* Aggregation mode: hash on MAC addresses, IPv4 addresses/ports and
	 * IPv6 flow label/ports for LAG traffic distribution.
	 */
	ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
			     ANA_AGGR_CFG_AC_DMAC_ENA |
			     ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
			     ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA |
			     ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA |
			     ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA,
			     ANA_AGGR_CFG);

	/* Set MAC age time to default value. The entry is aged after
	 * 2*AGE_PERIOD
	 */
	ocelot_write(ocelot,
		     ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
		     ANA_AUTOAGE);

	/* Disable learning for frames discarded by VLAN ingress filtering */
	regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);

	/* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */
	ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
		     SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);

	/* Setup flooding PGIDs */
	for (i = 0; i < ocelot->num_flooding_pgids; i++)
		ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
				 ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
				 ANA_FLOODING_FLD_UNICAST(PGID_UC),
				 ANA_FLOODING, i);
	ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
		     ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
		     ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
		     ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
		     ANA_FLOODING_IPMC);

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		/* Transmit the frame to the local port. */
		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
		/* Do not forward BPDU frames to the front ports. */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
				 ANA_PORT_CPU_FWD_BPDU_CFG,
				 port);
		/* Ensure bridging is disabled */
		ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
	}

	/* Non-reserved multicast destination PGIDs initially include all
	 * front ports.
	 */
	for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
		u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));

		ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
	}

	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE);

	/* Allow broadcast and unknown L2 multicast to the CPU. */
	ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
		       ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
		       ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
		       ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
		       ANA_PGID_PGID, PGID_BC);
	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);

	/* Allow manual injection via DEVCPU_QS registers, and byte swap these
	 * registers endianness.
	 */
	ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
			 QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
	ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
			 QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
	/* Map control traffic classes to CPU queues */
	ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
		     ANA_CPUQ_CFG_CPUQ_LRN(2) |
		     ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
		     ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
		     ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
		     ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
		     ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
		     ANA_CPUQ_CFG_CPUQ_IGMP(6) |
		     ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
	for (i = 0; i < 16; i++)
		ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
				 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
				 ANA_CPUQ_8021_CFG, i);

	return 0;

err_mm_init:
	ocelot_stats_deinit(ocelot);
err_stats_init:
	destroy_workqueue(ocelot->owq);
	return ret;
}
EXPORT_SYMBOL(ocelot_init);
2912
/* Tear down switch-wide resources allocated by ocelot_init(), in reverse
 * order of acquisition.
 */
void ocelot_deinit(struct ocelot *ocelot)
{
	ocelot_stats_deinit(ocelot);
	destroy_workqueue(ocelot->owq);
}
EXPORT_SYMBOL(ocelot_deinit);
2919
/* Release the per-port resources allocated by ocelot_init_port(): drop any
 * skbs still queued for TX timestamping.
 */
void ocelot_deinit_port(struct ocelot *ocelot, int port)
{
	struct ocelot_port *ocelot_port = ocelot->ports[port];

	skb_queue_purge(&ocelot_port->tx_skbs);
}
EXPORT_SYMBOL(ocelot_deinit_port);
2927
2928 MODULE_LICENSE("Dual MIT/GPL");
2929