// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>

#include "lan966x_main.h"

static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
{
	u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
	int p, lag, i;

	/* Reset destination and aggregation PGIDS */
	for (p = 0; p < lan966x->num_phys_ports; ++p)
		lan_wr(ANA_PGID_PGID_SET(BIT(p)),
		       lan966x, ANA_PGID(p));

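	/* Seed all aggregation PGIDs (PGID_AGGR up to, but not including,
	 * PGID_SRC) with the full front-port mask; the per-LAG loop below
	 * narrows them down to the active members of each bond.
	 */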
	for (p = PGID_AGGR; p < PGID_SRC; ++p)
		lan_wr(ANA_PGID_PGID_SET(visited),
		       lan966x, ANA_PGID(p));

	/* The visited ports bitmask holds the list of ports offloading any
	 * bonding interface. Initially we mark all these ports as unvisited,
	 * then every time we visit a port in this bitmask, we know that it is
	 * the lowest numbered port, i.e. the one whose logical ID == physical
	 * port ID == LAG ID. So we mark as visited all further ports in the
	 * bitmask that are offloading the same bonding interface. This way,
	 * we set up the aggregation PGIDs only once per bonding interface.
	 */
	for (p = 0; p < lan966x->num_phys_ports; ++p) {
		struct lan966x_port *port = lan966x->ports[p];

		if (!port || !port->bond)
			continue;

		visited &= ~BIT(p);
	}

	/* Now, set PGIDs for each active LAG */
	for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
		struct net_device *bond = lan966x->ports[lag]->bond;
		int num_active_ports = 0;
		unsigned long bond_mask;
		u8 aggr_idx[16];

		if (!bond || (visited & BIT(lag)))
			continue;

		bond_mask = lan966x_lag_get_mask(lan966x, bond);

		for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
			struct lan966x_port *port = lan966x->ports[p];

			lan_wr(ANA_PGID_PGID_SET(bond_mask),
			       lan966x, ANA_PGID(p));
			if (port->lag_tx_active)
				aggr_idx[num_active_ports++] = p;
		}

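		/* Spread the 16 aggregation codes round-robin over the active
		 * members of this LAG. For example, in a two-port bond on
		 * chip ports 2 and 3 with both members active, half of the
		 * codes select port 2 and the other half port 3, so hashed
		 * flows are balanced across the two links.
		 */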
		for (i = PGID_AGGR; i < PGID_SRC; ++i) {
			u32 ac;

			ac = lan_rd(lan966x, ANA_PGID(i));
			ac &= ~bond_mask;
			/* Don't do division by zero if there was no active
			 * port. Just make all aggregation codes zero.
			 */
			if (num_active_ports)
				ac |= BIT(aggr_idx[i % num_active_ports]);
			lan_wr(ANA_PGID_PGID_SET(ac),
			       lan966x, ANA_PGID(i));
		}

		/* Mark all ports in the same LAG as visited to avoid applying
		 * the same config again.
		 */
		for (p = lag; p < lan966x->num_phys_ports; p++) {
			struct lan966x_port *port = lan966x->ports[p];

			if (!port)
				continue;

			if (port->bond == bond)
				visited |= BIT(p);
		}
	}
}

static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	u32 bond_mask;
	u32 lag_id;
	int p;

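	/* The logical port ID (PORTID_VAL) doubles as the LAG ID: a port that
	 * is not bonded keeps its own chip port number, while every member of
	 * a bond shares the number of its lowest-numbered member.
	 */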
	for (p = 0; p < lan966x->num_phys_ports; ++p) {
		port = lan966x->ports[p];
		if (!port)
			continue;

		lag_id = port->chip_port;

		bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
		if (bond_mask)
			lag_id = __ffs(bond_mask);

		lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
			ANA_PORT_CFG_PORTID_VAL,
			lan966x, ANA_PORT_CFG(port->chip_port));
	}
}

static void lan966x_lag_update_ids(struct lan966x *lan966x)
{
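	/* Refresh everything that is derived from LAG membership: the logical
	 * port IDs, the forwarding masks and the aggregation PGIDs.
	 */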
	lan966x_lag_set_port_ids(lan966x);
	lan966x_update_fwd_mask(lan966x);
	lan966x_lag_set_aggr_pgids(lan966x);
}

int lan966x_lag_port_join(struct lan966x_port *port,
			  struct net_device *brport_dev,
			  struct net_device *bond,
			  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	u32 lag_id = -1;
	u32 bond_mask;
	int err;

	bond_mask = lan966x_lag_get_mask(lan966x, bond);
	if (bond_mask)
		lag_id = __ffs(bond_mask);

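	/* lag_id still refers to the lowest-numbered port that was already in
	 * the bond (or stays at -1 for a previously empty bond); record our
	 * membership before the IDs and PGIDs are recomputed.
	 */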
	port->bond = bond;
	lan966x_lag_update_ids(lan966x);

	err = switchdev_bridge_port_offload(brport_dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		goto out;

	lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));

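	/* If this port just became the lowest-numbered member of the bond,
	 * take over the MAC table entries owned by the previous first port.
	 */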
	if (lan966x_lag_first_port(port->bond, port->dev) &&
	    lag_id != -1)
		lan966x_mac_lag_replace_port_entry(lan966x,
						   lan966x->ports[lag_id],
						   port);

	return 0;

out:
	port->bond = NULL;
	lan966x_lag_update_ids(lan966x);

	return err;
}

void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
{
	struct lan966x *lan966x = port->lan966x;
	u32 bond_mask;
	u32 lag_id;

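	/* When the first (lowest-numbered) member leaves, hand its MAC table
	 * entries over to the next remaining member, or drop them entirely if
	 * this was the last port in the bond.
	 */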
	if (lan966x_lag_first_port(port->bond, port->dev)) {
		bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
		bond_mask &= ~BIT(port->chip_port);
		if (bond_mask) {
			lag_id = __ffs(bond_mask);
			lan966x_mac_lag_replace_port_entry(lan966x, port,
							   lan966x->ports[lag_id]);
		} else {
			lan966x_mac_lag_remove_port_entry(lan966x, port);
		}
	}

	port->bond = NULL;
	lan966x_lag_update_ids(lan966x);
	lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
}

static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
					      enum netdev_lag_hash hash_type)
{
	int p;

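	/* The aggregation hash is programmed in the global ANA_AGGR_CFG
	 * register, so all offloaded bonds have to agree on a single hash
	 * type.
	 */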
	for (p = 0; p < lan966x->num_phys_ports; ++p) {
		struct lan966x_port *port = lan966x->ports[p];

		if (!port || !port->bond)
			continue;

		if (port->hash_type != hash_type)
			return false;
	}

	return true;
}

int lan966x_lag_port_prechangeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct netdev_lag_upper_info *lui;
	struct netlink_ext_ack *extack;

	extack = netdev_notifier_info_to_extack(&info->info);
	lui = info->upper_info;
	if (!lui) {
		port->hash_type = NETDEV_LAG_HASH_NONE;
		return NOTIFY_DONE;
	}

	if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "LAG device using unsupported Tx type");
		return -EINVAL;
	}

	if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "LAG devices can have only the same hash_type");
		return -EINVAL;
	}

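	/* Translate the bonding hash policy into the set of packet fields
	 * (MAC addresses, IP addresses, TCP/UDP ports) that contribute to
	 * the switch aggregation code.
	 */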
	switch (lui->hash_type) {
	case NETDEV_LAG_HASH_L2:
		lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
		       lan966x, ANA_AGGR_CFG);
		break;
	case NETDEV_LAG_HASH_L34:
		lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
		       lan966x, ANA_AGGR_CFG);
		break;
	case NETDEV_LAG_HASH_L23:
		lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
		       lan966x, ANA_AGGR_CFG);
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "LAG device using unsupported hash type");
		return -EINVAL;
	}

	port->hash_type = lui->hash_type;

	return NOTIFY_OK;
}

int lan966x_lag_port_changelowerstate(struct net_device *dev,
				      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag = info->lower_state_info;
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	bool is_active;

	if (!port->bond)
		return NOTIFY_DONE;

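	/* A member only carries LAG traffic while its link is up and the
	 * bonding driver has TX enabled on it; recompute the aggregation
	 * PGIDs whenever that state flips.
	 */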
	is_active = lag->link_up && lag->tx_enabled;
	if (port->lag_tx_active == is_active)
		return NOTIFY_DONE;

	port->lag_tx_active = is_active;
	lan966x_lag_set_aggr_pgids(lan966x);

	return NOTIFY_OK;
}

int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
				      struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port;
	struct net_device *lower;
	struct list_head *iter;
	int err;

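	/* Propagate PRECHANGEUPPER events seen on the bond to every switch
	 * port that is currently a member of that bond.
	 */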
	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!lan966x_netdevice_check(lower))
			continue;

		port = netdev_priv(lower);
		if (port->bond != dev)
			continue;

		err = lan966x_port_prechangeupper(lower, dev, info);
		if (err)
			return err;
	}

	return NOTIFY_DONE;
}

int lan966x_lag_netdev_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port;
	struct net_device *lower;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!lan966x_netdevice_check(lower))
			continue;

		port = netdev_priv(lower);
		if (port->bond != dev)
			continue;

		err = lan966x_port_changeupper(lower, dev, info);
		if (err)
			return err;
	}

	return NOTIFY_DONE;
}

bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	unsigned long bond_mask;

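	/* "First" means the lowest-numbered chip port in the bond; that port
	 * number is what the rest of the driver uses as the LAG ID.
	 */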
	if (port->bond != lag)
		return false;

	bond_mask = lan966x_lag_get_mask(lan966x, lag);
	if (bond_mask && port->chip_port == __ffs(bond_mask))
		return true;

	return false;
}

u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
{
	struct lan966x_port *port;
	u32 mask = 0;
	int p;

	if (!bond)
		return mask;

	for (p = 0; p < lan966x->num_phys_ports; p++) {
		port = lan966x->ports[p];
		if (!port)
			continue;

		if (port->bond == bond)
			mask |= BIT(p);
	}

	return mask;
}