// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Small helper utilities.
 */

#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr-enums.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pip.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

/**
 * @INTERNAL
 * These are the interface types needed to convert interface numbers to ipd
 * ports.
 *
 * @param GMII
 *	This type is used for sgmii, rgmii, xaui and rxaui interfaces.
 * @param ILK
 *	This type is used for ilk interfaces.
 * @param SRIO
 *	This type is used for Serial RapidIO interfaces.
 * @param NPI
 *	This type is used for npi interfaces.
 * @param LB
 *	This type is used for loopback interfaces.
 * @param INVALID_IF_TYPE
 *	This type indicates the interface hasn't been configured.
 */
enum port_map_if_type { INVALID_IF_TYPE = 0, GMII, ILK, SRIO, NPI, LB };

/**
 * @INTERNAL
 * This structure is used to map interface numbers to ipd ports.
 *
 * @param type
 *	Interface type
 * @param first_ipd_port
 *	First IPD port number assigned to this interface.
 * @param last_ipd_port
 *	Last IPD port number assigned to this interface.
 * @param ipd_port_adj
 *	Different octeon chips require different ipd ports for the
 *	same interface port/mode configuration. This value is used
 *	to account for that difference.
 */
struct ipd_port_map {
	enum port_map_if_type type;
	int first_ipd_port;
	int last_ipd_port;
	int ipd_port_adj;
};
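
/*
 * Worked example (illustrative): in the 68xx map below, interface 0
 * is GMII with first_ipd_port = 0x800 and ipd_port_adj = 0x40.
 * cvmx_helper_get_ipd_port() therefore yields 0x800 + 16 * index for
 * an SGMII port, but 0x800 + 0x40 for the single XAUI/RXAUI port,
 * which is where ipd_port_adj comes into play.
 */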

/**
 * @INTERNAL
 * Interface number to ipd port map for the octeon 68xx.
 */
static const struct ipd_port_map ipd_port_map_68xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x8ff, 0x40 }, /* Interface 0 */
	{ GMII, 0x900, 0x9ff, 0x40 }, /* Interface 1 */
	{ GMII, 0xa00, 0xaff, 0x40 }, /* Interface 2 */
	{ GMII, 0xb00, 0xbff, 0x40 }, /* Interface 3 */
	{ GMII, 0xc00, 0xcff, 0x40 }, /* Interface 4 */
	{ ILK, 0x400, 0x4ff, 0x00 },  /* Interface 5 */
	{ ILK, 0x500, 0x5ff, 0x00 },  /* Interface 6 */
	{ NPI, 0x100, 0x120, 0x00 },  /* Interface 7 */
	{ LB, 0x000, 0x008, 0x00 },   /* Interface 8 */
};

/**
 * @INTERNAL
 * Interface number to ipd port map for the octeon 78xx.
 *
 * This mapping corresponds to the WQE(CHAN) enumeration in
 * HRM Section 11.15 (PKI_CHAN_E) and Section 11.6.
 */
static const struct ipd_port_map ipd_port_map_78xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 }, /* Interface 0 - BGX0 */
	{ GMII, 0x900, 0x93f, 0x00 }, /* Interface 1 - BGX1 */
	{ GMII, 0xa00, 0xa3f, 0x00 }, /* Interface 2 - BGX2 */
	{ GMII, 0xb00, 0xb3f, 0x00 }, /* Interface 3 - BGX3 */
	{ GMII, 0xc00, 0xc3f, 0x00 }, /* Interface 4 - BGX4 */
	{ GMII, 0xd00, 0xd3f, 0x00 }, /* Interface 5 - BGX5 */
	{ ILK, 0x400, 0x4ff, 0x00 },  /* Interface 6 - ILK0 */
	{ ILK, 0x500, 0x5ff, 0x00 },  /* Interface 7 - ILK1 */
	{ NPI, 0x100, 0x13f, 0x00 },  /* Interface 8 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },   /* Interface 9 - LOOPBACK */
};

/**
 * @INTERNAL
 * Interface number to ipd port map for the octeon 73xx.
 */
static const struct ipd_port_map ipd_port_map_73xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 }, /* Interface 0 - BGX(0,0-3) */
	{ GMII, 0x900, 0x93f, 0x00 }, /* Interface 1 - BGX(1,0-3) */
	{ GMII, 0xa00, 0xa3f, 0x00 }, /* Interface 2 - BGX(2,0-3) */
	{ NPI, 0x100, 0x17f, 0x00 },  /* Interface 3 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },   /* Interface 4 - LOOPBACK */
};

/**
 * @INTERNAL
 * Interface number to ipd port map for the octeon 75xx.
 */
static const struct ipd_port_map ipd_port_map_75xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 }, /* Interface 0 - BGX0 */
	{ SRIO, 0x240, 0x241, 0x00 }, /* Interface 1 - SRIO 0 */
	{ SRIO, 0x242, 0x243, 0x00 }, /* Interface 2 - SRIO 1 */
	{ NPI, 0x100, 0x13f, 0x00 },  /* Interface 3 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },   /* Interface 4 - LOOPBACK */
};

/**
 * Convert an interface mode into a human-readable string
 *
 * @param mode   Mode to convert
 *
 * Return: String
 */
const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode)
{
	switch (mode) {
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		return "DISABLED";
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
		return "RGMII";
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		return "GMII";
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		return "SPI";
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		return "PCIE";
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
		return "XAUI";
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		return "RXAUI";
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
		return "SGMII";
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		return "QSGMII";
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		return "PICMG";
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		return "NPI";
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		return "LOOP";
	case CVMX_HELPER_INTERFACE_MODE_SRIO:
		return "SRIO";
	case CVMX_HELPER_INTERFACE_MODE_ILK:
		return "ILK";
	case CVMX_HELPER_INTERFACE_MODE_AGL:
		return "AGL";
	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		return "XLAUI";
	case CVMX_HELPER_INTERFACE_MODE_XFI:
		return "XFI";
	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
		return "40G_KR4";
	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
		return "10G_KR";
	case CVMX_HELPER_INTERFACE_MODE_MIXED:
		return "MIXED";
	}
	return "UNKNOWN";
}
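
/*
 * Example usage (illustrative):
 *
 *	cvmx_helper_interface_mode_t mode =
 *		cvmx_helper_interface_get_mode(xiface);
 *	debug("interface %d mode: %s\n", xiface,
 *	      cvmx_helper_interface_mode_to_string(mode));
 */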

/**
 * @INTERNAL
 *
 * Extract NO_WPTR mode from PIP/IPD register
 */
static int __cvmx_ipd_mode_no_wptr(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
		cvmx_ipd_ctl_status_t ipd_ctl_status;

		ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
		return ipd_ctl_status.s.no_wptr;
	}
	return 0;
}

static cvmx_buf_ptr_t __cvmx_packet_short_ptr[4];
static int8_t __cvmx_wqe_pool = -1;

/**
 * @INTERNAL
 * Prepare packet pointer templates for dynamic short
 * packets.
 */
static void cvmx_packet_short_ptr_calculate(void)
{
	unsigned int i, off;
	union cvmx_pip_gbl_cfg pip_gbl_cfg;
	union cvmx_pip_ip_offset pip_ip_offset;

	/* Fill in the common values for all cases */
	for (i = 0; i < 4; i++) {
		if (__cvmx_ipd_mode_no_wptr())
			/* packet pool, set to 0 in hardware */
			__cvmx_wqe_pool = 0;
		else
			/* WQE pool as configured */
			__cvmx_wqe_pool = csr_rd(CVMX_IPD_WQE_FPA_QUEUE) & 7;

		__cvmx_packet_short_ptr[i].s.pool = __cvmx_wqe_pool;
		__cvmx_packet_short_ptr[i].s.size = cvmx_fpa_get_block_size(__cvmx_wqe_pool);
		__cvmx_packet_short_ptr[i].s.size -= 32;
		__cvmx_packet_short_ptr[i].s.addr = 32;
	}

	pip_gbl_cfg.u64 = csr_rd(CVMX_PIP_GBL_CFG);
	pip_ip_offset.u64 = csr_rd(CVMX_PIP_IP_OFFSET);

	/* RAW_FULL: index = 0 */
	i = 0;
	off = pip_gbl_cfg.s.raw_shf;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* NON-IP: index = 1 */
	i = 1;
	off = pip_gbl_cfg.s.nip_shf;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* IPv4: index = 2 */
	i = 2;
	off = (pip_ip_offset.s.offset << 3) + 4;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* IPv6: index = 3 */
	i = 3;
	off = (pip_ip_offset.s.offset << 3) + 0;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* For IPv4/IPv6: subtract work->word2.s.ip_offset
	 * from addr, if it is smaller than IP_OFFSET[OFFSET]*8,
	 * which is stored in __cvmx_packet_short_ptr[3].s.addr
	 */
}
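
/*
 * Worked example (illustrative): if the WQE pool block size were 2048
 * bytes and PIP_IP_OFFSET[OFFSET] = 4, then for the IPv4 template
 * (index 2) off = (4 << 3) + 4 = 36, so addr = 32 + 36 = 68, size
 * shrinks by the same 36 bytes to 1980, and back stays 0 because
 * 36 >> 7 == 0.
 */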

/**
 * Extract packet data buffer pointer from work queue entry.
 *
 * Returns the legacy (Octeon1/Octeon2) buffer pointer structure
 * for the linked buffer list.
 * On CN78XX, the native buffer pointer structure is converted into
 * the legacy format.
 * The legacy buf_ptr is then stored in the WQE, and word0 reserved
 * field is set to indicate that the buffer pointers were translated.
 * If the packet data is only found inside the work queue entry,
 * a standard buffer pointer structure is created for it.
 */
cvmx_buf_ptr_t cvmx_wqe_get_packet_ptr(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (void *)work;
		cvmx_buf_ptr_t optr, lptr;
		cvmx_buf_ptr_pki_t nptr;
		unsigned int pool, bufs;
		int node = cvmx_get_node_num();

		/* In case of repeated calls of this function */
		if (wqe->pki_wqe_translated || wqe->word2.software) {
			optr.u64 = wqe->packet_ptr.u64;
			return optr;
		}

		bufs = wqe->word0.bufs;
		pool = wqe->word0.aura;
		nptr.u64 = wqe->packet_ptr.u64;

		optr.u64 = 0;
		optr.s.pool = pool;
		optr.s.addr = nptr.addr;
		if (bufs == 1) {
			optr.s.size = pki_dflt_pool[node].buffer_size -
				      pki_dflt_style[node].parm_cfg.first_skip - 8 -
				      wqe->word0.apad;
		} else {
			optr.s.size = nptr.size;
		}

		/* Calculate the "back" offset */
		if (!nptr.packet_outside_wqe) {
			optr.s.back = (nptr.addr -
				       cvmx_ptr_to_phys(wqe)) >> 7;
		} else {
			optr.s.back =
				(pki_dflt_style[node].parm_cfg.first_skip +
				 8 + wqe->word0.apad) >> 7;
		}
		lptr = optr;

		/* Follow pointer and convert all linked pointers */
		while (bufs > 1) {
			void *vptr;

			vptr = cvmx_phys_to_ptr(lptr.s.addr);

			memcpy(&nptr, vptr - 8, 8);
			/*
			 * Errata (PKI-20776): PKI_BUFLINK_S's are endian-swapped.
			 * CN78XX pass 1.x has a bug where the packet pointer
			 * in each segment is written in the opposite
			 * endianness of the configured mode. Fix these here.
			 */
			if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
				nptr.u64 = __builtin_bswap64(nptr.u64);
			lptr.u64 = 0;
			lptr.s.pool = pool;
			lptr.s.addr = nptr.addr;
			lptr.s.size = nptr.size;
			lptr.s.back = (pki_dflt_style[0].parm_cfg.later_skip + 8) >>
				      7; /* TBD: not guaranteed !! */

			memcpy(vptr - 8, &lptr, 8);
			bufs--;
		}
		/* Store translated bufptr in WQE, and set indicator */
		wqe->pki_wqe_translated = 1;
		wqe->packet_ptr.u64 = optr.u64;
		return optr;

	} else {
		unsigned int i;
		unsigned int off = 0;
		cvmx_buf_ptr_t bptr;

		if (cvmx_likely(work->word2.s.bufs > 0))
			return work->packet_ptr;

		if (cvmx_unlikely(work->word2.s.software))
			return work->packet_ptr;

		/* first packet, precalculate packet_ptr templates */
		if (cvmx_unlikely(__cvmx_packet_short_ptr[0].u64 == 0))
			cvmx_packet_short_ptr_calculate();

		/* calculate template index */
		i = work->word2.s_cn38xx.not_IP | work->word2.s_cn38xx.rcv_error;
		i = 2 ^ (i << 1);

		/* IPv4/IPv6: Adjust IP offset */
		if (cvmx_likely(i & 2)) {
			i |= work->word2.s.is_v6;
			off = work->word2.s.ip_offset;
		} else {
			/* RAWFULL/RAWSCHED should be handled here */
			i = 1; /* not-IP */
			off = 0;
		}

		/* Get the right template */
		bptr = __cvmx_packet_short_ptr[i];
		bptr.s.addr -= off;
		bptr.s.back = bptr.s.addr >> 7;

		/* Add actual WQE paddr to the template offset */
		bptr.s.addr += cvmx_ptr_to_phys(work);

		/* Adjust word2.bufs so that _free_data() handles it
		 * in the same way as PKO
		 */
		work->word2.s.bufs = 1;

		/* Store the new buffer pointer back into WQE */
		work->packet_ptr = bptr;

		/* Return the synthetic buffer pointer */
		return bptr;
	}
}
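
/*
 * Example (illustrative only, assuming the usual cvmx-pow receive
 * path is available in this build):
 *
 *	cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
 *	if (work) {
 *		cvmx_buf_ptr_t ptr = cvmx_wqe_get_packet_ptr(work);
 *		void *data = cvmx_phys_to_ptr(ptr.s.addr);
 *		... process packet data here ...
 *	}
 */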
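/**
 * Free a work queue entry.
 *
 * The WQE buffer is returned to its FPA pool (or aura on cn78xx-style
 * chips), unless the first packet data buffer still resides in the
 * same buffer as the WQE, in which case nothing is freed here.
 *
 * @param work   Work queue entry to free
 */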
void cvmx_wqe_free(cvmx_wqe_t *work)
{
	unsigned int bufs, ncl = 1;
	u64 paddr, paddr1;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (void *)work;
		cvmx_fpa3_gaura_t aura;
		cvmx_buf_ptr_pki_t bptr;

		bufs = wqe->word0.bufs;

		if (!wqe->pki_wqe_translated && bufs != 0) {
			/* Handle cn78xx native untranslated WQE */

			bptr = wqe->packet_ptr;

			/* Do nothing - first packet buffer shares WQE buffer */
			if (!bptr.packet_outside_wqe)
				return;
		} else if (cvmx_likely(bufs != 0)) {
			/* Handle translated 78XX WQE */
			paddr = (work->packet_ptr.s.addr & (~0x7full)) -
				(work->packet_ptr.s.back << 7);
			paddr1 = cvmx_ptr_to_phys(work);

			/* do not free WQE if it contains the first data buffer */
			if (paddr == paddr1)
				return;
		}

		/* WQE is separate from packet buffer, free it */
		aura = __cvmx_fpa3_gaura(wqe->word0.aura >> 10, wqe->word0.aura & 0x3ff);

		cvmx_fpa3_free(work, aura, ncl);
	} else {
		/* handle legacy WQE */
		bufs = work->word2.s_cn38xx.bufs;

		if (cvmx_likely(bufs != 0)) {
			/* Check if the first data buffer is inside WQE */
			paddr = (work->packet_ptr.s.addr & (~0x7full)) -
				(work->packet_ptr.s.back << 7);
			paddr1 = cvmx_ptr_to_phys(work);

			/* do not free WQE if it contains the first data buffer */
			if (paddr == paddr1)
				return;
		}

		/* precalculate packet_ptr, WQE pool number */
		if (cvmx_unlikely(__cvmx_wqe_pool < 0))
			cvmx_packet_short_ptr_calculate();
		cvmx_fpa1_free(work, __cvmx_wqe_pool, ncl);
	}
}

/**
 * Free the packet buffers contained in a work queue entry.
 * The work queue entry is also freed if it contains packet data.
 * If however the packet starts outside the WQE, the WQE will
 * not be freed. The application should call cvmx_wqe_free()
 * to free the WQE buffer that contains no packet data.
 *
 * @param work   Work queue entry with packet to free
 */
void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
{
	u64 number_buffers;
	u64 start_of_buffer;
	u64 next_buffer_ptr;
	cvmx_fpa3_gaura_t aura;
	unsigned int ncl;
	cvmx_buf_ptr_t buffer_ptr;
	cvmx_buf_ptr_pki_t bptr;
	cvmx_wqe_78xx_t *wqe = (void *)work;
	int o3_pki_wqe = 0;

	number_buffers = cvmx_wqe_get_bufs(work);

	buffer_ptr.u64 = work->packet_ptr.u64;

	/* Zero-out WQE WORD3 so that the WQE is freed by cvmx_wqe_free() */
	work->packet_ptr.u64 = 0;

	if (number_buffers == 0)
		return;

	/* Interpret PKI-style bufptr unless it has been translated */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) &&
	    !wqe->pki_wqe_translated) {
		o3_pki_wqe = 1;
		cvmx_wqe_pki_errata_20776(work);
		aura = __cvmx_fpa3_gaura(wqe->word0.aura >> 10,
					 wqe->word0.aura & 0x3ff);
	} else {
		start_of_buffer = ((buffer_ptr.s.addr >> 7) -
				   buffer_ptr.s.back) << 7;
		next_buffer_ptr =
			*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
		/*
		 * Since the number of buffers is not zero, we know this is not
		 * a dynamic short packet. We need to check if it is a packet
		 * received with IPD_CTL_STATUS[NO_WPTR]. If this is true,
		 * we need to free all buffers except for the first one.
		 * The caller doesn't expect their WQE pointer to be freed
		 */
		if (cvmx_ptr_to_phys(work) == start_of_buffer) {
			buffer_ptr.u64 = next_buffer_ptr;
			number_buffers--;
		}
	}
	while (number_buffers--) {
		if (o3_pki_wqe) {
			bptr.u64 = buffer_ptr.u64;

			ncl = (bptr.size + CVMX_CACHE_LINE_SIZE - 1) /
				CVMX_CACHE_LINE_SIZE;

			/* XXX- assumes the buffer is cache-line aligned */
			start_of_buffer = (bptr.addr >> 7) << 7;

			/*
			 * Read pointer to next buffer before we free the
			 * current buffer.
			 */
			next_buffer_ptr = *(uint64_t *)cvmx_phys_to_ptr(bptr.addr - 8);
			/* FPA AURA comes from WQE, includes node */
			cvmx_fpa3_free(cvmx_phys_to_ptr(start_of_buffer),
				       aura, ncl);
		} else {
			ncl = (buffer_ptr.s.size + CVMX_CACHE_LINE_SIZE - 1) /
				      CVMX_CACHE_LINE_SIZE +
			      buffer_ptr.s.back;
			/*
			 * Calculate buffer start using the "back" offset.
			 * Remember the back pointer is in cache lines,
			 * not 64-bit words.
			 */
			start_of_buffer = ((buffer_ptr.s.addr >> 7) -
					   buffer_ptr.s.back) << 7;
			/*
			 * Read pointer to next buffer before we free
			 * the current buffer.
			 */
			next_buffer_ptr =
				*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
			/* FPA pool comes from buf_ptr itself */
			if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
				aura = cvmx_fpa1_pool_to_fpa3_aura(buffer_ptr.s.pool);
				cvmx_fpa3_free(cvmx_phys_to_ptr(start_of_buffer),
					       aura, ncl);
			} else {
				cvmx_fpa1_free(cvmx_phys_to_ptr(start_of_buffer),
					       buffer_ptr.s.pool, ncl);
			}
		}
		buffer_ptr.u64 = next_buffer_ptr;
	}
}
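
/*
 * Typical teardown sequence (illustrative): release the packet data
 * buffers first, then release the WQE buffer itself if it held no
 * packet data:
 *
 *	cvmx_helper_free_packet_data(work);
 *	cvmx_wqe_free(work);
 */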

/**
 * @INTERNAL
 * Setup the common GMX settings that determine the number of
 * ports. These settings apply to almost all configurations of all
 * chips.
 *
 * @param xiface Interface to configure
 * @param num_ports Number of ports on the interface
 *
 * Return: Zero on success, negative on failure
 */
int __cvmx_helper_setup_gmx(int xiface, int num_ports)
{
	union cvmx_gmxx_tx_prts gmx_tx_prts;
	union cvmx_gmxx_rx_prts gmx_rx_prts;
	union cvmx_pko_reg_gmx_port_mode pko_mode;
	union cvmx_gmxx_txx_thresh gmx_tx_thresh;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int index;

	/*
	 * The common BGX settings are already done in the appropriate
	 * enable functions, nothing to do here.
	 */
	if (octeon_has_feature(OCTEON_FEATURE_BGX))
		return 0;

	/* Tell GMX the number of TX ports on this interface */
	gmx_tx_prts.u64 = csr_rd(CVMX_GMXX_TX_PRTS(xi.interface));
	gmx_tx_prts.s.prts = num_ports;
	csr_wr(CVMX_GMXX_TX_PRTS(xi.interface), gmx_tx_prts.u64);

	/*
	 * Tell GMX the number of RX ports on this interface.  This only applies
	 * to *GMII and XAUI ports.
	 */
	switch (cvmx_helper_interface_get_mode(xiface)) {
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
	case CVMX_HELPER_INTERFACE_MODE_GMII:
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		if (num_ports > 4) {
			debug("%s: Illegal num_ports\n", __func__);
			return -1;
		}

		gmx_rx_prts.u64 = csr_rd(CVMX_GMXX_RX_PRTS(xi.interface));
		gmx_rx_prts.s.prts = num_ports;
		csr_wr(CVMX_GMXX_RX_PRTS(xi.interface), gmx_rx_prts.u64);
		break;

	default:
		break;
	}

	/*
	 * Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, 50XX,
	 * and 68XX.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		/* Tell PKO the number of ports on this interface */
		pko_mode.u64 = csr_rd(CVMX_PKO_REG_GMX_PORT_MODE);
		if (xi.interface == 0) {
			if (num_ports == 1)
				pko_mode.s.mode0 = 4;
			else if (num_ports == 2)
				pko_mode.s.mode0 = 3;
			else if (num_ports <= 4)
				pko_mode.s.mode0 = 2;
			else if (num_ports <= 8)
				pko_mode.s.mode0 = 1;
			else
				pko_mode.s.mode0 = 0;
		} else {
			if (num_ports == 1)
				pko_mode.s.mode1 = 4;
			else if (num_ports == 2)
				pko_mode.s.mode1 = 3;
			else if (num_ports <= 4)
				pko_mode.s.mode1 = 2;
			else if (num_ports <= 8)
				pko_mode.s.mode1 = 1;
			else
				pko_mode.s.mode1 = 0;
		}
		csr_wr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
	}

	/*
	 * Set GMX to buffer as much data as possible before starting
	 * transmit. This reduces the chances that we have a TX under run
	 * due to memory contention. Any packet that fits entirely in the
	 * GMX FIFO can never have an under run regardless of memory load.
	 */
	gmx_tx_thresh.u64 = csr_rd(CVMX_GMXX_TXX_THRESH(0, xi.interface));
	/* ccn - common cnt numerator */
	int ccn = 0x100;

	/* Choose the max value for the number of ports */
	if (num_ports <= 1)
		gmx_tx_thresh.s.cnt = ccn / 1;
	else if (num_ports == 2)
		gmx_tx_thresh.s.cnt = ccn / 2;
	else
		gmx_tx_thresh.s.cnt = ccn / 4;

	/*
	 * SPI and XAUI can have lots of ports but the GMX hardware
	 * only ever has a max of 4
	 */
	if (num_ports > 4)
		num_ports = 4;
	for (index = 0; index < num_ports; index++)
		csr_wr(CVMX_GMXX_TXX_THRESH(index, xi.interface), gmx_tx_thresh.u64);

	/*
	 * For o68, we need to setup the pipes
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX) && xi.interface < CVMX_HELPER_MAX_GMX) {
		union cvmx_gmxx_txx_pipe config;

		for (index = 0; index < num_ports; index++) {
			config.u64 = 0;

			if (__cvmx_helper_cfg_pko_port_base(xiface, index) >= 0) {
				config.u64 = csr_rd(CVMX_GMXX_TXX_PIPE(index,
								       xi.interface));
				config.s.nump = __cvmx_helper_cfg_pko_port_num(xiface,
									       index);
				config.s.base = __cvmx_helper_cfg_pko_port_base(xiface,
										index);
				csr_wr(CVMX_GMXX_TXX_PIPE(index, xi.interface),
				       config.u64);
			}
		}
	}

	return 0;
}

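/**
 * Returns the base PKO output port for a port on the given interface.
 * This is a thin wrapper around cvmx_pko_get_base_pko_port().
 *
 * @param interface Interface to use
 * @param port      Port on the interface
 *
 * Return: Base PKO output port number
 */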
int cvmx_helper_get_pko_port(int interface, int port)
{
	return cvmx_pko_get_base_pko_port(interface, port);
}

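/**
 * Returns the IPD port number for a port on the given interface.
 *
 * @param xiface Interface to use
 * @param index  Port index on the interface
 *
 * Return: IPD port number, or -1 on error
 */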
int cvmx_helper_get_ipd_port(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		const struct ipd_port_map *port_map;
		int ipd_port;

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			port_map = ipd_port_map_68xx;
			ipd_port = 0;
		} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
			port_map = ipd_port_map_78xx;
			ipd_port = cvmx_helper_node_to_ipd_port(xi.node, 0);
		} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
			port_map = ipd_port_map_73xx;
			ipd_port = 0;
		} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
			port_map = ipd_port_map_75xx;
			ipd_port = 0;
		} else {
			return -1;
		}

		ipd_port += port_map[xi.interface].first_ipd_port;
		if (port_map[xi.interface].type == GMII) {
			cvmx_helper_interface_mode_t mode;

			mode = cvmx_helper_interface_get_mode(xiface);
			if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI ||
			    (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
			     OCTEON_IS_MODEL(OCTEON_CN68XX))) {
				ipd_port += port_map[xi.interface].ipd_port_adj;
				return ipd_port;
			} else {
				return ipd_port + (index * 16);
			}
		} else if (port_map[xi.interface].type == ILK) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == NPI) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == SRIO) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == LB) {
			return ipd_port + index;
		}

		debug("ERROR: %s: interface %u:%u bad mode\n",
		      __func__, xi.node, xi.interface);
		return -1;
	} else if (cvmx_helper_interface_get_mode(xiface) ==
		   CVMX_HELPER_INTERFACE_MODE_AGL) {
		return 24;
	}

	switch (xi.interface) {
	case 0:
		return index;
	case 1:
		return index + 16;
	case 2:
		return index + 32;
	case 3:
		return index + 36;
	case 4:
		return index + 40;
	case 5:
		return index + 42;
	case 6:
		return index + 44;
	case 7:
		return index + 46;
	}
	return -1;
}

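/**
 * Returns the port kind (PKND) of a port on the given interface.
 *
 * @param xiface Interface to use
 * @param index  Port index on the interface
 *
 * Return: Port kind, or CVMX_INVALID_PKND on chips without PKND support
 */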
int cvmx_helper_get_pknd(int xiface, int index)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return __cvmx_helper_cfg_pknd(xiface, index);

	return CVMX_INVALID_PKND;
}

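/**
 * Returns the backpressure ID (BPID) of a port on the given interface.
 *
 * @param interface Interface to use
 * @param port      Port on the interface
 *
 * Return: Backpressure ID, or CVMX_INVALID_BPID on chips without
 *	   PKND support
 */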
int cvmx_helper_get_bpid(int interface, int port)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return __cvmx_helper_cfg_bpid(interface, port);

	return CVMX_INVALID_BPID;
}

/**
 * Returns the interface number for an IPD/PKO port number.
 *
 * @param ipd_port IPD/PKO port number
 *
 * Return: Interface number
 */
int cvmx_helper_get_interface_num(int ipd_port)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_68xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_78xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return cvmx_helper_node_interface_to_xiface(xp.node, i);
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_73xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_75xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN70XX) && ipd_port == 24) {
		return 4;
	}

	if (ipd_port < 16)
		return 0;
	else if (ipd_port < 32)
		return 1;
	else if (ipd_port < 36)
		return 2;
	else if (ipd_port < 40)
		return 3;
	else if (ipd_port < 42)
		return 4;
	else if (ipd_port < 44)
		return 5;
	else if (ipd_port < 46)
		return 6;
	else if (ipd_port < 48)
		return 7;

	debug("%s: Illegal IPD port number %d\n", __func__, ipd_port);
	return -1;
}


/**
 * Returns the interface index number for an IPD/PKO port
 * number.
 *
 * @param ipd_port IPD/PKO port number
 *
 * Return: Interface index number
 */
int cvmx_helper_get_interface_index_num(int ipd_port)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		const struct ipd_port_map *port_map;
		int port;
		enum port_map_if_type type = INVALID_IF_TYPE;
		int i;
		int num_interfaces;

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			port_map = ipd_port_map_68xx;
		} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_78xx;
			ipd_port = xp.port;
		} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_73xx;
			ipd_port = xp.port;
		} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_75xx;
			ipd_port = xp.port;
		} else {
			return -1;
		}

		num_interfaces = cvmx_helper_get_number_of_interfaces();

		/* Get the interface type of the ipd port */
		for (i = 0; i < num_interfaces; i++) {
			if (ipd_port >= port_map[i].first_ipd_port &&
			    ipd_port <= port_map[i].last_ipd_port) {
				type = port_map[i].type;
				break;
			}
		}

		/* Convert the ipd port to the interface port */
		switch (type) {
		/* Ethernet interfaces have a channel in the lower 4 bits
		 * that does not discriminate traffic, and is ignored.
		 */
		case GMII:
			port = ipd_port - port_map[i].first_ipd_port;

			/* CN68XX adds 0x40 to IPD_PORT when in XAUI/RXAUI
			 * mode of operation, adjust for that case
			 */
			if (port >= port_map[i].ipd_port_adj)
				port -= port_map[i].ipd_port_adj;

			port >>= 4;
			return port;

		/*
		 * These interfaces do not have physical ports,
		 * but have logical channels instead that separate
		 * traffic into logical streams
		 */
		case ILK:
		case SRIO:
		case NPI:
		case LB:
			port = ipd_port - port_map[i].first_ipd_port;
			return port;

		default:
			printf("ERROR: %s: Illegal IPD port number %#x\n",
			       __func__, ipd_port);
			return -1;
		}
	}
	if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return ipd_port & 3;
	if (ipd_port < 32)
		return ipd_port & 15;
	else if (ipd_port < 40)
		return ipd_port & 3;
	else if (ipd_port < 48)
		return ipd_port & 1;

	debug("%s: Illegal IPD port number\n", __func__);

	return -1;
}
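
/*
 * Example (illustrative, assuming the cvmx_wqe_get_port() accessor
 * from cvmx-wqe.h): recovering the interface and port index from the
 * IPD port number of a received packet:
 *
 *	int ipd_port = cvmx_wqe_get_port(work);
 *	int xiface = cvmx_helper_get_interface_num(ipd_port);
 *	int index = cvmx_helper_get_interface_index_num(ipd_port);
 */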