// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * Support library for the hardware Free Pool Allocator.
 */

#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-range.h>
#include <mach/cvmx-global-resources.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

static const int debug;

/* Due to a suspected erratum, FPA_AURAX_CNT must not be allowed to get
 * too close to 0, to avoid a spurious wrap-around error
 */
const unsigned int __cvmx_fpa3_cnt_offset = 32;

/* For advanced checks, a guard-band is created around the internal
 * stack, to make sure the stack is not overwritten.
 */
const u64 magic_pattern = 0xbab4faced095f00d;
const unsigned int guard_band_size = 0 << 10; /* disabled; use 1 << 10 for 1 KiB */

#define CVMX_CACHE_LINE_SHIFT (7)

#define CVMX_FPA3_NAME_LEN (16)

typedef struct {
	char name[CVMX_FPA3_NAME_LEN];
	u64 stack_paddr; /* Internal stack storage */
	u64 bufs_paddr;	 /* Buffer pool base address */
	u64 stack_psize; /* Internal stack storage size */
	u64 bufs_psize;	 /* Buffer pool raw size */
	u64 buf_count;	 /* Number of buffers filled */
	u64 buf_size;	 /* Buffer size */
} cvmx_fpa3_poolx_info_t;

typedef struct {
	char name[CVMX_FPA3_NAME_LEN];
	unsigned int buf_size; /* Buffer size */
} cvmx_fpa3_aurax_info_t;

typedef struct {
	char name[CVMX_FPA1_NAME_SIZE];
	u64 size; /* Block size of pool buffers */
	u64 buffer_count;
	u64 base_paddr; /* Base physical addr */
			/* if buffer is allocated at initialization */
} cvmx_fpa1_pool_info_t;

/**
 * FPA1/FPA3 info structures are stored in named blocks
 * that are allocated once and shared among applications.
 */
static cvmx_fpa1_pool_info_t *cvmx_fpa1_pool_info;
static cvmx_fpa3_poolx_info_t *cvmx_fpa3_pool_info[CVMX_MAX_NODES];
static cvmx_fpa3_aurax_info_t *cvmx_fpa3_aura_info[CVMX_MAX_NODES];
/**
 * Return the size of buffers held in a POOL
 *
 * @param pool is the POOL handle
 * @return buffer size in bytes
 *
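 * Example (illustrative sketch, not from the original source; assumes
 * 'pool' is a valid handle obtained earlier, e.g. from
 * cvmx_fpa3_setup_fill_pool()):
 *
 *	int bsize = cvmx_fpa3_get_pool_buf_size(pool);
 *
 *	if (bsize < 0)
 *		printf("invalid POOL handle\n");
 *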
 */
int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool)
{
	cvmx_fpa_poolx_cfg_t pool_cfg;

	if (!__cvmx_fpa3_pool_valid(pool))
		return -1;

	pool_cfg.u64 = csr_rd_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
	return pool_cfg.cn78xx.buf_size << CVMX_CACHE_LINE_SHIFT;
}

/**
 * Return the size of buffers held in a buffer pool
 *
 * @param pool is the pool number
 *
 * This function will work with CN78XX models in backward-compatible mode
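 *
 * Example (illustrative; the pool number 0 below is an arbitrary choice):
 *
 *	unsigned int sz = cvmx_fpa_get_block_size(0);
 *
 *	if (sz == 0)
 *		printf("pool 0 is not configured\n");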
 */
unsigned int cvmx_fpa_get_block_size(int pool)
{
	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		return cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_aura_to_pool(
			cvmx_fpa1_pool_to_fpa3_aura(pool)));
	} else {
		if ((unsigned int)pool >= CVMX_FPA1_NUM_POOLS)
			return 0;
		if (!cvmx_fpa1_pool_info)
			cvmx_fpa_global_init_node(0);
		return cvmx_fpa1_pool_info[pool].size;
	}
}
static void cvmx_fpa3_set_aura_name(cvmx_fpa3_gaura_t aura, const char *name)
{
	cvmx_fpa3_aurax_info_t *pinfo;

	pinfo = cvmx_fpa3_aura_info[aura.node];
	if (!pinfo)
		return;
	pinfo += aura.laura;
	memset(pinfo->name, 0, sizeof(pinfo->name));
	if (name)
		strlcpy(pinfo->name, name, sizeof(pinfo->name));
}

static void cvmx_fpa3_set_pool_name(cvmx_fpa3_pool_t pool, const char *name)
{
	cvmx_fpa3_poolx_info_t *pinfo;

	pinfo = cvmx_fpa3_pool_info[pool.node];
	if (!pinfo)
		return;
	pinfo += pool.lpool;
	memset(pinfo->name, 0, sizeof(pinfo->name));
	if (name)
		strlcpy(pinfo->name, name, sizeof(pinfo->name));
}

static void cvmx_fpa_set_name(int pool_num, const char *name)
{
	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		cvmx_fpa3_set_aura_name(cvmx_fpa1_pool_to_fpa3_aura(pool_num),
					name);
	} else {
		cvmx_fpa1_pool_info_t *pinfo;

		if ((unsigned int)pool_num >= CVMX_FPA1_NUM_POOLS)
			return;
		if (!cvmx_fpa1_pool_info)
			cvmx_fpa_global_init_node(0);
		pinfo = &cvmx_fpa1_pool_info[pool_num];
		memset(pinfo->name, 0, sizeof(pinfo->name));
		if (name)
			strlcpy(pinfo->name, name, sizeof(pinfo->name));
	}
}

static int cvmx_fpa3_aura_cfg(cvmx_fpa3_gaura_t aura, cvmx_fpa3_pool_t pool,
			      u64 limit, u64 threshold, int ptr_dis)
{
	cvmx_fpa3_aurax_info_t *pinfo;
	cvmx_fpa_aurax_cfg_t aura_cfg;
	cvmx_fpa_poolx_cfg_t pool_cfg;
	cvmx_fpa_aurax_cnt_t cnt_reg;
	cvmx_fpa_aurax_cnt_limit_t limit_reg;
	cvmx_fpa_aurax_cnt_threshold_t thresh_reg;
	cvmx_fpa_aurax_int_t int_reg;
	unsigned int block_size;

	if (debug)
		debug("%s: AURA %u:%u POOL %u:%u\n", __func__, aura.node,
		      aura.laura, pool.node, pool.lpool);

	if (aura.node != pool.node) {
		printf("ERROR: %s: AURA/POOL node mismatch\n", __func__);
		return -1;
	}

	if (!__cvmx_fpa3_aura_valid(aura)) {
		printf("ERROR: %s: AURA invalid\n", __func__);
		return -1;
	}

	if (!__cvmx_fpa3_pool_valid(pool)) {
		printf("ERROR: %s: POOL invalid\n", __func__);
		return -1;
	}

	/* Record POOL block size in AURA info entry */
	pool_cfg.u64 = csr_rd_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));

	block_size = pool_cfg.cn78xx.buf_size << 7;
	pinfo = cvmx_fpa3_aura_info[aura.node];
	if (!pinfo)
		return -1;
	pinfo += aura.laura;

	pinfo->buf_size = block_size;

	/* block_size should be >0 except for POOL=0 which is never enabled */
	if (pool_cfg.cn78xx.ena && block_size == 0) {
		printf("ERROR: %s: POOL buf_size invalid\n", __func__);
		return -1;
	}

	/* Initialize AURA count, limit and threshold registers */
	cnt_reg.u64 = 0;
	cnt_reg.cn78xx.cnt = 0 + __cvmx_fpa3_cnt_offset;

	limit_reg.u64 = 0;
	limit_reg.cn78xx.limit = limit;
	/* Apply count offset, unless it causes a wrap-around */
	if ((limit + __cvmx_fpa3_cnt_offset) < CVMX_FPA3_AURAX_LIMIT_MAX)
		limit_reg.cn78xx.limit += __cvmx_fpa3_cnt_offset;

	thresh_reg.u64 = 0;
	thresh_reg.cn78xx.thresh = threshold;
	/* Apply count offset, unless it causes a wrap-around */
	if ((threshold + __cvmx_fpa3_cnt_offset) < CVMX_FPA3_AURAX_LIMIT_MAX)
		thresh_reg.cn78xx.thresh += __cvmx_fpa3_cnt_offset;

	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura), cnt_reg.u64);
	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura),
		    limit_reg.u64);
	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT_THRESHOLD(aura.laura),
		    thresh_reg.u64);

	/* Clear any pending error interrupts */
	int_reg.u64 = 0;
	int_reg.cn78xx.thresh = 1;

	/* Follow a write to clear FPA_AURAX_INT[THRESH] with a read as
	 * a workaround to Errata FPA-23410. If FPA_AURAX_INT[THRESH]
	 * isn't clear, try again.
	 */
	do {
		csr_wr_node(aura.node, CVMX_FPA_AURAX_INT(aura.laura),
			    int_reg.u64);
		int_reg.u64 =
			csr_rd_node(aura.node, CVMX_FPA_AURAX_INT(aura.laura));
	} while (int_reg.s.thresh);

	/* Disable backpressure etc. */
	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura), 0);
	csr_wr_node(aura.node, CVMX_FPA_AURAX_POOL_LEVELS(aura.laura), 0);

	aura_cfg.u64 = 0;
	aura_cfg.s.ptr_dis = ptr_dis;
	csr_wr_node(aura.node, CVMX_FPA_AURAX_CFG(aura.laura), aura_cfg.u64);
	csr_wr_node(aura.node, CVMX_FPA_AURAX_POOL(aura.laura), pool.lpool);

	return 0;
}

/**
 * @INTERNAL
 *
 * Fill a newly created FPA3 POOL with buffers
 * using a temporary AURA.
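 *
 * The sequence implemented below: reserve a temporary AURA, attach it
 * to the POOL, free each buffer pointer through that AURA, wait until
 * FPA_POOLX_AVAILABLE reflects the full buffer count, then detach and
 * release the temporary AURA.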
 */
static int cvmx_fpa3_pool_populate(cvmx_fpa3_pool_t pool, unsigned int buf_cnt,
				   unsigned int buf_sz, void *mem_ptr,
				   unsigned int mem_node)
{
	cvmx_fpa3_poolx_info_t *pinfo;
	cvmx_fpa3_gaura_t aura;
	cvmx_fpa3_pool_t zero_pool;
	cvmx_fpa_poolx_cfg_t pool_cfg;
	cvmx_fpa_poolx_start_addr_t pool_start_reg;
	cvmx_fpa_poolx_end_addr_t pool_end_reg;
	cvmx_fpa_poolx_available_t avail_reg;
	cvmx_fpa_poolx_threshold_t thresh_reg;
	cvmx_fpa_poolx_int_t int_reg;
	unsigned int block_size, align;
	unsigned long long mem_size;
	u64 paddr, end_paddr;
	unsigned int i;

	if (debug)
		debug("%s: POOL %u:%u buf_sz=%u count=%d\n", __func__,
		      pool.node, pool.lpool, buf_sz, buf_cnt);

	if (!__cvmx_fpa3_pool_valid(pool))
		return -1;

	zero_pool = __cvmx_fpa3_pool(pool.node, 0);

	pool_cfg.u64 = csr_rd_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));

	block_size = pool_cfg.cn78xx.buf_size << 7;

	if (pool_cfg.cn78xx.nat_align) {
		/* Ensure block_size is valid */
		if (block_size > (1 << 17)) {
			printf("ERROR: %s: POOL %u:%u block size %u is not valid\n",
			       __func__, pool.node, pool.lpool, block_size);
			return -1;
		}
	}
	align = CVMX_CACHE_LINE_SIZE;

	pinfo = cvmx_fpa3_pool_info[pool.node];
	if (!pinfo)
		return -1;
	pinfo += pool.lpool;

	if (pinfo->buf_size != block_size || block_size != buf_sz) {
		printf("ERROR: %s: POOL %u:%u buffer size mismatch\n", __func__,
		       pool.node, pool.lpool);
		return -1;
	}

	if (!mem_ptr) {
		/* When allocating our own memory
		 * make sure at least 'buf_cnt' blocks
		 * will fit into it.
		 */
		mem_size = (long long)buf_cnt * block_size + (block_size - 128);

		mem_ptr = cvmx_helper_mem_alloc(mem_node, mem_size, align);

		if (!mem_ptr) {
			printf("ERROR: %s: POOL %u:%u out of memory, could not allocate %llu bytes\n",
			       __func__, pool.node, pool.lpool, mem_size);
			return -1;
		}

		/* Record memory base for use in shutdown */
		pinfo->bufs_paddr = cvmx_ptr_to_phys(mem_ptr);
	} else {
		/* Caller-allocated memory is sized simply and may reduce
		 * the count; the caller is also responsible for freeing
		 * this memory.
		 */
		mem_size = (long long)buf_cnt * block_size;
	}

	/* Recalculate buf_cnt after possible alignment adjustment */
	buf_cnt = mem_size / block_size;

	/* Get temporary AURA */
	aura = cvmx_fpa3_reserve_aura(pool.node, -1);
	if (!__cvmx_fpa3_aura_valid(aura))
		return -1;

	/* Attach the temporary AURA to the POOL */
	(void)cvmx_fpa3_aura_cfg(aura, pool, buf_cnt, buf_cnt + 1, 0);

	/* Set AURA count to buffer count to avoid wrap-around */
	csr_wr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura), buf_cnt);

	/* Set POOL threshold just above buf count so it does not misfire */
	thresh_reg.u64 = 0;
	thresh_reg.cn78xx.thresh = buf_cnt + 1;
	csr_wr_node(pool.node, CVMX_FPA_POOLX_THRESHOLD(pool.lpool),
		    thresh_reg.u64);

	/* Set buffer memory region bounds checking */
	paddr = (cvmx_ptr_to_phys(mem_ptr) >> 7) << 7;
	pool_start_reg.u64 = 0;
	pool_end_reg.u64 = 0;
	pool_start_reg.cn78xx.addr = paddr >> 7;
	pool_end_reg.cn78xx.addr = (paddr + mem_size + 127) >> 7;

	csr_wr_node(pool.node, CVMX_FPA_POOLX_START_ADDR(pool.lpool),
		    pool_start_reg.u64);
	csr_wr_node(pool.node, CVMX_FPA_POOLX_END_ADDR(pool.lpool),
		    pool_end_reg.u64);

	/* Make sure 'paddr' is divisible by 'block_size' */
	i = (paddr % block_size);
	if (i > 0) {
		i = block_size - i;
		paddr += i;
		mem_size -= i;
	}

	/* End of the usable buffer region, used to bound the fill loop */
	end_paddr = paddr + mem_size;

	/* The above alignment mimics how the FPA3 hardware
	 * aligns pointers to the buffer size, which only
	 * needs to be a multiple of the cache line size
	 */

	if (debug && paddr != cvmx_ptr_to_phys(mem_ptr))
		debug("%s: pool mem paddr %#llx adjusted to %#llx for block size %#x\n",
		      __func__, CAST_ULL(cvmx_ptr_to_phys(mem_ptr)),
		      CAST_ULL(paddr), block_size);

	for (i = 0; i < buf_cnt; i++) {
		void *ptr = cvmx_phys_to_ptr(paddr);

		cvmx_fpa3_free_nosync(ptr, aura, 0);

		paddr += block_size;

		/* Stop early if the next buffer would overrun the region */
		if ((paddr + block_size - 1) >= end_paddr)
			break;
	}

	if (i < buf_cnt) {
		if (debug)
			debug("%s: buffer count reduced from %u to %u\n",
			      __func__, buf_cnt, i);
		buf_cnt = i;
	}

	/* Wait for all buffers to reach the POOL before removing temp AURA */
	do {
		CVMX_SYNC;
		avail_reg.u64 = csr_rd_node(
			pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));
	} while (avail_reg.cn78xx.count < buf_cnt);

	/* Detach the temporary AURA */
	(void)cvmx_fpa3_aura_cfg(aura, zero_pool, 0, 0, 0);

	/* Release temporary AURA */
	(void)cvmx_fpa3_release_aura(aura);

	/* Clear all POOL interrupts */
	int_reg.u64 = 0;
	int_reg.cn78xx.ovfls = 1;
	int_reg.cn78xx.crcerr = 1;
	int_reg.cn78xx.range = 1;
	int_reg.cn78xx.thresh = 1;
	csr_wr_node(pool.node, CVMX_FPA_POOLX_INT(pool.lpool), int_reg.u64);

	/* Record buffer count for shutdown */
	pinfo->buf_count = buf_cnt;

	return buf_cnt;
}

/**
 * @INTERNAL
 *
 * Fill a legacy FPA pool with buffers
 */
static int cvmx_fpa1_fill_pool(cvmx_fpa1_pool_t pool, int num_blocks,
			       void *buffer)
{
	cvmx_fpa_poolx_start_addr_t pool_start_reg;
	cvmx_fpa_poolx_end_addr_t pool_end_reg;
	unsigned int block_size = cvmx_fpa_get_block_size(pool);
	unsigned int mem_size;
	char *bufp;

	if ((unsigned int)pool >= CVMX_FPA1_NUM_POOLS)
		return -1;

	mem_size = block_size * num_blocks;

	if (!buffer) {
		buffer = cvmx_helper_mem_alloc(0, mem_size,
					       CVMX_CACHE_LINE_SIZE);
		if (!buffer)
			return -1;

		cvmx_fpa1_pool_info[pool].base_paddr = cvmx_ptr_to_phys(buffer);
	} else {
		/* Align user-supplied buffer to cache line size */
		unsigned int off =
			(CVMX_CACHE_LINE_SIZE - 1) & cvmx_ptr_to_phys(buffer);
		if (off > 0) {
			buffer = (char *)buffer + CVMX_CACHE_LINE_SIZE - off;
			mem_size -= CVMX_CACHE_LINE_SIZE - off;
			num_blocks = mem_size / block_size;
		}
	}

	if (debug)
		debug("%s: memory at %p size %#x\n", __func__, buffer,
		      mem_size);

	pool_start_reg.u64 = 0;
	pool_end_reg.u64 = 0;

	/* buffer pointer range checks are highly recommended, but optional */
	pool_start_reg.cn61xx.addr = 1; /* catch NULL pointers */
	pool_end_reg.cn61xx.addr = (1ull << (40 - 7)) - 1; /* max paddr */
	if (!OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		csr_wr(CVMX_FPA_POOLX_START_ADDR(pool), pool_start_reg.u64);
		csr_wr(CVMX_FPA_POOLX_END_ADDR(pool), pool_end_reg.u64);
	}

	bufp = (char *)buffer;
	while (num_blocks--) {
		cvmx_fpa1_free(bufp, pool, 0);
		cvmx_fpa1_pool_info[pool].buffer_count++;
		bufp += block_size;
	}
	return 0;
}

/**
 * @INTERNAL
 *
 * Setup a legacy FPA pool
 */
static int cvmx_fpa1_pool_init(cvmx_fpa1_pool_t pool_id, int num_blocks,
			       int block_size, void *buffer)
{
	int max_pool = cvmx_fpa_get_max_pools();

	if (pool_id < 0 || pool_id >= max_pool) {
		printf("ERROR: %s: pool %d invalid\n", __func__, pool_id);
		return -1;
	}

	if (!cvmx_fpa1_pool_info)
		cvmx_fpa_global_init_node(0);

	if (debug)
		debug("%s: initializing info pool %d\n", __func__, pool_id);

	cvmx_fpa1_pool_info[pool_id].size = block_size;
	cvmx_fpa1_pool_info[pool_id].buffer_count = 0;

	if (debug)
		debug("%s: enabling unit for pool %d\n", __func__, pool_id);

	return 0;
}

/**
 * Initialize global configuration for the FPA block on the specified node.
 *
 * @param node is the node number
 *
 * @note this function sets the initial QoS averaging timing parameters
 * for the entire FPA unit (per node), which may be overridden on a
 * per-AURA basis.
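 *
 * Example (illustrative sketch; on a single-node system the local node
 * number from cvmx_get_node_num() is the usual argument):
 *
 *	if (cvmx_fpa_global_init_node(cvmx_get_node_num()) != 0)
 *		printf("FPA global init failed\n");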
 */
int cvmx_fpa_global_init_node(int node)
{
	/* These are just the initial parameter values */
#define FPA_RED_AVG_DLY 1
#define FPA_RED_LVL_DLY 3
#define FPA_QOS_AVRG	0
	/* Setting up avg_dly and prb_dly, enable bits */
	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		char pool_info_name[32] = "cvmx_fpa3_pools_";
		char aura_info_name[32] = "cvmx_fpa3_auras_";
		char ns[2] = "0";

		ns[0] += node;
		strcat(pool_info_name, ns);
		strcat(aura_info_name, ns);

		cvmx_fpa3_config_red_params(node, FPA_QOS_AVRG, FPA_RED_LVL_DLY,
					    FPA_RED_AVG_DLY);

		/* Allocate the pinfo named block */
		cvmx_fpa3_pool_info[node] = (cvmx_fpa3_poolx_info_t *)
			cvmx_bootmem_alloc_named_range_once(
				sizeof(cvmx_fpa3_pool_info[0][0]) *
					cvmx_fpa3_num_pools(),
				0, 0, 0, pool_info_name, NULL);

		cvmx_fpa3_aura_info[node] = (cvmx_fpa3_aurax_info_t *)
			cvmx_bootmem_alloc_named_range_once(
				sizeof(cvmx_fpa3_aura_info[0][0]) *
					cvmx_fpa3_num_auras(),
				0, 0, 0, aura_info_name, NULL);

		if (!cvmx_fpa3_pool_info[node] || !cvmx_fpa3_aura_info[node]) {
			printf("ERROR: %s: out of memory for info blocks\n",
			       __func__);
			return -1;
		}

		/* Setup zero_pool on this node */
		cvmx_fpa3_reserve_pool(node, 0);
		cvmx_fpa3_pool_info[node][0].buf_count = 0;
	} else {
		char pool_info_name[32] = "cvmx_fpa_pool";

		/* Allocate the pinfo named block */
		cvmx_fpa1_pool_info = (cvmx_fpa1_pool_info_t *)
			cvmx_bootmem_alloc_named_range_once(
				sizeof(cvmx_fpa1_pool_info[0]) *
					CVMX_FPA1_NUM_POOLS,
				0, 0, 0, pool_info_name, NULL);

		cvmx_fpa1_enable();
	}

	return 0;
}

static void __memset_u64(u64 *ptr, u64 pattern, unsigned int words)
{
	while (words--)
		*ptr++ = pattern;
}

/**
 * @INTERNAL
 * Initialize pool pointer-storage memory
 *
 * Unlike legacy FPA, which used free buffers to store pointers that
 * exceed on-chip memory, FPA3 requires a dedicated memory buffer for
 * the free-pointer stack back-store.
 *
 * @param pool - pool to initialize
 * @param mem_node - if memory should be allocated from a different node
 * @param max_buffer_cnt - maximum block capacity of pool
 * @param align - buffer alignment mode,
 *   currently only FPA_NATURAL_ALIGNMENT is supported
 * @param buffer_sz - size of buffers in pool
 */
static int cvmx_fpa3_pool_stack_init(cvmx_fpa3_pool_t pool,
				     unsigned int mem_node,
				     unsigned int max_buffer_cnt,
				     enum cvmx_fpa3_pool_alignment_e align,
				     unsigned int buffer_sz)
{
	cvmx_fpa3_poolx_info_t *pinfo;
	u64 stack_paddr;
	void *mem_ptr;
	unsigned int stack_memory_size;
	cvmx_fpa_poolx_cfg_t pool_cfg;
	cvmx_fpa_poolx_fpf_marks_t pool_fpf_marks;

	if (debug)
		debug("%s: POOL %u:%u bufsz=%u maxbuf=%u\n", __func__,
		      pool.node, pool.lpool, buffer_sz, max_buffer_cnt);

	if (!__cvmx_fpa3_pool_valid(pool)) {
		printf("ERROR: %s: POOL invalid\n", __func__);
		return -1;
	}

	pinfo = cvmx_fpa3_pool_info[pool.node];
	if (!pinfo) {
		printf("ERROR: %s: FPA on node#%u is not initialized\n",
		       __func__, pool.node);
		return -1;
	}
	pinfo += pool.lpool;

	/* Calculate stack size based on buffer count with one line to spare */
	stack_memory_size = (max_buffer_cnt * 128) / 29 + 128 + 127;
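	/* Illustrative sizing note (not from the original source): the
	 * divisor suggests each 128-byte stack cache line holds 29 buffer
	 * pointers; one spare line plus 127 bytes cover rounding. E.g.
	 * max_buffer_cnt = 1024 gives 1024 * 128 / 29 + 128 + 127 = 4774,
	 * trimmed to 4736 by the cache-line round-down below.
	 */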

	/* Increase stack size by the guard bands */
	stack_memory_size += guard_band_size << 1;

	/* Align size down to a cache-line multiple */
	stack_memory_size = (stack_memory_size >> 7) << 7;

	/* Allocate internal stack */
	mem_ptr = cvmx_helper_mem_alloc(mem_node, stack_memory_size,
					CVMX_CACHE_LINE_SIZE);

	if (debug)
		debug("%s: stack_mem=%u ptr=%p\n", __func__, stack_memory_size,
		      mem_ptr);

	if (!mem_ptr) {
		debug("ERROR: %s: Failed to allocate stack for POOL %u:%u\n",
		      __func__, pool.node, pool.lpool);
		return -1;
	}

	/* Initialize guard bands */
	if (guard_band_size > 0) {
		__memset_u64((u64 *)mem_ptr, magic_pattern,
			     guard_band_size >> 3);
		__memset_u64((u64 *)((char *)mem_ptr + stack_memory_size -
				     guard_band_size),
			     magic_pattern, guard_band_size >> 3);
	}

	pinfo->stack_paddr = cvmx_ptr_to_phys(mem_ptr);
	pinfo->stack_psize = stack_memory_size;

	/* Calculate usable stack start */
	stack_paddr = cvmx_ptr_to_phys((char *)mem_ptr + guard_band_size);

	csr_wr_node(pool.node, CVMX_FPA_POOLX_STACK_BASE(pool.lpool),
		    stack_paddr);
	csr_wr_node(pool.node, CVMX_FPA_POOLX_STACK_ADDR(pool.lpool),
		    stack_paddr);

	/* Calculate usable stack end - where the trailing guard band begins */
	stack_paddr = stack_paddr + stack_memory_size - (guard_band_size << 1);

	csr_wr_node(pool.node, CVMX_FPA_POOLX_STACK_END(pool.lpool),
		    stack_paddr);

	if (debug)
		debug("%s: Stack paddr %#llx - %#llx\n", __func__,
		      CAST_ULL(csr_rd_node(pool.node, CVMX_FPA_POOLX_STACK_BASE(
							      pool.lpool))),
		      CAST_ULL(csr_rd_node(pool.node, CVMX_FPA_POOLX_STACK_END(
							      pool.lpool))));

	/* Record buffer size for this pool until it is shut down */
	pinfo->buf_size = buffer_sz;

	pool_cfg.u64 = 0;
	pool_cfg.cn78xx.buf_size = buffer_sz >> 7;
	pool_cfg.cn78xx.l_type = 0x2;
	pool_cfg.cn78xx.ena = 0;
	if (align == FPA_NATURAL_ALIGNMENT)
		pool_cfg.cn78xx.nat_align = 1;

	/* Errata workarounds: FPA-26117, FPA-22443 */
	pool_fpf_marks.u64 =
		csr_rd_node(pool.node, CVMX_FPA_POOLX_FPF_MARKS(pool.lpool));
	pool_fpf_marks.s.fpf_rd = 0x80;
	csr_wr_node(pool.node, CVMX_FPA_POOLX_FPF_MARKS(pool.lpool),
		    pool_fpf_marks.u64);

	csr_wr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool), pool_cfg.u64);
	pool_cfg.cn78xx.ena = 1;
	csr_wr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool), pool_cfg.u64);

	/* Pool is now ready to be filled up */
	return 0;
}

/**
 * Create an FPA POOL and fill it up with buffers
 *
 * @param node is the node number for the pool and memory location
 * @param desired_pool is the local pool number desired
 *	or -1 for first available
 * @param name is the symbolic name to assign the POOL
 * @param block_size is the size of all buffers held in this POOL
 * @param num_blocks is the number of free buffers to fill into the POOL
 * @param buffer is an optionally caller-supplied memory for the buffers
 *	or NULL to cause the buffer memory to be allocated automatically.
 * @return the POOL handle
 *
 * Note: if the buffer memory is supplied by the caller, the application
 * is responsible for freeing this memory.
 *
 * Only supported on CN78XX.
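 *
 * Example (illustrative sketch, not from the original source; the name
 * and sizes are arbitrary):
 *
 *	cvmx_fpa3_pool_t pool;
 *
 *	pool = cvmx_fpa3_setup_fill_pool(0, -1, "pkt-pool", 2048, 1024, NULL);
 *	if (!__cvmx_fpa3_pool_valid(pool))
 *		printf("POOL setup failed\n");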
 */
cvmx_fpa3_pool_t cvmx_fpa3_setup_fill_pool(int node, int desired_pool,
					   const char *name,
					   unsigned int block_size,
					   unsigned int num_blocks,
					   void *buffer)
{
	cvmx_fpa3_pool_t pool;
	unsigned int mem_node;
	int rc;

	if (node < 0)
		node = cvmx_get_node_num();

	if (debug)
		debug("%s: desired pool=%d bufsize=%u cnt=%u '%s'\n", __func__,
		      desired_pool, block_size, num_blocks, name);

	/* Use memory from the node local to the AURA/POOL */
	mem_node = node;

	if (num_blocks == 0 || num_blocks > 1 << 30) {
		printf("ERROR: %s: invalid block count %u\n", __func__,
		       num_blocks);
		return CVMX_FPA3_INVALID_POOL;
	}

	/*
	 * Check for block size validity:
	 * With a user-supplied buffer, the block size cannot be increased,
	 * so make sure it is at least 128, and is aligned to 128.
	 * For all cases make sure it is not too big.
	 */
	if ((buffer && (block_size < CVMX_CACHE_LINE_SIZE ||
			(block_size & (CVMX_CACHE_LINE_SIZE - 1)))) ||
	    (block_size > (1 << 17))) {
		printf("ERROR: %s: invalid block size %u\n", __func__,
		       block_size);
		return CVMX_FPA3_INVALID_POOL;
	}

	if (block_size < CVMX_CACHE_LINE_SIZE)
		block_size = CVMX_CACHE_LINE_SIZE;

	/* Reserve POOL */
	pool = cvmx_fpa3_reserve_pool(node, desired_pool);

	if (!__cvmx_fpa3_pool_valid(pool)) {
		printf("ERROR: %s: POOL %u:%d not available\n", __func__, node,
		       desired_pool);
		return CVMX_FPA3_INVALID_POOL;
	}

	/* Initialize POOL with stack storage */
	rc = cvmx_fpa3_pool_stack_init(pool, mem_node, num_blocks,
				       FPA_NATURAL_ALIGNMENT, block_size);
	if (rc < 0) {
		printf("ERROR: %s: POOL %u:%u stack setup failed\n", __func__,
		       pool.node, pool.lpool);
		cvmx_fpa3_release_pool(pool);
		return CVMX_FPA3_INVALID_POOL;
	}

	/* Populate the POOL with buffers */
	rc = cvmx_fpa3_pool_populate(pool, num_blocks, block_size, buffer,
				     mem_node);
	if (rc < 0) {
		printf("ERROR: %s: POOL %u:%u memory fill failed\n", __func__,
		       pool.node, pool.lpool);
		cvmx_fpa3_release_pool(pool);
		return CVMX_FPA3_INVALID_POOL;
	}

	cvmx_fpa3_set_pool_name(pool, name);

	return pool;
}

/**
 * Attach an AURA to an existing POOL
 *
 * @param pool is the handle of the POOL to be attached
 * @param desired_aura is the number of the AURA desired
 *	or -1 for the AURA to be automatically assigned
 * @param name is a symbolic name for the new AURA
 * @param block_size is the size of all buffers that will be handed
 *	out by this AURA
 * @param num_blocks is the maximum number of buffers that can be
 *	handed out by this AURA, and can not exceed the number
 *	of buffers filled into the attached POOL
 * @return the AURA handle
 *
 * Only supported on CN78XX.
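 *
 * Example (illustrative sketch; 'pool' is assumed to be the handle
 * returned by an earlier cvmx_fpa3_setup_fill_pool() call):
 *
 *	cvmx_fpa3_gaura_t aura;
 *
 *	aura = cvmx_fpa3_set_aura_for_pool(pool, -1, "pkt-aura", 2048, 512);
 *	if (!__cvmx_fpa3_aura_valid(aura))
 *		printf("AURA setup failed\n");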
 */
cvmx_fpa3_gaura_t cvmx_fpa3_set_aura_for_pool(cvmx_fpa3_pool_t pool,
					      int desired_aura,
					      const char *name,
					      unsigned int block_size,
					      unsigned int num_blocks)
{
	cvmx_fpa3_gaura_t aura;
	cvmx_fpa_poolx_available_t avail_reg;
	const char *emsg;
	int rc;

	if (debug)
		debug("%s: aura=%d bufsize=%u cnt=%u '%s'\n", __func__,
		      desired_aura, block_size, num_blocks, name);

	if (!__cvmx_fpa3_pool_valid(pool)) {
		printf("ERROR: %s: POOL argument invalid\n", __func__);
		return CVMX_FPA3_INVALID_GAURA;
	}

	/* Verify the AURA buffer count limit is not above POOL buffer count */
	avail_reg.u64 =
		csr_rd_node(pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));
	if (avail_reg.cn78xx.count < num_blocks) {
		printf("WARNING: %s: POOL %u:%u buffer count limit %u reduced to available count %u\n",
		       __func__, pool.node, pool.lpool, num_blocks,
		       (unsigned int)avail_reg.cn78xx.count);
		num_blocks = avail_reg.cn78xx.count;
	}

	/* Reserve an AURA number, honoring the desired number */
	aura = cvmx_fpa3_reserve_aura(pool.node, desired_aura);

	if (!__cvmx_fpa3_aura_valid(aura)) {
		printf("ERROR: %s: AURA %u:%d not available\n", __func__,
		       pool.node, desired_aura);
		return CVMX_FPA3_INVALID_GAURA;
	}

	/* Initialize AURA attached to the above POOL */
	rc = cvmx_fpa3_aura_cfg(aura, pool, num_blocks, num_blocks + 1, 0);
	if (rc < 0) {
		emsg = "AURA configuration";
		goto _fail;
	}

	cvmx_fpa3_set_aura_name(aura, name);

	return aura;

_fail:
	printf("ERROR: %s: %s\n", __func__, emsg);
	cvmx_fpa3_release_aura(aura);
	return CVMX_FPA3_INVALID_GAURA;
}

/**
 * Create a combination of an AURA and a POOL
 *
 * @param node is the node number for the pool and memory location
 * @param desired_aura is the number of the AURA desired
 *	or -1 for the AURA to be automatically assigned
 * @param name is a symbolic name for the new AURA
 * @param block_size is the size of all buffers that will be handed
 *	out by this AURA
 * @param num_blocks is the maximum number of buffers that can be
 *	handed out by this AURA, and can not exceed the number
 *	of buffers filled into the attached POOL
 * @param buffer is an optionally caller-supplied memory for the buffers
 *	or NULL to cause the buffer memory to be allocated automatically.
 *
 * @return the AURA handle
 *
 * Note: if the buffer memory is supplied by the caller, the application
 * is responsible for freeing this memory.
 * The POOL number is always automatically assigned.
 *
 * Only supported on CN78XX.
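 *
 * Example (illustrative sketch; every argument below is an arbitrary
 * choice, with buffer memory allocated automatically):
 *
 *	cvmx_fpa3_gaura_t aura;
 *
 *	aura = cvmx_fpa3_setup_aura_and_pool(-1, -1, "wqe-aura", NULL,
 *					     256, 4096);
 *	if (!__cvmx_fpa3_aura_valid(aura))
 *		printf("AURA/POOL setup failed\n");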
 */
cvmx_fpa3_gaura_t cvmx_fpa3_setup_aura_and_pool(int node, int desired_aura,
						const char *name, void *buffer,
						unsigned int block_size,
						unsigned int num_blocks)
{
	cvmx_fpa3_gaura_t aura = CVMX_FPA3_INVALID_GAURA;
	cvmx_fpa3_pool_t pool = CVMX_FPA3_INVALID_POOL;
	const char *emsg = "";
	unsigned int mem_node;
	int rc;

	if (debug)
		debug("%s: aura=%d size=%u cnt=%u '%s'\n", __func__,
		      desired_aura, block_size, num_blocks, name);

	if (node < 0)
		node = cvmx_get_node_num();

	if (num_blocks == 0 || num_blocks > 1 << 30) {
		printf("ERROR: %s: invalid block count %u\n", __func__,
		       num_blocks);
		return CVMX_FPA3_INVALID_GAURA;
	}

	/* Use memory from the node local to the AURA/POOL */
	mem_node = node;

	/* Reserve an AURA number, honoring the desired number */
	aura = cvmx_fpa3_reserve_aura(node, desired_aura);

	if (!__cvmx_fpa3_aura_valid(aura)) {
		emsg = "AURA not available";
		goto _fail;
	}

	/* Reserve POOL dynamically to underpin this AURA */
	pool = cvmx_fpa3_reserve_pool(node, -1);

	if (!__cvmx_fpa3_pool_valid(pool)) {
		emsg = "POOL not available";
		goto _fail;
	}

	/*
	 * Check for block size validity:
	 * With a user-supplied buffer, the block size cannot be increased,
	 * so make sure it is at least 128, and is aligned to 128.
	 * For all cases make sure it is not too big.
	 */
	if ((buffer && (block_size < CVMX_CACHE_LINE_SIZE ||
			(block_size & (CVMX_CACHE_LINE_SIZE - 1)))) ||
	    block_size > (1 << 17)) {
		printf("ERROR: %s: invalid block size %u\n", __func__,
		       block_size);
		emsg = "invalid block size";
		goto _fail;
	}

	if (block_size < CVMX_CACHE_LINE_SIZE)
		block_size = CVMX_CACHE_LINE_SIZE;

	/* Initialize POOL with stack storage */
	rc = cvmx_fpa3_pool_stack_init(pool, mem_node, num_blocks,
				       FPA_NATURAL_ALIGNMENT, block_size);
	if (rc < 0) {
		emsg = "POOL Stack setup";
		goto _fail;
	}

	/* Populate the AURA/POOL with buffers */
	rc = cvmx_fpa3_pool_populate(pool, num_blocks, block_size, buffer,
				     mem_node);
	if (rc < 0) {
		emsg = "POOL buffer memory";
		goto _fail;
	}

	/* Initialize AURA attached to the above POOL */
	rc = cvmx_fpa3_aura_cfg(aura, pool, num_blocks, num_blocks + 1, 0);
	if (rc < 0) {
		emsg = "AURA configuration";
		goto _fail;
	}

	cvmx_fpa3_set_aura_name(aura, name);
	cvmx_fpa3_set_pool_name(pool, name);

	if (debug)
		debug("%s: AURA %u:%u ready, avail=%lld\n", __func__, aura.node,
		      aura.laura, cvmx_fpa3_get_available(aura));

	return aura;

_fail:
	printf("ERROR: %s: Failed in %s\n", __func__, emsg);
	/* These will silently fail if POOL/AURA is not valid */
	cvmx_fpa3_release_aura(aura);
	cvmx_fpa3_release_pool(pool);
	return CVMX_FPA3_INVALID_GAURA;
}

/**
 * Setup a legacy FPA pool
 *
 * @param desired_pool is the POOL number desired or -1 for automatic
 *	assignment
 * @param name is the symbolic POOL name
 * @param block_size is the size of all buffers held in this POOL
 * @param num_blocks is the number of free buffers to fill into the POOL
 * @param buffer is an optionally caller-supplied memory for the buffers
 *	or NULL to cause the buffer memory to be allocated automatically.
 * @return pool number or -1 on error.
 *
 * Note: if the buffer memory is supplied by the caller, the application
 * is responsible for freeing this memory.
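 *
 * Example (illustrative sketch; name and sizes are arbitrary, buffer
 * memory is allocated automatically):
 *
 *	int pool = cvmx_fpa1_setup_pool(-1, "legacy-pool", NULL, 256, 1024);
 *
 *	if (pool < 0)
 *		printf("legacy pool setup failed\n");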
 */
int cvmx_fpa1_setup_pool(int desired_pool, const char *name, void *buffer,
			 unsigned int block_size, unsigned int num_blocks)
{
	cvmx_fpa1_pool_t pool = CVMX_FPA1_INVALID_POOL;
	int rc;

	if (debug)
		debug("%s: desired pool %d, name '%s', mem %p size %u count %u\n",
		      __func__, desired_pool, name, buffer, block_size,
		      num_blocks);

	/* Reserve desired pool or get one dynamically */
	pool = cvmx_fpa1_reserve_pool(desired_pool);

	/* Validate reserved pool, if successful */
	if (pool < 0 || pool >= cvmx_fpa_get_max_pools()) {
		/* global resources would have printed an error message here */
		return CVMX_FPA1_INVALID_POOL;
	}

	/* Initialize the pool */
	rc = cvmx_fpa1_pool_init(pool, num_blocks, block_size, buffer);
	if (rc < 0) {
		printf("ERROR: %s: failed pool %u init\n", __func__, pool);
		cvmx_fpa1_release_pool(pool);
		return CVMX_FPA1_INVALID_POOL;
	}

	rc = cvmx_fpa1_fill_pool(pool, num_blocks, buffer);
	if (rc < 0) {
		printf("ERROR: %s: failed pool %u memory\n", __func__, pool);
		cvmx_fpa1_release_pool(pool);
		return CVMX_FPA1_INVALID_POOL;
	}

	if (debug)
		debug("%s: pool %d filled up\n", __func__, pool);

	cvmx_fpa_set_name(pool, name);
	return pool;
}

/**
 * Setup an FPA pool with buffers
 *
 * @param pool is the POOL number desired or -1 for automatic assignment
 * @param name is the symbolic POOL name
 * @param buffer is an optionally caller-supplied memory for the buffers
 *	or NULL to cause the buffer memory to be allocated automatically.
 * @param block_size is the size of all buffers held in this POOL
 * @param num_blocks is the number of free buffers to fill into the POOL
 *
 * @return pool number or -1 on error.
 *
 * Note: if the buffer memory is supplied by the caller, the application
 * is responsible for freeing this memory.
 * This function will work with CN78XX models in backward-compatible mode
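 *
 * Example (illustrative sketch; on FPA3 hardware the returned number is
 * backed by an AURA/POOL pair, see cvmx_fpa3_setup_aura_and_pool()):
 *
 *	int pool = cvmx_fpa_setup_pool(-1, "rx-pool", NULL, 2048, 512);
 *
 *	if (pool < 0)
 *		printf("pool setup failed\n");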
 */
int cvmx_fpa_setup_pool(int pool, const char *name, void *buffer,
			u64 block_size, u64 num_blocks)
{
	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		cvmx_fpa3_gaura_t aura;

		aura = cvmx_fpa3_setup_aura_and_pool(-1, pool, name, buffer,
						     block_size, num_blocks);
		if (!__cvmx_fpa3_aura_valid(aura))
			return -1;
		if (aura.laura >= CVMX_FPA1_NUM_POOLS && pool >= 0)
			printf("WARNING: %s: AURA %u is out of range for backward-compatible operation\n",
			       __func__, aura.laura);
		return aura.laura;
	} else {
		return cvmx_fpa1_setup_pool(pool, name, buffer, block_size,
					    num_blocks);
	}
}