// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * PKI Support.
 */

#include <time.h>
#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pki-cluster.h>
#include <mach/cvmx-pki-resources.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

/**
 * This function enables the PKI block.
 *
 * @param node  Node on which to enable PKI.
 */
void cvmx_pki_enable(int node)
{
	cvmx_pki_sft_rst_t sft_rst;
	cvmx_pki_buf_ctl_t buf_ctl;

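	/* Wait for any in-progress PKI soft reset to complete before enabling */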
	sft_rst.u64 = csr_rd_node(node, CVMX_PKI_SFT_RST);
	while (sft_rst.s.busy != 0)
		sft_rst.u64 = csr_rd_node(node, CVMX_PKI_SFT_RST);

	buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
	if (buf_ctl.s.pki_en)
		debug("Warning: Enabling PKI when PKI already enabled.\n");

	buf_ctl.s.pki_en = 1;
	csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);
}

/**
 * This function sets up the PKI clusters by loading the default cluster code
 * into the PKI instruction memory.
 *
 * @param node  Node on which to set up the clusters.
 */
int cvmx_pki_setup_clusters(int node)
{
	int i;

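	/* Load the default cluster code image into the PKI instruction memory */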
	for (i = 0; i < cvmx_pki_cluster_code_length; i++)
		csr_wr_node(node, CVMX_PKI_IMEMX(i),
			    cvmx_pki_cluster_code_default[i]);

	return 0;
}

/**
 * This function reads the global configuration of the PKI block.
 *
 * @param node  Node number.
 * @param gbl_cfg  Pointer to struct into which the global configuration is read.
 */
void cvmx_pki_read_global_config(int node,
				 struct cvmx_pki_global_config *gbl_cfg)
{
	cvmx_pki_stat_ctl_t stat_ctl;
	cvmx_pki_icgx_cfg_t icg_cfg;
	cvmx_pki_gbl_pen_t gbl_pen;
	cvmx_pki_tag_secret_t tag_secret;
	cvmx_pki_frm_len_chkx_t frm_len_chk;
	cvmx_pki_buf_ctl_t buf_ctl;
	unsigned int cl_grp;
	int id;

	stat_ctl.u64 = csr_rd_node(node, CVMX_PKI_STAT_CTL);
	gbl_cfg->stat_mode = stat_ctl.s.mode;

	for (cl_grp = 0; cl_grp < CVMX_PKI_NUM_CLUSTER_GROUP; cl_grp++) {
		icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(cl_grp));
		gbl_cfg->cluster_mask[cl_grp] = icg_cfg.s.clusters;
	}
	gbl_pen.u64 = csr_rd_node(node, CVMX_PKI_GBL_PEN);
	gbl_cfg->gbl_pen.virt_pen = gbl_pen.s.virt_pen;
	gbl_cfg->gbl_pen.clg_pen = gbl_pen.s.clg_pen;
	gbl_cfg->gbl_pen.cl2_pen = gbl_pen.s.cl2_pen;
	gbl_cfg->gbl_pen.l4_pen = gbl_pen.s.l4_pen;
	gbl_cfg->gbl_pen.il3_pen = gbl_pen.s.il3_pen;
	gbl_cfg->gbl_pen.l3_pen = gbl_pen.s.l3_pen;
	gbl_cfg->gbl_pen.mpls_pen = gbl_pen.s.mpls_pen;
	gbl_cfg->gbl_pen.fulc_pen = gbl_pen.s.fulc_pen;
	gbl_cfg->gbl_pen.dsa_pen = gbl_pen.s.dsa_pen;
	gbl_cfg->gbl_pen.hg_pen = gbl_pen.s.hg_pen;

	tag_secret.u64 = csr_rd_node(node, CVMX_PKI_TAG_SECRET);
	gbl_cfg->tag_secret.dst6 = tag_secret.s.dst6;
	gbl_cfg->tag_secret.src6 = tag_secret.s.src6;
	gbl_cfg->tag_secret.dst = tag_secret.s.dst;
	gbl_cfg->tag_secret.src = tag_secret.s.src;

	for (id = 0; id < CVMX_PKI_NUM_FRAME_CHECK; id++) {
		frm_len_chk.u64 = csr_rd_node(node, CVMX_PKI_FRM_LEN_CHKX(id));
		gbl_cfg->frm_len[id].maxlen = frm_len_chk.s.maxlen;
		gbl_cfg->frm_len[id].minlen = frm_len_chk.s.minlen;
	}
	buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
	gbl_cfg->fpa_wait = buf_ctl.s.fpa_wait;
}

/**
 * This function writes the max and min frame lengths to hardware, which can be
 * used to check the size of arriving frames. There are two such register sets,
 * selected by the id field.
 *
 * @param node  Node number.
 * @param id  Which frame length check register to write to.
 * @param len_chk  Struct containing byte counts for the max-sized/min-sized
 *     frame checks.
 */
static void cvmx_pki_write_frame_len(int node, int id,
				     struct cvmx_pki_frame_len len_chk)
{
	cvmx_pki_frm_len_chkx_t frm_len_chk;

	frm_len_chk.u64 = csr_rd_node(node, CVMX_PKI_FRM_LEN_CHKX(id));
	frm_len_chk.s.maxlen = len_chk.maxlen;
	frm_len_chk.s.minlen = len_chk.minlen;
	csr_wr_node(node, CVMX_PKI_FRM_LEN_CHKX(id), frm_len_chk.u64);
}

/**
 * This function writes the global configuration of the PKI block into hardware.
 *
 * @param node  Node number.
 * @param gbl_cfg  Pointer to the global configuration struct.
 */
void cvmx_pki_write_global_config(int node,
				  struct cvmx_pki_global_config *gbl_cfg)
{
	cvmx_pki_stat_ctl_t stat_ctl;
	cvmx_pki_buf_ctl_t buf_ctl;
	unsigned int cl_grp;

	for (cl_grp = 0; cl_grp < CVMX_PKI_NUM_CLUSTER_GROUP; cl_grp++)
		cvmx_pki_attach_cluster_to_group(node, cl_grp,
						 gbl_cfg->cluster_mask[cl_grp]);

	stat_ctl.u64 = 0;
	stat_ctl.s.mode = gbl_cfg->stat_mode;
	csr_wr_node(node, CVMX_PKI_STAT_CTL, stat_ctl.u64);

	buf_ctl.u64 = csr_rd_node(node, CVMX_PKI_BUF_CTL);
	buf_ctl.s.fpa_wait = gbl_cfg->fpa_wait;
	csr_wr_node(node, CVMX_PKI_BUF_CTL, buf_ctl.u64);

	cvmx_pki_write_global_parse(node, gbl_cfg->gbl_pen);
	cvmx_pki_write_tag_secret(node, gbl_cfg->tag_secret);
	cvmx_pki_write_frame_len(node, 0, gbl_cfg->frm_len[0]);
	cvmx_pki_write_frame_len(node, 1, gbl_cfg->frm_len[1]);
}

/**
 * This function reads the per-pkind parameters in hardware which define how
 * an incoming packet is processed.
 *
 * @param node  Node number.
 * @param pkind  PKI supports a large number of incoming interfaces, and packets
 *     arriving on different interfaces or channels may need to be processed
 *     differently. PKI uses the pkind to determine how the incoming packet
 *     is processed.
 * @param pkind_cfg  Pointer to struct containing the pkind configuration read
 *     from the hardware.
 */
int cvmx_pki_read_pkind_config(int node, int pkind,
			       struct cvmx_pki_pkind_config *pkind_cfg)
{
	int cluster = 0;
	u64 cl_mask;
	cvmx_pki_pkindx_icgsel_t icgsel;
	cvmx_pki_clx_pkindx_style_t pstyle;
	cvmx_pki_icgx_cfg_t icg_cfg;
	cvmx_pki_clx_pkindx_cfg_t pcfg;
	cvmx_pki_clx_pkindx_skip_t skip;
	cvmx_pki_clx_pkindx_l2_custom_t l2cust;
	cvmx_pki_clx_pkindx_lg_custom_t lgcust;

	icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
	icg_cfg.u64 = csr_rd_node(node, CVMX_PKI_ICGX_CFG(icgsel.s.icg));
	pkind_cfg->cluster_grp = (uint8_t)icgsel.s.icg;
	cl_mask = (uint64_t)icg_cfg.s.clusters;
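	/* All clusters in a group hold the same pkind settings, so reading
	 * them back from the first cluster in the group is sufficient.
	 */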
	cluster = __builtin_ffsll(cl_mask) - 1;

	pstyle.u64 =
		csr_rd_node(node, CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
	pkind_cfg->initial_parse_mode = pstyle.s.pm;
	pkind_cfg->initial_style = pstyle.s.style;

	pcfg.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster));
	pkind_cfg->fcs_pres = pcfg.s.fcs_pres;
	pkind_cfg->parse_en.inst_hdr = pcfg.s.inst_hdr;
	pkind_cfg->parse_en.mpls_en = pcfg.s.mpls_en;
	pkind_cfg->parse_en.lg_custom = pcfg.s.lg_custom;
	pkind_cfg->parse_en.fulc_en = pcfg.s.fulc_en;
	pkind_cfg->parse_en.dsa_en = pcfg.s.dsa_en;
	pkind_cfg->parse_en.hg2_en = pcfg.s.hg2_en;
	pkind_cfg->parse_en.hg_en = pcfg.s.hg_en;

	skip.u64 = csr_rd_node(node, CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster));
	pkind_cfg->fcs_skip = skip.s.fcs_skip;
	pkind_cfg->inst_skip = skip.s.inst_skip;

	l2cust.u64 = csr_rd_node(node,
				 CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind, cluster));
	pkind_cfg->l2_scan_offset = l2cust.s.offset;

	lgcust.u64 = csr_rd_node(node,
				 CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind, cluster));
	pkind_cfg->lg_scan_offset = lgcust.s.offset;
	return 0;
}

/**
 * This function writes the per-pkind parameters in hardware which define how
 * an incoming packet is processed.
 *
 * @param node  Node number.
 * @param pkind  PKI supports a large number of incoming interfaces, and packets
 *     arriving on different interfaces or channels may need to be processed
 *     differently. PKI uses the pkind to determine how the incoming
 *     packet is processed.
 * @param pkind_cfg  Pointer to struct containing the pkind configuration to
 *     be written to the hardware.
 */
int cvmx_pki_write_pkind_config(int node, int pkind,
				struct cvmx_pki_pkind_config *pkind_cfg)
{
	unsigned int cluster = 0;
	u64 cluster_mask;
	cvmx_pki_pkindx_icgsel_t icgsel;
	cvmx_pki_clx_pkindx_style_t pstyle;
	cvmx_pki_icgx_cfg_t icg_cfg;
	cvmx_pki_clx_pkindx_cfg_t pcfg;
	cvmx_pki_clx_pkindx_skip_t skip;
	cvmx_pki_clx_pkindx_l2_custom_t l2cust;
	cvmx_pki_clx_pkindx_lg_custom_t lgcust;

	if (pkind >= CVMX_PKI_NUM_PKIND ||
	    pkind_cfg->cluster_grp >= CVMX_PKI_NUM_CLUSTER_GROUP ||
	    pkind_cfg->initial_style >= CVMX_PKI_NUM_FINAL_STYLE) {
		debug("ERROR: Configuring PKIND pkind = %d cluster_group = %d style = %d\n",
		      pkind, pkind_cfg->cluster_grp, pkind_cfg->initial_style);
		return -1;
	}
	icgsel.u64 = csr_rd_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind));
	icgsel.s.icg = pkind_cfg->cluster_grp;
	csr_wr_node(node, CVMX_PKI_PKINDX_ICGSEL(pkind), icgsel.u64);

	icg_cfg.u64 =
		csr_rd_node(node, CVMX_PKI_ICGX_CFG(pkind_cfg->cluster_grp));
	cluster_mask = (uint64_t)icg_cfg.s.clusters;
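	/* Program identical pkind settings into every cluster that belongs
	 * to the selected cluster group.
	 */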
	while (cluster < CVMX_PKI_NUM_CLUSTER) {
		if (cluster_mask & (0x01L << cluster)) {
			pstyle.u64 = csr_rd_node(
				node,
				CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster));
			pstyle.s.pm = pkind_cfg->initial_parse_mode;
			pstyle.s.style = pkind_cfg->initial_style;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PKINDX_STYLE(pkind, cluster),
				    pstyle.u64);

			pcfg.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster));
			pcfg.s.fcs_pres = pkind_cfg->fcs_pres;
			pcfg.s.inst_hdr = pkind_cfg->parse_en.inst_hdr;
			pcfg.s.mpls_en = pkind_cfg->parse_en.mpls_en;
			pcfg.s.lg_custom = pkind_cfg->parse_en.lg_custom;
			pcfg.s.fulc_en = pkind_cfg->parse_en.fulc_en;
			pcfg.s.dsa_en = pkind_cfg->parse_en.dsa_en;
			pcfg.s.hg2_en = pkind_cfg->parse_en.hg2_en;
			pcfg.s.hg_en = pkind_cfg->parse_en.hg_en;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PKINDX_CFG(pkind, cluster),
				    pcfg.u64);

			skip.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster));
			skip.s.fcs_skip = pkind_cfg->fcs_skip;
			skip.s.inst_skip = pkind_cfg->inst_skip;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PKINDX_SKIP(pkind, cluster),
				    skip.u64);

			l2cust.u64 = csr_rd_node(
				node,
				CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind, cluster));
			l2cust.s.offset = pkind_cfg->l2_scan_offset;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PKINDX_L2_CUSTOM(pkind,
								  cluster),
				    l2cust.u64);

			lgcust.u64 = csr_rd_node(
				node,
				CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind, cluster));
			lgcust.s.offset = pkind_cfg->lg_scan_offset;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PKINDX_LG_CUSTOM(pkind,
								  cluster),
				    lgcust.u64);
		}
		cluster++;
	}
	return 0;
}

/**
 * This function reads parameters associated with tag configuration in hardware.
 * Only the first cluster in the group is used.
 *
 * @param node  Node number.
 * @param style  Style to read the tag configuration for.
 * @param cluster_mask  Mask of clusters the style belongs to.
 * @param tag_cfg  Pointer to tag configuration struct.
 */
void cvmx_pki_read_tag_config(int node, int style, uint64_t cluster_mask,
			      struct cvmx_pki_style_tag_cfg *tag_cfg)
{
	int mask, tag_idx, index;
	cvmx_pki_clx_stylex_cfg2_t style_cfg2;
	cvmx_pki_clx_stylex_alg_t style_alg;
	cvmx_pki_stylex_tag_sel_t tag_sel;
	cvmx_pki_tag_incx_ctl_t tag_ctl;
	cvmx_pki_tag_incx_mask_t tag_mask;
	int cluster = __builtin_ffsll(cluster_mask) - 1;

	style_cfg2.u64 =
		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
	style_alg.u64 =
		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));

	/* 7-Tuple Tag: */
	tag_cfg->tag_fields.layer_g_src = style_cfg2.s.tag_src_lg;
	tag_cfg->tag_fields.layer_f_src = style_cfg2.s.tag_src_lf;
	tag_cfg->tag_fields.layer_e_src = style_cfg2.s.tag_src_le;
	tag_cfg->tag_fields.layer_d_src = style_cfg2.s.tag_src_ld;
	tag_cfg->tag_fields.layer_c_src = style_cfg2.s.tag_src_lc;
	tag_cfg->tag_fields.layer_b_src = style_cfg2.s.tag_src_lb;
	tag_cfg->tag_fields.layer_g_dst = style_cfg2.s.tag_dst_lg;
	tag_cfg->tag_fields.layer_f_dst = style_cfg2.s.tag_dst_lf;
	tag_cfg->tag_fields.layer_e_dst = style_cfg2.s.tag_dst_le;
	tag_cfg->tag_fields.layer_d_dst = style_cfg2.s.tag_dst_ld;
	tag_cfg->tag_fields.layer_c_dst = style_cfg2.s.tag_dst_lc;
	tag_cfg->tag_fields.layer_b_dst = style_cfg2.s.tag_dst_lb;
	tag_cfg->tag_fields.tag_vni = style_alg.s.tag_vni;
	tag_cfg->tag_fields.tag_gtp = style_alg.s.tag_gtp;
	tag_cfg->tag_fields.tag_spi = style_alg.s.tag_spi;
	tag_cfg->tag_fields.tag_sync = style_alg.s.tag_syn;
	tag_cfg->tag_fields.ip_prot_nexthdr = style_alg.s.tag_pctl;
	tag_cfg->tag_fields.second_vlan = style_alg.s.tag_vs1;
	tag_cfg->tag_fields.first_vlan = style_alg.s.tag_vs0;
	tag_cfg->tag_fields.mpls_label = style_alg.s.tag_mpls0;
	tag_cfg->tag_fields.input_port = style_alg.s.tag_prt;

	/* Custom-Mask Tag: */
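	/* Each of the four mask tags selects a TAG_INC control/mask register
	 * pair through its tag_idx field in the style's TAG_SEL register.
	 */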
	tag_sel.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_TAG_SEL(style));
	for (mask = 0; mask < 4; mask++) {
		tag_cfg->mask_tag[mask].enable =
			(style_cfg2.s.tag_inc & (1 << mask)) != 0;
		switch (mask) {
		case 0:
			tag_idx = tag_sel.s.tag_idx0;
			break;
		case 1:
			tag_idx = tag_sel.s.tag_idx1;
			break;
		case 2:
			tag_idx = tag_sel.s.tag_idx2;
			break;
		case 3:
			tag_idx = tag_sel.s.tag_idx3;
			break;
		}
		index = tag_idx * 4 + mask;
		tag_mask.u64 = csr_rd_node(node, CVMX_PKI_TAG_INCX_MASK(index));
		tag_cfg->mask_tag[mask].val = tag_mask.s.en;
		tag_ctl.u64 = csr_rd_node(node, CVMX_PKI_TAG_INCX_CTL(index));
		tag_cfg->mask_tag[mask].base = tag_ctl.s.ptr_sel;
		tag_cfg->mask_tag[mask].offset = tag_ctl.s.offset;
	}
}

/**
 * This function writes/configures parameters associated with tag configuration
 * in hardware. In Custom-Mask Tagging, all four masks use the same base index
 * to access the Tag Control and Tag Mask registers.
 *
 * @param node  Node number.
 * @param style  Style to configure tag for.
 * @param cluster_mask  Mask of clusters to configure the style for.
 * @param tag_cfg  Pointer to tag configuration struct.
 */
void cvmx_pki_write_tag_config(int node, int style, uint64_t cluster_mask,
			       struct cvmx_pki_style_tag_cfg *tag_cfg)
{
	int mask, index, tag_idx, mtag_en = 0;
	unsigned int cluster = 0;
	cvmx_pki_clx_stylex_cfg2_t scfg2;
	cvmx_pki_clx_stylex_alg_t style_alg;
	cvmx_pki_tag_incx_ctl_t tag_ctl;
	cvmx_pki_tag_incx_mask_t tag_mask;
	cvmx_pki_stylex_tag_sel_t tag_sel;

	while (cluster < CVMX_PKI_NUM_CLUSTER) {
		if (cluster_mask & (0x01L << cluster)) {
			/* 7-Tuple Tag: */
			scfg2.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
			scfg2.s.tag_src_lg = tag_cfg->tag_fields.layer_g_src;
			scfg2.s.tag_src_lf = tag_cfg->tag_fields.layer_f_src;
			scfg2.s.tag_src_le = tag_cfg->tag_fields.layer_e_src;
			scfg2.s.tag_src_ld = tag_cfg->tag_fields.layer_d_src;
			scfg2.s.tag_src_lc = tag_cfg->tag_fields.layer_c_src;
			scfg2.s.tag_src_lb = tag_cfg->tag_fields.layer_b_src;
			scfg2.s.tag_dst_lg = tag_cfg->tag_fields.layer_g_dst;
			scfg2.s.tag_dst_lf = tag_cfg->tag_fields.layer_f_dst;
			scfg2.s.tag_dst_le = tag_cfg->tag_fields.layer_e_dst;
			scfg2.s.tag_dst_ld = tag_cfg->tag_fields.layer_d_dst;
			scfg2.s.tag_dst_lc = tag_cfg->tag_fields.layer_c_dst;
			scfg2.s.tag_dst_lb = tag_cfg->tag_fields.layer_b_dst;
			csr_wr_node(node,
				    CVMX_PKI_CLX_STYLEX_CFG2(style, cluster),
				    scfg2.u64);

			style_alg.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
			style_alg.s.tag_vni = tag_cfg->tag_fields.tag_vni;
			style_alg.s.tag_gtp = tag_cfg->tag_fields.tag_gtp;
			style_alg.s.tag_spi = tag_cfg->tag_fields.tag_spi;
			style_alg.s.tag_syn = tag_cfg->tag_fields.tag_sync;
			style_alg.s.tag_pctl =
				tag_cfg->tag_fields.ip_prot_nexthdr;
			style_alg.s.tag_vs1 = tag_cfg->tag_fields.second_vlan;
			style_alg.s.tag_vs0 = tag_cfg->tag_fields.first_vlan;
			style_alg.s.tag_mpls0 = tag_cfg->tag_fields.mpls_label;
			style_alg.s.tag_prt = tag_cfg->tag_fields.input_port;
			csr_wr_node(node,
				    CVMX_PKI_CLX_STYLEX_ALG(style, cluster),
				    style_alg.u64);

			/* Custom-Mask Tag (Part 1): */
			for (mask = 0; mask < 4; mask++) {
				if (tag_cfg->mask_tag[mask].enable)
					mtag_en++;
			}
			if (mtag_en) {
				scfg2.u64 = csr_rd_node(
					node, CVMX_PKI_CLX_STYLEX_CFG2(
						      style, cluster));
				scfg2.s.tag_inc = 0;
				for (mask = 0; mask < 4; mask++) {
					if (tag_cfg->mask_tag[mask].enable)
						scfg2.s.tag_inc |= 1 << mask;
				}
				csr_wr_node(node,
					    CVMX_PKI_CLX_STYLEX_CFG2(style,
								     cluster),
					    scfg2.u64);
			}
		}
		cluster++;
	}
	/* Custom-Mask Tag (Part 2): */
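	/* All enabled masks share a single allocated TAG_INC base index, as
	 * noted in the function comment above.
	 */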
	if (mtag_en) {
		tag_idx = cvmx_pki_mtag_idx_alloc(node, -1);
		if (tag_idx < 0)
			return;

		tag_sel.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_TAG_SEL(style));
		for (mask = 0; mask < 4; mask++) {
			if (tag_cfg->mask_tag[mask].enable) {
				switch (mask) {
				case 0:
					tag_sel.s.tag_idx0 = tag_idx;
					break;
				case 1:
					tag_sel.s.tag_idx1 = tag_idx;
					break;
				case 2:
					tag_sel.s.tag_idx2 = tag_idx;
					break;
				case 3:
					tag_sel.s.tag_idx3 = tag_idx;
					break;
				}
				index = tag_idx * 4 + mask;
				tag_mask.u64 = csr_rd_node(
					node, CVMX_PKI_TAG_INCX_MASK(index));
				tag_mask.s.en = tag_cfg->mask_tag[mask].val;
				csr_wr_node(node, CVMX_PKI_TAG_INCX_MASK(index),
					    tag_mask.u64);

				tag_ctl.u64 = csr_rd_node(
					node, CVMX_PKI_TAG_INCX_CTL(index));
				tag_ctl.s.ptr_sel =
					tag_cfg->mask_tag[mask].base;
				tag_ctl.s.offset =
					tag_cfg->mask_tag[mask].offset;
				csr_wr_node(node, CVMX_PKI_TAG_INCX_CTL(index),
					    tag_ctl.u64);
			}
		}
		csr_wr_node(node, CVMX_PKI_STYLEX_TAG_SEL(style), tag_sel.u64);
	}
}

/**
 * This function reads parameters associated with a style in hardware.
 *
 * @param node  Node number.
 * @param style  Style to read from.
 * @param cluster_mask  Mask of clusters the style belongs to.
 * @param style_cfg  Pointer to style config struct.
 */
void cvmx_pki_read_style_config(int node, int style, uint64_t cluster_mask,
				struct cvmx_pki_style_config *style_cfg)
{
	cvmx_pki_clx_stylex_cfg_t scfg;
	cvmx_pki_clx_stylex_cfg2_t scfg2;
	cvmx_pki_clx_stylex_alg_t style_alg;
	cvmx_pki_stylex_buf_t style_buf;
	int cluster = __builtin_ffsll(cluster_mask) - 1;

	scfg.u64 = csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
	scfg2.u64 = csr_rd_node(node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
	style_alg.u64 =
		csr_rd_node(node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
	style_buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));

	style_cfg->parm_cfg.ip6_udp_opt = scfg.s.ip6_udp_opt;
	style_cfg->parm_cfg.lenerr_en = scfg.s.lenerr_en;
	style_cfg->parm_cfg.lenerr_eqpad = scfg.s.lenerr_eqpad;
	style_cfg->parm_cfg.maxerr_en = scfg.s.maxerr_en;
	style_cfg->parm_cfg.minerr_en = scfg.s.minerr_en;
	style_cfg->parm_cfg.fcs_chk = scfg.s.fcs_chk;
	style_cfg->parm_cfg.fcs_strip = scfg.s.fcs_strip;
	style_cfg->parm_cfg.minmax_sel = scfg.s.minmax_sel;
	style_cfg->parm_cfg.qpg_base = scfg.s.qpg_base;
	style_cfg->parm_cfg.qpg_dis_padd = scfg.s.qpg_dis_padd;
	style_cfg->parm_cfg.qpg_dis_aura = scfg.s.qpg_dis_aura;
	style_cfg->parm_cfg.qpg_dis_grp = scfg.s.qpg_dis_grp;
	style_cfg->parm_cfg.qpg_dis_grptag = scfg.s.qpg_dis_grptag;
	style_cfg->parm_cfg.rawdrp = scfg.s.rawdrp;
	style_cfg->parm_cfg.force_drop = scfg.s.drop;
	style_cfg->parm_cfg.nodrop = scfg.s.nodrop;

	style_cfg->parm_cfg.len_lg = scfg2.s.len_lg;
	style_cfg->parm_cfg.len_lf = scfg2.s.len_lf;
	style_cfg->parm_cfg.len_le = scfg2.s.len_le;
	style_cfg->parm_cfg.len_ld = scfg2.s.len_ld;
	style_cfg->parm_cfg.len_lc = scfg2.s.len_lc;
	style_cfg->parm_cfg.len_lb = scfg2.s.len_lb;
	style_cfg->parm_cfg.csum_lg = scfg2.s.csum_lg;
	style_cfg->parm_cfg.csum_lf = scfg2.s.csum_lf;
	style_cfg->parm_cfg.csum_le = scfg2.s.csum_le;
	style_cfg->parm_cfg.csum_ld = scfg2.s.csum_ld;
	style_cfg->parm_cfg.csum_lc = scfg2.s.csum_lc;
	style_cfg->parm_cfg.csum_lb = scfg2.s.csum_lb;

	style_cfg->parm_cfg.qpg_qos = style_alg.s.qpg_qos;
	style_cfg->parm_cfg.tag_type = style_alg.s.tt;
	style_cfg->parm_cfg.apad_nip = style_alg.s.apad_nip;
	style_cfg->parm_cfg.qpg_port_sh = style_alg.s.qpg_port_sh;
	style_cfg->parm_cfg.qpg_port_msb = style_alg.s.qpg_port_msb;
	style_cfg->parm_cfg.wqe_vs = style_alg.s.wqe_vs;

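	/* The style buffer register stores skips and sizes in hardware units
	 * (wqe_skip in 128-byte units, the other fields in 8-byte units);
	 * convert them to bytes here.
	 */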
	style_cfg->parm_cfg.pkt_lend = style_buf.s.pkt_lend;
	style_cfg->parm_cfg.wqe_hsz = style_buf.s.wqe_hsz;
	style_cfg->parm_cfg.wqe_skip = style_buf.s.wqe_skip * 128;
	style_cfg->parm_cfg.first_skip = style_buf.s.first_skip * 8;
	style_cfg->parm_cfg.later_skip = style_buf.s.later_skip * 8;
	style_cfg->parm_cfg.cache_mode = style_buf.s.opc_mode;
	style_cfg->parm_cfg.mbuff_size = style_buf.s.mb_size * 8;
	style_cfg->parm_cfg.dis_wq_dat = style_buf.s.dis_wq_dat;

	cvmx_pki_read_tag_config(node, style, cluster_mask,
				 &style_cfg->tag_cfg);
}

/**
 * This function writes/configures parameters associated with a style in hardware.
 *
 * @param node  Node number.
 * @param style  Style to configure.
 * @param cluster_mask  Mask of clusters to configure the style for.
 * @param style_cfg  Pointer to style config struct.
 */
void cvmx_pki_write_style_config(int node, uint64_t style, u64 cluster_mask,
				 struct cvmx_pki_style_config *style_cfg)
{
	cvmx_pki_clx_stylex_cfg_t scfg;
	cvmx_pki_clx_stylex_cfg2_t scfg2;
	cvmx_pki_clx_stylex_alg_t style_alg;
	cvmx_pki_stylex_buf_t style_buf;
	unsigned int cluster = 0;

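	/* Program the per-cluster style registers for every cluster in the mask */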
	while (cluster < CVMX_PKI_NUM_CLUSTER) {
		if (cluster_mask & (0x01L << cluster)) {
			scfg.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
			scfg.s.ip6_udp_opt = style_cfg->parm_cfg.ip6_udp_opt;
			scfg.s.lenerr_en = style_cfg->parm_cfg.lenerr_en;
			scfg.s.lenerr_eqpad = style_cfg->parm_cfg.lenerr_eqpad;
			scfg.s.maxerr_en = style_cfg->parm_cfg.maxerr_en;
			scfg.s.minerr_en = style_cfg->parm_cfg.minerr_en;
			scfg.s.fcs_chk = style_cfg->parm_cfg.fcs_chk;
			scfg.s.fcs_strip = style_cfg->parm_cfg.fcs_strip;
			scfg.s.minmax_sel = style_cfg->parm_cfg.minmax_sel;
			scfg.s.qpg_base = style_cfg->parm_cfg.qpg_base;
			scfg.s.qpg_dis_padd = style_cfg->parm_cfg.qpg_dis_padd;
			scfg.s.qpg_dis_aura = style_cfg->parm_cfg.qpg_dis_aura;
			scfg.s.qpg_dis_grp = style_cfg->parm_cfg.qpg_dis_grp;
			scfg.s.qpg_dis_grptag =
				style_cfg->parm_cfg.qpg_dis_grptag;
			scfg.s.rawdrp = style_cfg->parm_cfg.rawdrp;
			scfg.s.drop = style_cfg->parm_cfg.force_drop;
			scfg.s.nodrop = style_cfg->parm_cfg.nodrop;
			csr_wr_node(node,
				    CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
				    scfg.u64);

			scfg2.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_STYLEX_CFG2(style, cluster));
			scfg2.s.len_lg = style_cfg->parm_cfg.len_lg;
			scfg2.s.len_lf = style_cfg->parm_cfg.len_lf;
			scfg2.s.len_le = style_cfg->parm_cfg.len_le;
			scfg2.s.len_ld = style_cfg->parm_cfg.len_ld;
			scfg2.s.len_lc = style_cfg->parm_cfg.len_lc;
			scfg2.s.len_lb = style_cfg->parm_cfg.len_lb;
			scfg2.s.csum_lg = style_cfg->parm_cfg.csum_lg;
			scfg2.s.csum_lf = style_cfg->parm_cfg.csum_lf;
			scfg2.s.csum_le = style_cfg->parm_cfg.csum_le;
			scfg2.s.csum_ld = style_cfg->parm_cfg.csum_ld;
			scfg2.s.csum_lc = style_cfg->parm_cfg.csum_lc;
			scfg2.s.csum_lb = style_cfg->parm_cfg.csum_lb;
			csr_wr_node(node,
				    CVMX_PKI_CLX_STYLEX_CFG2(style, cluster),
				    scfg2.u64);

			style_alg.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_STYLEX_ALG(style, cluster));
			style_alg.s.qpg_qos = style_cfg->parm_cfg.qpg_qos;
			style_alg.s.tt = style_cfg->parm_cfg.tag_type;
			style_alg.s.apad_nip = style_cfg->parm_cfg.apad_nip;
			style_alg.s.qpg_port_sh =
				style_cfg->parm_cfg.qpg_port_sh;
			style_alg.s.qpg_port_msb =
				style_cfg->parm_cfg.qpg_port_msb;
			style_alg.s.wqe_vs = style_cfg->parm_cfg.wqe_vs;
			csr_wr_node(node,
				    CVMX_PKI_CLX_STYLEX_ALG(style, cluster),
				    style_alg.u64);
		}
		cluster++;
	}
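	/* The style buffer register is common to all clusters, so write it
	 * once per style; skips and sizes are converted back to hardware units.
	 */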
	style_buf.u64 = csr_rd_node(node, CVMX_PKI_STYLEX_BUF(style));
	style_buf.s.pkt_lend = style_cfg->parm_cfg.pkt_lend;
	style_buf.s.wqe_hsz = style_cfg->parm_cfg.wqe_hsz;
	style_buf.s.wqe_skip = (style_cfg->parm_cfg.wqe_skip) / 128;
	style_buf.s.first_skip = (style_cfg->parm_cfg.first_skip) / 8;
	style_buf.s.later_skip = style_cfg->parm_cfg.later_skip / 8;
	style_buf.s.opc_mode = style_cfg->parm_cfg.cache_mode;
	style_buf.s.mb_size = (style_cfg->parm_cfg.mbuff_size) / 8;
	style_buf.s.dis_wq_dat = style_cfg->parm_cfg.dis_wq_dat;
	csr_wr_node(node, CVMX_PKI_STYLEX_BUF(style), style_buf.u64);

	cvmx_pki_write_tag_config(node, style, cluster_mask,
				  &style_cfg->tag_cfg);
}

/**
 * This function reads the qpg entry at the specified offset from the qpg table.
 *
 * @param node  Node number.
 * @param offset  Offset in qpg table to read from.
 * @param qpg_cfg  Pointer to structure containing qpg values.
 */
int cvmx_pki_read_qpg_entry(int node, int offset,
			    struct cvmx_pki_qpg_config *qpg_cfg)
{
	cvmx_pki_qpg_tblx_t qpg_tbl;

	if (offset >= CVMX_PKI_NUM_QPG_ENTRY) {
		debug("ERROR: qpg offset %d is >= 2048\n", offset);
		return -1;
	}
	qpg_tbl.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(offset));
	qpg_cfg->aura_num = qpg_tbl.s.laura;
	qpg_cfg->port_add = qpg_tbl.s.padd;
	qpg_cfg->grp_ok = qpg_tbl.s.grp_ok;
	qpg_cfg->grp_bad = qpg_tbl.s.grp_bad;
	qpg_cfg->grptag_ok = qpg_tbl.s.grptag_ok;
	qpg_cfg->grptag_bad = qpg_tbl.s.grptag_bad;
	return 0;
}

/**
 * This function writes the qpg entry at the specified offset in the qpg table.
 *
 * @param node  Node number.
 * @param offset  Offset in qpg table to write to.
 * @param qpg_cfg  Pointer to structure containing qpg values.
 */
void cvmx_pki_write_qpg_entry(int node, int offset,
			      struct cvmx_pki_qpg_config *qpg_cfg)
{
	cvmx_pki_qpg_tblx_t qpg_tbl;

	qpg_tbl.u64 = csr_rd_node(node, CVMX_PKI_QPG_TBLX(offset));
	qpg_tbl.s.padd = qpg_cfg->port_add;
	qpg_tbl.s.laura = qpg_cfg->aura_num;
	qpg_tbl.s.grp_ok = qpg_cfg->grp_ok;
	qpg_tbl.s.grp_bad = qpg_cfg->grp_bad;
	qpg_tbl.s.grptag_ok = qpg_cfg->grptag_ok;
	qpg_tbl.s.grptag_bad = qpg_cfg->grptag_bad;
	csr_wr_node(node, CVMX_PKI_QPG_TBLX(offset), qpg_tbl.u64);
}

/**
 * This function writes a pcam entry at the given offset in the pcam table in
 * hardware.
 *
 * @param node  Node number.
 * @param index  Offset in pcam table.
 * @param cluster_mask  Mask of clusters in which to write the pcam entry.
 * @param input  Input keys to pcam match passed as struct.
 * @param action  PCAM match action passed as struct.
 */
int cvmx_pki_pcam_write_entry(int node, int index, uint64_t cluster_mask,
			      struct cvmx_pki_pcam_input input,
			      struct cvmx_pki_pcam_action action)
{
	int bank;
	unsigned int cluster = 0;
	cvmx_pki_clx_pcamx_termx_t term;
	cvmx_pki_clx_pcamx_matchx_t match;
	cvmx_pki_clx_pcamx_actionx_t act;

	if (index >= CVMX_PKI_TOTAL_PCAM_ENTRY) {
		debug("\nERROR: Invalid pcam entry %d\n", index);
		return -1;
	}
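	/* The PCAM is organized as two banks; the low bit of the input field
	 * selects which bank the entry is written to.
	 */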
	bank = (int)(input.field & 0x01);
	while (cluster < CVMX_PKI_NUM_CLUSTER) {
		if (cluster_mask & (0x01L << cluster)) {
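			/* Clear the valid bit first so a partially written
			 * entry can never match; it is set again last.
			 */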
			term.u64 = csr_rd_node(
				node,
				CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank, index));
			term.s.valid = 0;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank,
							     index),
				    term.u64);
			match.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_PCAMX_MATCHX(cluster, bank,
								index));
			match.s.data1 = input.data & input.data_mask;
			match.s.data0 = (~input.data) & input.data_mask;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PCAMX_MATCHX(cluster, bank,
							      index),
				    match.u64);

			act.u64 = csr_rd_node(
				node, CVMX_PKI_CLX_PCAMX_ACTIONX(cluster, bank,
								 index));
			act.s.pmc = action.parse_mode_chg;
			act.s.style_add = action.style_add;
			act.s.pf = action.parse_flag_set;
			act.s.setty = action.layer_type_set;
			act.s.advance = action.pointer_advance;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PCAMX_ACTIONX(cluster, bank,
							       index),
				    act.u64);

			term.u64 = csr_rd_node(
				node,
				CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank, index));
			term.s.term1 = input.field & input.field_mask;
			term.s.term0 = (~input.field) & input.field_mask;
			term.s.style1 = input.style & input.style_mask;
			term.s.style0 = (~input.style) & input.style_mask;
			term.s.valid = 1;
			csr_wr_node(node,
				    CVMX_PKI_CLX_PCAMX_TERMX(cluster, bank,
							     index),
				    term.u64);
		}
		cluster++;
	}
	return 0;
}

/**
 * Enables/disables FCS check and FCS stripping on the pkind.
 *
 * @param node  Node number
 * @param pknd  PKIND to apply settings on.
 * @param fcs_chk  Enable/disable FCS check.
 *    1 = enable FCS error check.
 *    0 = disable FCS error check.
 * @param fcs_strip  Strip L2 FCS bytes from packet, decrease WQE[LEN] by 4 bytes.
 *    1 = strip L2 FCS.
 *    0 = do not strip L2 FCS.
 */
void cvmx_pki_endis_fcs_check(int node, int pknd, bool fcs_chk, bool fcs_strip)
{
	int style;
	unsigned int cluster;
	cvmx_pki_clx_pkindx_style_t pstyle;
	cvmx_pki_clx_stylex_cfg_t style_cfg;

	/* Validate PKIND # */
	if (pknd >= CVMX_PKI_NUM_PKIND) {
		printf("%s: PKIND %d out of range\n", __func__, pknd);
		return;
	}

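	/* Update the FCS settings of the style each cluster assigns to this pkind */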
	for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
		pstyle.u64 = csr_rd_node(
			node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cluster));
		style = pstyle.s.style;
		/* Validate STYLE # */
		if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
			continue;
		style_cfg.u64 = csr_rd_node(
			node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
		style_cfg.s.fcs_chk = fcs_chk;
		style_cfg.s.fcs_strip = fcs_strip;
		csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
			    style_cfg.u64);
	}
}

/**
 * Enables/disables L2 length error check and max & min frame length checks.
 *
 * @param node  Node number
 * @param pknd  PKIND to enable/disable the checks for.
 * @param l2len_err  L2 length error check enable.
 * @param maxframe_err  Max frame error check enable.
 * @param minframe_err  Min frame error check enable.
 *    1 = Enable error checks
 *    0 = Disable error checks
 */
void cvmx_pki_endis_l2_errs(int node, int pknd, bool l2len_err,
			    bool maxframe_err, bool minframe_err)
{
	int style;
	unsigned int cluster;
	cvmx_pki_clx_pkindx_style_t pstyle;
	cvmx_pki_clx_stylex_cfg_t style_cfg;

	/* Validate PKIND # */
	if (pknd >= CVMX_PKI_NUM_PKIND) {
		printf("%s: PKIND %d out of range\n", __func__, pknd);
		return;
	}

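	/* Apply the length-check enables to the style each cluster assigns to this pkind */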
	for (cluster = 0; cluster < CVMX_PKI_NUM_CLUSTER; cluster++) {
		pstyle.u64 = csr_rd_node(
			node, CVMX_PKI_CLX_PKINDX_STYLE(pknd, cluster));
		style = pstyle.s.style;
		/* Validate STYLE # */
		if (style >= CVMX_PKI_NUM_INTERNAL_STYLE)
			continue;
		style_cfg.u64 = csr_rd_node(
			node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster));
		style_cfg.s.lenerr_en = l2len_err;
		style_cfg.s.maxerr_en = maxframe_err;
		style_cfg.s.minerr_en = minframe_err;
		csr_wr_node(node, CVMX_PKI_CLX_STYLEX_CFG(style, cluster),
			    style_cfg.u64);
	}
}