// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

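/* Human-readable descriptions of the global error cause bits, indexed by bit position */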
static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = {
	"Error due to un-priv read",
	"Error due to un-secure read",
	"Error due to read from unmapped reg",
	"Error due to un-priv write",
	"Error due to un-secure write",
	"Error due to write to unmapped reg",
	"External I/F write sec violation",
	"External I/F write to un-mapped reg",
	"Read to write only",
	"Write to read only"
};

/**
 * hl_get_pb_block - return the index of the block containing a given register
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address in the desired block
 * @pb_blocks: blocks array
 * @array_size: blocks array size
 *
 * Return: block index on success, -EDOM if no block contains the address.
 */
static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
		const u32 pb_blocks[], int array_size)
{
	int i;
	u32 start_addr, end_addr;

	for (i = 0 ; i < array_size ; i++) {
		start_addr = pb_blocks[i];
		end_addr = start_addr + HL_BLOCK_SIZE;

		if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
			return i;
	}

	dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
			mm_reg_addr);
	return -EDOM;
}

/**
 * hl_unset_pb_in_block - clear a specific protection bit in a block
 *
 * @hdev: pointer to hl_device structure
 * @reg_offset: register offset within the block, converted to a bit offset
 *              in the pb block
 * @sgs_entry: pb array
 *
 * Return: 0 on success, -EINVAL if the offset is out of range or unaligned.
 */
static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
				struct hl_block_glbl_sec *sgs_entry)
{
	if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
		dev_err(hdev->dev,
			"Register offset(%d) is out of range(%d) or invalid\n",
			reg_offset, HL_BLOCK_SIZE);
		return -EINVAL;
	}

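	/* Each 4-byte register maps to a single protection bit, hence the
	 * offset within the block is divided by four.
	 */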
	UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
			 (reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);

	return 0;
}

/**
 * hl_unsecure_register - locate the relevant block for this register and
 *                        remove the corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
		const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int block_num;

	block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

	reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];

	return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
}

/**
 * hl_unsecure_register_range - locate the relevant block for this register
 *                              range and remove the corresponding protection
 *                              bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range: register address range to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int hl_unsecure_register_range(struct hl_device *hdev,
		struct range mm_reg_range, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int i, block_num, rc = 0;

	block_num = hl_get_pb_block(hdev,
			mm_reg_range.start + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

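	/* Registers are 4 bytes apart; walk the range one register at a time,
	 * OR-ing the results so that one failure does not stop the whole range.
	 */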
	for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
		reg_offset = (i + offset) - pb_blocks[block_num];
		rc |= hl_unset_pb_in_block(hdev, reg_offset,
					&sgs_array[block_num]);
	}

	return rc;
}

/**
 * hl_unsecure_registers - locate the relevant block for all registers and
 *                         remove the corresponding protection bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_array: register address array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
		int mm_array_size, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
				pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_unsecure_registers_range - locate the relevant block for all register
 *                               ranges and remove the corresponding protection
 *                               bits
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range_array: register address range array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int hl_unsecure_registers_range(struct hl_device *hdev,
		const struct range mm_reg_range_array[], int mm_array_size,
		int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
			offset, pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_ack_pb_security_violations - ack security violations by logging and
 *                                 clearing the per-block error cause registers
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
static void hl_ack_pb_security_violations(struct hl_device *hdev,
		const u32 pb_blocks[], u32 block_offset, int array_size)
{
	int i;
	u32 cause, addr, block_base;

	for (i = 0 ; i < array_size ; i++) {
		block_base = pb_blocks[i] + block_offset;
		cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
		if (cause) {
			addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
			hdev->asic_funcs->pb_print_security_errors(hdev,
					block_base, cause, addr);
			WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
		}
	}
}

/**
 * hl_config_glbl_sec - set pb in HW according to given pb array
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], u32 block_offset,
		int array_size)
{
	int i, j;
	u32 sgs_base;

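	/* On the Palladium emulation platform, give the device a short delay
	 * before the burst of register writes below.
	 */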
	if (hdev->pldm)
		usleep_range(100, 1000);

	for (i = 0 ; i < array_size ; i++) {
		sgs_base = block_offset + pb_blocks[i] +
				HL_BLOCK_GLBL_SEC_OFFS;

		for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
			WREG32(sgs_base + j * sizeof(u32),
				sgs_array[i].sec_array[j]);
	}
}

/**
 * hl_secure_block - locally memset a pb array to 0, marking all registers
 *                   in the block as secured
 *
 * @hdev: pointer to hl_device structure
 * @sgs_array: pb array to clear
 * @array_size: blocks array size
 *
 */
void hl_secure_block(struct hl_device *hdev,
		struct hl_block_glbl_sec sgs_array[], int array_size)
{
	int i;

	for (i = 0 ; i < array_size ; i++)
		memset((char *)(sgs_array[i].sec_array), 0,
			HL_BLOCK_GLBL_SEC_SIZE);
}

/**
 * hl_init_pb_with_mask - set selected pb instances with mask in HW according
 *                        to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 * @mask: enabled instances mask: 1 - enabled, 0 - disabled
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size, u64 mask)
{
	int i, j, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
			pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb - set pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size)
{
	return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, regs_array, regs_array_size,
			ULLONG_MAX);
}

/**
 * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
 *                               given configuration, unsecuring register
 *                               ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 * @mask: enabled instances mask: 1 - enabled, 0 - disabled
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size,
		u64 mask)
{
	int i, j, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, regs_range_array,
			regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges - set pb in HW according to given configuration,
 *                     unsecuring register ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size)
{
	return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, regs_range_array,
			regs_range_array_size, ULLONG_MAX);
}

/**
 * hl_init_pb_single_dcore - set pb for a single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size)
{
	int i, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
			pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
 *                                  to given configuration, unsecuring
 *                                  register ranges instead of specific
 *                                  registers
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 *
 * Return: 0 on success, negative error code otherwise.
 */
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size)
{
	int i, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, regs_range_array,
			regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_ack_pb_with_mask - ack pb with mask in HW according to given
 *                       configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @mask: enabled instances mask: 1 - enabled, 0 - disabled
 *
 */
void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
{
	int i, j;

	/* ack all blocks */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_ack_pb_security_violations(hdev, pb_blocks,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}
}

/**
 * hl_ack_pb - ack pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if the configuration should be applied
 *              only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
			instance_offset, pb_blocks, blocks_array_size,
			ULLONG_MAX);
}

/**
 * hl_ack_pb_single_dcore - ack pb for a single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	int i;

	/* ack all blocks */
	for (i = 0 ; i < num_instances ; i++)
		hl_ack_pb_security_violations(hdev, pb_blocks,
				dcore_offset + i * instance_offset,
				blocks_array_size);
}

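/**
 * hl_automated_get_block_base_addr - compute the driver-visible base address
 *                                    of a special block instance
 *
 * @hdev: pointer to hl_device structure
 * @block_info: special block descriptor (FW base address and instance offsets)
 * @major: major instance index
 * @minor: minor instance index
 * @sub_minor: sub-minor instance index
 */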
static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
		struct hl_special_block_info *block_info,
		u32 major, u32 minor, u32 sub_minor)
{
	u32 fw_block_base_address = block_info->base_addr +
			major * block_info->major_offset +
			minor * block_info->minor_offset +
			sub_minor * block_info->sub_minor_offset;
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	/* The calculation above returns an address for FW use, so it must
	 * be converted for driver use.
	 */
	return (fw_block_base_address - lower_32_bits(prop->cfg_base_address));
}

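/**
 * hl_check_block_type_exclusion - check if a block type appears in the
 *                                 exclusion list of block types
 *
 * @skip_blocks_cfg: skip-blocks configuration
 * @block_type: block type to look up
 *
 * Return: true if the block type should be skipped, false otherwise.
 */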
static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg,
		int block_type)
{
	int i;

	/* Check if block type is listed in the exclusion list of block types */
	for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++)
		if (block_type == skip_blocks_cfg->block_types[i])
			return true;

	return false;
}

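/**
 * hl_check_block_range_exclusion - check if a block instance's base address
 *                                  falls inside one of the excluded block
 *                                  ranges
 *
 * @hdev: pointer to hl_device structure
 * @skip_blocks_cfg: skip-blocks configuration
 * @block_info: special block descriptor
 * @major: major instance index
 * @minor: minor instance index
 * @sub_minor: sub-minor instance index
 *
 * Return: true if the block should be skipped, false otherwise.
 */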
static bool hl_check_block_range_exclusion(struct hl_device *hdev,
		struct hl_skip_blocks_cfg *skip_blocks_cfg,
		struct hl_special_block_info *block_info,
		u32 major, u32 minor, u32 sub_minor)
{
	u32 blocks_in_range, block_base_addr_in_range, block_base_addr;
	int i, j;

	block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
			major, minor, sub_minor);

	for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) {
		blocks_in_range = (skip_blocks_cfg->block_ranges[i].end -
				skip_blocks_cfg->block_ranges[i].start) /
				HL_BLOCK_SIZE + 1;
		for (j = 0 ; j < blocks_in_range ; j++) {
			block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start +
					j * HL_BLOCK_SIZE;
			if (block_base_addr == block_base_addr_in_range)
				return true;
		}
	}

	return false;
}

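/**
 * hl_read_glbl_errors - read, log and clear the global errors of a single
 *                       special block instance
 *
 * @hdev: pointer to hl_device structure
 * @blk_idx: index of the block in the special blocks array
 * @major: major instance index
 * @minor: minor instance index
 * @sub_minor: sub-minor instance index
 * @data: iteration context (unused here)
 *
 * Return: always 0.
 */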
static int hl_read_glbl_errors(struct hl_device *hdev,
		u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
{
	struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
	struct hl_special_block_info *current_block = &special_blocks[blk_idx];
	u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
		base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
	int i;

	block_base = base + major * current_block->major_offset +
			minor * current_block->minor_offset +
			sub_minor * current_block->sub_minor_offset;

	glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET;
	cause_val = RREG32(glbl_err_cause);
	if (!cause_val)
		return 0;

	glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
	addr_val = RREG32(glbl_err_addr);

	for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
		if (cause_val & BIT(i))
			dev_err_ratelimited(hdev->dev,
				"%s, addr %#llx\n",
				hl_glbl_error_cause[i],
				hdev->asic_prop.cfg_base_address + block_base +
				FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
	}

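	/* Acknowledge the reported errors by writing the cause bits back */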
	WREG32(glbl_err_cause, cause_val);

	return 0;
}

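/**
 * hl_check_for_glbl_errors - iterate over all special blocks and report any
 *                            pending global errors
 *
 * @hdev: pointer to hl_device structure
 */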
void hl_check_for_glbl_errors(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_special_blocks_cfg special_blocks_cfg;
	struct iterate_special_ctx glbl_err_iter;
	int rc;

	memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg));
	special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg;

	glbl_err_iter.fn = &hl_read_glbl_errors;
	glbl_err_iter.data = &special_blocks_cfg;

	rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
	if (rc)
		dev_err_ratelimited(hdev->dev,
			"Could not iterate special blocks, glbl error check failed\n");
}

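/**
 * hl_iterate_special_blocks - iterate over all instances of all special
 *                             blocks and invoke ctx->fn for each instance
 *                             that is not excluded
 *
 * @hdev: pointer to hl_device structure
 * @ctx: iteration context holding the callback and its private data
 *
 * Return: 0 on success, -EINVAL if no special blocks are defined, or the
 *         first non-zero value returned by the callback.
 */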
int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
{
	struct hl_special_blocks_cfg *special_blocks_cfg =
			(struct hl_special_blocks_cfg *)ctx->data;
	struct hl_skip_blocks_cfg *skip_blocks_cfg =
			special_blocks_cfg->skip_blocks_cfg;
	u32 major, minor, sub_minor, blk_idx, num_blocks;
	struct hl_special_block_info *block_info_arr;
	int rc;

	block_info_arr = hdev->asic_prop.special_blocks;
	if (!block_info_arr)
		return -EINVAL;

	num_blocks = hdev->asic_prop.num_of_special_blocks;

	for (blk_idx = 0 ; blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
		if (hl_check_block_type_exclusion(skip_blocks_cfg, block_info_arr->block_type))
			continue;

		for (major = 0 ; major < block_info_arr->major ; major++) {
			minor = 0;
			do {
				sub_minor = 0;
				do {
					if ((hl_check_block_range_exclusion(hdev,
							skip_blocks_cfg, block_info_arr,
							major, minor, sub_minor)) ||
						(skip_blocks_cfg->skip_block_hook &&
						skip_blocks_cfg->skip_block_hook(hdev,
							special_blocks_cfg,
							blk_idx, major, minor, sub_minor))) {
						sub_minor++;
						continue;
					}

					rc = ctx->fn(hdev, blk_idx, major, minor,
								sub_minor, ctx->data);
					if (rc)
						return rc;

					sub_minor++;
				} while (sub_minor < block_info_arr->sub_minor);

				minor++;
			} while (minor < block_info_arr->minor);
		}
	}

	return 0;
}