1 // SPDX-License-Identifier: MIT
2 //
3 // Copyright 2024 Advanced Micro Devices, Inc.
4 
5 
6 #ifndef _DML21_WRAPPER_H_
7 #define _DML21_WRAPPER_H_
8 
9 #include "os_types.h"
10 #include "dml_top_soc_parameter_types.h"
11 #include "dml_top_display_cfg_types.h"
12 
13 struct dc;
14 struct dc_state;
15 struct dml2_configuration_options;
16 struct dml2_context;
17 enum dc_validate_mode;
18 
/**
 * dml21_create - Creates dml21_context.
 * @in_dc: dc.
 * @dml_ctx: Created dml21 context.
 * @config: dml21 configuration options.
 *
 * Create of DML21 is done as part of dc_state creation.
 * DML21 IP, SOC and STATES are initialized at
 * creation time.
 *
 * Return: True if dml2 is successfully created, false otherwise.
 */
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config);

/* Frees a dml21 context previously allocated by dml21_create(). */
void dml21_destroy(struct dml2_context *dml2);

/* Copies dml21 state from src_dml_ctx into an already-created dst_dml_ctx. */
void dml21_copy(struct dml2_context *dst_dml_ctx,
	struct dml2_context *src_dml_ctx);

/*
 * Creates a new dml21 context in *dst_dml_ctx and copies src_dml_ctx into it.
 * Return: True on successful create-and-copy, false otherwise.
 */
bool dml21_create_copy(struct dml2_context **dst_dml_ctx,
	struct dml2_context *src_dml_ctx);

/* Re-initializes an existing dml21 context (IP/SOC/STATES) from new config options. */
void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
38 
/**
 * dml21_validate - Determines if a display configuration is supported or not.
 * @in_dc: dc.
 * @context: dc_state to be validated.
 * @dml_ctx: dml21 context used for the validation.
 * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
 *           will not populate context.res_ctx.
 *
 * Based on the validate_mode option internally would call:
 *
 * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option
 * Calculates if dc_state can be supported on the input display
 * configuration. If supported, generates the necessary HW
 * programming for the new dc_state.
 *
 * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option
 * Calculates if dc_state can be supported for the input display
 * config.
 *
 * Context: Two threads may not invoke this function concurrently unless they reference
 *          separate dc_states for validation.
 * Return: True if mode is supported, false otherwise.
 */
bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
	enum dc_validate_mode validate_mode);

/* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
66 
/**
 * struct socbb_ip_params_external - External SOCBB and DCNIP overrides.
 * @ip_params: externally supplied DCN IP capabilities.
 * @soc_bb: externally supplied SOC bounding-box parameters.
 *
 * Structure for inputting external SOCBB and DCNIP values for tool based
 * debugging (overriding the built-in defaults).
 */
struct socbb_ip_params_external {
	struct dml2_ip_capabilities ip_params;
	struct dml2_soc_bb soc_bb;
};
72 
/* mcache (MALL cache) allocation parameters decided by DML */
struct dc_mcache_params {
	/* True when the remaining fields hold a valid DML-computed assignment. */
	bool valid;
	/*
	* For iMALL, dedicated mall mcaches are required (sharing of last
	* slice possible); for legacy phantom or phantom without return,
	* only the mall mcaches need to be valid.
	*/
	bool requires_dedicated_mall_mcache;
	/* Number of mcache slices assigned to plane0 / plane1. */
	unsigned int num_mcaches_plane0;
	unsigned int num_mcaches_plane1;
	/*
	* Generally, plane0/1 slices must use a disjoint set of caches
	* but in some cases the final segment of the two planes can
	* use the same cache. If plane0_plane1 is set, then this is
	* allowed.
	*
	* Similarly, the caches allocated to MALL prefetcher are generally
	* disjoint, but if mall_prefetch is set, then the final segment
	* between the main and the mall pixel requestor can use the same
	* cache.
	*
	* Note that both bits may be set at the same time.
	*/
	struct {
		bool mall_comb_mcache_p0;
		bool mall_comb_mcache_p1;
		bool plane0_plane1;
	} last_slice_sharing;
	/*
	* A plane is divided into vertical slices of mcaches,
	* which wrap on the surface width.
	*
	* For example, if the surface width is 7680, and split into
	* three slices of equal width, the boundary array would contain
	* [2560, 5120, 7680]
	*
	* The assignments are
	* 0 = [0 .. 2559]
	* 1 = [2560 .. 5119]
	* 2 = [5120 .. 7679]
	* 0 = [7680 .. INF]
	* The final element implicitly is the same as the first, and
	* at first seems invalid since it is never referenced (it is
	* outside the surface). However, it's useful when shifting
	* (see below).
	*
	* For any given valid mcache assignment, a shifted version, wrapped
	* on the surface width boundary is also assumed to be valid.
	*
	* For example, shifting [2560, 5120, 7680] by -50 results in
	* [2510, 5170, 7630].
	*
	* The assignments are now:
	* 0 = [0 .. 2509]
	* 1 = [2510 .. 5169]
	* 2 = [5170 .. 7629]
	* 0 = [7630 .. INF]
	*/
	int mcache_x_offsets_plane0[DML2_MAX_MCACHES + 1];
	int mcache_x_offsets_plane1[DML2_MAX_MCACHES + 1];
};
135 #endif
136