1 /*
2 * Copyright 2012-16 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/slab.h>
27
28 #include "dal_asic_id.h"
29 #include "dc_types.h"
30 #include "dccg.h"
31 #include "clk_mgr_internal.h"
32 #include "link.h"
33
34 #include "dce100/dce_clk_mgr.h"
35 #include "dce110/dce110_clk_mgr.h"
36 #include "dce112/dce112_clk_mgr.h"
37 #include "dce120/dce120_clk_mgr.h"
38 #include "dce60/dce60_clk_mgr.h"
39 #include "dcn10/rv1_clk_mgr.h"
40 #include "dcn10/rv2_clk_mgr.h"
41 #include "dcn20/dcn20_clk_mgr.h"
42 #include "dcn21/rn_clk_mgr.h"
43 #include "dcn201/dcn201_clk_mgr.h"
44 #include "dcn30/dcn30_clk_mgr.h"
45 #include "dcn301/vg_clk_mgr.h"
46 #include "dcn31/dcn31_clk_mgr.h"
47 #include "dcn314/dcn314_clk_mgr.h"
48 #include "dcn315/dcn315_clk_mgr.h"
49 #include "dcn316/dcn316_clk_mgr.h"
50 #include "dcn32/dcn32_clk_mgr.h"
51
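
/*
 * Return the number of streams in @context that count as active displays.
 * SubVP phantom streams are skipped, while virtual streams are counted even
 * with dpms off, to cover the headless S0i2 case described below.
 */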
int clk_mgr_helper_get_active_display_cnt(
		struct dc *dc,
		struct dc_state *context)
{
	int i, display_count;

	display_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		/* Don't count SubVP phantom pipes as part of active
		 * display count
		 */
		if (stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		/*
		 * Only notify active stream or virtual stream.
		 * Need to notify virtual stream to work around
		 * headless case. HPD does not fire when system is in
		 * S0i2.
		 */
		if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
			display_count++;
	}

	return display_count;
}
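
/*
 * Return the total plane count summed over all streams in @context,
 * active and virtual alike.
 */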
int clk_mgr_helper_get_active_plane_cnt(
		struct dc *dc,
		struct dc_state *context)
{
	int i, total_plane_count;

	total_plane_count = 0;
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_status stream_status = context->stream_status[i];

		/* Sum up plane_count for all streams (active and virtual). */
		total_plane_count += stream_status.plane_count;
	}

	return total_plane_count;
}
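
/*
 * Leave the optimized power state ahead of a clock update: run the hardware
 * sequencer's exit_optimized_pwr_state hook if one exists, then disable PSR
 * on every PSR-capable eDP panel, caching the allow-active state so that
 * clk_mgr_optimize_pwr_state() can restore it afterwards.
 */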
void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	unsigned int panel_inst;

	get_edp_links(dc, edp_links, &edp_num);
	if (dc->hwss.exit_optimized_pwr_state)
		dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);

	if (edp_num) {
		for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
			bool allow_active = false;

			edp_link = edp_links[panel_inst];
			if (!edp_link->psr_settings.psr_feature_enabled)
				continue;
			clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
			dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
		}
	}
}
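
/*
 * Counterpart to clk_mgr_exit_optimized_pwr_state(): restore the cached PSR
 * allow-active state on the PSR-capable eDP panels, then re-enter the
 * optimized power state through the hardware sequencer hook if one exists.
 */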
void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	unsigned int panel_inst;

	get_edp_links(dc, edp_links, &edp_num);
	if (edp_num) {
		for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
			edp_link = edp_links[panel_inst];
			if (!edp_link->psr_settings.psr_feature_enabled)
				continue;
			dc_link_set_psr_allow_active(edp_link,
					&clk_mgr->psr_allow_active_cache, false, false, NULL);
		}
	}

	if (dc->hwss.optimize_pwr_state)
		dc->hwss.optimize_pwr_state(dc, dc->current_state);
}
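
/*
 * Allocate and construct the clk_mgr variant matching the ASIC family,
 * refined by hardware revision or chip id where one family spans several
 * DCE/DCN generations. Returns NULL on allocation failure or for an
 * unknown family.
 */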
struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
{
	struct hw_asic_id asic_id = ctx->asic_id;

	switch (asic_id.chip_family) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case FAMILY_SI: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		dce60_clk_mgr_construct(ctx, clk_mgr);
		dce_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
#endif
	case FAMILY_CI:
	case FAMILY_KV: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		dce_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
	case FAMILY_CZ: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		dce110_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
	case FAMILY_VI: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
		    ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
			dce_clk_mgr_construct(ctx, clk_mgr);
			return &clk_mgr->base;
		}
		if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
		    ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
		    ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev) ||
		    ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
			dce112_clk_mgr_construct(ctx, clk_mgr);
			return &clk_mgr->base;
		}
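		/* Remaining VI revisions get no dedicated construct; return the
		 * zero-initialized manager as-is.
		 */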
		return &clk_mgr->base;
	}
	case FAMILY_AI: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
			dce121_clk_mgr_construct(ctx, clk_mgr);
		else
			dce120_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case FAMILY_RV: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev) ||
		    ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
			rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}
		if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
			rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
			return &clk_mgr->base;
		}
		if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
		    ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
			rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
			return &clk_mgr->base;
		}
		return &clk_mgr->base;
	}
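	/* FAMILY_NV covers DCN 3.0.x (Sienna Cichlid, Dimgrey Cavefish, Beige
	 * Goby), DCN 2.0.1 (chip id DEVICE_ID_NV_13FE) and DCN 2.0 parts.
	 */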
	case FAMILY_NV: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev) ||
		    ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev) ||
		    ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev)) {
			dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}
		if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
			dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base;
		}
		dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base;
	}
	case FAMILY_VGH:
		if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev)) {
			struct clk_mgr_vgh *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

			if (clk_mgr == NULL) {
				BREAK_TO_DEBUGGER();
				return NULL;
			}
			vg_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
			return &clk_mgr->base.base;
		}
		break;

	case FAMILY_YELLOW_CARP: {
		struct clk_mgr_dcn31 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base.base;
	}
	case AMDGPU_FAMILY_GC_10_3_6: {
		struct clk_mgr_dcn315 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn315_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base.base;
	}
	case AMDGPU_FAMILY_GC_10_3_7: {
		struct clk_mgr_dcn316 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn316_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base.base;
	}
	case AMDGPU_FAMILY_GC_11_0_0: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base;
	}
	case AMDGPU_FAMILY_GC_11_0_1: {
		struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}

		dcn314_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
		return &clk_mgr->base.base;
	}
#endif
	default:
		ASSERT(0); /* Unknown ASIC */
		break;
	}

	return NULL;
}

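/*
 * Run the family-specific teardown (freeing state allocated at construct
 * time) before freeing the clk_mgr itself. Families without a dedicated
 * destroy hook only need the kfree().
 */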
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

#ifdef CONFIG_DRM_AMD_DC_DCN
	switch (clk_mgr_base->ctx->asic_id.chip_family) {
	case FAMILY_NV:
		if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev) ||
		    ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev) ||
		    ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev))
			dcn3_clk_mgr_destroy(clk_mgr);
		break;

	case FAMILY_VGH:
		if (ASICREV_IS_VANGOGH(clk_mgr_base->ctx->asic_id.hw_internal_rev))
			vg_clk_mgr_destroy(clk_mgr);
		break;

	case FAMILY_YELLOW_CARP:
		dcn31_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_10_3_6:
		dcn315_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_10_3_7:
		dcn316_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_11_0_0:
		dcn32_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_11_0_1:
		dcn314_clk_mgr_destroy(clk_mgr);
		break;

	default:
		break;
	}
#endif

	kfree(clk_mgr);
}