/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "mp/mp_13_0_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "soc21.h"
#include "mxgpu_nv.h"

static const struct amd_ip_funcs soc21_common_ip_funcs;

/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
	.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
	.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};

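/*
 * Report the supported video codecs for the VCN instances present on this
 * SOC. If every VCN instance has been harvested there is nothing to report,
 * hence the hweight8() check against num_vcn_inst below; otherwise the
 * encode or decode table for the first unharvested instance is returned.
 */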
static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
				    const struct amdgpu_video_codecs **codecs)
{
	if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
		return -EINVAL;

	switch (adev->ip_versions[UVD_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 2):
	case IP_VERSION(4, 0, 4):
		if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
			if (encode)
				*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
			else
				*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
		} else {
			if (encode)
				*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
			else
				*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
		}
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Indirect registers accessor
 */
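/*
 * PCIE registers sit behind an index/data pair: the register offset is
 * written to the NBIO-provided index register, then the value is accessed
 * through the data register. The amdgpu_device_indirect_*() helpers used
 * below serialize the two accesses internally.
 */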
static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

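/*
 * DIDT registers use the same index/data scheme through GC; the
 * didt_idx_lock spinlock keeps the index write and the following data
 * access atomic.
 */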
static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc21_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}

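/*
 * Program GRBM_GFX_CNTL so that subsequent GC register accesses are steered
 * to the given micro engine/pipe/queue/vmid. Callers are expected to
 * serialize the selection themselves, typically under adev->srbm_mutex.
 */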
void soc21_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc21_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

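/*
 * Whitelist backing soc21_read_register(); register queries coming from
 * userspace (e.g. via the AMDGPU_INFO ioctl path) may only read the offsets
 * listed here, anything else fails with -EINVAL.
 */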
static struct soc15_allowed_register_entry soc21_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, regSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG)},
};

static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc21_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc21_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG) &&
		    adev->gfx.config.gb_addr_config)
			return adev->gfx.config.gb_addr_config;
		return RREG32(reg_offset);
	}
}

static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
		en = &soc21_allowed_read_registers[i];
		if (!adev->reg_offset[en->hwip][en->inst])
			continue;
		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc21_get_register_value(adev,
				soc21_allowed_read_registers[i].grbm_indexed,
				se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

#if 0
static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
#endif

static enum amd_reset_method
soc21_asic_reset_method(struct amdgpu_device *adev)
{
	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
		return AMD_RESET_METHOD_MODE1;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		return AMD_RESET_METHOD_MODE2;
	default:
		if (amdgpu_dpm_is_baco_supported(adev))
			return AMD_RESET_METHOD_BACO;
		else
			return AMD_RESET_METHOD_MODE1;
	}
}

static int soc21_asic_reset(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (soc21_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_PCI:
		dev_info(adev->dev, "PCI reset\n");
		ret = amdgpu_device_pci_reset(adev);
		break;
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		ret = amdgpu_dpm_baco_reset(adev);
		break;
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		ret = amdgpu_dpm_mode2_reset(adev);
		break;
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		ret = amdgpu_device_mode1_reset(adev);
		break;
	}

	return ret;
}

static int soc21_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}

static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}

static void soc21_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc21_program_aspm(struct amdgpu_device *adev)
{
	if (!amdgpu_device_should_use_aspm(adev))
		return;

	if (!(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->program_aspm))
		adev->nbio.funcs->program_aspm(adev);
}

static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

const struct amdgpu_ip_block_version soc21_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc21_common_ip_funcs,
};

static uint32_t soc21_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		return false;
	default:
		return true;
	}
}

static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the sys driver
	 * and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc21_get_pcie_replay_count(struct amdgpu_device *adev)
{
	/* TODO: dummy implementation for the pcie_replay_count sysfs interface */
	return 0;
}

static void soc21_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.gfx_userqueue_start =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
	adev->doorbell_index.gfx_userqueue_end =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
	adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}

static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}

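/*
 * Entering the UMD "stable pstate" (typically requested by profiling tools)
 * parks the RLC in safe mode and disables perfmon MGCG; leaving it restores
 * both.
 */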
static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
					  bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	return 0;
}

static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
	.read_disabled_bios = &soc21_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc21_read_register,
	.reset = &soc21_asic_reset,
	.reset_method = &soc21_asic_reset_method,
	.set_vga_state = &soc21_vga_set_state,
	.get_xclk = &soc21_get_xclk,
	.set_uvd_clocks = &soc21_set_uvd_clocks,
	.set_vce_clocks = &soc21_set_vce_clocks,
	.get_config_memsize = &soc21_get_config_memsize,
	.init_doorbell_index = &soc21_init_doorbell_index,
	.need_full_reset = &soc21_need_full_reset,
	.need_reset_on_init = &soc21_need_reset_on_init,
	.get_pcie_replay_count = &soc21_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &soc21_pre_asic_init,
	.query_video_codecs = &soc21_query_video_codecs,
	.update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};

static int soc21_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
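	/*
	 * The register BAR page just below offset 0x80000 is kept as a hole;
	 * remap_hdp_registers() later points the HDP flush registers at it so
	 * they can be exposed to process address space (see the remap in
	 * soc21_common_hw_init()).
	 */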
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc21_pcie_rreg;
	adev->pcie_wreg = &soc21_pcie_wreg;
	adev->pcie_rreg64 = &soc21_pcie_rreg64;
	adev->pcie_wreg64 = &soc21_pcie_wreg64;
	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &soc21_didt_rreg;
	adev->didt_wreg = &soc21_didt_wreg;

	adev->asic_funcs = &soc21_asic_funcs;

	adev->rev_id = soc21_get_rev_id(adev);
	adev->external_rev_id = 0xff;
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
#if 0
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
#endif
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x1; /* TODO: need update */
		break;
	case IP_VERSION(11, 0, 2):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags =
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x10;
		break;
	case IP_VERSION(11, 0, 1):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags =
			AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case IP_VERSION(11, 0, 3):
		adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_HDP_SD |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x20;
		break;
	case IP_VERSION(11, 0, 4):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc21_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}

static int soc21_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}

static int soc21_common_sw_fini(void *handle)
{
	return 0;
}

static int soc21_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc21_pcie_gen3_enable(adev);
	/* enable aspm */
	soc21_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	soc21_enable_doorbell_aperture(adev, true);

	return 0;
}

static int soc21_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc21_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_put_irq(adev);

	return 0;
}

static int soc21_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc21_common_hw_fini(adev);
}

static int soc21_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc21_common_hw_init(adev);
}

static bool soc21_common_is_idle(void *handle)
{
	return true;
}

static int soc21_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc21_common_soft_reset(void *handle)
{
	return 0;
}

static int soc21_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
	case IP_VERSION(7, 7, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int soc21_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
		adev->lsdma.funcs->update_memory_power_gating(adev,
				state == AMD_PG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}

static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	adev->hdp.funcs->get_clock_gating_state(adev, flags);
}

static const struct amd_ip_funcs soc21_common_ip_funcs = {
	.name = "soc21_common",
	.early_init = soc21_common_early_init,
	.late_init = soc21_common_late_init,
	.sw_init = soc21_common_sw_init,
	.sw_fini = soc21_common_sw_fini,
	.hw_init = soc21_common_hw_init,
	.hw_fini = soc21_common_hw_fini,
	.suspend = soc21_common_suspend,
	.resume = soc21_common_resume,
	.is_idle = soc21_common_is_idle,
	.wait_for_idle = soc21_common_wait_for_idle,
	.soft_reset = soc21_common_soft_reset,
	.set_clockgating_state = soc21_common_set_clockgating_state,
	.set_powergating_state = soc21_common_set_powergating_state,
	.get_clockgating_state = soc21_common_get_clockgating_state,
};