/*
 * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
#include "link_service_types.h"

/*
 * This file contains the definition of amdgpu_display_manager
 * and its API for amdgpu driver's use.
 * This component provides all the display-related functionality
 * and is the only component that calls the DAL API.
 * The API contained here is intended for amdgpu driver use.
 * The API that is called directly from the KMS framework is located
 * in the amdgpu_dm_kms.h file.
 */

#define AMDGPU_DM_MAX_DISPLAY_INDEX 31

#define AMDGPU_DM_MAX_CRTC 6

#define AMDGPU_DM_MAX_NUM_EDP 2

#define AMDGPU_DMUB_NOTIFICATION_MAX 5

/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/

#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
#include "mod_info_packet.h"
struct aux_payload;
struct set_config_cmd_payload;
enum aux_return_code_type;
enum set_config_status;

/* Forward declarations */
struct amdgpu_device;
struct amdgpu_crtc;
struct drm_device;
struct dc;
struct amdgpu_bo;
struct dmub_srv;
struct dc_plane_state;
struct dmub_notification;

struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};

/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};

typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);

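/*
 * Illustrative sketch only (not part of the driver): a handler matching
 * dmub_notify_interrupt_callback_t. The function name below is hypothetical;
 * real handlers are implemented in amdgpu_dm.c and registered per
 * notification type, optionally offloaded to a worker thread.
 *
 *	static void example_dmub_hpd_callback(struct amdgpu_device *adev,
 *					      struct dmub_notification *notify)
 *	{
 *		if (!adev || !notify)
 *			return;
 *		// React to the notification; time-consuming work should be
 *		// deferred to a workqueue rather than done here.
 *	}
 */
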
/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: notification for callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};

/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};

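/*
 * Illustrative sketch only: how a vblank enable/disable request is typically
 * packaged and deferred to dm->vblank_control_workqueue so that DC calls run
 * outside interrupt context. The helper and worker names below are
 * hypothetical; the real logic lives in the DM CRTC code.
 *
 *	static void example_queue_vblank_control(struct amdgpu_display_manager *dm,
 *						 struct amdgpu_crtc *acrtc,
 *						 struct dc_stream_state *stream,
 *						 bool enable)
 *	{
 *		struct vblank_control_work *work;
 *
 *		work = kzalloc(sizeof(*work), GFP_ATOMIC);
 *		if (!work)
 *			return;
 *
 *		INIT_WORK(&work->work, example_vblank_control_worker);
 *		work->dm = dm;
 *		work->acrtc = acrtc;
 *		work->stream = stream;
 *		work->enable = enable;
 *		queue_work(dm->vblank_control_workqueue, &work->work);
 *	}
 */
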
/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};

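/*
 * Worked example (hypothetical helper, not the driver's exact conversion):
 * min_input_signal/max_input_signal bound the usable 0-255 range, so a user
 * brightness request is usually rescaled linearly onto [lo, hi] before being
 * programmed. With lo = 12 and hi = 255, a 50% request maps to about 134.
 *
 *	static inline int example_scale_input_signal(const struct amdgpu_dm_backlight_caps *caps,
 *						     int brightness, int user_max)
 *	{
 *		int lo = caps->min_input_signal;
 *		int hi = caps->max_input_signal;
 *
 *		if (user_max <= 0 || lo >= hi)
 *			return hi;
 *		return lo + DIV_ROUND_CLOSEST((hi - lo) * brightness, user_max);
 *	}
 */
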
/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};

/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event when
	 * we're handling link loss
	 */
	bool is_handling_link_loss;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};

/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};

/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *		       displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 * @dmub_outbox_params: DMUB Outbox parameters
 * @num_of_edps: number of backlight eDPs
 * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
 *		     driver when true
 * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
 *			    transfers are done
 * @delayed_hpd_wq: work queue used to delay DMUB HPD work
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flag to indicate whether the corresponding callback is offloaded to
	 * a worker thread.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is a n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @secure_display_ctxs:
	 *
	 * Store the ROI information and the work_struct to command dmub and psp for
	 * all crtcs.
	 */
	struct secure_display_context *secure_display_ctxs;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue used to offload hpd_rx_irq handling work.
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * Quirk for an HPD disconnect that occurs while an AUX transaction is
	 * ongoing; observed on certain Intel platforms.
	 */
	bool aux_hpd_discon_quirk;

	/**
	 * @dpia_aux_lock:
	 *
	 * Guards access to DPIA AUX
	 */
	struct mutex dpia_aux_lock;
};

enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};

struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};

enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};

/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
	 */
	unsigned int max_refresh_rate_hz;
};

struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;

	/* we need to mind the EDID between detect
	   and get modes due to analog/digital/tvencoder */
	struct edid *edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/* The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink. */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;

	/**
	 * @dc_em_sink: Reference to the emulated (virtual) sink.
	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *mst_output_port;
	struct amdgpu_dm_connector *mst_root;
	struct drm_dp_aux *dsc_aux;
	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	/**
	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int min_vfreq;

	/**
	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int max_vfreq;
	int pixel_clock_mhz;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	struct mutex hpd_lock;

	bool fake_enable;
#ifdef CONFIG_DEBUG_FS
	uint32_t debugfs_dpcd_address;
	uint32_t debugfs_dpcd_size;
#endif
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	int psr_skip_count;

	/* Record progress status of MST */
	uint8_t mst_status;

	/* Automated testing */
	bool timing_changed;
	struct dc_crtc_timing *timing_requested;

	/* Adaptive Sync */
	bool pack_sdp_v1_3;
	enum adaptive_sync_type as_type;
	struct amdgpu_hdmi_vsdb_info vsdb_info;
};

static inline void amdgpu_dm_set_mst_status(uint8_t *status,
					    uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}

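/*
 * Example usage (the aconnector variable is hypothetical): record that the
 * MST probe step finished for a connector, then clear it again on unplug:
 *
 *	amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true);
 *	amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, false);
 */
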
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

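/*
 * Example (sketch): DRM core callbacks receive a plain &struct drm_connector;
 * the wrapper above recovers the embedding amdgpu_dm_connector:
 *
 *	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 */
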
extern const struct amdgpu_ip_block_version dm_ip_block;

struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};

struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	int abm_level;
};

#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)

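/*
 * Example (sketch, hypothetical local variables): atomic helpers pass a
 * &struct drm_crtc_state; the DM-private state wrapping it is recovered with
 * the container_of() macro above, giving access to the DC stream:
 *
 *	struct dm_crtc_state *dm_state = to_dm_crtc_state(crtc_state);
 *	struct dc_stream_state *stream = dm_state->stream;
 */
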
struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)

struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};

#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);

#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index,
					     struct aux_payload *payload, enum aux_return_code_type *operation_result);

int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index,
					   struct set_config_cmd_payload *payload, enum set_config_status *operation_result);

bool check_seamless_boot_capability(struct amdgpu_device *adev);

struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream);

int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state);

struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc);

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
#endif /* __AMDGPU_DM_H__ */