1 /*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include <linux/err.h>
25 #include <linux/media-bus-format.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28
29 #include <drm/drm_atomic_state_helper.h>
30 #include <drm/drm_bridge.h>
31 #include <drm/drm_encoder.h>
32 #include <drm/drm_of.h>
33 #include <drm/drm_print.h>
34
35 #include "drm_crtc_internal.h"
36
37 /**
38 * DOC: overview
39 *
40 * &struct drm_bridge represents a device that hangs on to an encoder. These are
41 * handy when a regular &drm_encoder entity isn't enough to represent the entire
42 * encoder chain.
43 *
44 * A bridge is always attached to a single &drm_encoder at a time, but can be
45 * either connected to it directly, or through a chain of bridges::
46 *
47 * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
48 *
49 * Here, the output of the encoder feeds into bridge A, which further feeds into
50 * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
51 * Chaining multiple bridges to the output of a bridge, or the same bridge to
52 * the output of different bridges, is not supported.
53 *
54 * &drm_bridge and &drm_panel aren't &drm_mode_object entities like planes,
55 * CRTCs, encoders or connectors and hence are not visible to userspace. They
56 * just provide additional hooks to get the desired output at the end of the
57 * encoder chain.
58 */
59
60 /**
61 * DOC: display driver integration
62 *
63 * Display drivers are responsible for linking encoders with the first bridge
64 * in the chain. This is done by acquiring the appropriate bridge with
65 * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
66 * encoder with a call to drm_bridge_attach().
67 *
68 * Bridges are responsible for linking themselves with the next bridge in the
69 * chain, if any. This is done the same way as for encoders, with the call to
70 * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
71 *
72 * Once these links are created, the bridges can participate along with encoder
73 * functions to perform mode validation and fixup (through
74 * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
75 * setting (through drm_bridge_chain_mode_set()), enable (through
76 * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
77 * and disable (through drm_atomic_bridge_chain_disable() and
78 * drm_atomic_bridge_chain_post_disable()). Those functions call the
79 * corresponding operations provided in &drm_bridge_funcs in sequence for all
80 * bridges in the chain.
81 *
82 * For display drivers that use the atomic helpers
83 * drm_atomic_helper_check_modeset(),
84 * drm_atomic_helper_commit_modeset_enables() and
85 * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
86 * commit check and commit tail handlers, or through the higher-level
87 * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
88 * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
89 * requires no intervention from the driver. For other drivers, the relevant
90 * DRM bridge chain functions shall be called manually.
91 *
92 * Bridges also participate in implementing the &drm_connector at the end of
93 * the bridge chain. Display drivers may use the drm_bridge_connector_init()
94 * helper to create the &drm_connector, or implement it manually on top of the
95 * connector-related operations exposed by the bridge (see the overview
96 * documentation of bridge operations for more details).
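 *
 * As a minimal sketch (not taken from any specific driver: the encoder, the
 * device node and the port/endpoint indices are assumptions, and error
 * handling is trimmed), the display driver side of that link could look
 * like::
 *
 *        bridge = devm_drm_of_get_bridge(drm->dev, np, 0, 0);
 *        if (IS_ERR(bridge))
 *                return PTR_ERR(bridge);
 *
 *        ret = drm_bridge_attach(encoder, bridge, NULL,
 *                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *        if (ret)
 *                return ret;
 *
 *        connector = drm_bridge_connector_init(drm, encoder);
 *        if (IS_ERR(connector))
 *                return PTR_ERR(connector);
 *
 *        drm_connector_attach_encoder(connector, encoder);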
97 */
98
99 /**
100 * DOC: special care dsi
101 *
102 * The interaction between the bridges and other frameworks involved in
103 * the probing of the upstream driver and the bridge driver can be
104 * challenging. Indeed, there are multiple cases that need to be
105 * considered:
106 *
107 * - The upstream driver doesn't use the component framework and isn't a
108 * MIPI-DSI host. In this case, the bridge driver will probe at some
109 * point, and the upstream driver should keep deferring its own probe by
110 * returning -EPROBE_DEFER as long as the bridge driver hasn't probed (see the sketch after this list).
111 *
112 * - The upstream driver doesn't use the component framework, but is a
113 * MIPI-DSI host. The bridge device is controlled through MIPI-DCS
114 * commands. In this case, the bridge device is a child of the
115 * display device, and when it probes it is guaranteed that the display
116 * device (and MIPI-DSI host) is present. The upstream driver knows
117 * that the bridge driver is connected between the
118 * &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
119 * Therefore, it must run mipi_dsi_host_register() in its probe
120 * function, and then run drm_bridge_attach() in its
121 * &mipi_dsi_host_ops.attach hook.
122 *
123 * - The upstream driver uses the component framework and is a MIPI-DSI
124 * host. The bridge device is controlled through MIPI-DCS
125 * commands. This is the same situation as above, and the host driver can
126 * run mipi_dsi_host_register() in either its probe or bind hook.
127 *
128 * - The upstream driver uses the component framework and is a MIPI-DSI
129 * host. The bridge device uses a separate bus (such as I2C) to be
130 * controlled. In this case, there's no correlation between the probe
131 * of the bridge and upstream drivers, so care must be taken to avoid
132 * an endless EPROBE_DEFER loop, with each driver waiting for the
133 * other to probe.
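 *
 * In the first case, the upstream driver typically resolves the dependency by
 * looking the bridge up and deferring until that succeeds, for instance with
 * devm_drm_of_get_bridge() (the port/endpoint indices below are only an
 * example)::
 *
 *        bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
 *        if (IS_ERR(bridge))
 *                return dev_err_probe(dev, PTR_ERR(bridge),
 *                                     "failed to get the next bridge\n");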
134 *
135 * The ideal pattern to cover the last item (and all the others in the
136 * MIPI-DSI host driver case) is to split the operations like this:
137 *
138 * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
139 * probe hook. It will make sure that the MIPI-DSI host sticks around,
140 * and that the driver's bind can be called.
141 *
142 * - In its probe hook, the bridge driver must try to find its MIPI-DSI
143 * host, register as a MIPI-DSI device and attach the MIPI-DSI device
144 * to its host. The bridge driver is now functional.
145 *
146 * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
147 * now add its component. Its bind hook will then be called, and since
148 * the bridge driver is attached and registered, the bridge can be looked
149 * up and attached there.
150 *
151 * At this point, we're now certain that both the upstream driver and
152 * the bridge driver are functional and we can't have a deadlock-like
153 * situation when probing.
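 *
 * A condensed sketch of that pattern follows. All "foo" names and the
 * surrounding structures are hypothetical, and error handling is trimmed; it
 * only shows where each call sits. On the bridge driver side, in its probe
 * hook::
 *
 *        host = of_find_mipi_dsi_host_by_node(host_node);
 *        if (!host)
 *                return -EPROBE_DEFER;
 *
 *        dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
 *        if (IS_ERR(dsi))
 *                return PTR_ERR(dsi);
 *
 *        ret = devm_mipi_dsi_attach(dev, dsi);
 *        if (ret)
 *                return ret;
 *
 * On the MIPI-DSI host driver side::
 *
 *        static int foo_dsi_probe(struct platform_device *pdev)
 *        {
 *                ...
 *                dsi->host.dev = &pdev->dev;
 *                dsi->host.ops = &foo_dsi_host_ops;
 *                return mipi_dsi_host_register(&dsi->host);
 *        }
 *
 *        static int foo_dsi_host_attach(struct mipi_dsi_host *host,
 *                                       struct mipi_dsi_device *device)
 *        {
 *                struct foo_dsi *dsi = host_to_foo_dsi(host);
 *
 *                return component_add(dsi->dev, &foo_dsi_component_ops);
 *        }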
154 */
155
156 /**
157 * DOC: dsi bridge operations
158 *
159 * DSI host interfaces are expected to be implemented as bridges rather than
160 * encoders; however, there are a few aspects of their operation that need to
161 * be defined in order to provide a consistent interface.
162 *
163 * A DSI host should keep the PHY powered down until the pre_enable operation is
164 * called. All lanes are in an undefined idle state up to this point, and it
165 * must not be assumed that they are in LP-11.
166 * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
167 * clock lane to either LP-11 or HS depending on the mode_flag
168 * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
169 *
170 * Ordinarily the pre_enable of the downstream bridge (the DSI peripheral)
171 * will have been called before the DSI host's. If the DSI peripheral requires LP-11 and/or
172 * the clock lane to be in HS mode prior to pre_enable, then it can set the
173 * &pre_enable_prev_first flag to request the pre_enable (and
174 * post_disable) order to be altered to enable the DSI host first.
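 *
 * For example, a DSI peripheral bridge driver that needs the host brought up
 * first could request the inverted ordering when registering its bridge in
 * probe (a sketch only, the "foo" structure is hypothetical)::
 *
 *        foo->bridge.funcs = &foo_bridge_funcs;
 *        foo->bridge.of_node = dev->of_node;
 *        foo->bridge.type = DRM_MODE_CONNECTOR_DSI;
 *        foo->bridge.pre_enable_prev_first = true;
 *
 *        return devm_drm_bridge_add(dev, &foo->bridge);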
175 *
176 * Either enabling the CRTC or the DSI host's enable operation should switch
177 * the host to actively transmitting video on the data lanes.
178 *
179 * The reverse also applies. The DSI host disable operation or stopping the CRTC
180 * should stop transmitting video, and the data lanes should return to the LP-11
181 * state. The DSI host &post_disable operation should disable the PHY.
182 * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
183 * bridge &post_disable will be called before the DSI host's post_disable.
184 *
185 * Whilst it is valid to call &mipi_dsi_host_ops.transfer prior to pre_enable or after
186 * post_disable, the exact state of the lanes is undefined at this point. The
187 * DSI host should initialise the interface, transmit the data, and then disable
188 * the interface again.
189 *
190 * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
191 * implemented, it therefore needs to be handled entirely within the DSI Host
192 * driver.
193 */
194
195 static DEFINE_MUTEX(bridge_lock);
196 static LIST_HEAD(bridge_list);
197
198 /**
199 * drm_bridge_add - add the given bridge to the global bridge list
200 *
201 * @bridge: bridge control structure
202 */
203 void drm_bridge_add(struct drm_bridge *bridge)
204 {
205 mutex_init(&bridge->hpd_mutex);
206
207 mutex_lock(&bridge_lock);
208 list_add_tail(&bridge->list, &bridge_list);
209 mutex_unlock(&bridge_lock);
210 }
211 EXPORT_SYMBOL(drm_bridge_add);
212
213 static void drm_bridge_remove_void(void *bridge)
214 {
215 drm_bridge_remove(bridge);
216 }
217
218 /**
219 * devm_drm_bridge_add - devm managed version of drm_bridge_add()
220 *
221 * @dev: device to tie the bridge lifetime to
222 * @bridge: bridge control structure
223 *
224 * This is the managed version of drm_bridge_add() which automatically
225 * calls drm_bridge_remove() when @dev is unbound.
226 *
227 * Return: 0 if no error or negative error code.
228 */
229 int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
230 {
231 drm_bridge_add(bridge);
232 return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge);
233 }
234 EXPORT_SYMBOL(devm_drm_bridge_add);
235
236 /**
237 * drm_bridge_remove - remove the given bridge from the global bridge list
238 *
239 * @bridge: bridge control structure
240 */
241 void drm_bridge_remove(struct drm_bridge *bridge)
242 {
243 mutex_lock(&bridge_lock);
244 list_del_init(&bridge->list);
245 mutex_unlock(&bridge_lock);
246
247 mutex_destroy(&bridge->hpd_mutex);
248 }
249 EXPORT_SYMBOL(drm_bridge_remove);
250
251 static struct drm_private_state *
252 drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
253 {
254 struct drm_bridge *bridge = drm_priv_to_bridge(obj);
255 struct drm_bridge_state *state;
256
257 state = bridge->funcs->atomic_duplicate_state(bridge);
258 return state ? &state->base : NULL;
259 }
260
261 static void
262 drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
263 struct drm_private_state *s)
264 {
265 struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
266 struct drm_bridge *bridge = drm_priv_to_bridge(obj);
267
268 bridge->funcs->atomic_destroy_state(bridge, state);
269 }
270
271 static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
272 .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
273 .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
274 };
275
276 /**
277 * drm_bridge_attach - attach the bridge to an encoder's chain
278 *
279 * @encoder: DRM encoder
280 * @bridge: bridge to attach
281 * @previous: previous bridge in the chain (optional)
282 * @flags: DRM_BRIDGE_ATTACH_* flags
283 *
284 * Called by a kms driver to link the bridge to an encoder's chain. The previous
285 * argument specifies the previous bridge in the chain. If NULL, the bridge is
286 * linked directly at the encoder's output. Otherwise it is linked at the
287 * previous bridge's output.
288 *
289 * If non-NULL the previous bridge must be already attached by a call to this
290 * function.
291 *
292 * Note that bridges attached to encoders are auto-detached during encoder
293 * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
294 * *not* be balanced with a drm_bridge_detach() in driver code.
295 *
296 * RETURNS:
297 * Zero on success, error code on failure
298 */
299 int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
300 struct drm_bridge *previous,
301 enum drm_bridge_attach_flags flags)
302 {
303 int ret;
304
305 if (!encoder || !bridge)
306 return -EINVAL;
307
308 if (previous && (!previous->dev || previous->encoder != encoder))
309 return -EINVAL;
310
311 if (bridge->dev)
312 return -EBUSY;
313
314 bridge->dev = encoder->dev;
315 bridge->encoder = encoder;
316
317 if (previous)
318 list_add(&bridge->chain_node, &previous->chain_node);
319 else
320 list_add(&bridge->chain_node, &encoder->bridge_chain);
321
322 if (bridge->funcs->attach) {
323 ret = bridge->funcs->attach(bridge, flags);
324 if (ret < 0)
325 goto err_reset_bridge;
326 }
327
328 if (bridge->funcs->atomic_reset) {
329 struct drm_bridge_state *state;
330
331 state = bridge->funcs->atomic_reset(bridge);
332 if (IS_ERR(state)) {
333 ret = PTR_ERR(state);
334 goto err_detach_bridge;
335 }
336
337 drm_atomic_private_obj_init(bridge->dev, &bridge->base,
338 &state->base,
339 &drm_bridge_priv_state_funcs);
340 }
341
342 return 0;
343
344 err_detach_bridge:
345 if (bridge->funcs->detach)
346 bridge->funcs->detach(bridge);
347
348 err_reset_bridge:
349 bridge->dev = NULL;
350 bridge->encoder = NULL;
351 list_del(&bridge->chain_node);
352
353 #ifdef CONFIG_OF
354 DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n",
355 bridge->of_node, encoder->name, ret);
356 #else
357 DRM_ERROR("failed to attach bridge to encoder %s: %d\n",
358 encoder->name, ret);
359 #endif
360
361 return ret;
362 }
363 EXPORT_SYMBOL(drm_bridge_attach);
364
365 void drm_bridge_detach(struct drm_bridge *bridge)
366 {
367 if (WARN_ON(!bridge))
368 return;
369
370 if (WARN_ON(!bridge->dev))
371 return;
372
373 if (bridge->funcs->atomic_reset)
374 drm_atomic_private_obj_fini(&bridge->base);
375
376 if (bridge->funcs->detach)
377 bridge->funcs->detach(bridge);
378
379 list_del(&bridge->chain_node);
380 bridge->dev = NULL;
381 }
382
383 /**
384 * DOC: bridge operations
385 *
386 * Bridge drivers expose operations through the &drm_bridge_funcs structure.
387 * The DRM internals (atomic and CRTC helpers) use the helpers defined in
388 * drm_bridge.c to call bridge operations. Those operations are divided in
389 * three big categories to support different parts of the bridge usage.
390 *
391 * - The encoder-related operations support control of the bridges in the
392 * chain, and are roughly counterparts to the &drm_encoder_helper_funcs
393 * operations. They are used by the legacy CRTC and the atomic modeset
394 * helpers to perform mode validation, fixup and setting, and enable and
395 * disable the bridge automatically.
396 *
397 * The enable and disable operations are split in
398 * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
399 * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
400 * finer-grained control.
401 *
402 * Bridge drivers may implement the legacy version of those operations, or
403 * the atomic version (prefixed with atomic\_), in which case they shall also
404 * implement the atomic state bookkeeping operations
405 * (&drm_bridge_funcs.atomic_duplicate_state,
406 * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
407 * Mixing atomic and non-atomic versions of the operations is not supported.
408 *
409 * - The bus format negotiation operations
410 * &drm_bridge_funcs.atomic_get_output_bus_fmts and
411 * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
412 * negotiate the formats transmitted between bridges in the chain when
413 * multiple formats are supported. Negotiation for formats is performed
414 * transparently for display drivers by the atomic modeset helpers. Only
415 * atomic versions of those operations exist, bridge drivers that need to
416 * implement them shall thus also implement the atomic version of the
417 * encoder-related operations. This feature is not supported by the legacy
418 * CRTC helpers.
419 *
420 * - The connector-related operations support implementing a &drm_connector
421 * based on a chain of bridges. DRM bridges traditionally create a
422 * &drm_connector for bridges meant to be used at the end of the chain. This
423 * puts additional burden on bridge drivers, especially for bridges that may
424 * be used in the middle of a chain or at the end of it. Furthermore, it
425 * requires all operations of the &drm_connector to be handled by a single
426 * bridge, which doesn't always match the hardware architecture.
427 *
428 * To simplify bridge drivers and make the connector implementation more
429 * flexible, a new model allows bridges to unconditionally skip creation of
430 * &drm_connector and instead expose &drm_bridge_funcs operations to support
431 * an externally-implemented &drm_connector. Those operations are
432 * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
433 * &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
434 * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
435 * implemented, display drivers shall create a &drm_connector instance for
436 * each chain of bridges, and implement those connector instances based on
437 * the bridge connector operations.
438 *
439 * Bridge drivers shall implement the connector-related operations for all
440 * the features that the bridge hardware supports. For instance, if a bridge
441 * supports reading EDID, the &drm_bridge_funcs.get_edid operation shall be
442 * implemented. This however doesn't mean that the DDC lines are wired to the
443 * bridge on a particular platform, as they could also be connected to an I2C
444 * controller of the SoC. Support for the connector-related operations on the
445 * running platform is reported through the &drm_bridge.ops flags. Bridge
446 * drivers shall detect which operations they can support on the platform
447 * (usually this information is provided by ACPI or DT), and set the
448 * &drm_bridge.ops flags for all supported operations. A flag shall only be
449 * set if the corresponding &drm_bridge_funcs operation is implemented, but
450 * an implemented operation doesn't necessarily imply that the corresponding
451 * flag will be set. Display drivers shall use the &drm_bridge.ops flags to
452 * decide which bridge to delegate a connector operation to. This mechanism
453 * allows providing a single static const &drm_bridge_funcs instance in
454 * bridge drivers, improving security by storing function pointers in
455 * read-only memory.
456 *
457 * In order to ease transition, bridge drivers may support both the old and
458 * new models by making connector creation optional and implementing the
459 * connector-related bridge operations. Connector creation is then controlled
460 * by the flags argument to the drm_bridge_attach() function. Display drivers
461 * that support the new model and create connectors themselves shall set the
462 * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
463 * connector creation. For intermediate bridges in the chain, the flag shall
464 * be passed to the drm_bridge_attach() call for the downstream bridge.
465 * Bridge drivers that implement the new model only shall return an error
466 * from their &drm_bridge_funcs.attach handler when the
467 * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
468 * should use the new model, and convert the bridge drivers they use if
469 * needed, in order to gradually transition to the new model.
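 *
 * As an illustration of the new model only (a sketch with hypothetical "foo"
 * names, not a reference implementation), a bridge driver could advertise the
 * operations it supports on the running platform at probe time::
 *
 *        foo->bridge.ops = DRM_BRIDGE_OP_MODES;
 *        if (foo->ddc)
 *                foo->bridge.ops |= DRM_BRIDGE_OP_EDID;
 *        if (foo->hpd_irq > 0)
 *                foo->bridge.ops |= DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_HPD;
 *
 * and refuse legacy attachment while forwarding the flags to its downstream
 * bridge in its &drm_bridge_funcs.attach handler::
 *
 *        static int foo_attach(struct drm_bridge *bridge,
 *                              enum drm_bridge_attach_flags flags)
 *        {
 *                struct foo *foo = bridge_to_foo(bridge);
 *
 *                if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
 *                        return -EINVAL;
 *
 *                return drm_bridge_attach(bridge->encoder, foo->next_bridge,
 *                                         bridge, flags);
 *        }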
470 */
471
472 /**
473 * drm_bridge_chain_mode_fixup - fixup proposed mode for all bridges in the
474 * encoder chain
475 * @bridge: bridge control structure
476 * @mode: desired mode to be set for the bridge
477 * @adjusted_mode: updated mode that works for this bridge
478 *
479 * Calls &drm_bridge_funcs.mode_fixup for all the bridges in the
480 * encoder chain, starting from the first bridge to the last.
481 *
482 * Note: the bridge passed should be the one closest to the encoder
483 *
484 * RETURNS:
485 * true on success, false on failure
486 */
487 bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
488 const struct drm_display_mode *mode,
489 struct drm_display_mode *adjusted_mode)
490 {
491 struct drm_encoder *encoder;
492
493 if (!bridge)
494 return true;
495
496 encoder = bridge->encoder;
497 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
498 if (!bridge->funcs->mode_fixup)
499 continue;
500
501 if (!bridge->funcs->mode_fixup(bridge, mode, adjusted_mode))
502 return false;
503 }
504
505 return true;
506 }
507 EXPORT_SYMBOL(drm_bridge_chain_mode_fixup);
508
509 /**
510 * drm_bridge_chain_mode_valid - validate the mode against all bridges in the
511 * encoder chain.
512 * @bridge: bridge control structure
513 * @info: display info against which the mode shall be validated
514 * @mode: desired mode to be validated
515 *
516 * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder
517 * chain, starting from the first bridge to the last. If any bridge rejects
518 * the mode, the function returns the corresponding error code.
519 *
520 * Note: the bridge passed should be the one closest to the encoder.
521 *
522 * RETURNS:
523 * MODE_OK on success, or an &enum drm_mode_status error code on failure
524 */
525 enum drm_mode_status
526 drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
527 const struct drm_display_info *info,
528 const struct drm_display_mode *mode)
529 {
530 struct drm_encoder *encoder;
531
532 if (!bridge)
533 return MODE_OK;
534
535 encoder = bridge->encoder;
536 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
537 enum drm_mode_status ret;
538
539 if (!bridge->funcs->mode_valid)
540 continue;
541
542 ret = bridge->funcs->mode_valid(bridge, info, mode);
543 if (ret != MODE_OK)
544 return ret;
545 }
546
547 return MODE_OK;
548 }
549 EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
550
551 /**
552 * drm_bridge_chain_mode_set - set proposed mode for all bridges in the
553 * encoder chain
554 * @bridge: bridge control structure
555 * @mode: desired mode to be set for the encoder chain
556 * @adjusted_mode: updated mode that works for this encoder chain
557 *
558 * Calls &drm_bridge_funcs.mode_set op for all the bridges in the
559 * encoder chain, starting from the first bridge to the last.
560 *
561 * Note: the bridge passed should be the one closest to the encoder
562 */
563 void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
564 const struct drm_display_mode *mode,
565 const struct drm_display_mode *adjusted_mode)
566 {
567 struct drm_encoder *encoder;
568
569 if (!bridge)
570 return;
571
572 encoder = bridge->encoder;
573 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
574 if (bridge->funcs->mode_set)
575 bridge->funcs->mode_set(bridge, mode, adjusted_mode);
576 }
577 }
578 EXPORT_SYMBOL(drm_bridge_chain_mode_set);
579
580 /**
581 * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
582 * @bridge: bridge control structure
583 * @old_state: old atomic state
584 *
585 * Calls &drm_bridge_funcs.atomic_disable (falls back on
586 * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain,
587 * starting from the last bridge to the first. These are called before calling
588 * &drm_encoder_helper_funcs.atomic_disable
589 *
590 * Note: the bridge passed should be the one closest to the encoder
591 */
592 void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
593 struct drm_atomic_state *old_state)
594 {
595 struct drm_encoder *encoder;
596 struct drm_bridge *iter;
597
598 if (!bridge)
599 return;
600
601 encoder = bridge->encoder;
602 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
603 if (iter->funcs->atomic_disable) {
604 struct drm_bridge_state *old_bridge_state;
605
606 old_bridge_state =
607 drm_atomic_get_old_bridge_state(old_state,
608 iter);
609 if (WARN_ON(!old_bridge_state))
610 return;
611
612 iter->funcs->atomic_disable(iter, old_bridge_state);
613 } else if (iter->funcs->disable) {
614 iter->funcs->disable(iter);
615 }
616
617 if (iter == bridge)
618 break;
619 }
620 }
621 EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
622
623 static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
624 struct drm_atomic_state *old_state)
625 {
626 if (old_state && bridge->funcs->atomic_post_disable) {
627 struct drm_bridge_state *old_bridge_state;
628
629 old_bridge_state =
630 drm_atomic_get_old_bridge_state(old_state,
631 bridge);
632 if (WARN_ON(!old_bridge_state))
633 return;
634
635 bridge->funcs->atomic_post_disable(bridge,
636 old_bridge_state);
637 } else if (bridge->funcs->post_disable) {
638 bridge->funcs->post_disable(bridge);
639 }
640 }
641
642 /**
643 * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
644 * in the encoder chain
645 * @bridge: bridge control structure
646 * @old_state: old atomic state
647 *
648 * Calls &drm_bridge_funcs.atomic_post_disable (falls back on
649 * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain,
650 * starting from the first bridge to the last. These are called after completing
651 * &drm_encoder_helper_funcs.atomic_disable
652 *
653 * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
654 * bridge will be called before the previous one to reverse the @pre_enable
655 * calling direction.
656 *
657 * Note: the bridge passed should be the one closest to the encoder
658 */
659 void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
660 struct drm_atomic_state *old_state)
661 {
662 struct drm_encoder *encoder;
663 struct drm_bridge *next, *limit;
664
665 if (!bridge)
666 return;
667
668 encoder = bridge->encoder;
669
670 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
671 limit = NULL;
672
673 if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
674 next = list_next_entry(bridge, chain_node);
675
676 if (next->pre_enable_prev_first) {
677 /* The next bridge requested that the previous one
678 * be enabled first, and therefore disabled last.
679 */
680 limit = next;
681
682 /* Find the next bridge that has NOT requested
683 * prev to be enabled first / disabled last
684 */
685 list_for_each_entry_from(next, &encoder->bridge_chain,
686 chain_node) {
687 if (next->pre_enable_prev_first) {
688 next = list_prev_entry(next, chain_node);
689 limit = next;
690 break;
691 }
692 }
693
694 /* Call these bridges in reverse order */
695 list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
696 chain_node) {
697 if (next == bridge)
698 break;
699
700 drm_atomic_bridge_call_post_disable(next,
701 old_state);
702 }
703 }
704 }
705
706 drm_atomic_bridge_call_post_disable(bridge, old_state);
707
708 if (limit)
709 /* Jump all bridges that we have already post_disabled */
710 bridge = limit;
711 }
712 }
713 EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
714
715 static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
716 struct drm_atomic_state *old_state)
717 {
718 if (old_state && bridge->funcs->atomic_pre_enable) {
719 struct drm_bridge_state *old_bridge_state;
720
721 old_bridge_state =
722 drm_atomic_get_old_bridge_state(old_state,
723 bridge);
724 if (WARN_ON(!old_bridge_state))
725 return;
726
727 bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
728 } else if (bridge->funcs->pre_enable) {
729 bridge->funcs->pre_enable(bridge);
730 }
731 }
732
733 /**
734 * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
735 * the encoder chain
736 * @bridge: bridge control structure
737 * @old_state: old atomic state
738 *
739 * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on
740 * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain,
741 * starting from the last bridge to the first. These are called before calling
742 * &drm_encoder_helper_funcs.atomic_enable
743 *
744 * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
745 * prev bridge will be called before pre_enable of this bridge.
746 *
747 * Note: the bridge passed should be the one closest to the encoder
748 */
749 void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
750 struct drm_atomic_state *old_state)
751 {
752 struct drm_encoder *encoder;
753 struct drm_bridge *iter, *next, *limit;
754
755 if (!bridge)
756 return;
757
758 encoder = bridge->encoder;
759
760 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
761 if (iter->pre_enable_prev_first) {
762 next = iter;
763 limit = bridge;
764 list_for_each_entry_from_reverse(next,
765 &encoder->bridge_chain,
766 chain_node) {
767 if (next == bridge)
768 break;
769
770 if (!next->pre_enable_prev_first) {
771 /* Found first bridge that does NOT
772 * request prev to be enabled first
773 */
774 limit = list_prev_entry(next, chain_node);
775 break;
776 }
777 }
778
779 list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
780 /* Call pre_enable on the requested previous
781 * bridges, in chain order.
782 */
783 if (next == iter)
784 /* Reached the bridge that requested previous
785 * bridges to be enabled first.
786 */
787 break;
788
789 drm_atomic_bridge_call_pre_enable(next, old_state);
790 }
791 }
792
793 drm_atomic_bridge_call_pre_enable(iter, old_state);
794
795 if (iter->pre_enable_prev_first)
796 /* Jump all bridges that we have already pre_enabled */
797 iter = limit;
798
799 if (iter == bridge)
800 break;
801 }
802 }
803 EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable);
804
805 /**
806 * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain
807 * @bridge: bridge control structure
808 * @old_state: old atomic state
809 *
810 * Calls &drm_bridge_funcs.atomic_enable (falls back on
811 * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain,
812 * starting from the first bridge to the last. These are called after completing
813 * &drm_encoder_helper_funcs.atomic_enable
814 *
815 * Note: the bridge passed should be the one closest to the encoder
816 */
817 void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
818 struct drm_atomic_state *old_state)
819 {
820 struct drm_encoder *encoder;
821
822 if (!bridge)
823 return;
824
825 encoder = bridge->encoder;
826 list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
827 if (bridge->funcs->atomic_enable) {
828 struct drm_bridge_state *old_bridge_state;
829
830 old_bridge_state =
831 drm_atomic_get_old_bridge_state(old_state,
832 bridge);
833 if (WARN_ON(!old_bridge_state))
834 return;
835
836 bridge->funcs->atomic_enable(bridge, old_bridge_state);
837 } else if (bridge->funcs->enable) {
838 bridge->funcs->enable(bridge);
839 }
840 }
841 }
842 EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
843
844 static int drm_atomic_bridge_check(struct drm_bridge *bridge,
845 struct drm_crtc_state *crtc_state,
846 struct drm_connector_state *conn_state)
847 {
848 if (bridge->funcs->atomic_check) {
849 struct drm_bridge_state *bridge_state;
850 int ret;
851
852 bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
853 bridge);
854 if (WARN_ON(!bridge_state))
855 return -EINVAL;
856
857 ret = bridge->funcs->atomic_check(bridge, bridge_state,
858 crtc_state, conn_state);
859 if (ret)
860 return ret;
861 } else if (bridge->funcs->mode_fixup) {
862 if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
863 &crtc_state->adjusted_mode))
864 return -EINVAL;
865 }
866
867 return 0;
868 }
869
870 static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
871 struct drm_bridge *cur_bridge,
872 struct drm_crtc_state *crtc_state,
873 struct drm_connector_state *conn_state,
874 u32 out_bus_fmt)
875 {
876 unsigned int i, num_in_bus_fmts = 0;
877 struct drm_bridge_state *cur_state;
878 struct drm_bridge *prev_bridge;
879 u32 *in_bus_fmts;
880 int ret;
881
882 prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
883 cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
884 cur_bridge);
885
886 /*
887 * If bus format negotiation is not supported by this bridge, let's
888 * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
889 * hope that it can handle this situation gracefully (by providing
890 * appropriate default values).
891 */
892 if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
893 if (cur_bridge != first_bridge) {
894 ret = select_bus_fmt_recursive(first_bridge,
895 prev_bridge, crtc_state,
896 conn_state,
897 MEDIA_BUS_FMT_FIXED);
898 if (ret)
899 return ret;
900 }
901
902 /*
903 * Driver does not implement the atomic state hooks, but that's
904 * fine, as long as it does not access the bridge state.
905 */
906 if (cur_state) {
907 cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
908 cur_state->output_bus_cfg.format = out_bus_fmt;
909 }
910
911 return 0;
912 }
913
914 /*
915 * If the driver implements ->atomic_get_input_bus_fmts() it
916 * should also implement the atomic state hooks.
917 */
918 if (WARN_ON(!cur_state))
919 return -EINVAL;
920
921 in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
922 cur_state,
923 crtc_state,
924 conn_state,
925 out_bus_fmt,
926 &num_in_bus_fmts);
927 if (!num_in_bus_fmts)
928 return -ENOTSUPP;
929 else if (!in_bus_fmts)
930 return -ENOMEM;
931
932 if (first_bridge == cur_bridge) {
933 cur_state->input_bus_cfg.format = in_bus_fmts[0];
934 cur_state->output_bus_cfg.format = out_bus_fmt;
935 kfree(in_bus_fmts);
936 return 0;
937 }
938
939 for (i = 0; i < num_in_bus_fmts; i++) {
940 ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
941 crtc_state, conn_state,
942 in_bus_fmts[i]);
943 if (ret != -ENOTSUPP)
944 break;
945 }
946
947 if (!ret) {
948 cur_state->input_bus_cfg.format = in_bus_fmts[i];
949 cur_state->output_bus_cfg.format = out_bus_fmt;
950 }
951
952 kfree(in_bus_fmts);
953 return ret;
954 }
955
956 /*
957 * This function is called by &drm_atomic_bridge_chain_check() just before
958 * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
959 * It performs bus format negotiation between bridge elements. The negotiation
960 * happens in reverse order, starting from the last element in the chain up to
961 * @bridge.
962 *
963 * Negotiation starts by retrieving supported output bus formats on the last
964 * bridge element and testing them one by one. The test is recursive, meaning
965 * that for each tested output format, the whole chain will be walked backward,
966 * and each element will have to choose an input bus format that can be
967 * transcoded to the requested output format. When a bridge element does not
968 * support transcoding into a specific output format -ENOTSUPP is returned and
969 * the next bridge element will have to try a different format. If none of the
970 * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail.
971 *
972 * This implementation is relying on
973 * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
974 * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
975 * input/output formats.
976 *
977 * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
978 * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
979 * tries a single format: &drm_connector.display_info.bus_formats[0] if
980 * available, MEDIA_BUS_FMT_FIXED otherwise.
981 *
982 * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
983 * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
984 * bridge element that lacks this hook and asks the previous element in the
985 * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
986 * to do in that case (fail if they want to enforce bus format negotiation, or
987 * provide a reasonable default if they need to support pipelines where not
988 * all elements support bus format negotiation).
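 *
 * As a point of reference (a sketch only, with hypothetical "foo" names), a
 * bridge that accepts a single input format regardless of the requested
 * output format could implement &drm_bridge_funcs.atomic_get_input_bus_fmts
 * as follows; the returned array is freed by the caller:
 *
 *        static u32 *foo_get_input_bus_fmts(struct drm_bridge *bridge,
 *                                           struct drm_bridge_state *bridge_state,
 *                                           struct drm_crtc_state *crtc_state,
 *                                           struct drm_connector_state *conn_state,
 *                                           u32 output_fmt,
 *                                           unsigned int *num_input_fmts)
 *        {
 *                u32 *input_fmts;
 *
 *                input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
 *                if (!input_fmts) {
 *                        *num_input_fmts = 0;
 *                        return NULL;
 *                }
 *
 *                *num_input_fmts = 1;
 *                input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
 *
 *                return input_fmts;
 *        }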
989 */
990 static int
991 drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
992 struct drm_crtc_state *crtc_state,
993 struct drm_connector_state *conn_state)
994 {
995 struct drm_connector *conn = conn_state->connector;
996 struct drm_encoder *encoder = bridge->encoder;
997 struct drm_bridge_state *last_bridge_state;
998 unsigned int i, num_out_bus_fmts = 0;
999 struct drm_bridge *last_bridge;
1000 u32 *out_bus_fmts;
1001 int ret = 0;
1002
1003 last_bridge = list_last_entry(&encoder->bridge_chain,
1004 struct drm_bridge, chain_node);
1005 last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
1006 last_bridge);
1007
1008 if (last_bridge->funcs->atomic_get_output_bus_fmts) {
1009 const struct drm_bridge_funcs *funcs = last_bridge->funcs;
1010
1011 /*
1012 * If the driver implements ->atomic_get_output_bus_fmts() it
1013 * should also implement the atomic state hooks.
1014 */
1015 if (WARN_ON(!last_bridge_state))
1016 return -EINVAL;
1017
1018 out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
1019 last_bridge_state,
1020 crtc_state,
1021 conn_state,
1022 &num_out_bus_fmts);
1023 if (!num_out_bus_fmts)
1024 return -ENOTSUPP;
1025 else if (!out_bus_fmts)
1026 return -ENOMEM;
1027 } else {
1028 num_out_bus_fmts = 1;
1029 out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
1030 if (!out_bus_fmts)
1031 return -ENOMEM;
1032
1033 if (conn->display_info.num_bus_formats &&
1034 conn->display_info.bus_formats)
1035 out_bus_fmts[0] = conn->display_info.bus_formats[0];
1036 else
1037 out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
1038 }
1039
1040 for (i = 0; i < num_out_bus_fmts; i++) {
1041 ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
1042 conn_state, out_bus_fmts[i]);
1043 if (ret != -ENOTSUPP)
1044 break;
1045 }
1046
1047 kfree(out_bus_fmts);
1048
1049 return ret;
1050 }
1051
1052 static void
1053 drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
1054 struct drm_connector *conn,
1055 struct drm_atomic_state *state)
1056 {
1057 struct drm_bridge_state *bridge_state, *next_bridge_state;
1058 struct drm_bridge *next_bridge;
1059 u32 output_flags = 0;
1060
1061 bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
1062
1063 /* No bridge state attached to this bridge => nothing to propagate. */
1064 if (!bridge_state)
1065 return;
1066
1067 next_bridge = drm_bridge_get_next_bridge(bridge);
1068
1069 /*
1070 * Let's try to apply the most common case here, that is, propagate
1071 * display_info flags for the last bridge, and propagate the input
1072 * flags of the next bridge element to the output end of the current
1073 * bridge when the bridge is not the last one.
1074 * There are exceptions to this rule, like when signal inversion is
1075 * happening at the board level, but that's something drivers can deal
1076 * with from their &drm_bridge_funcs.atomic_check() implementation by
1077 * simply overriding the flags value we've set here.
1078 */
1079 if (!next_bridge) {
1080 output_flags = conn->display_info.bus_flags;
1081 } else {
1082 next_bridge_state = drm_atomic_get_new_bridge_state(state,
1083 next_bridge);
1084 /*
1085 * No bridge state attached to the next bridge, just leave the
1086 * flags to 0.
1087 */
1088 if (next_bridge_state)
1089 output_flags = next_bridge_state->input_bus_cfg.flags;
1090 }
1091
1092 bridge_state->output_bus_cfg.flags = output_flags;
1093
1094 /*
1095 * Propagate the output flags to the input end of the bridge. Again, it's
1096 * not necessarily what all bridges want, but that's what most of them
1097 * do, and by doing that by default we avoid forcing drivers to
1098 * duplicate the "dummy propagation" logic.
1099 */
1100 bridge_state->input_bus_cfg.flags = output_flags;
1101 }
1102
1103 /**
1104 * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
1105 * @bridge: bridge control structure
1106 * @crtc_state: new CRTC state
1107 * @conn_state: new connector state
1108 *
1109 * First trigger a bus format negotiation before calling
1110 * &drm_bridge_funcs.atomic_check() (falls back on
1111 * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
1112 * starting from the last bridge to the first. These are called before calling
1113 * &drm_encoder_helper_funcs.atomic_check()
1114 *
1115 * RETURNS:
1116 * 0 on success, a negative error code on failure
1117 */
1118 int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
1119 struct drm_crtc_state *crtc_state,
1120 struct drm_connector_state *conn_state)
1121 {
1122 struct drm_connector *conn = conn_state->connector;
1123 struct drm_encoder *encoder;
1124 struct drm_bridge *iter;
1125 int ret;
1126
1127 if (!bridge)
1128 return 0;
1129
1130 ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
1131 conn_state);
1132 if (ret)
1133 return ret;
1134
1135 encoder = bridge->encoder;
1136 list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
1137 int ret;
1138
1139 /*
1140 * Bus flags are propagated by default. If a bridge needs to
1141 * tweak the input bus flags for any reason, it should happen
1142 * in its &drm_bridge_funcs.atomic_check() implementation such
1143 * that preceding bridges in the chain can propagate the new
1144 * bus flags.
1145 */
1146 drm_atomic_bridge_propagate_bus_flags(iter, conn,
1147 crtc_state->state);
1148
1149 ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
1150 if (ret)
1151 return ret;
1152
1153 if (iter == bridge)
1154 break;
1155 }
1156
1157 return 0;
1158 }
1159 EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
1160
1161 /**
1162 * drm_bridge_detect - check if anything is attached to the bridge output
1163 * @bridge: bridge control structure
1164 *
1165 * If the bridge supports output detection, as reported by the
1166 * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
1167 * bridge and return the connection status. Otherwise return
1168 * connector_status_unknown.
1169 *
1170 * RETURNS:
1171 * The detection status on success, or connector_status_unknown if the bridge
1172 * doesn't support output detection.
1173 */
1174 enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
1175 {
1176 if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
1177 return connector_status_unknown;
1178
1179 return bridge->funcs->detect(bridge);
1180 }
1181 EXPORT_SYMBOL_GPL(drm_bridge_detect);
1182
1183 /**
1184 * drm_bridge_get_modes - fill all modes currently valid for the sink into the
1185 * @connector
1186 * @bridge: bridge control structure
1187 * @connector: the connector to fill with modes
1188 *
1189 * If the bridge supports output modes retrieval, as reported by the
1190 * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
1191 * fill the connector with all valid modes and return the number of modes
1192 * added. Otherwise return 0.
1193 *
1194 * RETURNS:
1195 * The number of modes added to the connector.
1196 */
1197 int drm_bridge_get_modes(struct drm_bridge *bridge,
1198 struct drm_connector *connector)
1199 {
1200 if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
1201 return 0;
1202
1203 return bridge->funcs->get_modes(bridge, connector);
1204 }
1205 EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
1206
1207 /**
1208 * drm_bridge_get_edid - get the EDID data of the connected display
1209 * @bridge: bridge control structure
1210 * @connector: the connector to read EDID for
1211 *
1212 * If the bridge supports output EDID retrieval, as reported by the
1213 * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
1214 * get the EDID and return it. Otherwise return NULL.
1215 *
1216 * RETURNS:
1217 * The retrieved EDID on success, or NULL otherwise.
1218 */
1219 struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
1220 struct drm_connector *connector)
1221 {
1222 if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
1223 return NULL;
1224
1225 return bridge->funcs->get_edid(bridge, connector);
1226 }
1227 EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
1228
1229 /**
1230 * drm_bridge_hpd_enable - enable hot plug detection for the bridge
1231 * @bridge: bridge control structure
1232 * @cb: hot-plug detection callback
1233 * @data: data to be passed to the hot-plug detection callback
1234 *
1235 * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
1236 * and @data as hot plug notification callback. From now on the @cb will be
1237 * called with @data when an output status change is detected by the bridge,
1238 * until hot plug notification gets disabled with drm_bridge_hpd_disable().
1239 *
1240 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1241 * bridge->ops. This function shall not be called when the flag is not set.
1242 *
1243 * Only one hot plug detection callback can be registered at a time; it is an
1244 * error to call this function when hot plug detection is already enabled for
1245 * the bridge.
1246 */
1247 void drm_bridge_hpd_enable(struct drm_bridge *bridge,
1248 void (*cb)(void *data,
1249 enum drm_connector_status status),
1250 void *data)
1251 {
1252 if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1253 return;
1254
1255 mutex_lock(&bridge->hpd_mutex);
1256
1257 if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
1258 goto unlock;
1259
1260 bridge->hpd_cb = cb;
1261 bridge->hpd_data = data;
1262
1263 if (bridge->funcs->hpd_enable)
1264 bridge->funcs->hpd_enable(bridge);
1265
1266 unlock:
1267 mutex_unlock(&bridge->hpd_mutex);
1268 }
1269 EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
1270
1271 /**
1272 * drm_bridge_hpd_disable - disable hot plug detection for the bridge
1273 * @bridge: bridge control structure
1274 *
1275 * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
1276 * plug detection callback previously registered with drm_bridge_hpd_enable().
1277 * Once this function returns the callback will not be called by the bridge
1278 * when an output status change occurs.
1279 *
1280 * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
1281 * bridge->ops. This function shall not be called when the flag is not set.
1282 */
1283 void drm_bridge_hpd_disable(struct drm_bridge *bridge)
1284 {
1285 if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
1286 return;
1287
1288 mutex_lock(&bridge->hpd_mutex);
1289 if (bridge->funcs->hpd_disable)
1290 bridge->funcs->hpd_disable(bridge);
1291
1292 bridge->hpd_cb = NULL;
1293 bridge->hpd_data = NULL;
1294 mutex_unlock(&bridge->hpd_mutex);
1295 }
1296 EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
1297
1298 /**
1299 * drm_bridge_hpd_notify - notify hot plug detection events
1300 * @bridge: bridge control structure
1301 * @status: output connection status
1302 *
1303 * Bridge drivers shall call this function to report hot plug events when they
1304 * detect a change in the output status, when hot plug detection has been
1305 * enabled by drm_bridge_hpd_enable().
1306 *
1307 * This function shall be called in a context that can sleep.
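 *
 * For instance (a sketch with hypothetical "foo" names), a bridge driver
 * could report status changes from a threaded hot plug interrupt handler::
 *
 *        static irqreturn_t foo_hpd_isr(int irq, void *arg)
 *        {
 *                struct foo *foo = arg;
 *                enum drm_connector_status status;
 *
 *                status = foo_sink_connected(foo) ?
 *                         connector_status_connected :
 *                         connector_status_disconnected;
 *
 *                drm_bridge_hpd_notify(&foo->bridge, status);
 *
 *                return IRQ_HANDLED;
 *        }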
1308 */
1309 void drm_bridge_hpd_notify(struct drm_bridge *bridge,
1310 enum drm_connector_status status)
1311 {
1312 mutex_lock(&bridge->hpd_mutex);
1313 if (bridge->hpd_cb)
1314 bridge->hpd_cb(bridge->hpd_data, status);
1315 mutex_unlock(&bridge->hpd_mutex);
1316 }
1317 EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
1318
1319 #ifdef CONFIG_OF
1320 /**
1321 * of_drm_find_bridge - find the bridge corresponding to the device node in
1322 * the global bridge list
1323 *
1324 * @np: device node
1325 *
1326 * RETURNS:
1327 * drm_bridge control struct on success, NULL on failure
1328 */
1329 struct drm_bridge *of_drm_find_bridge(struct device_node *np)
1330 {
1331 struct drm_bridge *bridge;
1332
1333 mutex_lock(&bridge_lock);
1334
1335 list_for_each_entry(bridge, &bridge_list, list) {
1336 if (bridge->of_node == np) {
1337 mutex_unlock(&bridge_lock);
1338 return bridge;
1339 }
1340 }
1341
1342 mutex_unlock(&bridge_lock);
1343 return NULL;
1344 }
1345 EXPORT_SYMBOL(of_drm_find_bridge);
1346 #endif
1347
1348 MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
1349 MODULE_DESCRIPTION("DRM bridge infrastructure");
1350 MODULE_LICENSE("GPL and additional rights");
1351