/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * pm_runtime.h - Device run-time power management helper functions.
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
 */

#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>

#include <linux/jiffies.h>

/* Runtime PM flag argument bits */
#define RPM_ASYNC		0x01	/* Request is asynchronous */
#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					    state change */
#define RPM_GET_PUT		0x04	/* Increment/decrement the
					    usage_count */
#define RPM_AUTO		0x08	/* Use autosuspend_delay */

/*
 * Use this for defining a set of PM operations to be used in all situations
 * (system suspend, hibernation or runtime PM).
 *
 * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
 * macro, which uses the provided callbacks for both runtime PM and system
 * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
 * and pm_runtime_force_resume() for its system sleep callbacks.
 *
 * If the underlying dev_pm_ops struct symbol has to be exported, use
 * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
 */
#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	_DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
			   pm_runtime_force_resume, suspend_fn, \
			   resume_fn, idle_fn)

#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	EXPORT_DEV_PM_OPS(name) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
	EXPORT_GPL_DEV_PM_OPS(name) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
	EXPORT_NS_DEV_PM_OPS(name, ns) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
	EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
		RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	}
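
/*
 * For illustration only: a hypothetical platform driver "foo" could use
 * DEFINE_RUNTIME_DEV_PM_OPS() roughly as sketched below (the foo_* names are
 * made up here and are not part of this header):
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		// Put the hardware into its low-power state.
 *		return 0;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		// Bring the hardware back to the fully functional state.
 *		return 0;
 *	}
 *
 *	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
 *					 foo_runtime_resume, NULL);
 *
 *	static struct platform_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.pm = pm_ptr(&foo_pm_ops),
 *		},
 *	};
 */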

#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;

static inline bool queue_pm_work(struct work_struct *work)
{
	return queue_work(pm_wq, work);
}

extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);

extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
extern void pm_runtime_release_supplier(struct device_link *link);

extern int devm_pm_runtime_enable(struct device *dev);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
 */
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return pm_runtime_get_if_active(dev, false);
}
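
/*
 * For illustration only (foo_poll_hardware() is a made-up name, not part of
 * this header): pm_runtime_get_if_in_use() suits paths that only want to
 * touch the hardware if it is already in active use, e.g.
 *
 *	if (pm_runtime_get_if_in_use(dev) <= 0)
 *		return;			// counter not incremented, do nothing
 *
 *	foo_poll_hardware(dev);		// device is RPM_ACTIVE and pinned
 *	pm_runtime_put(dev);		// drop the reference taken above
 */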

/**
 * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
 * @dev: Target device.
 * @enable: Whether or not to ignore possible dependencies on children.
 *
 * The dependencies of @dev on its children will not be taken into account by
 * the runtime PM framework going forward if @enable is %true, or they will
 * be taken into account otherwise.
 */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
	dev->power.ignore_children = enable;
}

/**
 * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
 * @dev: Target device.
 */
static inline void pm_runtime_get_noresume(struct device *dev)
{
	atomic_inc(&dev->power.usage_count);
}

/**
 * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev unless it is 0 already.
 */
static inline void pm_runtime_put_noidle(struct device *dev)
{
	atomic_add_unless(&dev->power.usage_count, -1, 0);
}

/**
 * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev and its runtime PM status is
 * %RPM_SUSPENDED, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED
		&& !dev->power.disable_depth;
}

/**
 * pm_runtime_active - Check whether or not a device is runtime-active.
 * @dev: Target device.
 *
 * Return %true if runtime PM is disabled for @dev or its runtime PM status is
 * %RPM_ACTIVE, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_active(struct device *dev)
{
	return dev->power.runtime_status == RPM_ACTIVE
		|| dev->power.disable_depth;
}

/**
 * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
 * @dev: Target device.
 *
 * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
 * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which the
 * runtime PM status of @dev cannot change.
 */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED;
}

/**
 * pm_runtime_enabled - Check if runtime PM is enabled.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev.
 */
static inline bool pm_runtime_enabled(struct device *dev)
{
	return !dev->power.disable_depth;
}

/**
 * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
 * @dev: Target device.
 *
 * Return %true if @dev is a special device without runtime PM callbacks or
 * %false otherwise.
 */
static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
	return dev->power.no_callbacks;
}

/**
 * pm_runtime_mark_last_busy - Update the last access time of a device.
 * @dev: Target device.
 *
 * Update the last access time of @dev used by the runtime PM autosuspend
 * mechanism to the current time as returned by ktime_get_mono_fast_ns().
 */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}

/**
 * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
 * @dev: Target device.
 *
 * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
 * to runtime PM), in which case its runtime PM callbacks can be expected to
 * work correctly when invoked from interrupt handlers.
 */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
	return dev->power.irq_safe;
}

extern u64 pm_runtime_suspended_time(struct device *dev);

#else /* !CONFIG_PM */

static inline bool queue_pm_work(struct work_struct *work) { return false; }

static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }

static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return -EINVAL;
}
static inline int pm_runtime_get_if_active(struct device *dev,
					   bool ign_usage_count)
{
	return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
					    unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}

static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }

static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }

static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }

static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
						bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
						int delay) {}
static inline u64 pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device_link *link) {}
static inline void pm_runtime_release_supplier(struct device_link *link) {}

#endif /* !CONFIG_PM */

/**
 * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Invoke the "idle check" callback of @dev and, depending on its return value,
 * set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend has been enabled for it).
 */
static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

/**
 * pm_runtime_suspend - Suspend a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

/**
 * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend is enabled for it) without engaging its "idle check" callback.
 */
static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

/**
 * pm_runtime_resume - Resume a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, 0);
}

/**
 * pm_request_idle - Queue up "idle check" execution for a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
 * asynchronously.
 */
static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

/**
 * pm_request_resume - Queue up runtime-resume of a device.
 * @dev: Target device.
 */
static inline int pm_request_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_ASYNC);
}

/**
 * pm_request_autosuspend - Queue up autosuspend of a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_autosuspend() for
 * @dev asynchronously.
 */
static inline int pm_request_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_get - Bump up usage counter and queue up resume of a device.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and queue up a work item to
 * carry out runtime-resume of it.
 */
static inline int pm_runtime_get(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
 * it synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_resume() and the runtime PM usage counter of @dev remains
 * incremented in all cases, even if it returns an error code.
 * Consider using pm_runtime_resume_and_get() instead of it, especially
 * if its return value is checked by the caller, as this is likely to result
 * in cleaner code.
 */
static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}
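
/*
 * For illustration only (foo_hw_access() is a made-up name, not part of this
 * header): because the usage counter stays incremented even on failure, an
 * error path after pm_runtime_get_sync() still has to drop the reference:
 *
 *	ret = pm_runtime_get_sync(dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);
 *		return ret;
 *	}
 *
 *	foo_hw_access(dev);
 *	pm_runtime_put(dev);
 */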

/**
 * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Resume @dev synchronously and if that is successful, increment its runtime
 * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
 * incremented or a negative error code otherwise.
 */
static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}
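
/*
 * For illustration only (foo_hw_access() is a made-up name): with
 * pm_runtime_resume_and_get() the failure path needs no extra counter
 * manipulation, which is why it is suggested above in preference to
 * pm_runtime_get_sync():
 *
 *	ret = pm_runtime_resume_and_get(dev);
 *	if (ret < 0)
 *		return ret;
 *
 *	foo_hw_access(dev);
 *	pm_runtime_put(dev);
 */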

/**
 * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_idle().
 */
static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
 */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev,
	    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, invoke the "idle check" callback of @dev and, depending on its
 * return value, set up autosuspend of @dev or suspend it (depending on whether
 * or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_idle() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, carry out runtime-suspend of @dev synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
 * on whether or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

/**
 * pm_runtime_set_active - Set runtime PM status to "active".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies
 * of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_active(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_ACTIVE);
}

/**
 * pm_runtime_set_suspended - Set runtime PM status to "suspended".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
 * dependencies of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_suspended(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}
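
/*
 * For illustration only (the foo_probe() shape is made up, not part of this
 * header): since the status can only be changed while runtime PM is disabled,
 * a driver that powers the hardware up in probe typically reports that state
 * before enabling runtime PM:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		// ... power up and initialize the hardware ...
 *		pm_runtime_set_active(&pdev->dev);
 *		pm_runtime_enable(&pdev->dev);
 *		return 0;
 *	}
 */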

/**
 * pm_runtime_disable - Disable runtime PM for a device.
 * @dev: Target device.
 *
 * Prevent the runtime PM framework from working with @dev (by incrementing its
 * "blocking" counter).
 *
 * For each invocation of this function for @dev there must be a matching
 * pm_runtime_enable() call in order for runtime PM to be enabled for it.
 */
static inline void pm_runtime_disable(struct device *dev)
{
	__pm_runtime_disable(dev, true);
}

/**
 * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
 * @dev: Target device.
 *
 * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
 * requested (or "autosuspend" will be handled as direct runtime-suspend for
 * it).
 *
 * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
 * at driver exit time unless your driver initially enabled pm_runtime
 * with devm_pm_runtime_enable() (which handles it for you).
 */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, true);
}

/**
 * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
 * @dev: Target device.
 *
 * Prevent the runtime PM autosuspend mechanism from being used for @dev which
 * means that "autosuspend" will be handled as direct runtime-suspend for it
 * going forward.
 */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, false);
}
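
/*
 * For illustration only (the foo_do_io() name and the 2000 ms delay are made
 * up, not part of this header): a driver that wants autosuspend typically
 * sets it up once, marks the device busy after each I/O burst and drops its
 * reference with the autosuspend variant of "put":
 *
 *	// setup, e.g. in probe
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *
 *	// per-transaction pattern
 *	ret = pm_runtime_resume_and_get(dev);
 *	if (ret < 0)
 *		return ret;
 *	foo_do_io(dev);
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 *	// teardown, e.g. in remove, unless devm_pm_runtime_enable() was used
 *	pm_runtime_dont_use_autosuspend(dev);
 */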

#endif