Lines Matching refs: dev (drivers/base/power/runtime.c)

27 static pm_callback_t __rpm_get_driver_callback(struct device *dev,
30 if (dev->driver && dev->driver->pm)
31 return get_callback_ptr(dev->driver->pm, cb_offset);
36 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
41 if (dev->pm_domain)
42 ops = &dev->pm_domain->ops;
43 else if (dev->type && dev->type->pm)
44 ops = dev->type->pm;
45 else if (dev->class && dev->class->pm)
46 ops = dev->class->pm;
47 else if (dev->bus && dev->bus->pm)
48 ops = dev->bus->pm;
56 cb = __rpm_get_driver_callback(dev, cb_offset);
61 #define RPM_GET_CALLBACK(dev, callback) \
62 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
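
get_callback_ptr() on source line 31 does not appear in this match list because it never touches dev. Assuming pm_callback_t is int (*)(struct device *), a minimal sketch of what it has to do, paired with the offsetof()-based macro above:

    /* Fetch the callback stored at byte offset 'offset' inside a struct
     * dev_pm_ops. RPM_GET_CALLBACK() computes that offset with offsetof(),
     * so one helper serves runtime_suspend, runtime_resume and runtime_idle. */
    static pm_callback_t get_callback_ptr(const void *start, size_t offset)
    {
            return *(pm_callback_t *)(start + offset);
    }
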
64 static int rpm_resume(struct device *dev, int rpmflags);
65 static int rpm_suspend(struct device *dev, int rpmflags);
78 static void update_pm_runtime_accounting(struct device *dev)
82 if (dev->power.disable_depth > 0)
85 last = dev->power.accounting_timestamp;
88 dev->power.accounting_timestamp = now;
100 if (dev->power.runtime_status == RPM_SUSPENDED)
101 dev->power.suspended_time += delta;
103 dev->power.active_time += delta;
106 static void __update_runtime_status(struct device *dev, enum rpm_status status)
108 update_pm_runtime_accounting(dev);
109 trace_rpm_status(dev, status);
110 dev->power.runtime_status = status;
113 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
118 spin_lock_irqsave(&dev->power.lock, flags);
120 update_pm_runtime_accounting(dev);
121 time = suspended ? dev->power.suspended_time : dev->power.active_time;
123 spin_unlock_irqrestore(&dev->power.lock, flags);
128 u64 pm_runtime_active_time(struct device *dev)
130 return rpm_get_accounted_time(dev, false);
133 u64 pm_runtime_suspended_time(struct device *dev)
135 return rpm_get_accounted_time(dev, true);
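
These two accessors back the runtime_active_time and runtime_suspended_time sysfs attributes. A sketch of the consuming side, modeled on drivers/base/power/sysfs.c (shown for orientation, not as verbatim source):

    static ssize_t runtime_active_time_show(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
    {
            u64 tmp = pm_runtime_active_time(dev);

            do_div(tmp, NSEC_PER_MSEC);             /* ns -> ms for userspace */
            return sysfs_emit(buf, "%llu\n", tmp);
    }
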
143 static void pm_runtime_deactivate_timer(struct device *dev)
145 if (dev->power.timer_expires > 0) {
146 hrtimer_try_to_cancel(&dev->power.suspend_timer);
147 dev->power.timer_expires = 0;
155 static void pm_runtime_cancel_pending(struct device *dev)
157 pm_runtime_deactivate_timer(dev);
162 dev->power.request = RPM_REQ_NONE;
177 u64 pm_runtime_autosuspend_expiration(struct device *dev)
182 if (!dev->power.use_autosuspend)
185 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
189 expires = READ_ONCE(dev->power.last_busy);
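
The lines elided from the match turn last_busy into an absolute deadline; a hedged reconstruction of that tail (the function returns 0 when autosuspend is off, the delay is negative, or the deadline has already passed):

    expires += (u64)autosuspend_delay * NSEC_PER_MSEC;  /* delay is in ms */
    if (expires > ktime_get_mono_fast_ns())
            return expires;         /* still in the future: a valid deadline */

    return 0;                       /* no pending deadline */
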
198 static int dev_memalloc_noio(struct device *dev, void *data)
200 return dev->power.memalloc_noio;
231 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
240 spin_lock_irq(&dev->power.lock);
241 enabled = dev->power.memalloc_noio;
242 dev->power.memalloc_noio = enable;
243 spin_unlock_irq(&dev->power.lock);
252 dev = dev->parent;
259 if (!dev || (!enable &&
260 device_for_each_child(dev, NULL, dev_memalloc_noio)))
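
The walk up dev->parent propagates the setting so that resuming any ancestor of, say, a block device also avoids I/O-triggering allocations. A caller-side sketch (the probe/remove pairing is the assumed usage, not taken from this file):

    /* Probe of a device in the memory-reclaim I/O path: runtime PM
     * callbacks may allocate, so force GFP_NOIO behaviour for them. */
    pm_runtime_set_memalloc_noio(dev, true);

    /* On remove, drop the flag; a parent clears it only once no child
     * still has it set (the device_for_each_child() check above). */
    pm_runtime_set_memalloc_noio(dev, false);
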
271 static int rpm_check_suspend_allowed(struct device *dev)
275 if (dev->power.runtime_error)
277 else if (dev->power.disable_depth > 0)
279 else if (atomic_read(&dev->power.usage_count))
281 else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count))
285 else if ((dev->power.deferred_resume &&
286 dev->power.runtime_status == RPM_SUSPENDING) ||
287 (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME))
289 else if (__dev_pm_qos_resume_latency(dev) == 0)
291 else if (dev->power.runtime_status == RPM_SUSPENDED)
297 static int rpm_get_suppliers(struct device *dev)
301 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
340 static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
344 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
352 static void rpm_put_suppliers(struct device *dev)
354 __rpm_put_suppliers(dev, true);
357 static void rpm_suspend_suppliers(struct device *dev)
362 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
374 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
375 __releases(&dev->power.lock) __acquires(&dev->power.lock)
378 bool use_links = dev->power.links_count > 0;
380 if (dev->power.irq_safe) {
381 spin_unlock(&dev->power.lock);
383 spin_unlock_irq(&dev->power.lock);
392 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
395 retval = rpm_get_suppliers(dev);
397 rpm_put_suppliers(dev);
406 retval = cb(dev);
408 if (dev->power.irq_safe) {
409 spin_lock(&dev->power.lock);
419 ((dev->power.runtime_status == RPM_SUSPENDING && !retval) ||
420 (dev->power.runtime_status == RPM_RESUMING && retval))) {
423 __rpm_put_suppliers(dev, false);
429 spin_lock_irq(&dev->power.lock);
440 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
444 if (dev->power.memalloc_noio) {
457 retval = __rpm_callback(cb, dev);
460 retval = __rpm_callback(cb, dev);
473 dev->power.runtime_error = retval;
491 static int rpm_idle(struct device *dev, int rpmflags)
496 trace_rpm_idle(dev, rpmflags);
497 retval = rpm_check_suspend_allowed(dev);
502 else if (dev->power.runtime_status != RPM_ACTIVE)
509 else if (dev->power.request_pending &&
510 dev->power.request > RPM_REQ_IDLE)
514 else if (dev->power.idle_notification)
521 dev->power.request = RPM_REQ_NONE;
523 callback = RPM_GET_CALLBACK(dev, runtime_idle);
526 if (!callback || dev->power.no_callbacks)
531 dev->power.request = RPM_REQ_IDLE;
532 if (!dev->power.request_pending) {
533 dev->power.request_pending = true;
534 queue_work(pm_wq, &dev->power.work);
536 trace_rpm_return_int(dev, _THIS_IP_, 0);
540 dev->power.idle_notification = true;
542 if (dev->power.irq_safe)
543 spin_unlock(&dev->power.lock);
545 spin_unlock_irq(&dev->power.lock);
547 retval = callback(dev);
549 if (dev->power.irq_safe)
550 spin_lock(&dev->power.lock);
552 spin_lock_irq(&dev->power.lock);
554 dev->power.idle_notification = false;
555 wake_up_all(&dev->power.wait_queue);
558 trace_rpm_return_int(dev, _THIS_IP_, retval);
559 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
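
Source line 559 captures the ->runtime_idle() contract: a zero return lets the core continue into rpm_suspend(dev, RPM_AUTO); any nonzero value vetoes the suspend. A driver-side sketch (struct foo, FOO_STATUS and FOO_BUSY are hypothetical):

    static int foo_runtime_idle(struct device *dev)
    {
            struct foo *priv = dev_get_drvdata(dev);

            if (readl(priv->base + FOO_STATUS) & FOO_BUSY)
                    return -EBUSY;  /* veto: device stays RPM_ACTIVE */

            return 0;               /* core proceeds to an autosuspend */
    }
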
583 static int rpm_suspend(struct device *dev, int rpmflags)
584 __releases(&dev->power.lock) __acquires(&dev->power.lock)
590 trace_rpm_suspend(dev, rpmflags);
593 retval = rpm_check_suspend_allowed(dev);
598 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
605 if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) {
606 u64 expires = pm_runtime_autosuspend_expiration(dev);
610 dev->power.request = RPM_REQ_NONE;
619 if (!(dev->power.timer_expires &&
620 dev->power.timer_expires <= expires)) {
625 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
628 dev->power.timer_expires = expires;
629 hrtimer_start_range_ns(&dev->power.suspend_timer,
634 dev->power.timer_autosuspends = 1;
640 pm_runtime_cancel_pending(dev);
642 if (dev->power.runtime_status == RPM_SUSPENDING) {
650 if (dev->power.irq_safe) {
651 spin_unlock(&dev->power.lock);
655 spin_lock(&dev->power.lock);
661 prepare_to_wait(&dev->power.wait_queue, &wait,
663 if (dev->power.runtime_status != RPM_SUSPENDING)
666 spin_unlock_irq(&dev->power.lock);
670 spin_lock_irq(&dev->power.lock);
672 finish_wait(&dev->power.wait_queue, &wait);
676 if (dev->power.no_callbacks)
681 dev->power.request = (rpmflags & RPM_AUTO) ?
683 if (!dev->power.request_pending) {
684 dev->power.request_pending = true;
685 queue_work(pm_wq, &dev->power.work);
690 __update_runtime_status(dev, RPM_SUSPENDING);
692 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
694 dev_pm_enable_wake_irq_check(dev, true);
695 retval = rpm_callback(callback, dev);
699 dev_pm_enable_wake_irq_complete(dev);
702 __update_runtime_status(dev, RPM_SUSPENDED);
703 pm_runtime_deactivate_timer(dev);
705 if (dev->parent) {
706 parent = dev->parent;
709 wake_up_all(&dev->power.wait_queue);
711 if (dev->power.deferred_resume) {
712 dev->power.deferred_resume = false;
713 rpm_resume(dev, 0);
718 if (dev->power.irq_safe)
723 spin_unlock(&dev->power.lock);
729 spin_lock(&dev->power.lock);
732 if (dev->power.links_count > 0) {
733 spin_unlock_irq(&dev->power.lock);
735 rpm_suspend_suppliers(dev);
737 spin_lock_irq(&dev->power.lock);
741 trace_rpm_return_int(dev, _THIS_IP_, retval);
746 dev_pm_disable_wake_irq_check(dev, true);
747 __update_runtime_status(dev, RPM_ACTIVE);
748 dev->power.deferred_resume = false;
749 wake_up_all(&dev->power.wait_queue);
757 if (!dev->power.runtime_error && (rpmflags & RPM_AUTO) &&
758 pm_runtime_autosuspend_expiration(dev) != 0)
761 pm_runtime_cancel_pending(dev);
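
The RPM_AUTO branch (source lines 605-634) re-arms the hrtimer until pm_runtime_autosuspend_expiration() reports no pending deadline, so the device suspends only after autosuspend_delay ms of idleness. Drivers feed that from their completion paths; a sketch with hypothetical names:

    static void foo_io_done(struct foo *priv)
    {
            /* record activity, then drop our reference; rather than
             * suspending at once, the core (re)arms suspend_timer */
            pm_runtime_mark_last_busy(priv->dev);
            pm_runtime_put_autosuspend(priv->dev);
    }
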
783 static int rpm_resume(struct device *dev, int rpmflags)
784 __releases(&dev->power.lock) __acquires(&dev->power.lock)
790 trace_rpm_resume(dev, rpmflags);
793 if (dev->power.runtime_error) {
795 } else if (dev->power.disable_depth > 0) {
796 if (dev->power.runtime_status == RPM_ACTIVE &&
797 dev->power.last_status == RPM_ACTIVE)
811 dev->power.request = RPM_REQ_NONE;
812 if (!dev->power.timer_autosuspends)
813 pm_runtime_deactivate_timer(dev);
815 if (dev->power.runtime_status == RPM_ACTIVE) {
820 if (dev->power.runtime_status == RPM_RESUMING ||
821 dev->power.runtime_status == RPM_SUSPENDING) {
825 if (dev->power.runtime_status == RPM_SUSPENDING) {
826 dev->power.deferred_resume = true;
835 if (dev->power.irq_safe) {
836 spin_unlock(&dev->power.lock);
840 spin_lock(&dev->power.lock);
846 prepare_to_wait(&dev->power.wait_queue, &wait,
848 if (dev->power.runtime_status != RPM_RESUMING &&
849 dev->power.runtime_status != RPM_SUSPENDING)
852 spin_unlock_irq(&dev->power.lock);
856 spin_lock_irq(&dev->power.lock);
858 finish_wait(&dev->power.wait_queue, &wait);
867 if (dev->power.no_callbacks && !parent && dev->parent) {
868 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
869 if (dev->parent->power.disable_depth > 0 ||
870 dev->parent->power.ignore_children ||
871 dev->parent->power.runtime_status == RPM_ACTIVE) {
872 atomic_inc(&dev->parent->power.child_count);
873 spin_unlock(&dev->parent->power.lock);
877 spin_unlock(&dev->parent->power.lock);
882 dev->power.request = RPM_REQ_RESUME;
883 if (!dev->power.request_pending) {
884 dev->power.request_pending = true;
885 queue_work(pm_wq, &dev->power.work);
891 if (!parent && dev->parent) {
897 parent = dev->parent;
898 if (dev->power.irq_safe)
901 spin_unlock(&dev->power.lock);
918 spin_lock(&dev->power.lock);
926 if (dev->power.no_callbacks)
929 __update_runtime_status(dev, RPM_RESUMING);
931 callback = RPM_GET_CALLBACK(dev, runtime_resume);
933 dev_pm_disable_wake_irq_check(dev, false);
934 retval = rpm_callback(callback, dev);
936 __update_runtime_status(dev, RPM_SUSPENDED);
937 pm_runtime_cancel_pending(dev);
938 dev_pm_enable_wake_irq_check(dev, false);
941 __update_runtime_status(dev, RPM_ACTIVE);
942 pm_runtime_mark_last_busy(dev);
946 wake_up_all(&dev->power.wait_queue);
949 rpm_idle(dev, RPM_ASYNC);
952 if (parent && !dev->power.irq_safe) {
953 spin_unlock_irq(&dev->power.lock);
957 spin_lock_irq(&dev->power.lock);
960 trace_rpm_return_int(dev, _THIS_IP_, retval);
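
The usual entry into this path is pm_runtime_resume_and_get() or pm_runtime_get_sync() at the top of an I/O routine; a sketch (foo_do_xfer() is hypothetical):

    static int foo_xfer(struct foo *priv)
    {
            int ret;

            ret = pm_runtime_resume_and_get(priv->dev);
            if (ret < 0)
                    return ret;     /* reference already dropped on failure */

            ret = foo_do_xfer(priv);

            pm_runtime_mark_last_busy(priv->dev);
            pm_runtime_put_autosuspend(priv->dev);
            return ret;
    }
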
974 struct device *dev = container_of(work, struct device, power.work);
977 spin_lock_irq(&dev->power.lock);
979 if (!dev->power.request_pending)
982 req = dev->power.request;
983 dev->power.request = RPM_REQ_NONE;
984 dev->power.request_pending = false;
990 rpm_idle(dev, RPM_NOWAIT);
993 rpm_suspend(dev, RPM_NOWAIT);
996 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
999 rpm_resume(dev, RPM_NOWAIT);
1004 spin_unlock_irq(&dev->power.lock);
1015 struct device *dev = container_of(timer, struct device, power.suspend_timer);
1019 spin_lock_irqsave(&dev->power.lock, flags);
1021 expires = dev->power.timer_expires;
1027 dev->power.timer_expires = 0;
1028 rpm_suspend(dev, dev->power.timer_autosuspends ?
1032 spin_unlock_irqrestore(&dev->power.lock, flags);
1042 int pm_schedule_suspend(struct device *dev, unsigned int delay)
1048 spin_lock_irqsave(&dev->power.lock, flags);
1051 retval = rpm_suspend(dev, RPM_ASYNC);
1055 retval = rpm_check_suspend_allowed(dev);
1060 pm_runtime_cancel_pending(dev);
1063 dev->power.timer_expires = expires;
1064 dev->power.timer_autosuspends = 0;
1065 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
1068 spin_unlock_irqrestore(&dev->power.lock, flags);
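
A caller-side sketch: request an asynchronous suspend attempt half a second from now; a nonzero return means rpm_check_suspend_allowed() refused it (or, for delay == 0, that the immediate RPM_ASYNC suspend failed):

    int ret = pm_schedule_suspend(dev, 500);    /* delay in ms */

    if (ret)
            dev_dbg(dev, "suspend not scheduled: %d\n", ret);
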
1074 static int rpm_drop_usage_count(struct device *dev)
1078 ret = atomic_sub_return(1, &dev->power.usage_count);
1088 atomic_inc(&dev->power.usage_count);
1089 dev_warn(dev, "Runtime PM usage count underflow!\n");
1106 int __pm_runtime_idle(struct device *dev, int rpmflags)
1112 retval = rpm_drop_usage_count(dev);
1116 trace_rpm_usage(dev, rpmflags);
1121 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1123 spin_lock_irqsave(&dev->power.lock, flags);
1124 retval = rpm_idle(dev, rpmflags);
1125 spin_unlock_irqrestore(&dev->power.lock, flags);
1144 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1150 retval = rpm_drop_usage_count(dev);
1154 trace_rpm_usage(dev, rpmflags);
1159 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1161 spin_lock_irqsave(&dev->power.lock, flags);
1162 retval = rpm_suspend(dev, rpmflags);
1163 spin_unlock_irqrestore(&dev->power.lock, flags);
1180 int __pm_runtime_resume(struct device *dev, int rpmflags)
1185 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1186 dev->power.runtime_status != RPM_ACTIVE);
1189 atomic_inc(&dev->power.usage_count);
1191 spin_lock_irqsave(&dev->power.lock, flags);
1192 retval = rpm_resume(dev, rpmflags);
1193 spin_unlock_irqrestore(&dev->power.lock, flags);
1223 static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
1228 spin_lock_irqsave(&dev->power.lock, flags);
1229 if (dev->power.disable_depth > 0) {
1231 } else if (dev->power.runtime_status != RPM_ACTIVE) {
1233 } else if (ign_usage_count || (!dev->power.ignore_children &&
1234 atomic_read(&dev->power.child_count) > 0)) {
1236 atomic_inc(&dev->power.usage_count);
1238 retval = atomic_inc_not_zero(&dev->power.usage_count);
1240 trace_rpm_usage(dev, 0);
1241 spin_unlock_irqrestore(&dev->power.lock, flags);
1256 int pm_runtime_get_if_active(struct device *dev)
1258 return pm_runtime_get_conditional(dev, true);
1278 int pm_runtime_get_if_in_use(struct device *dev)
1280 return pm_runtime_get_conditional(dev, false);
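
The conditional getters serve fast paths that must never wake a suspended device. A sketch (foo_poke_hw() is hypothetical):

    if (pm_runtime_get_if_in_use(dev) > 0) {
            /* device was RPM_ACTIVE with a nonzero usage count,
             * and that count has now been incremented */
            foo_poke_hw(dev);
            pm_runtime_put(dev);
    }
    /* 0 or negative: suspended, unused, or runtime PM disabled; skip */
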
1308 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1310 struct device *parent = dev->parent;
1318 spin_lock_irqsave(&dev->power.lock, flags);
1324 if (dev->power.runtime_error || dev->power.disable_depth)
1325 dev->power.disable_depth++;
1329 spin_unlock_irqrestore(&dev->power.lock, flags);
1343 error = rpm_get_suppliers(dev);
1350 spin_lock_irqsave(&dev->power.lock, flags);
1352 if (dev->power.runtime_status == status || !parent)
1369 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1370 dev_name(dev),
1373 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1386 __update_runtime_status(dev, status);
1388 dev->power.runtime_error = 0;
1391 spin_unlock_irqrestore(&dev->power.lock, flags);
1399 rpm_put_suppliers(dev);
1404 pm_runtime_enable(dev);
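
Drivers normally reach this through the pm_runtime_set_active() and pm_runtime_set_suspended() wrappers. A typical probe tail for hardware the bootloader left powered on:

    pm_runtime_set_active(dev);     /* __pm_runtime_set_status(dev, RPM_ACTIVE) */
    pm_runtime_enable(dev);         /* allow transitions from here on */
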
1419 static void __pm_runtime_barrier(struct device *dev)
1421 pm_runtime_deactivate_timer(dev);
1423 if (dev->power.request_pending) {
1424 dev->power.request = RPM_REQ_NONE;
1425 spin_unlock_irq(&dev->power.lock);
1427 cancel_work_sync(&dev->power.work);
1429 spin_lock_irq(&dev->power.lock);
1430 dev->power.request_pending = false;
1433 if (dev->power.runtime_status == RPM_SUSPENDING ||
1434 dev->power.runtime_status == RPM_RESUMING ||
1435 dev->power.idle_notification) {
1440 prepare_to_wait(&dev->power.wait_queue, &wait,
1442 if (dev->power.runtime_status != RPM_SUSPENDING
1443 && dev->power.runtime_status != RPM_RESUMING
1444 && !dev->power.idle_notification)
1446 spin_unlock_irq(&dev->power.lock);
1450 spin_lock_irq(&dev->power.lock);
1452 finish_wait(&dev->power.wait_queue, &wait);
1470 int pm_runtime_barrier(struct device *dev)
1474 pm_runtime_get_noresume(dev);
1475 spin_lock_irq(&dev->power.lock);
1477 if (dev->power.request_pending
1478 && dev->power.request == RPM_REQ_RESUME) {
1479 rpm_resume(dev, 0);
1483 __pm_runtime_barrier(dev);
1485 spin_unlock_irq(&dev->power.lock);
1486 pm_runtime_put_noidle(dev);
1492 bool pm_runtime_block_if_disabled(struct device *dev)
1496 spin_lock_irq(&dev->power.lock);
1498 ret = !pm_runtime_enabled(dev);
1499 if (ret && dev->power.last_status == RPM_INVALID)
1500 dev->power.last_status = RPM_BLOCKED;
1502 spin_unlock_irq(&dev->power.lock);
1507 void pm_runtime_unblock(struct device *dev)
1509 spin_lock_irq(&dev->power.lock);
1511 if (dev->power.last_status == RPM_BLOCKED)
1512 dev->power.last_status = RPM_INVALID;
1514 spin_unlock_irq(&dev->power.lock);
1517 void __pm_runtime_disable(struct device *dev, bool check_resume)
1519 spin_lock_irq(&dev->power.lock);
1521 if (dev->power.disable_depth > 0) {
1522 dev->power.disable_depth++;
1531 if (check_resume && dev->power.request_pending &&
1532 dev->power.request == RPM_REQ_RESUME) {
1537 pm_runtime_get_noresume(dev);
1539 rpm_resume(dev, 0);
1541 pm_runtime_put_noidle(dev);
1545 update_pm_runtime_accounting(dev);
1547 if (!dev->power.disable_depth++) {
1548 __pm_runtime_barrier(dev);
1549 dev->power.last_status = dev->power.runtime_status;
1553 spin_unlock_irq(&dev->power.lock);
1561 void pm_runtime_enable(struct device *dev)
1565 spin_lock_irqsave(&dev->power.lock, flags);
1567 if (!dev->power.disable_depth) {
1568 dev_warn(dev, "Unbalanced %s!\n", __func__);
1572 if (--dev->power.disable_depth > 0)
1575 if (dev->power.last_status == RPM_BLOCKED) {
1576 dev_warn(dev, "Attempt to enable runtime PM when it is blocked\n");
1579 dev->power.last_status = RPM_INVALID;
1580 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1582 if (dev->power.runtime_status == RPM_SUSPENDED &&
1583 !dev->power.ignore_children &&
1584 atomic_read(&dev->power.child_count) > 0)
1585 dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
1588 spin_unlock_irqrestore(&dev->power.lock, flags);
1602 int devm_pm_runtime_set_active_enabled(struct device *dev)
1606 err = pm_runtime_set_active(dev);
1610 err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev);
1614 return devm_pm_runtime_enable(dev);
1632 int devm_pm_runtime_enable(struct device *dev)
1634 pm_runtime_enable(dev);
1636 return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
1650 int devm_pm_runtime_get_noresume(struct device *dev)
1652 pm_runtime_get_noresume(dev);
1654 return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev);
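
The devm_ variants queue the balancing disable/put for driver unbind via devm_add_action_or_reset(). A probe sketch (foo_probe() is hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            int ret;

            ret = devm_pm_runtime_enable(dev);  /* pm_runtime_disable() on unbind */
            if (ret)
                    return ret;

            return 0;
    }
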
1666 void pm_runtime_forbid(struct device *dev)
1668 spin_lock_irq(&dev->power.lock);
1669 if (!dev->power.runtime_auto)
1672 dev->power.runtime_auto = false;
1673 atomic_inc(&dev->power.usage_count);
1674 rpm_resume(dev, 0);
1677 spin_unlock_irq(&dev->power.lock);
1687 void pm_runtime_allow(struct device *dev)
1691 spin_lock_irq(&dev->power.lock);
1692 if (dev->power.runtime_auto)
1695 dev->power.runtime_auto = true;
1696 ret = rpm_drop_usage_count(dev);
1698 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1700 trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
1703 spin_unlock_irq(&dev->power.lock);
1715 void pm_runtime_no_callbacks(struct device *dev)
1717 spin_lock_irq(&dev->power.lock);
1718 dev->power.no_callbacks = 1;
1719 spin_unlock_irq(&dev->power.lock);
1720 if (device_is_registered(dev))
1721 rpm_sysfs_remove(dev);
1736 void pm_runtime_irq_safe(struct device *dev)
1738 if (dev->parent)
1739 pm_runtime_get_sync(dev->parent);
1741 spin_lock_irq(&dev->power.lock);
1742 dev->power.irq_safe = 1;
1743 spin_unlock_irq(&dev->power.lock);
1758 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1760 int delay = dev->power.autosuspend_delay;
1763 if (dev->power.use_autosuspend && delay < 0) {
1767 atomic_inc(&dev->power.usage_count);
1768 rpm_resume(dev, 0);
1770 trace_rpm_usage(dev, 0);
1779 atomic_dec(&dev->power.usage_count);
1782 rpm_idle(dev, RPM_AUTO);
1795 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1799 spin_lock_irq(&dev->power.lock);
1800 old_delay = dev->power.autosuspend_delay;
1801 old_use = dev->power.use_autosuspend;
1802 dev->power.autosuspend_delay = delay;
1803 update_autosuspend(dev, old_delay, old_use);
1804 spin_unlock_irq(&dev->power.lock);
1816 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1820 spin_lock_irq(&dev->power.lock);
1821 old_delay = dev->power.autosuspend_delay;
1822 old_use = dev->power.use_autosuspend;
1823 dev->power.use_autosuspend = use;
1824 update_autosuspend(dev, old_delay, old_use);
1825 spin_unlock_irq(&dev->power.lock);
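
Note the guard in update_autosuspend(): use_autosuspend together with a negative delay takes a usage-count reference and resumes the device, i.e. it blocks suspend entirely. The conventional probe sequence sets the delay before enabling autosuspend:

    pm_runtime_set_autosuspend_delay(dev, 2000);    /* ms of idleness first */
    pm_runtime_use_autosuspend(dev);    /* __pm_runtime_use_autosuspend(dev, true) */
    pm_runtime_enable(dev);
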
1833 void pm_runtime_init(struct device *dev)
1835 dev->power.runtime_status = RPM_SUSPENDED;
1836 dev->power.last_status = RPM_INVALID;
1837 dev->power.idle_notification = false;
1839 dev->power.disable_depth = 1;
1840 atomic_set(&dev->power.usage_count, 0);
1842 dev->power.runtime_error = 0;
1844 atomic_set(&dev->power.child_count, 0);
1845 pm_suspend_ignore_children(dev, false);
1846 dev->power.runtime_auto = true;
1848 dev->power.request_pending = false;
1849 dev->power.request = RPM_REQ_NONE;
1850 dev->power.deferred_resume = false;
1851 dev->power.needs_force_resume = false;
1852 INIT_WORK(&dev->power.work, pm_runtime_work);
1854 dev->power.timer_expires = 0;
1855 hrtimer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, CLOCK_MONOTONIC,
1858 init_waitqueue_head(&dev->power.wait_queue);
1865 void pm_runtime_reinit(struct device *dev)
1867 if (!pm_runtime_enabled(dev)) {
1868 if (dev->power.runtime_status == RPM_ACTIVE)
1869 pm_runtime_set_suspended(dev);
1870 if (dev->power.irq_safe) {
1871 spin_lock_irq(&dev->power.lock);
1872 dev->power.irq_safe = 0;
1873 spin_unlock_irq(&dev->power.lock);
1874 if (dev->parent)
1875 pm_runtime_put(dev->parent);
1882 dev->power.needs_force_resume = false;
1889 void pm_runtime_remove(struct device *dev)
1891 __pm_runtime_disable(dev, false);
1892 pm_runtime_reinit(dev);
1899 void pm_runtime_get_suppliers(struct device *dev)
1906 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1920 void pm_runtime_put_suppliers(struct device *dev)
1927 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1937 void pm_runtime_new_link(struct device *dev)
1939 spin_lock_irq(&dev->power.lock);
1940 dev->power.links_count++;
1941 spin_unlock_irq(&dev->power.lock);
1944 static void pm_runtime_drop_link_count(struct device *dev)
1946 spin_lock_irq(&dev->power.lock);
1947 WARN_ON(dev->power.links_count == 0);
1948 dev->power.links_count--;
1949 spin_unlock_irq(&dev->power.lock);
1970 static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
1978 if (dev_pm_strict_midlayer_is_set(dev))
1979 return __rpm_get_driver_callback(dev, cb_offset);
1981 return __rpm_get_callback(dev, cb_offset);
1984 #define GET_CALLBACK(dev, callback) \
1985 get_callback(dev, offsetof(struct dev_pm_ops, callback))
2004 int pm_runtime_force_suspend(struct device *dev)
2009 pm_runtime_disable(dev);
2010 if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
2013 callback = GET_CALLBACK(dev, runtime_suspend);
2015 dev_pm_enable_wake_irq_check(dev, true);
2016 ret = callback ? callback(dev) : 0;
2020 dev_pm_enable_wake_irq_complete(dev);
2031 if (pm_runtime_need_not_resume(dev))
2032 pm_runtime_set_suspended(dev);
2034 dev->power.needs_force_resume = true;
2039 dev_pm_disable_wake_irq_check(dev, true);
2040 pm_runtime_enable(dev);
2066 int pm_runtime_force_resume(struct device *dev)
2071 if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
2072 pm_runtime_status_suspended(dev)))
2075 callback = GET_CALLBACK(dev, runtime_resume);
2077 dev_pm_disable_wake_irq_check(dev, false);
2078 ret = callback ? callback(dev) : 0;
2080 pm_runtime_set_suspended(dev);
2081 dev_pm_enable_wake_irq_check(dev, false);
2085 pm_runtime_mark_last_busy(dev);
2093 dev->power.smart_suspend = false;
2098 dev->power.needs_force_resume = false;
2100 pm_runtime_enable(dev);
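
A common pattern is to wire this pair straight into a driver's system-sleep operations so system suspend reuses the runtime PM callbacks (foo_pm_ops is a hypothetical name):

    static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, pm_runtime_force_suspend,
                                    pm_runtime_force_resume);
    /* wired up with: .driver.pm = pm_sleep_ptr(&foo_pm_ops) */
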
2105 bool pm_runtime_need_not_resume(struct device *dev)
2107 return atomic_read(&dev->power.usage_count) <= 1 &&
2108 (atomic_read(&dev->power.child_count) == 0 ||
2109 dev->power.ignore_children);