Lines matching refs: s32

54	s32 target_value;	/* Do not change to 64 bit */
55	s32 default_value;
56	s32 no_constraint_value;
68	s32 flags;		/* Do not change to 64 bit */
73	s32 effective_flags;	/* Do not change to 64 bit */
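
The three fields at lines 54-56 sit together in struct pm_qos_constraints, which aggregates all requests of one QoS class; flags and effective_flags belong to the flags-request bookkeeping structures. A rough sketch of the surrounding definition, reconstructed from the matches above rather than quoted verbatim:

struct pm_qos_constraints {
	struct plist_head list;		/* plist of active requests */
	s32 target_value;		/* currently effective (aggregated) value */
	s32 default_value;		/* value used when no request is active */
	s32 no_constraint_value;	/* value meaning "no constraint at all" */
	enum pm_qos_type type;		/* PM_QOS_MIN or PM_QOS_MAX aggregation */
	struct blocking_notifier_head *notifiers;
};
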
139 s32 pm_qos_read_value(struct pm_qos_constraints *c);
144 enum pm_qos_req_action action, s32 val);
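
pm_qos_read_value() and pm_qos_update_target() at lines 139-144 are the internal constraint-management helpers: the former returns the currently aggregated target_value, the latter re-evaluates the plist when a request is added, updated, or removed. They are used by the QoS classes themselves rather than by ordinary drivers. A hedged sketch of how a caller might push a changed request value (the function name is made up for illustration):

#include <linux/pm_qos.h>

/* Illustrative only: re-aggregate the constraints when one request changes. */
static int example_apply_request(struct pm_qos_constraints *c,
				 struct pm_qos_request *req, s32 value)
{
	if (value == req->node.prio)
		return 0;	/* value unchanged, nothing to re-aggregate */

	return pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, value);
}
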
147 s32 cpu_latency_qos_limit(void);
149 void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value);
150 void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value);
153 static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; } in cpu_latency_qos_limit()
159 s32 value) {} in cpu_latency_qos_add_request()
161 s32 new_value) {} in cpu_latency_qos_update_request()
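
The cpu_latency_qos_*() calls at lines 147-150 take the acceptable CPU exit latency in microseconds; the inline stubs at lines 153-161 report no limit (INT_MAX) and otherwise do nothing. A minimal usage sketch from driver code (the request object and the 20 us bound are illustrative; cpu_latency_qos_remove_request() does not show up in this s32 listing because it takes no value argument):

#include <linux/pm_qos.h>

static struct pm_qos_request snd_latency_req;	/* hypothetical driver request */

static void example_start_low_latency_io(void)
{
	/* Keep CPU wakeup latency at or below 20 us while streaming. */
	cpu_latency_qos_add_request(&snd_latency_req, 20);
}

static void example_stop_low_latency_io(void)
{
	/* Drop the constraint as soon as it is no longer needed. */
	cpu_latency_qos_remove_request(&snd_latency_req);
}
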
166 enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
167 enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
168 s32 __dev_pm_qos_resume_latency(struct device *dev);
169 s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type);
171 enum dev_pm_qos_req_type type, s32 value);
172 int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
184 enum dev_pm_qos_req_type type, s32 value);
185 int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
187 int dev_pm_qos_expose_flags(struct device *dev, s32 value);
189 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
190 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
191 int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
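
The dev_pm_qos_*() prototypes at lines 166-191 provide the same add/update pattern per device, keyed by a request type such as DEV_PM_QOS_RESUME_LATENCY (in microseconds). A hedged sketch of constraining a device's resume latency (dev, the request object, and the values are illustrative):

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request resume_lat_req;

static int example_limit_resume_latency(struct device *dev)
{
	/* Ask the PM core to keep this device's resume latency <= 100 us. */
	return dev_pm_qos_add_request(dev, &resume_lat_req,
				      DEV_PM_QOS_RESUME_LATENCY, 100);
}

static void example_relax_resume_latency(void)
{
	/* Later, loosen and then drop the constraint. */
	dev_pm_qos_update_request(&resume_lat_req, 500);
	dev_pm_qos_remove_request(&resume_lat_req);
}
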
195 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) in dev_pm_qos_requested_resume_latency()
200 static inline s32 dev_pm_qos_requested_flags(struct device *dev) in dev_pm_qos_requested_flags()
205 static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) in dev_pm_qos_raw_resume_latency()
213 s32 mask) in __dev_pm_qos_flags()
216 s32 mask) in dev_pm_qos_flags()
218 static inline s32 __dev_pm_qos_resume_latency(struct device *dev) in __dev_pm_qos_resume_latency()
220 static inline s32 dev_pm_qos_read_value(struct device *dev, in dev_pm_qos_read_value()
239 s32 value) in dev_pm_qos_add_request()
242 s32 new_value) in dev_pm_qos_update_request()
265 s32 value) in dev_pm_qos_add_ancestor_request()
267 static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) in dev_pm_qos_expose_latency_limit()
270 static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value) in dev_pm_qos_expose_flags()
273 static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set) in dev_pm_qos_update_flags()
275 static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) in dev_pm_qos_get_user_latency_tolerance()
277 static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) in dev_pm_qos_update_user_latency_tolerance()
283 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) in dev_pm_qos_requested_resume_latency()
287 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } in dev_pm_qos_requested_flags()
288 static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev) in dev_pm_qos_raw_resume_latency()
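
Lines 187-189 (and the corresponding stubs above) cover per-device QoS flags rather than latencies: here the s32 value is a bitmask such as PM_QOS_FLAG_NO_POWER_OFF. A small, hedged example of toggling that flag on a device (the wrapper name is made up):

#include <linux/device.h>
#include <linux/pm_qos.h>

static int example_forbid_power_off(struct device *dev, bool forbid)
{
	/* Set or clear the "no power off" QoS flag for this device. */
	return dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, forbid);
}
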
301 s32 freq_qos_read_value(struct freq_constraints *qos,
306 enum freq_qos_req_type type, s32 value);
307 int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
310 enum pm_qos_req_action action, s32 value);
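
The freq_qos_*() family at lines 301-310 applies the same request pattern to frequency constraints; for cpufreq, the struct freq_constraints being constrained lives inside the policy, and values are in kHz. A hedged sketch (the policy pointer, request object, and 800000 kHz value are illustrative):

#include <linux/cpufreq.h>
#include <linux/pm_qos.h>

static struct freq_qos_request min_freq_req;

static int example_pin_min_freq(struct cpufreq_policy *policy)
{
	/* Request that the policy's minimum frequency stay at or above 800 MHz. */
	return freq_qos_add_request(&policy->constraints, &min_freq_req,
				    FREQ_QOS_MIN, 800000);
}

static void example_release_min_freq(void)
{
	freq_qos_remove_request(&min_freq_req);
}
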