// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of a QoS value: get notified when the target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Notes about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
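
/*
 * Usage sketch (illustrative, not part of this file): deciding whether a
 * device may be powered off, based on the aggregated PM QoS flags.  The
 * helper name foo_may_power_off() is hypothetical.
 *
 *	static bool foo_may_power_off(struct device *dev)
 *	{
 *		return dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) ==
 *				PM_QOS_FLAGS_NONE;
 *	}
 */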

/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos *qos = dev->power.qos;
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
			: pm_qos_read_value(&qos->resume_latency);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
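
/*
 * Usage sketch (illustrative, not part of this file): comparing a candidate
 * low-power state's exit latency against the device's effective resume
 * latency constraint.  exit_latency_us and use_the_state() are hypothetical.
 *
 *	s32 limit = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
 *
 *	if (limit == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT ||
 *	    exit_latency_us <= limit)
 *		use_the_state();
 */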

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_apply(&req->data.freq, action, value);
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	freq_constraints_init(&qos->freq);

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.min_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.max_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (ret)
		return ret;

	req->dev = dev;
	req->type = type;
	if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MIN, value);
	else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MAX, value);
	else
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);

	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
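
/*
 * Usage sketch (illustrative, not part of this file): a driver bounding its
 * device's resume latency from probe.  foo_probe() and foo_latency_req are
 * hypothetical names; the positive return value 1 only signals a target
 * change, so only negative values are errors.
 *
 *	static struct dev_pm_qos_request foo_latency_req;
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		int ret = dev_pm_qos_add_request(dev, &foo_latency_req,
 *						 DEV_PM_QOS_RESUME_LATENCY, 100);
 *		return ret < 0 ? ret : 0;
 *	}
 */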

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		curr_value = req->data.freq.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
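
/*
 * Usage sketch (illustrative, not part of this file): retuning the request
 * from the probe example above as the workload changes; the call is cheap
 * when the value is unchanged.
 *
 *	dev_pm_qos_update_request(&foo_latency_req, 20);	(busy: 20 us)
 *	dev_pm_qos_update_request(&foo_latency_req, 500);	(idle: 500 us)
 */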

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
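
/*
 * Usage sketch (illustrative, not part of this file): dropping the request on
 * driver removal.  The handle is zeroed by the core, so it may be reused with
 * dev_pm_qos_add_request() later.
 *
 *	if (dev_pm_qos_request_active(&foo_latency_req))
 *		dev_pm_qos_remove_request(&foo_latency_req);
 */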

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (ret)
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
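
/*
 * Usage sketch (illustrative, not part of this file): a watcher logging each
 * change of the aggregated resume-latency target, which the chain passes in
 * the notifier's 'action' argument.  The foo_* names are hypothetical.
 *
 *	static int foo_latency_notify(struct notifier_block *nb,
 *				      unsigned long value, void *unused)
 *	{
 *		pr_debug("resume latency target: %lu us\n", value);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_latency_notify,
 *	};
 *
 *	dev_pm_qos_add_notifier(dev, &foo_nb, DEV_PM_QOS_RESUME_LATENCY);
 */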

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							 notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
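
/*
 * Usage sketch (illustrative, not part of this file): the teardown
 * counterpart of the registration above.
 *
 *	dev_pm_qos_remove_notifier(dev, &foo_nb, DEV_PM_QOS_RESUME_LATENCY);
 */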

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
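
/*
 * Usage sketch (illustrative, not part of this file): a child device (e.g. a
 * UART port) constraining the nearest suitable ancestor, such as its
 * controller, so the whole path can wake up in time.
 *
 *	static struct dev_pm_qos_request foo_ancestor_req;
 *
 *	dev_pm_qos_add_ancestor_request(dev, &foo_ancestor_req,
 *					DEV_PM_QOS_RESUME_LATENCY, 50);
 */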

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	default:
		WARN_ON(1);
		return;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
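
/*
 * Usage sketch (illustrative, not part of this file): letting user space tune
 * the limit via /sys/devices/.../power/pm_qos_resume_latency_us, starting
 * from "no constraint".
 *
 *	dev_pm_qos_expose_latency_limit(dev, PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
 */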

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
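
/*
 * Usage sketch (illustrative, not part of this file): exposing the flags with
 * the conservative "no power off" setting initially in effect; user space may
 * clear it later through sysfs.
 *
 *	dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 */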

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
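
/*
 * Usage sketch (illustrative, not part of this file): how a sysfs store
 * handler could map a boolean user input onto one flag.
 *
 *	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, value != 0);
 */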

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
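
/*
 * Usage sketch (illustrative, not part of this file): the two directions a
 * caller can take, setting a tolerance or dropping the user space request via
 * the special negative value.
 *
 *	dev_pm_qos_update_user_latency_tolerance(dev, 100);	(tolerate 100 us)
 *	dev_pm_qos_update_user_latency_tolerance(dev,
 *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 */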

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
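
/*
 * Usage sketch (illustrative, not part of this file): a driver that provides
 * the set_latency_tolerance() hook, then publishes the sysfs knob.
 * foo_set_latency_tolerance() is a hypothetical callback.
 *
 *	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
 *	dev_pm_qos_expose_latency_tolerance(dev);
 */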

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);