// SPDX-License-Identifier: GPL-2.0-only
/*
 * bios-less APM driver for ARM Linux
 *  Jamey Hicks <jamey@crl.dec.com>
 *  adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
 *
 * APM 1.2 Reference:
 *   Intel Corporation, Microsoft Corporation. Advanced Power Management
 *   (APM) BIOS Interface Specification, Revision 1.2, February 1996.
 *
 * This document is available from Microsoft at:
 *    http://www.microsoft.com/whdc/archive/amp_12.mspx
 */
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/apm_bios.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/apm-emulation.h>
#include <linux/freezer.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>

/*
 * One option can be changed at boot time as follows:
 *	apm=on/off			enable/disable APM
 */
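
/*
 * For example, a kernel command line of the form (illustrative only,
 * the other parameters are hypothetical):
 *
 *	console=ttyS0 root=/dev/mmcblk0p2 apm=off
 *
 * disables this driver; "apm=on" (the default) leaves it enabled.
 */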

/*
 * Maximum number of events stored
 */
#define APM_MAX_EVENTS		16

struct apm_queue {
	unsigned int		event_head;
	unsigned int		event_tail;
	apm_event_t		events[APM_MAX_EVENTS];
};

/*
 * thread states (for threads using a writable /dev/apm_bios fd):
 *
 * SUSPEND_NONE:	nothing happening
 * SUSPEND_PENDING:	suspend event queued for thread and pending to be read
 * SUSPEND_READ:	suspend event read, pending acknowledgement
 * SUSPEND_ACKED:	acknowledgement received from thread (via ioctl),
 *			waiting for resume
 * SUSPEND_ACKTO:	acknowledgement timeout
 * SUSPEND_DONE:	thread had acked suspend and is now notified of
 *			resume
 *
 * SUSPEND_WAIT:	this thread invoked suspend and is waiting for resume
 *
 * A thread migrates in one of three paths:
 *	NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
 *				    -6-> ACKTO -7-> NONE
 *	NONE -8-> WAIT -9-> NONE
 *
 * While in PENDING or READ, the thread is accounted for in the
 * suspend_acks_pending counter.
 *
 * The transitions are invoked as follows:
 *	1: suspend event is signalled from the core PM code
 *	2: the suspend event is read from the fd by the userspace thread
 *	3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
 *	4: core PM code signals that we have resumed
 *	5: APM_IOC_SUSPEND ioctl returns
 *
 *	6: the notifier invoked from the core PM code timed out waiting
 *	   for all relevant threads to enter ACKED state and puts those
 *	   that haven't into ACKTO
 *	7: those threads issue the APM_IOC_SUSPEND ioctl too late and
 *	   get an error
 *
 *	8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
 *	   ioctl code invokes pm_suspend()
 *	9: pm_suspend() returns indicating resume
 *
 * (A sketch of the userspace side of this protocol follows the enum below.)
 */
enum apm_suspend_state {
	SUSPEND_NONE,
	SUSPEND_PENDING,
	SUSPEND_READ,
	SUSPEND_ACKED,
	SUSPEND_ACKTO,
	SUSPEND_WAIT,
	SUSPEND_DONE,
};
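
/*
 * To illustrate the PENDING -> READ -> ACKED path above: a privileged
 * userspace daemon holding a read/write fd on /dev/apm_bios typically
 * loops roughly as follows.  This is only a sketch of the expected
 * userspace side, not part of this driver:
 *
 *	int fd = open("/dev/apm_bios", O_RDWR);
 *	apm_event_t ev;
 *
 *	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
 *		if (ev == APM_USER_SUSPEND || ev == APM_SYS_SUSPEND) {
 *			sync();				// flush user data
 *			ioctl(fd, APM_IOC_SUSPEND);	// ack; returns on resume
 *		}
 *	}
 */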

/*
 * The per-file APM data
 */
struct apm_user {
	struct list_head	list;

	unsigned int		suser: 1;
	unsigned int		writer: 1;
	unsigned int		reader: 1;

	int			suspend_result;
	enum apm_suspend_state	suspend_state;

	struct apm_queue	queue;
};

/*
 * Local variables
 */
static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
static int apm_disabled;
static struct task_struct *kapmd_tsk;

static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);

/*
 * This is a list of everyone who has opened /dev/apm_bios
 */
static DECLARE_RWSEM(user_list_lock);
static LIST_HEAD(apm_user_list);

/*
 * kapmd info.  kapmd provides us a process context to handle
 * "APM" events within - specifically necessary if we're going
 * to be suspending the system.
 */
static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
static DEFINE_SPINLOCK(kapmd_queue_lock);
static struct apm_queue kapmd_queue;

static DEFINE_MUTEX(state_lock);

static const char driver_version[] = "1.13";	/* no spaces */



/*
 * Compatibility cruft until the IPAQ people move over to the new
 * interface.
 */
static void __apm_get_power_status(struct apm_power_info *info)
{
}

/*
 * This allows machines to provide their own "apm get power status" function.
 */
void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
EXPORT_SYMBOL(apm_get_power_status);
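
/*
 * A board/platform driver can override the stub above by assigning its
 * own callback, for example (hypothetical "acme" names, sketch only;
 * the numeric values follow the /proc/apm conventions documented above
 * proc_apm_show() below):
 *
 *	static void acme_get_power_status(struct apm_power_info *info)
 *	{
 *		info->ac_line_status = 0x01;	// on-line
 *		info->battery_status = 0x03;	// charging
 *		info->battery_flag   = 1 << 3;	// charging
 *		info->battery_life   = 80;	// percent
 *		info->units	     = 0;	// minutes
 *		info->time	     = 120;
 *	}
 *
 *	apm_get_power_status = acme_get_power_status;
 */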


/*
 * APM event queue management.
 */
static inline int queue_empty(struct apm_queue *q)
{
	return q->event_head == q->event_tail;
}

static inline apm_event_t queue_get_event(struct apm_queue *q)
{
	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	return q->events[q->event_tail];
}

static void queue_add_event(struct apm_queue *q, apm_event_t event)
{
	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
	if (q->event_head == q->event_tail) {
		static int notified;

		if (notified++ == 0)
		    printk(KERN_ERR "apm: an event queue overflowed\n");
		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	}
	q->events[q->event_head] = event;
}
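
/*
 * Worked example of the ring buffer above (head == tail means empty, so
 * at most APM_MAX_EVENTS - 1 events are stored): starting from
 * head = tail = 0, queue_add_event() advances head to 1 and stores the
 * event in events[1]; queue_get_event() then advances tail to 1 and
 * returns events[1], leaving head == tail == 1, i.e. empty again.  On
 * overflow the oldest event is dropped by advancing tail once more.
 */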

static void queue_event(apm_event_t event)
{
	struct apm_user *as;

	down_read(&user_list_lock);
	list_for_each_entry(as, &apm_user_list, list) {
		if (as->reader)
			queue_add_event(&as->queue, event);
	}
	up_read(&user_list_lock);
	wake_up_interruptible(&apm_waitqueue);
}

static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
	struct apm_user *as = fp->private_data;
	apm_event_t event;
	int i = count, ret = 0;

	if (count < sizeof(apm_event_t))
		return -EINVAL;

	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
		return -EAGAIN;

	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));

	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
		event = queue_get_event(&as->queue);

		ret = -EFAULT;
		if (copy_to_user(buf, &event, sizeof(event)))
			break;

		mutex_lock(&state_lock);
		if (as->suspend_state == SUSPEND_PENDING &&
		    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
			as->suspend_state = SUSPEND_READ;
		mutex_unlock(&state_lock);

		buf += sizeof(event);
		i -= sizeof(event);
	}

	if (i < count)
		ret = count - i;

	return ret;
}

static __poll_t apm_poll(struct file *fp, poll_table * wait)
{
	struct apm_user *as = fp->private_data;

	poll_wait(fp, &apm_waitqueue, wait);
	return queue_empty(&as->queue) ? 0 : EPOLLIN | EPOLLRDNORM;
}

/*
 * apm_ioctl - handle APM ioctl
 *
 * APM_IOC_SUSPEND
 *   This IOCTL is overloaded, and performs two functions.  It is used to:
 *     - initiate a suspend
 *     - acknowledge a suspend read from /dev/apm_bios.
 *   Only when everyone who has opened /dev/apm_bios with write permission
 *   has acknowledged does the actual suspend happen (sketches of the
 *   userspace side appear after the state diagram above and after this
 *   function).
 */
static long
apm_ioctl(struct file *filp, u_int cmd, u_long arg)
{
	struct apm_user *as = filp->private_data;
	int err = -EINVAL;

	if (!as->suser || !as->writer)
		return -EPERM;

	switch (cmd) {
	case APM_IOC_SUSPEND:
		mutex_lock(&state_lock);

		as->suspend_result = -EINTR;

		switch (as->suspend_state) {
		case SUSPEND_READ:
			/*
			 * If we read a suspend command from /dev/apm_bios,
			 * then the corresponding APM_IOC_SUSPEND ioctl is
			 * interpreted as an acknowledge.
			 */
			as->suspend_state = SUSPEND_ACKED;
			atomic_dec(&suspend_acks_pending);
			mutex_unlock(&state_lock);

			/*
			 * suspend_acks_pending changed, the notifier needs to
			 * be woken up for this
			 */
			wake_up(&apm_suspend_waitqueue);

			/*
			 * Wait for the suspend/resume to complete.  If there
			 * are pending acknowledges, we wait here for them.
			 * wait_event_freezable() is interruptible and pending
			 * signal can cause busy looping.  We aren't doing
			 * anything critical, chill a bit on each iteration.
			 */
			while (wait_event_freezable(apm_suspend_waitqueue,
					as->suspend_state != SUSPEND_ACKED))
				msleep(10);
			break;
		case SUSPEND_ACKTO:
			as->suspend_result = -ETIMEDOUT;
			mutex_unlock(&state_lock);
			break;
		default:
			as->suspend_state = SUSPEND_WAIT;
			mutex_unlock(&state_lock);

			/*
			 * Otherwise it is a request to suspend the system.
			 * Just invoke pm_suspend(), we'll handle it from
			 * there via the notifier.
			 */
			as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
		}

		mutex_lock(&state_lock);
		err = as->suspend_result;
		as->suspend_state = SUSPEND_NONE;
		mutex_unlock(&state_lock);
		break;
	}

	return err;
}
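
/*
 * For the "initiate a suspend" use (the NONE -> WAIT -> NONE path in the
 * state diagram above), a userspace tool needs CAP_SYS_ADMIN and an fd
 * opened for writing, and simply issues the ioctl; the call returns once
 * the system has resumed.  Sketch only, not part of this driver:
 *
 *	int fd = open("/dev/apm_bios", O_RDWR);
 *
 *	if (ioctl(fd, APM_IOC_SUSPEND) < 0)
 *		perror("APM_IOC_SUSPEND");
 */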

static int apm_release(struct inode * inode, struct file * filp)
{
	struct apm_user *as = filp->private_data;

	filp->private_data = NULL;

	down_write(&user_list_lock);
	list_del(&as->list);
	up_write(&user_list_lock);

	/*
	 * We are now unhooked from the chain.  As far as new
	 * events are concerned, we no longer exist.
	 */
	mutex_lock(&state_lock);
	if (as->suspend_state == SUSPEND_PENDING ||
	    as->suspend_state == SUSPEND_READ)
		atomic_dec(&suspend_acks_pending);
	mutex_unlock(&state_lock);

	wake_up(&apm_suspend_waitqueue);

	kfree(as);
	return 0;
}

static int apm_open(struct inode * inode, struct file * filp)
{
	struct apm_user *as;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (as) {
		/*
		 * XXX - this is a tiny bit broken, when we consider BSD
		 * process accounting. If the device is opened by root, we
		 * instantly flag that we used superuser privs. Who knows,
		 * we might close the device immediately without doing a
		 * privileged operation -- cevans
		 */
		as->suser = capable(CAP_SYS_ADMIN);
		as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
		as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;

		down_write(&user_list_lock);
		list_add(&as->list, &apm_user_list);
		up_write(&user_list_lock);

		filp->private_data = as;
	}

	return as ? 0 : -ENOMEM;
}

static const struct file_operations apm_bios_fops = {
	.owner		= THIS_MODULE,
	.read		= apm_read,
	.poll		= apm_poll,
	.unlocked_ioctl	= apm_ioctl,
	.open		= apm_open,
	.release	= apm_release,
	.llseek		= noop_llseek,
};

static struct miscdevice apm_device = {
	.minor		= APM_MINOR_DEV,
	.name		= "apm_bios",
	.fops		= &apm_bios_fops
};


#ifdef CONFIG_PROC_FS
/*
 * Arguments, with symbols from linux/apm_bios.h.
 *
 *   0) Linux driver version (this will change if format changes)
 *   1) APM BIOS Version.  Usually 1.0, 1.1 or 1.2.
 *   2) APM flags from APM Installation Check (0x00):
 *	bit 0: APM_16_BIT_SUPPORT
 *	bit 1: APM_32_BIT_SUPPORT
 *	bit 2: APM_IDLE_SLOWS_CLOCK
 *	bit 3: APM_BIOS_DISABLED
 *	bit 4: APM_BIOS_DISENGAGED
 *   3) AC line status
 *	0x00: Off-line
 *	0x01: On-line
 *	0x02: On backup power (BIOS >= 1.1 only)
 *	0xff: Unknown
 *   4) Battery status
 *	0x00: High
 *	0x01: Low
 *	0x02: Critical
 *	0x03: Charging
 *	0x04: Selected battery not present (BIOS >= 1.2 only)
 *	0xff: Unknown
 *   5) Battery flag
 *	bit 0: High
 *	bit 1: Low
 *	bit 2: Critical
 *	bit 3: Charging
 *	bit 7: No system battery
 *	0xff: Unknown
 *   6) Remaining battery life (percentage of charge):
 *	0-100: valid
 *	-1: Unknown
 *   7) Remaining battery life (time units):
 *	Number of remaining minutes or seconds
 *	-1: Unknown
 *   8) min = minutes; sec = seconds
 */
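/*
 * An illustrative /proc/apm line in the format above (the values are
 * made up for the example): a mains-powered, charging battery at 75%
 * with two hours remaining would read:
 *
 *	1.13 1.2 0x02 0x01 0x03 0x08 75% 120 min
 */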
static int proc_apm_show(struct seq_file *m, void *v)
{
	struct apm_power_info info;
	char *units;

	info.ac_line_status = 0xff;
	info.battery_status = 0xff;
	info.battery_flag   = 0xff;
	info.battery_life   = -1;
	info.time	    = -1;
	info.units	    = -1;

	if (apm_get_power_status)
		apm_get_power_status(&info);

	switch (info.units) {
	default:	units = "?";	break;
	case 0: 	units = "min";	break;
	case 1: 	units = "sec";	break;
	}

	seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
		     driver_version, APM_32_BIT_SUPPORT,
		     info.ac_line_status, info.battery_status,
		     info.battery_flag, info.battery_life,
		     info.time, units);

	return 0;
}
#endif

static int kapmd(void *arg)
{
	do {
		apm_event_t event;

		wait_event_interruptible(kapmd_wait,
				!queue_empty(&kapmd_queue) || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irq(&kapmd_queue_lock);
		event = 0;
		if (!queue_empty(&kapmd_queue))
			event = queue_get_event(&kapmd_queue);
		spin_unlock_irq(&kapmd_queue_lock);

		switch (event) {
		case 0:
			break;

		case APM_LOW_BATTERY:
		case APM_POWER_STATUS_CHANGE:
			queue_event(event);
			break;

		case APM_USER_SUSPEND:
		case APM_SYS_SUSPEND:
			pm_suspend(PM_SUSPEND_MEM);
			break;

		case APM_CRITICAL_SUSPEND:
			atomic_inc(&userspace_notification_inhibit);
			pm_suspend(PM_SUSPEND_MEM);
			atomic_dec(&userspace_notification_inhibit);
			break;
		}
	} while (1);

	return 0;
}

static int apm_suspend_notifier(struct notifier_block *nb,
				unsigned long event,
				void *dummy)
{
	struct apm_user *as;
	int err;
	unsigned long apm_event;

	/* short-cut emergency suspends */
	if (atomic_read(&userspace_notification_inhibit))
		return NOTIFY_DONE;

	switch (event) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		apm_event = (event == PM_SUSPEND_PREPARE) ?
			APM_USER_SUSPEND : APM_USER_HIBERNATION;
		/*
		 * Queue an event to all "writer" users that we want
		 * to suspend and need their ack.
		 */
		mutex_lock(&state_lock);
		down_read(&user_list_lock);

		list_for_each_entry(as, &apm_user_list, list) {
			if (as->suspend_state != SUSPEND_WAIT && as->reader &&
			    as->writer && as->suser) {
				as->suspend_state = SUSPEND_PENDING;
				atomic_inc(&suspend_acks_pending);
				queue_add_event(&as->queue, apm_event);
			}
		}

		up_read(&user_list_lock);
		mutex_unlock(&state_lock);
		wake_up_interruptible(&apm_waitqueue);

		/*
		 * Wait for the suspend_acks_pending variable to drop to
		 * zero, meaning everybody acked the suspend event (or the
		 * process was killed.)
		 *
		 * If the app won't answer within a short while we assume it
		 * locked up and ignore it.
		 */
		err = wait_event_interruptible_timeout(
			apm_suspend_waitqueue,
			atomic_read(&suspend_acks_pending) == 0,
			5*HZ);

		/* timed out */
		if (err == 0) {
			/*
			 * Move anybody who timed out to "ack timeout" state.
			 *
			 * Userspace may ACK right after the timeout but
			 * before we enter the locked section below; that
			 * is harmless.
			 */
			mutex_lock(&state_lock);
			down_read(&user_list_lock);
			list_for_each_entry(as, &apm_user_list, list) {
				if (as->suspend_state == SUSPEND_PENDING ||
				    as->suspend_state == SUSPEND_READ) {
					as->suspend_state = SUSPEND_ACKTO;
					atomic_dec(&suspend_acks_pending);
				}
			}
			up_read(&user_list_lock);
			mutex_unlock(&state_lock);
		}

		/* let suspend proceed */
		if (err >= 0)
			return NOTIFY_OK;

		/* interrupted by signal */
		return notifier_from_errno(err);

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		apm_event = (event == PM_POST_SUSPEND) ?
			APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
		/*
		 * Anyone on the APM queues will think we're still suspended.
		 * Send a message so everyone knows we're now awake again.
		 */
		queue_event(apm_event);

		/*
		 * Finally, wake up anyone who is sleeping on the suspend.
		 */
		mutex_lock(&state_lock);
		down_read(&user_list_lock);
		list_for_each_entry(as, &apm_user_list, list) {
			if (as->suspend_state == SUSPEND_ACKED) {
				/*
				 * TODO: maybe grab error code, needs core
				 * changes to push the error to the notifier
				 * chain (could use the second parameter if
				 * implemented)
				 */
				as->suspend_result = 0;
				as->suspend_state = SUSPEND_DONE;
			}
		}
		up_read(&user_list_lock);
		mutex_unlock(&state_lock);

		wake_up(&apm_suspend_waitqueue);
		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block apm_notif_block = {
	.notifier_call = apm_suspend_notifier,
};

static int __init apm_init(void)
{
	int ret;

	if (apm_disabled) {
		printk(KERN_NOTICE "apm: disabled on user request.\n");
		return -ENODEV;
	}

	kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
	if (IS_ERR(kapmd_tsk)) {
		ret = PTR_ERR(kapmd_tsk);
		kapmd_tsk = NULL;
		goto out;
	}
	wake_up_process(kapmd_tsk);

#ifdef CONFIG_PROC_FS
	proc_create_single("apm", 0, NULL, proc_apm_show);
#endif

	ret = misc_register(&apm_device);
	if (ret)
		goto out_stop;

	ret = register_pm_notifier(&apm_notif_block);
	if (ret)
		goto out_unregister;

	return 0;

 out_unregister:
	misc_deregister(&apm_device);
 out_stop:
	remove_proc_entry("apm", NULL);
	kthread_stop(kapmd_tsk);
 out:
	return ret;
}

static void __exit apm_exit(void)
{
	unregister_pm_notifier(&apm_notif_block);
	misc_deregister(&apm_device);
	remove_proc_entry("apm", NULL);

	kthread_stop(kapmd_tsk);
}

module_init(apm_init);
module_exit(apm_exit);

MODULE_AUTHOR("Stephen Rothwell");
MODULE_DESCRIPTION("Advanced Power Management");
MODULE_LICENSE("GPL");

#ifndef MODULE
static int __init apm_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "off", 3) == 0)
			apm_disabled = 1;
		if (strncmp(str, "on", 2) == 0)
			apm_disabled = 0;
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}

__setup("apm=", apm_setup);
#endif

/**
 * apm_queue_event - queue an APM event for kapmd
 * @event: APM event
 *
 * Queue an APM event for kapmd to process and ultimately take the
 * appropriate action.  Only a subset of events are handled:
 *   %APM_LOW_BATTERY
 *   %APM_POWER_STATUS_CHANGE
 *   %APM_USER_SUSPEND
 *   %APM_SYS_SUSPEND
 *   %APM_CRITICAL_SUSPEND
 */
void apm_queue_event(apm_event_t event)
{
	unsigned long flags;

	spin_lock_irqsave(&kapmd_queue_lock, flags);
	queue_add_event(&kapmd_queue, event);
	spin_unlock_irqrestore(&kapmd_queue_lock, flags);

	wake_up_interruptible(&kapmd_wait);
}
EXPORT_SYMBOL(apm_queue_event);
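
/*
 * Platform code typically calls apm_queue_event() from the handler that
 * detects a power event, for example (hypothetical "acme" handler,
 * sketch only):
 *
 *	static irqreturn_t acme_battery_irq(int irq, void *dev_id)
 *	{
 *		apm_queue_event(APM_LOW_BATTERY);
 *		return IRQ_HANDLED;
 *	}
 *
 * The event is then delivered to kapmd, which forwards it to readers of
 * /dev/apm_bios.
 */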