/*
 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
 */

#include <common/log.h>
#include "bt_errno.h"
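
/* Single global queue holding all pending (delayed) work items, kept sorted
 * by absolute expiry time. */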
struct k_work_q g_work_queue;

extern void event_callback(uint8_t event_type);
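
/*
 * Queue a work item on the global delayed-work list, which is kept sorted by
 * absolute expiry time (start_ms + timeout). If the item was not already
 * pending, it is linked in at its sorted position and, when the queue head's
 * deadline has not yet passed, event_callback() is raised with
 * K_POLL_TYPE_EARLIER_WORK so the poller can re-evaluate its wakeup time.
 * The work_q argument is currently unused; the global g_work_queue is always
 * used.
 */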
static void k_work_submit_to_queue(struct k_work_q *work_q, struct k_work *work)
{
    sys_snode_t *node = NULL;
    struct k_work *delayed_work = NULL;
    struct k_work *prev_delayed_work = NULL;
    //uint32_t now = k_uptime_get_32();

    if (!atomic_test_and_set_bit(work->flags, K_WORK_STATE_PENDING)) {
        /* Find the first queued item that expires later than this one so the
         * list stays ordered by absolute expiry time. */
        SYS_SLIST_FOR_EACH_NODE(&g_work_queue.queue.queue_list, node) {
            delayed_work = (struct k_work *)node;
            if ((work->timeout + work->start_ms) <
                (delayed_work->start_ms + delayed_work->timeout)) {
                break;
            }
            prev_delayed_work = delayed_work;
        }

        sys_slist_insert(&g_work_queue.queue.queue_list,
                         (sys_snode_t *)prev_delayed_work, (sys_snode_t *)work);

        /* If the queue head has not expired yet, wake the poller so it can
         * adjust its timeout to the (possibly earlier) deadline. */
        delayed_work = k_queue_first_entry(&g_work_queue.queue);
        if (delayed_work &&
            (k_uptime_get_32() <= delayed_work->start_ms + delayed_work->timeout)) {
            event_callback(K_POLL_TYPE_EARLIER_WORK);
        }
    }
}
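
/* Unlink a work item from the given work queue. */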
static void k_work_rm_from_queue(struct k_work_q *work_q, struct k_work *work)
{
    k_queue_remove(&work_q->queue, work);
}
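
/* Initialize the global delayed-work queue. */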
int k_work_q_start(void)
{
    k_queue_init(&g_work_queue.queue);
    return 0;
}
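
/* Prepare a work item: clear the pending flag, set the handler and reset its
 * timing fields. */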
int k_work_init(struct k_work *work, k_work_handler_t handler)
{
    atomic_clear_bit(work->flags, K_WORK_STATE_PENDING);
    work->handler = handler;
    work->start_ms = 0;
    work->timeout = 0;
    return 0;
}
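
/*
 * Submit a work item with no delay. The item is handed to
 * k_delayed_work_submit() with a zero timeout; the cast assumes struct k_work
 * is the leading member of struct k_delayed_work, since only the embedded
 * k_work fields are touched on this path.
 */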
void k_work_submit(struct k_work *work)
{
    k_delayed_work_submit((struct k_delayed_work *)work, 0);
}
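
/* Initialize the embedded work item of a delayed work with the given handler. */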
void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
{
    k_work_init(&work->work, handler);
}
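
/*
 * (Re)schedule a delayed work item. A pending item is cancelled first and
 * then re-queued with start_ms set to the current uptime and timeout set to
 * 'delay' (same units as k_uptime_get_32()). Interrupts are locked for the
 * duration of the update. Always returns 0.
 */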
int k_delayed_work_submit(struct k_delayed_work *work, uint32_t delay)
{
    int err = 0;
    int key = irq_lock();

    /* If the item is already pending, cancel it so it gets re-queued with
     * the new expiry time. */
    if (atomic_test_bit(work->work.flags, K_WORK_STATE_PENDING)) {
        k_delayed_work_cancel(work);
    }

    work->work.start_ms = k_uptime_get_32();
    work->work.timeout = delay;
    k_work_submit_to_queue(&g_work_queue, (struct k_work *)work);

//done:
    irq_unlock(key);
    return err;
}
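
/*
 * Cancel a delayed work item: reset its timeout, clear the pending flag and
 * unlink it from the global queue. Always returns 0.
 */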
int k_delayed_work_cancel(struct k_delayed_work *work)
{
    int key = irq_lock();

    work->work.timeout = 0;
    atomic_clear_bit(work->work.flags, K_WORK_STATE_PENDING);
    k_work_rm_from_queue(&g_work_queue, (struct k_work *)work);
    irq_unlock(key);
    return 0;
}
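
/*
 * Return the time remaining until the work item expires, in the same units as
 * k_uptime_get_32(), clamped at 0 once the deadline has passed or when work
 * is NULL.
 */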
bt_s32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
{
    int32_t remain;

    if (work == NULL) {
        return 0;
    }

    /* Time left until the deadline; clamp to zero once it has passed. */
    remain = work->work.timeout - (k_uptime_get_32() - work->work.start_ms);
    if (remain < 0) {
        remain = 0;
    }

    return remain;
}
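
/*
 * Illustrative usage sketch (not part of this module): one way a caller might
 * drive the delayed-work API above. The names adv_timer/adv_timeout/adv_start
 * are hypothetical, and the handler signature assumes the Zephyr-style
 * k_work_handler_t taking a struct k_work pointer; adjust to the actual
 * typedef used in this port if it differs.
 *
 *     static struct k_delayed_work adv_timer;
 *
 *     static void adv_timeout(struct k_work *work)
 *     {
 *         // runs once the poller dequeues the expired entry
 *     }
 *
 *     void adv_start(void)
 *     {
 *         k_delayed_work_init(&adv_timer, adv_timeout);
 *         k_delayed_work_submit(&adv_timer, 100); // expire ~100 ms from now
 *     }
 */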