// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <object/dispatcher.h>

#include <inttypes.h>

#include <arch/ops.h>
#include <fbl/atomic.h>
#include <fbl/mutex.h>
#include <lib/counters.h>
#include <lib/ktrace.h>

#include <object/tls_slots.h>

// kernel counters. The following counters never decrease.
// counts the number of times a dispatcher has been created and destroyed.
KCOUNTER(dispatcher_create_count, "kernel.dispatcher.create");
KCOUNTER(dispatcher_destroy_count, "kernel.dispatcher.destroy");
// counts the number of times observers have been added to a kernel object.
KCOUNTER(dispatcher_observe_count, "kernel.dispatcher.observer.add");
// counts the number of times observers have been canceled.
KCOUNTER(dispatcher_cancel_bh_count, "kernel.dispatcher.observer.cancel.byhandle");
KCOUNTER(dispatcher_cancel_bk_count, "kernel.dispatcher.observer.cancel.bykey");
// counts the number of cookies set or changed (reset).
KCOUNTER(dispatcher_cookie_set_count, "kernel.dispatcher.cookie.set");
KCOUNTER(dispatcher_cookie_reset_count, "kernel.dispatcher.cookie.reset");

namespace {
// The first 1K koids are reserved.
fbl::atomic<zx_koid_t> global_koid(1024ULL);

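// A relaxed fetch_add is sufficient here: koid assignment only requires
// that each caller observe a distinct value, and the counter is not used
// to publish any other memory, so no acquire/release ordering is needed.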
zx_koid_t GenerateKernelObjectId() {
    return global_koid.fetch_add(1ULL, fbl::memory_order_relaxed);
}

// Helper class that safely allows deleting Dispatchers without
// risk of blowing up the kernel stack. It uses one TLS slot to
// unwind the recursion.
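// For example, deleting a dispatcher that holds the last references to
// other dispatchers re-enters fbl_recycle() and hence Delete(); the TLS
// slot lets the outermost call queue those objects and drain them
// iteratively instead of recursing.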
class SafeDeleter {
public:
    static void Delete(Dispatcher* kobj) {
        auto deleter = reinterpret_cast<SafeDeleter*>(tls_get(TLS_ENTRY_KOBJ_DELETER));
        if (deleter) {
            // Delete() was called recursively.
            deleter->pending_.push_front(kobj);
        } else {
            SafeDeleter deleter;
            tls_set(TLS_ENTRY_KOBJ_DELETER, &deleter);

            do {
                // This delete call can cause recursive calls to
                // Dispatcher::fbl_recycle() and hence to Delete().
                delete kobj;

                kobj = deleter.pending_.pop_front();
            } while (kobj);

            tls_set(TLS_ENTRY_KOBJ_DELETER, nullptr);
        }
    }

private:
    fbl::SinglyLinkedList<Dispatcher*, Dispatcher::DeleterListTraits> pending_;
};

}  // namespace

Dispatcher::Dispatcher(zx_signals_t signals)
    : koid_(GenerateKernelObjectId()),
      handle_count_(0u),
      signals_(signals) {
    kcounter_add(dispatcher_create_count, 1);
}

Dispatcher::~Dispatcher() {
    ktrace(TAG_OBJECT_DELETE, (uint32_t)koid_, 0, 0, 0);
    kcounter_add(dispatcher_destroy_count, 1);
}

// The refcount of this object has reached zero: delete self
// using the SafeDeleter to avoid potential recursion hazards.
// TODO(cpu): Not all objects need the SafeDeleter; only objects
// that can control the lifetime of dispatchers that in turn
// can control the lifetime of others. For example, events do
// not fall into this category.
void Dispatcher::fbl_recycle() {
    SafeDeleter::Delete(this);
}

zx_status_t Dispatcher::add_observer(StateObserver* observer) {
    if (!is_waitable())
        return ZX_ERR_NOT_SUPPORTED;
    AddObserver(observer, nullptr);
    return ZX_OK;
}

namespace {

template <typename Func, typename LockType>
StateObserver::Flags CancelWithFunc(Dispatcher::ObserverList* observers,
                                    Lock<LockType>* observer_lock, Func f) {
    StateObserver::Flags flags = 0;

    Dispatcher::ObserverList obs_to_remove;

    {
        Guard<LockType> guard{observer_lock};
        for (auto it = observers->begin(); it != observers->end();) {
            StateObserver::Flags it_flags = f(it.CopyPointer());
            flags |= it_flags;
            if (it_flags & StateObserver::kNeedRemoval) {
                auto to_remove = it;
                ++it;
                obs_to_remove.push_back(observers->erase(to_remove));
            } else {
                ++it;
            }
        }
    }

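    // Notify the removed observers only after |observer_lock| has been
    // dropped; OnRemoved() may take other locks or free the observer.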
    while (!obs_to_remove.is_empty()) {
        obs_to_remove.pop_front()->OnRemoved();
    }

    // We've processed the removal flag, so strip it.
    return flags & (~StateObserver::kNeedRemoval);
}

}  // namespace

// Since this conditionally takes the dispatcher's |lock_|, based on
// the type of Mutex (either fbl::Mutex or fbl::NullLock), the thread
// safety analysis is unable to prove that the accesses to |signals_|
// and to |observers_| are always protected.
template <typename LockType>
void Dispatcher::AddObserverHelper(StateObserver* observer,
                                   const StateObserver::CountInfo* cinfo,
                                   Lock<LockType>* lock) TA_NO_THREAD_SAFETY_ANALYSIS {
    ZX_DEBUG_ASSERT(is_waitable());
    DEBUG_ASSERT(observer != nullptr);

    StateObserver::Flags flags;
    {
        Guard<LockType> guard{lock};

        flags = observer->OnInitialize(signals_, cinfo);
        if (!(flags & StateObserver::kNeedRemoval))
            observers_.push_front(observer);
    }
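    // If OnInitialize() requested removal, the observer was never added to
    // |observers_|; notify it here, outside the lock.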
    if (flags & StateObserver::kNeedRemoval)
        observer->OnRemoved();

    kcounter_add(dispatcher_observe_count, 1);
}

void Dispatcher::AddObserver(StateObserver* observer, const StateObserver::CountInfo* cinfo) {
    AddObserverHelper(observer, cinfo, get_lock());
}

void Dispatcher::AddObserverLocked(StateObserver* observer, const StateObserver::CountInfo* cinfo) {
    // Type tag and local NullLock to make lockdep happy.
    struct DispatcherAddObserverLocked {};
    DECLARE_LOCK(DispatcherAddObserverLocked, fbl::NullLock) lock;

    AddObserverHelper(observer, cinfo, &lock);
}

void Dispatcher::RemoveObserver(StateObserver* observer) {
    ZX_DEBUG_ASSERT(is_waitable());

    Guard<fbl::Mutex> guard{get_lock()};
    DEBUG_ASSERT(observer != nullptr);
    observers_.erase(*observer);
}

void Dispatcher::Cancel(const Handle* handle) {
    ZX_DEBUG_ASSERT(is_waitable());

    CancelWithFunc(&observers_, get_lock(), [handle](StateObserver* obs) {
        return obs->OnCancel(handle);
    });

    kcounter_add(dispatcher_cancel_bh_count, 1);
}

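// Returns true if at least one observer reported the cancellation as
// handled (StateObserver::kHandled).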
bool Dispatcher::CancelByKey(const Handle* handle, const void* port, uint64_t key) {
    ZX_DEBUG_ASSERT(is_waitable());

    StateObserver::Flags flags = CancelWithFunc(&observers_, get_lock(),
                                                [handle, port, key](StateObserver* obs) {
        return obs->OnCancelByKey(handle, port, key);
    });

    kcounter_add(dispatcher_cancel_bk_count, 1);

    return flags & StateObserver::kHandled;
}

// Since this conditionally takes the dispatcher's |lock_|, based on
// the type of Mutex (either fbl::Mutex or fbl::NullLock), the thread
// safety analysis is unable to prove that the accesses to |signals_|
// are always protected.
template <typename LockType>
void Dispatcher::UpdateStateHelper(zx_signals_t clear_mask,
                                   zx_signals_t set_mask,
                                   Lock<LockType>* lock) TA_NO_THREAD_SAFETY_ANALYSIS {
    Dispatcher::ObserverList obs_to_remove;
    {
        Guard<LockType> guard{lock};

        auto previous_signals = signals_;
        signals_ &= ~clear_mask;
        signals_ |= set_mask;

        if (previous_signals == signals_)
            return;

        UpdateInternalLocked(&obs_to_remove, signals_);
    }

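    // As in CancelWithFunc(), observers that asked to be removed are
    // notified only after the lock has been dropped.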
    while (!obs_to_remove.is_empty()) {
        obs_to_remove.pop_front()->OnRemoved();
    }
}

void Dispatcher::UpdateState(zx_signals_t clear_mask,
                             zx_signals_t set_mask) {
    UpdateStateHelper(clear_mask, set_mask, get_lock());
}

void Dispatcher::UpdateStateLocked(zx_signals_t clear_mask,
                                   zx_signals_t set_mask) {
    // Type tag and local NullLock to make lockdep happy.
    struct DispatcherUpdateStateLocked {};
    DECLARE_LOCK(DispatcherUpdateStateLocked, fbl::NullLock) lock;
    UpdateStateHelper(clear_mask, set_mask, &lock);
}

void Dispatcher::UpdateInternalLocked(ObserverList* obs_to_remove, zx_signals_t signals) {
    ZX_DEBUG_ASSERT(is_waitable());

    for (auto it = observers_.begin(); it != observers_.end();) {
        StateObserver::Flags it_flags = it->OnStateChange(signals);
        if (it_flags & StateObserver::kNeedRemoval) {
            auto to_remove = it;
            ++it;
            obs_to_remove->push_back(observers_.erase(to_remove));
        } else {
            ++it;
        }
    }
}

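// A cookie jar is bound to the scope (koid) of the first setter; only that
// same scope may later read or rewrite the cookie.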
zx_status_t Dispatcher::SetCookie(CookieJar* cookiejar, zx_koid_t scope, uint64_t cookie) {
    if (cookiejar == nullptr)
        return ZX_ERR_NOT_SUPPORTED;

    Guard<fbl::Mutex> guard{get_lock()};

    if (cookiejar->scope_ == ZX_KOID_INVALID) {
        cookiejar->scope_ = scope;
        cookiejar->cookie_ = cookie;

        kcounter_add(dispatcher_cookie_set_count, 1);
        return ZX_OK;
    }

    if (cookiejar->scope_ == scope) {
        cookiejar->cookie_ = cookie;

        kcounter_add(dispatcher_cookie_reset_count, 1);
        return ZX_OK;
    }

    return ZX_ERR_ACCESS_DENIED;
}

zx_status_t Dispatcher::GetCookie(CookieJar* cookiejar, zx_koid_t scope, uint64_t* cookie) {
    if (cookiejar == nullptr)
        return ZX_ERR_NOT_SUPPORTED;

    Guard<fbl::Mutex> guard{get_lock()};

    if (cookiejar->scope_ == scope) {
        *cookie = cookiejar->cookie_;
        return ZX_OK;
    }

    return ZX_ERR_ACCESS_DENIED;
}

zx_status_t Dispatcher::InvalidateCookieLocked(CookieJar* cookiejar) {
    if (cookiejar == nullptr)
        return ZX_ERR_NOT_SUPPORTED;

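    // Setting the scope to ZX_KOID_KERNEL invalidates the cookie for good:
    // no caller-supplied scope should ever match it, so subsequent Get/Set
    // calls will fail with ZX_ERR_ACCESS_DENIED.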
    cookiejar->scope_ = ZX_KOID_KERNEL;
    return ZX_OK;
}

zx_status_t Dispatcher::InvalidateCookie(CookieJar* cookiejar) {
    Guard<fbl::Mutex> guard{get_lock()};
    return InvalidateCookieLocked(cookiejar);
}