// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#pragma once

#include <err.h>
#include <stdint.h>
#include <string.h>

#include <fbl/auto_lock.h>
#include <fbl/canary.h>
#include <fbl/intrusive_double_list.h>
#include <fbl/intrusive_single_list.h>
#include <fbl/mutex.h>
#include <fbl/recycler.h>
#include <fbl/ref_counted.h>
#include <fbl/ref_counted_upgradeable.h>
#include <fbl/ref_ptr.h>
#include <ktl/move.h>
#include <ktl/unique_ptr.h>

#include <kernel/lockdep.h>
#include <kernel/spinlock.h>
#include <object/handle.h>
#include <object/state_observer.h>

#include <zircon/compiler.h>
#include <zircon/syscalls/object.h>
#include <zircon/types.h>

struct CookieJar {
    zx_koid_t scope_ = ZX_KOID_INVALID;
    uint64_t cookie_ = 0u;
};

template <typename T> struct DispatchTag;

#define DECLARE_DISPTAG(T, E)              \
class T;                                   \
template <> struct DispatchTag<T> {        \
    static constexpr zx_obj_type_t ID = E; \
};
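
// For reference, a single invocation such as
// DECLARE_DISPTAG(EventDispatcher, ZX_OBJ_TYPE_EVENT) expands to:
//
//   class EventDispatcher;
//   template <> struct DispatchTag<EventDispatcher> {
//       static constexpr zx_obj_type_t ID = ZX_OBJ_TYPE_EVENT;
//   };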

DECLARE_DISPTAG(ProcessDispatcher, ZX_OBJ_TYPE_PROCESS)
DECLARE_DISPTAG(ThreadDispatcher, ZX_OBJ_TYPE_THREAD)
DECLARE_DISPTAG(VmObjectDispatcher, ZX_OBJ_TYPE_VMO)
DECLARE_DISPTAG(ChannelDispatcher, ZX_OBJ_TYPE_CHANNEL)
DECLARE_DISPTAG(EventDispatcher, ZX_OBJ_TYPE_EVENT)
DECLARE_DISPTAG(PortDispatcher, ZX_OBJ_TYPE_PORT)
DECLARE_DISPTAG(InterruptDispatcher, ZX_OBJ_TYPE_INTERRUPT)
DECLARE_DISPTAG(PciDeviceDispatcher, ZX_OBJ_TYPE_PCI_DEVICE)
DECLARE_DISPTAG(LogDispatcher, ZX_OBJ_TYPE_LOG)
DECLARE_DISPTAG(SocketDispatcher, ZX_OBJ_TYPE_SOCKET)
DECLARE_DISPTAG(ResourceDispatcher, ZX_OBJ_TYPE_RESOURCE)
DECLARE_DISPTAG(EventPairDispatcher, ZX_OBJ_TYPE_EVENTPAIR)
DECLARE_DISPTAG(JobDispatcher, ZX_OBJ_TYPE_JOB)
DECLARE_DISPTAG(VmAddressRegionDispatcher, ZX_OBJ_TYPE_VMAR)
DECLARE_DISPTAG(FifoDispatcher, ZX_OBJ_TYPE_FIFO)
DECLARE_DISPTAG(GuestDispatcher, ZX_OBJ_TYPE_GUEST)
DECLARE_DISPTAG(VcpuDispatcher, ZX_OBJ_TYPE_VCPU)
DECLARE_DISPTAG(TimerDispatcher, ZX_OBJ_TYPE_TIMER)
DECLARE_DISPTAG(IommuDispatcher, ZX_OBJ_TYPE_IOMMU)
DECLARE_DISPTAG(BusTransactionInitiatorDispatcher, ZX_OBJ_TYPE_BTI)
DECLARE_DISPTAG(ProfileDispatcher, ZX_OBJ_TYPE_PROFILE)
DECLARE_DISPTAG(PinnedMemoryTokenDispatcher, ZX_OBJ_TYPE_PMT)
DECLARE_DISPTAG(SuspendTokenDispatcher, ZX_OBJ_TYPE_SUSPEND_TOKEN)
DECLARE_DISPTAG(PagerDispatcher, ZX_OBJ_TYPE_PAGER)

#undef DECLARE_DISPTAG

// Base class for all kernel objects that can be exposed to user-mode via
// the syscall API and referenced by handles.
//
// It implements RefCounted because handles are abstractions for multiple
// references from user mode or kernel mode that control the lifetime of
// the object.
//
// It implements Recyclable because, upon the final Release() on the RefPtr,
// it might be necessary to implement a destruction pattern that avoids
// deep recursion, since the kernel stack is very limited.
//
// You don't derive directly from this class; instead derive
// from SoloDispatcher or PeeredDispatcher.
class Dispatcher : private fbl::RefCountedUpgradeable<Dispatcher>,
                   private fbl::Recyclable<Dispatcher> {
public:
    using fbl::RefCountedUpgradeable<Dispatcher>::AddRef;
    using fbl::RefCountedUpgradeable<Dispatcher>::Release;
    using fbl::RefCountedUpgradeable<Dispatcher>::Adopt;
    using fbl::RefCountedUpgradeable<Dispatcher>::AddRefMaybeInDestructor;

    // At construction, the object's state tracker is asserting
    // |signals|.
    explicit Dispatcher(zx_signals_t signals = 0u);

    // Dispatchers are either Solo or Peered. They handle refcounting
    // and locking differently.
    virtual ~Dispatcher();

    zx_koid_t get_koid() const { return koid_; }

    // Must be called under the handle table lock.
    void increment_handle_count() TA_REQ(Handle::ArenaLock::Get()) {
        ++handle_count_;
    }

    // Must be called under the handle table lock.
    // Returns true exactly when the handle count goes to zero.
    bool decrement_handle_count() TA_REQ(Handle::ArenaLock::Get()) {
        --handle_count_;
        return handle_count_ == 0u;
    }

    // Must be called under the handle table lock.
    uint32_t current_handle_count() const TA_REQ(Handle::ArenaLock::Get()) {
        return handle_count_;
    }
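
    // A caller sketch (an assumption for illustration: it presumes the
    // lockdep Guard pattern from kernel/lockdep.h applies to
    // Handle::ArenaLock):
    //
    //   {
    //       Guard<fbl::Mutex> guard{Handle::ArenaLock::Get()};
    //       if (disp->decrement_handle_count()) {
    //           // The last handle is gone; on_zero_handles() may now run
    //           // (after this lock is dropped).
    //       }
    //   }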

    // The following are only to be called when |is_waitable| reports true.

    using ObserverList = fbl::DoublyLinkedList<StateObserver*, StateObserverListTraits>;

    // Add an observer.
    void AddObserver(StateObserver* observer, const StateObserver::CountInfo* cinfo);
    void AddObserverLocked(StateObserver* observer,
                           const StateObserver::CountInfo* cinfo) TA_REQ(get_lock());

    // Remove an observer (which must have been added).
    void RemoveObserver(StateObserver* observer);
    // Called when observers of the handle's state (e.g., waits on the handle)
    // should be "cancelled", i.e., when a handle (for the object that owns
    // this StateTracker) is being destroyed or transferred.
    void Cancel(const Handle* handle);

    // Like Cancel(), but issued via zx_port_cancel() and restricted to
    // observers that match |port| and |key|. Returns true if at least one
    // observer was found.
    bool CancelByKey(const Handle* handle, const void* port, uint64_t key);

    // Dispatchers that support get/set cookie must provide
    // a CookieJar for those cookies to be stored in.
    virtual CookieJar* get_cookie_jar() { return nullptr; }

    // Accessors for CookieJars.
    zx_status_t SetCookie(CookieJar* cookiejar, zx_koid_t scope, uint64_t cookie);
    zx_status_t GetCookie(CookieJar* cookiejar, zx_koid_t scope, uint64_t* cookie);
    zx_status_t InvalidateCookie(CookieJar* cookiejar);
    zx_status_t InvalidateCookieLocked(CookieJar* cookiejar) TA_REQ(get_lock());
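
    // A usage sketch (|scope_koid| is illustrative): a cookie is bound to a
    // scope koid, and a later Get/Set against a different scope is refused.
    //
    //   CookieJar* jar = disp->get_cookie_jar();
    //   if (jar != nullptr) {
    //       disp->SetCookie(jar, scope_koid, 0x1234u);
    //       uint64_t cookie;
    //       disp->GetCookie(jar, scope_koid, &cookie);  // cookie == 0x1234
    //   }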

    // Interface for derived classes.

    virtual zx_obj_type_t get_type() const = 0;

    virtual zx_status_t add_observer(StateObserver* observer);

    virtual zx_status_t user_signal_self(uint32_t clear_mask, uint32_t set_mask) = 0;
    virtual zx_status_t user_signal_peer(uint32_t clear_mask, uint32_t set_mask) = 0;

    virtual void on_zero_handles() {}

    virtual zx_koid_t get_related_koid() const = 0;
    virtual bool is_waitable() const = 0;

    // get_name() will return a null-terminated name of ZX_MAX_NAME_LEN - 1 or
    // fewer characters. For objects that don't have names it will be "".
    virtual void get_name(char out_name[ZX_MAX_NAME_LEN]) const __NONNULL((2)) {
        memset(out_name, 0, ZX_MAX_NAME_LEN);
    }

    // set_name() will truncate to ZX_MAX_NAME_LEN - 1 and ensure there is a
    // terminating null.
    virtual zx_status_t set_name(const char* name, size_t len) { return ZX_ERR_NOT_SUPPORTED; }
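
    // A hypothetical override sketch (FooDispatcher and |name_| are
    // illustrative, not part of this file): truncate, copy under the
    // object's lock, and always null-terminate.
    //
    //   zx_status_t FooDispatcher::set_name(const char* name, size_t len) {
    //       len = (len >= ZX_MAX_NAME_LEN) ? ZX_MAX_NAME_LEN - 1 : len;
    //       Guard<fbl::Mutex> guard{get_lock()};
    //       memcpy(name_, name, len);
    //       name_[len] = 0;
    //       return ZX_OK;
    //   }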

    struct DeleterListTraits {
        static fbl::SinglyLinkedListNodeState<Dispatcher*>& node_state(
            Dispatcher& obj) {
            return obj.deleter_ll_;
        }
    };

    // Called whenever the object is bound to a new process. The |new_owner| is
    // the koid of the new process. It is only overridden for objects where a
    // single owner makes sense.
    virtual void set_owner(zx_koid_t new_owner) {}

protected:
    // Notify others of a change in state (possibly waking them). (Clearing
    // satisfied signals or setting satisfiable signals should not wake anyone.)
    void UpdateState(zx_signals_t clear_mask, zx_signals_t set_mask);
    void UpdateStateLocked(zx_signals_t clear_mask, zx_signals_t set_mask) TA_REQ(get_lock());

    zx_signals_t GetSignalsStateLocked() const TA_REQ(get_lock()) {
        return signals_;
    }
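
    // A usage sketch (hypothetical subclass code; the signal constant is
    // illustrative):
    //
    //   // A message was enqueued; assert readability, clear nothing.
    //   UpdateState(0u, ZX_CHANNEL_READABLE);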

    // Dispatcher subtypes should use this lock to protect their internal state.
    virtual Lock<fbl::Mutex>* get_lock() const = 0;

private:
    friend class fbl::Recyclable<Dispatcher>;
    void fbl_recycle();

    // The common implementation of UpdateState and UpdateStateLocked.
    template <typename LockType>
    void UpdateStateHelper(zx_signals_t clear_mask,
                           zx_signals_t set_mask,
                           Lock<LockType>* lock);

    // The common implementation of AddObserver and AddObserverLocked.
    template <typename LockType>
    void AddObserverHelper(StateObserver* observer,
                           const StateObserver::CountInfo* cinfo,
                           Lock<LockType>* lock);

    void UpdateInternalLocked(ObserverList* obs_to_remove,
                              zx_signals_t signals) TA_REQ(get_lock());

    const zx_koid_t koid_;
    uint32_t handle_count_ TA_GUARDED(Handle::ArenaLock::Get());

    zx_signals_t signals_ TA_GUARDED(get_lock());

    // Active observers are elements in |observers_|.
    ObserverList observers_ TA_GUARDED(get_lock());

    // Used to store this dispatcher on the dispatcher deleter list.
    fbl::SinglyLinkedListNodeState<Dispatcher*> deleter_ll_;
};

// SoloDispatchers stand alone. Since they have no peer to coordinate with, they
// directly contain their state lock. This is a CRTP template type to permit
// the lock validator to distinguish between locks in different subclasses of
// SoloDispatcher.
template <typename T, zx_rights_t def_rights, zx_signals_t extra_signals = 0u>
class SoloDispatcher : public Dispatcher {
public:
    static constexpr zx_rights_t default_rights() { return def_rights; }

    // At construction, the object's state tracker is asserting
    // |signals|.
    explicit SoloDispatcher(zx_signals_t signals = 0u)
        : Dispatcher(signals) {}

    // Related koid is overridden by subclasses, like thread and process.
    zx_koid_t get_related_koid() const override TA_REQ(get_lock()) { return 0ULL; }
    bool is_waitable() const final { return default_rights() & ZX_RIGHT_WAIT; }

    zx_status_t user_signal_self(uint32_t clear_mask, uint32_t set_mask) final {
        if (!is_waitable())
            return ZX_ERR_NOT_SUPPORTED;
        // Generic objects can set all USER_SIGNALs. Particular object
        // types (events and eventpairs) may be able to set more.
        auto allowed_signals = ZX_USER_SIGNAL_ALL | extra_signals;
        if ((set_mask & ~allowed_signals) || (clear_mask & ~allowed_signals))
            return ZX_ERR_INVALID_ARGS;

        UpdateState(clear_mask, set_mask);
        return ZX_OK;
    }

    zx_status_t user_signal_peer(uint32_t clear_mask, uint32_t set_mask) final {
        return ZX_ERR_NOT_SUPPORTED;
    }

protected:
    Lock<fbl::Mutex>* get_lock() const final { return &lock_; }

    mutable DECLARE_MUTEX(SoloDispatcher) lock_;
};
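
// A hypothetical subclass sketch (FooDispatcher is illustrative;
// ZX_DEFAULT_EVENT_RIGHTS is assumed from zircon/rights.h):
//
//   class FooDispatcher final
//       : public SoloDispatcher<FooDispatcher, ZX_DEFAULT_EVENT_RIGHTS> {
//   public:
//       zx_obj_type_t get_type() const final { return ZX_OBJ_TYPE_EVENT; }
//   };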

// PeeredDispatchers have opposing endpoints to coordinate state
// with. For example, writing into one endpoint of a Channel needs to
// modify zx_signals_t state (for the readability bit) on the opposite
// side. To coordinate their state, they share a mutex, which is held
// by the PeerHolder. Both endpoints have a RefPtr back to the
// PeerHolder; no one else ever does.

// Thus creating a pair of peered objects will typically look
// something like
//     // Make the two RefPtrs for each endpoint's handle to the mutex.
//     auto holder0 = AdoptRef(new PeerHolder<Foo>(...));
//     auto holder1 = holder0;
//     // Create the opposing sides.
//     auto foo0 = AdoptRef(new Foo(std::move(holder0), ...));
//     auto foo1 = AdoptRef(new Foo(std::move(holder1), ...));
//     // Initialize the opposing sides, teaching them about each other.
//     foo0->Init(&foo1);
//     foo1->Init(&foo0);

// A PeeredDispatcher object, in its |on_zero_handles| call, must clear
// out its peer's |peer_| field. This is needed to avoid leaks, and to
// ensure that |user_signal| can correctly report ZX_ERR_PEER_CLOSED.

// TODO(kulakowski) We should investigate turning this into one
// allocation. This would mean PeerHolder would have two EndPoint
// members, and that PeeredDispatcher would have custom refcounting.
template <typename Endpoint>
class PeerHolder : public fbl::RefCounted<PeerHolder<Endpoint>> {
public:
    PeerHolder() = default;
    ~PeerHolder() = default;

    Lock<fbl::Mutex>* get_lock() const { return &lock_; }

    mutable DECLARE_MUTEX(PeerHolder) lock_;
};

template <typename Self, zx_rights_t def_rights, zx_signals_t extra_signals = 0u>
class PeeredDispatcher : public Dispatcher {
public:
    static constexpr zx_rights_t default_rights() { return def_rights; }

    // At construction, the object's state tracker is asserting
    // |signals|.
    explicit PeeredDispatcher(fbl::RefPtr<PeerHolder<Self>> holder,
                              zx_signals_t signals = 0u)
        : Dispatcher(signals),
          holder_(ktl::move(holder)) {}
    virtual ~PeeredDispatcher() = default;

    zx_koid_t get_related_koid() const final TA_REQ(get_lock()) { return peer_koid_; }
    bool is_waitable() const final { return default_rights() & ZX_RIGHT_WAIT; }

    zx_status_t user_signal_self(uint32_t clear_mask, uint32_t set_mask) final
        TA_NO_THREAD_SAFETY_ANALYSIS {
        auto allowed_signals = ZX_USER_SIGNAL_ALL | extra_signals;
        if ((set_mask & ~allowed_signals) || (clear_mask & ~allowed_signals))
            return ZX_ERR_INVALID_ARGS;

        Guard<fbl::Mutex> guard{get_lock()};

        UpdateStateLocked(clear_mask, set_mask);
        return ZX_OK;
    }

    zx_status_t user_signal_peer(uint32_t clear_mask, uint32_t set_mask) final
        TA_NO_THREAD_SAFETY_ANALYSIS {
        auto allowed_signals = ZX_USER_SIGNAL_ALL | extra_signals;
        if ((set_mask & ~allowed_signals) || (clear_mask & ~allowed_signals))
            return ZX_ERR_INVALID_ARGS;

        Guard<fbl::Mutex> guard{get_lock()};
        // object_signal() may race with handle_close() on another thread.
        if (!peer_)
            return ZX_ERR_PEER_CLOSED;
        peer_->UpdateStateLocked(clear_mask, set_mask);
        return ZX_OK;
    }

    // All subclasses of PeeredDispatcher must implement a public
    // |void on_zero_handles_locked()| and a public
    // |void OnPeerZeroHandlesLocked()|. The peer lifetime management
    // (i.e. the peer zeroing) is centralized here.
    void on_zero_handles() final TA_NO_THREAD_SAFETY_ANALYSIS {
        Guard<fbl::Mutex> guard{get_lock()};
        auto peer = ktl::move(peer_);
        static_cast<Self*>(this)->on_zero_handles_locked();

        // This is needed to avoid leaks, and to ensure that
        // |user_signal| can correctly report ZX_ERR_PEER_CLOSED.
        if (peer != nullptr) {
            // This defeats the lock analysis in the usual way: it
            // can't reason that the peers' get_lock() calls alias.
            peer->peer_.reset();
            static_cast<Self*>(peer.get())->OnPeerZeroHandlesLocked();
        }
    }

    Lock<fbl::Mutex>* get_lock() const final { return holder_->get_lock(); }

protected:
    zx_koid_t peer_koid_ = 0u;
    fbl::RefPtr<Self> peer_ TA_GUARDED(get_lock());

private:
    const fbl::RefPtr<PeerHolder<Self>> holder_;
};

// DownCastDispatcher checks if a RefPtr<Dispatcher> points to a
// dispatcher of a given dispatcher subclass T and, if so, moves the
// reference to a RefPtr<T>; otherwise it leaves the
// RefPtr<Dispatcher> alone. Must be called with a pointer to a valid
// (non-null) dispatcher.

// Note that the Dispatcher -> Dispatcher versions come up in generic
// code, and so aren't totally vacuous.
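
// A usage sketch (FooDispatcher stands in for any concrete subclass):
//
//   fbl::RefPtr<Dispatcher> disp = /* ... */;
//   fbl::RefPtr<FooDispatcher> foo = DownCastDispatcher<FooDispatcher>(&disp);
//   if (!foo) {
//       // |disp| was not a FooDispatcher; it still owns the reference here.
//   }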

// Dispatcher -> FooDispatcher
template <typename T>
fbl::RefPtr<T> DownCastDispatcher(fbl::RefPtr<Dispatcher>* disp) {
    return (likely(DispatchTag<T>::ID == (*disp)->get_type())) ?
        fbl::RefPtr<T>::Downcast(ktl::move(*disp)) :
        nullptr;
}

// Dispatcher -> Dispatcher
template <>
inline fbl::RefPtr<Dispatcher> DownCastDispatcher(fbl::RefPtr<Dispatcher>* disp) {
    return ktl::move(*disp);
}

// const Dispatcher -> const FooDispatcher
template <typename T>
fbl::RefPtr<T> DownCastDispatcher(fbl::RefPtr<const Dispatcher>* disp) {
    static_assert(fbl::is_const<T>::value, "");
    return (likely(DispatchTag<typename fbl::remove_const<T>::type>::ID == (*disp)->get_type())) ?
        fbl::RefPtr<T>::Downcast(ktl::move(*disp)) :
        nullptr;
}

// const Dispatcher -> const Dispatcher
template <>
inline fbl::RefPtr<const Dispatcher> DownCastDispatcher(fbl::RefPtr<const Dispatcher>* disp) {
    return ktl::move(*disp);
}

// The same, but for Dispatcher* and FooDispatcher* instead of RefPtr.

// Dispatcher -> FooDispatcher
template <typename T>
T* DownCastDispatcher(Dispatcher* disp) {
    return (likely(DispatchTag<T>::ID == disp->get_type())) ?
        static_cast<T*>(disp) : nullptr;
}

// Dispatcher -> Dispatcher
template <>
inline Dispatcher* DownCastDispatcher(Dispatcher* disp) {
    return disp;
}

// const Dispatcher -> const FooDispatcher
template <typename T>
const T* DownCastDispatcher(const Dispatcher* disp) {
    static_assert(fbl::is_const<T>::value, "");
    return (likely(DispatchTag<typename fbl::remove_const<T>::type>::ID == disp->get_type())) ?
        static_cast<const T*>(disp) : nullptr;
}

// const Dispatcher -> const Dispatcher
template <>
inline const Dispatcher* DownCastDispatcher(const Dispatcher* disp) {
    return disp;
}