1 // Copyright 2016 The Fuchsia Authors
2 //
// Use of this source code is governed by an MIT-style
4 // license that can be found in the LICENSE file or at
5 // https://opensource.org/licenses/MIT
6 
7 #include <object/handle.h>
8 
9 #include <object/dispatcher.h>
10 #include <fbl/arena.h>
11 #include <fbl/mutex.h>
12 #include <lib/counters.h>
13 #include <pow2.h>
14 
15 namespace {
16 
17 // The number of outstanding (live) handles in the arena.
18 constexpr size_t kMaxHandleCount = 256 * 1024u;
19 
20 // Warning level: high_handle_count() is called when
21 // there are this many outstanding handles.
22 constexpr size_t kHighHandleCount = (kMaxHandleCount * 7) / 8;
23 
24 KCOUNTER(handle_count_made, "kernel.handles.made");
25 KCOUNTER(handle_count_duped, "kernel.handles.duped");
26 KCOUNTER(handle_count_live, "kernel.handles.live");
27 KCOUNTER(handle_count_max_live, "kernel.handles.max_live");
28 
29 // Masks for building a Handle's base_value, which ProcessDispatcher
30 // uses to create zx_handle_t values.
31 //
32 // base_value bit fields:
33 //   [31..30]: Must be zero
34 //   [29..kHandleGenerationShift]: Generation number
35 //                                 Masked by kHandleGenerationMask
36 //   [kHandleGenerationShift-1..0]: Index into handle_arena
37 //                                  Masked by kHandleIndexMask
38 constexpr uint32_t kHandleIndexMask = kMaxHandleCount - 1;
39 static_assert((kHandleIndexMask & kMaxHandleCount) == 0,
40               "kMaxHandleCount must be a power of 2");
41 constexpr uint32_t kHandleGenerationMask = ~kHandleIndexMask & ~(3 << 30);
42 constexpr uint32_t kHandleGenerationShift = log2_uint_floor(kMaxHandleCount);
43 static_assert(((3 << (kHandleGenerationShift - 1)) & kHandleGenerationMask) ==
44                   1 << kHandleGenerationShift,
45               "Shift is wrong");
46 static_assert((kHandleGenerationMask >> kHandleGenerationShift) >= 255,
47               "Not enough room for a useful generation count");
48 static_assert(((3 << 30) ^ kHandleGenerationMask ^ kHandleIndexMask) ==
49                   0xffffffffu,
50               "Masks do not agree");
51 
52 }  // namespace
53 
// Backing storage for every live Handle; slots are carved out of this
// fixed-capacity arena (configured in Handle::Init).
fbl::Arena Handle::arena_;
55 
// One-time setup of the handle arena: reserves space for up to
// kMaxHandleCount Handle-sized slots.
// NOTE(review): TA_NO_THREAD_SAFETY_ANALYSIS suggests this runs before any
// concurrent handle use (early boot) — confirm against the caller.
void Handle::Init() TA_NO_THREAD_SAFETY_ANALYSIS {
    arena_.Init("handles", sizeof(Handle), kMaxHandleCount);
}
59 
// Records which process owns this handle and mirrors the owner into the
// dispatcher. A pid of 0 marks an unowned handle (see the Make constructor
// and the TearDown assertion).
void Handle::set_process_id(zx_koid_t pid) {
    // Relaxed store: this code establishes no ordering with other memory
    // operations here. NOTE(review): confirm callers provide any needed
    // synchronization around handle-table membership.
    process_id_.store(pid, fbl::memory_order_relaxed);
    dispatcher_->set_owner(pid);
}
64 
65 // Returns a new |base_value| based on the value stored in the free
66 // arena slot pointed to by |addr|. The new value will be different
67 // from the last |base_value| used by this slot.
GetNewBaseValue(void * addr)68 uint32_t Handle::GetNewBaseValue(void* addr) TA_REQ(ArenaLock::Get()) {
69     // Get the index of this slot within the arena.
70     uint32_t handle_index = HandleToIndex(reinterpret_cast<Handle*>(addr));
71     DEBUG_ASSERT((handle_index & ~kHandleIndexMask) == 0);
72 
73     // Check the free memory for a stashed base_value.
74     uint32_t v = *reinterpret_cast<uint32_t*>(addr);
75     uint32_t old_gen = 0;
76     if (v != 0) {
77         // This slot has been used before.
78         DEBUG_ASSERT((v & kHandleIndexMask) == handle_index);
79         old_gen = (v & kHandleGenerationMask) >> kHandleGenerationShift;
80     }
81     uint32_t new_gen =
82         (((old_gen + 1) << kHandleGenerationShift) & kHandleGenerationMask);
83     return (handle_index | new_gen);
84 }
85 
86 // Allocate space for a Handle from the arena, but don't instantiate the
87 // object.  |base_value| gets the value for Handle::base_value_.  |what|
88 // says whether this is allocation or duplication, for the error message.
Alloc(const fbl::RefPtr<Dispatcher> & dispatcher,const char * what,uint32_t * base_value)89 void* Handle::Alloc(const fbl::RefPtr<Dispatcher>& dispatcher,
90                     const char* what, uint32_t* base_value) {
91     size_t outstanding_handles;
92     {
93         Guard<fbl::Mutex> guard{ArenaLock::Get()};
94         void* addr = arena_.Alloc();
95         outstanding_handles = arena_.DiagnosticCount();
96         if (likely(addr)) {
97             if (outstanding_handles > kHighHandleCount) {
98                 // TODO: Avoid calling this for every handle after
99                 // kHighHandleCount; printfs are slow and we're
100                 // holding the mutex.
101                 printf("WARNING: High handle count: %zu handles\n",
102                        outstanding_handles);
103             }
104             dispatcher->increment_handle_count();
105             *base_value = GetNewBaseValue(addr);
106             return addr;
107         }
108     }
109 
110     printf("WARNING: Could not allocate %s handle (%zu outstanding)\n",
111            what, outstanding_handles);
112     return nullptr;
113 }
114 
Make(fbl::RefPtr<Dispatcher> dispatcher,zx_rights_t rights)115 HandleOwner Handle::Make(fbl::RefPtr<Dispatcher> dispatcher,
116                          zx_rights_t rights) {
117     uint32_t base_value;
118     void* addr = Alloc(dispatcher, "new", &base_value);
119     if (unlikely(!addr))
120         return nullptr;
121     kcounter_add(handle_count_made, 1);
122     kcounter_add(handle_count_live, 1);
123     kcounter_max_counter(handle_count_max_live, handle_count_live);
124     return HandleOwner(new (addr) Handle(ktl::move(dispatcher),
125                                          rights, base_value));
126 }
127 
// Called only by Make. Starts with process_id_ of 0 (unowned); the owner
// is assigned later via set_process_id().
Handle::Handle(fbl::RefPtr<Dispatcher> dispatcher, zx_rights_t rights,
               uint32_t base_value)
    : process_id_(0u),
      dispatcher_(ktl::move(dispatcher)),
      rights_(rights),
      base_value_(base_value) {
}
136 
Dup(Handle * source,zx_rights_t rights)137 HandleOwner Handle::Dup(Handle* source, zx_rights_t rights) {
138     uint32_t base_value;
139     void* addr = Alloc(source->dispatcher(), "duplicate", &base_value);
140     if (unlikely(!addr))
141         return nullptr;
142     kcounter_add(handle_count_duped, 1);
143     kcounter_add(handle_count_live, 1);
144     kcounter_max_counter(handle_count_max_live, handle_count_live);
145     return HandleOwner(new (addr) Handle(source, rights, base_value));
146 }
147 
// Called only by Dup. Copies the owning process and dispatcher from |rhs|,
// but takes its own |rights| and |base_value|.
Handle::Handle(Handle* rhs, zx_rights_t rights, uint32_t base_value)
    : process_id_(rhs->process_id()),
      dispatcher_(rhs->dispatcher_),
      rights_(rights),
      base_value_(base_value) {
}
155 
// Destroys, but does not free, the Handle, and fixes up its memory to protect
// against stale pointers to it. Also stashes the Handle's base_value for reuse
// the next time this slot is allocated.
void Handle::TearDown() TA_EXCL(ArenaLock::Get()) {
    // Capture the base_value before the destructor/memset wipe the object.
    uint32_t old_base_value = base_value();

    // Calling the handle dtor can cause many things to happen, so it is
    // important to call it outside the lock.
    this->~Handle();

    // There may be stale pointers to this slot. Zero out most of its fields
    // to ensure that the Handle does not appear to belong to any process
    // or point to any Dispatcher.
    memset(this, 0, sizeof(*this));

    // Hold onto the base_value for the next user of this slot, stashing
    // it at the beginning of the free slot. GetNewBaseValue() reads this
    // word to derive a fresh generation number on reuse.
    *reinterpret_cast<uint32_t*>(this) = old_base_value;

    // Double-check that the process_id field is zero, ensuring that
    // no process can refer to this slot while it's free. This isn't
    // completely legal since |handle| points to unconstructed memory,
    // but it should be safe enough for an assertion.
    DEBUG_ASSERT(process_id() == 0);
}
181 
Delete()182 void Handle::Delete() {
183     fbl::RefPtr<Dispatcher> disp = dispatcher();
184 
185     if (disp->is_waitable())
186         disp->Cancel(this);
187 
188     TearDown();
189 
190     bool zero_handles = false;
191     {
192         Guard<fbl::Mutex> guard{ArenaLock::Get()};
193         zero_handles = disp->decrement_handle_count();
194         arena_.Free(this);
195     }
196 
197     if (zero_handles)
198         disp->on_zero_handles();
199 
200     // If |disp| is the last reference then the dispatcher object
201     // gets destroyed here.
202     kcounter_add(handle_count_live, -1);
203 }
204 
FromU32(uint32_t value)205 Handle* Handle::FromU32(uint32_t value) TA_NO_THREAD_SAFETY_ANALYSIS {
206     uintptr_t handle_addr = IndexToHandle(value & kHandleIndexMask);
207     {
208         Guard<fbl::Mutex> guard{ArenaLock::Get()};
209         if (unlikely(!arena_.in_range(handle_addr)))
210             return nullptr;
211     }
212     auto handle = reinterpret_cast<Handle*>(handle_addr);
213     return likely(handle->base_value() == value) ? handle : nullptr;
214 }
215 
// Returns the number of handles currently referring to |dispatcher|.
uint32_t Handle::Count(const fbl::RefPtr<const Dispatcher>& dispatcher) {
    // Handle::ArenaLock also guards Dispatcher::handle_count_.
    Guard<fbl::Mutex> guard{ArenaLock::Get()};
    return dispatcher->current_handle_count();
}
221 
// Returns the number of handle slots currently allocated from the arena
// (diagnostic use).
size_t Handle::diagnostics::OutstandingHandles() {
    Guard<fbl::Mutex> guard{ArenaLock::Get()};
    return arena_.DiagnosticCount();
}
226 
// Dumps the arena's internal bookkeeping (diagnostic use) while holding
// the arena lock.
void Handle::diagnostics::DumpTableInfo() {
    Guard<fbl::Mutex> guard{ArenaLock::Get()};
    arena_.Dump();
}
231