// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <object/process_dispatcher.h>

#include <assert.h>
#include <inttypes.h>
#include <list.h>
#include <rand.h>
#include <string.h>
#include <trace.h>

#include <arch/defines.h>

#include <kernel/thread.h>
#include <vm/vm.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object.h>

#include <lib/crypto/global_prng.h>
#include <lib/ktrace.h>

#include <zircon/rights.h>

#include <object/diagnostics.h>
#include <object/futex_context.h>
#include <object/handle.h>
#include <object/job_dispatcher.h>
#include <object/thread_dispatcher.h>
#include <object/vm_address_region_dispatcher.h>
#include <object/vm_object_dispatcher.h>

#include <fbl/alloc_checker.h>
#include <fbl/auto_lock.h>

#define LOCAL_TRACE 0

static zx_handle_t map_handle_to_value(const Handle* handle, uint32_t mixer) {
    // Ensure that the last bit of the result is not zero, and make sure
    // we don't lose any base_value bits or make the result negative
    // when shifting.
    DEBUG_ASSERT((mixer & ((1<<31) | 0x1)) == 0);
    DEBUG_ASSERT((handle->base_value() & 0xc0000000) == 0);

    auto handle_id = (handle->base_value() << 1) | 0x1;
    return static_cast<zx_handle_t>(mixer ^ handle_id);
}

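// Inverse of map_handle_to_value(): undo the mixer and the shift to recover
// the handle's base_value, then look the Handle up from that value.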
static Handle* map_value_to_handle(zx_handle_t value, uint32_t mixer) {
    auto handle_id = (static_cast<uint32_t>(value) ^ mixer) >> 1;
    return Handle::FromU32(handle_id);
}

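// Creates a new process under |job| along with a dispatcher for the root of
// its address space (the root VMAR). The process is left in the INITIAL
// state; it does not run until a thread is created and started in it.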
zx_status_t ProcessDispatcher::Create(
    fbl::RefPtr<JobDispatcher> job, fbl::StringPiece name, uint32_t flags,
    fbl::RefPtr<Dispatcher>* dispatcher, zx_rights_t* rights,
    fbl::RefPtr<VmAddressRegionDispatcher>* root_vmar_disp,
    zx_rights_t* root_vmar_rights) {
    fbl::AllocChecker ac;
    fbl::RefPtr<ProcessDispatcher> process =
        fbl::AdoptRef(new (&ac) ProcessDispatcher(job, name, flags));
    if (!ac.check())
        return ZX_ERR_NO_MEMORY;

    if (!job->AddChildProcess(process))
        return ZX_ERR_BAD_STATE;

    zx_status_t result = process->Initialize();
    if (result != ZX_OK)
        return result;

    fbl::RefPtr<VmAddressRegion> vmar(process->aspace()->RootVmar());

    // Create a dispatcher for the root VMAR.
    fbl::RefPtr<Dispatcher> new_vmar_dispatcher;
    result = VmAddressRegionDispatcher::Create(vmar, ARCH_MMU_FLAG_PERM_USER,
                                               &new_vmar_dispatcher,
                                               root_vmar_rights);
    if (result != ZX_OK) {
        process->aspace_->Destroy();
        return result;
    }

    *rights = default_rights();
    *dispatcher = ktl::move(process);
    *root_vmar_disp = DownCastDispatcher<VmAddressRegionDispatcher>(
        &new_vmar_dispatcher);

    return ZX_OK;
}

ProcessDispatcher::ProcessDispatcher(fbl::RefPtr<JobDispatcher> job,
                                     fbl::StringPiece name,
                                     uint32_t flags)
    : job_(ktl::move(job)), policy_(job_->GetPolicy()),
      name_(name.data(), name.length()) {
    LTRACE_ENTRY_OBJ;

    // Generate handle XOR mask with top bit and bottom two bits cleared
    uint32_t secret;
    auto prng = crypto::GlobalPRNG::GetInstance();
    prng->Draw(&secret, sizeof(secret));

    // Handle values cannot be negative, so mask off the high bit.
    handle_rand_ = (secret << 2) & INT_MAX;
}

ProcessDispatcher::~ProcessDispatcher() {
    LTRACE_ENTRY_OBJ;

    DEBUG_ASSERT(state_ == State::INITIAL || state_ == State::DEAD);

    // Assert that the -> DEAD transition cleaned up what it should have.
    DEBUG_ASSERT(handles_.is_empty());
    DEBUG_ASSERT(exception_port_ == nullptr);
    DEBUG_ASSERT(debugger_exception_port_ == nullptr);

    // Remove ourselves from the parent job's raw ref to us. Note that this
    // might already have been done when transitioning to State::DEAD. The Job
    // can handle double calls.
    job_->RemoveChildProcess(this);

    LTRACE_EXIT_OBJ;
}

void ProcessDispatcher::on_zero_handles() {
    // If the process is in the initial state and the last handle is closed,
    // we never detach from the parent job, so run the shutdown sequence for
    // that case.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        if (state_ != State::INITIAL) {
            // Use the normal cleanup path instead.
            return;
        }
        SetStateLocked(State::DEAD);
    }

    FinishDeadTransition();
}

void ProcessDispatcher::get_name(char out_name[ZX_MAX_NAME_LEN]) const {
    name_.get(ZX_MAX_NAME_LEN, out_name);
}

zx_status_t ProcessDispatcher::set_name(const char* name, size_t len) {
    return name_.set(name, len);
}

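// Second-phase initialization that can fail: creates the process's user
// address space. Called from Create() before the new process is handed out.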
zx_status_t ProcessDispatcher::Initialize() {
    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    DEBUG_ASSERT(state_ == State::INITIAL);

    // create an address space for this process, named after the process's koid.
    char aspace_name[ZX_MAX_NAME_LEN];
    snprintf(aspace_name, sizeof(aspace_name), "proc:%" PRIu64, get_koid());
    aspace_ = VmAspace::Create(VmAspace::TYPE_USER, aspace_name);
    if (!aspace_) {
        TRACEF("error creating address space\n");
        return ZX_ERR_NO_MEMORY;
    }

    return ZX_OK;
}

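// Terminates the calling process: records |retcode| (unless an exit is
// already in progress), moves the process to DYING, which kills its threads,
// and then exits the calling thread. Never returns.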
void ProcessDispatcher::Exit(int64_t retcode) {
    LTRACE_ENTRY_OBJ;

    DEBUG_ASSERT(ProcessDispatcher::GetCurrent() == this);

    {
        Guard<fbl::Mutex> guard{get_lock()};

        // check that we're in the RUNNING state or we're racing with something
        // else that has already pushed us into the DYING state
        DEBUG_ASSERT_MSG(state_ == State::RUNNING || state_ == State::DYING,
                         "state is %s", StateToString(state_));

        // Set the exit status if there isn't already an exit in progress.
        if (state_ != State::DYING) {
            DEBUG_ASSERT(retcode_ == 0);
            retcode_ = retcode;
        }

        // enter the dying state, which should kill all threads
        SetStateLocked(State::DYING);
    }

    ThreadDispatcher::GetCurrent()->Exit();

    __UNREACHABLE;
}

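// Forcibly terminates the process from any context. Marks the process DYING
// (or DEAD immediately if it has no threads) and asks every thread to exit.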
void ProcessDispatcher::Kill() {
    LTRACE_ENTRY_OBJ;

    // ZX-880: Call RemoveChildProcess outside of |get_lock()|.
    bool became_dead = false;

    {
        Guard<fbl::Mutex> guard{get_lock()};

        // we're already dead
        if (state_ == State::DEAD)
            return;

        if (state_ != State::DYING) {
            // If there isn't an Exit already in progress, set a nonzero exit
            // status so e.g. crashing tests don't appear to have succeeded.
            DEBUG_ASSERT(retcode_ == 0);
            retcode_ = -1;
        }

        // if we have no threads, enter the dead state directly
        if (thread_list_.is_empty()) {
            SetStateLocked(State::DEAD);
            became_dead = true;
        } else {
            // enter the dying state, which should trigger a thread kill.
            // the last thread exiting will transition us to DEAD
            SetStateLocked(State::DYING);
        }
    }

    if (became_dead)
        FinishDeadTransition();
}

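// Increments the process suspend count; on the 0 -> 1 transition every thread
// in the process is asked to suspend. Fails if the process is dying or dead.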
zx_status_t ProcessDispatcher::Suspend() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    // If we're dying don't try to suspend.
    if (state_ == State::DYING || state_ == State::DEAD)
        return ZX_ERR_BAD_STATE;

    DEBUG_ASSERT(suspend_count_ >= 0);
    suspend_count_++;
    if (suspend_count_ == 1) {
        for (auto& thread : thread_list_) {
            // Thread suspend can only fail if the thread is already dying,
            // which is fine here since it will be removed from this process
            // shortly, so continue to suspend whether the thread suspend
            // succeeds or fails.
            zx_status_t status = thread.Suspend();
            DEBUG_ASSERT(status == ZX_OK || thread.IsDyingOrDead());
        }
    }

    return ZX_OK;
}

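// Decrements the process suspend count; on the 1 -> 0 transition every thread
// in the process is resumed. No-op if the process is dying or dead.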
void ProcessDispatcher::Resume() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    // If we're in the process of dying don't try to resume, just let it
    // continue to clean up.
    if (state_ == State::DYING || state_ == State::DEAD)
        return;

    DEBUG_ASSERT(suspend_count_ > 0);
    suspend_count_--;
    if (suspend_count_ == 0) {
        for (auto& thread : thread_list_) {
            thread.Resume();
        }
    }
}

void ProcessDispatcher::KillAllThreadsLocked() {
    LTRACE_ENTRY_OBJ;

    for (auto& thread : thread_list_) {
        LTRACEF("killing thread %p\n", &thread);
        thread.Kill();
    }
}

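// Adds |t| to the process's thread list. For the initial thread this also
// moves the process from INITIAL to RUNNING; |*suspended| tells the caller
// whether the new thread must start out suspended.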
zx_status_t ProcessDispatcher::AddThread(ThreadDispatcher* t,
                                         bool initial_thread,
                                         bool* suspended) {
    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    if (initial_thread) {
        if (state_ != State::INITIAL)
            return ZX_ERR_BAD_STATE;
    } else {
        // We must not add a thread when in the DYING or DEAD states.
        // Also, we want to ensure that this is not the first thread.
        if (state_ != State::RUNNING)
            return ZX_ERR_BAD_STATE;
    }

    // add the thread to our list
    DEBUG_ASSERT(thread_list_.is_empty() == initial_thread);
    thread_list_.push_back(t);

    DEBUG_ASSERT(t->process() == this);

    // If we're suspended, start this thread in suspended state as well.
    *suspended = (suspend_count_ > 0);

    if (initial_thread)
        SetStateLocked(State::RUNNING);

    return ZX_OK;
}

// This is called within thread T's context when it is exiting.

void ProcessDispatcher::RemoveThread(ThreadDispatcher* t) {
    LTRACE_ENTRY_OBJ;

    // ZX-880: Call RemoveChildProcess outside of |get_lock()|.
    bool became_dead = false;

    {
        // we're going to check for state and possibly transition below
        Guard<fbl::Mutex> guard{get_lock()};

        // remove the thread from our list
        DEBUG_ASSERT(t != nullptr);
        thread_list_.erase(*t);

        // if this was the last thread, transition directly to DEAD state
        if (thread_list_.is_empty()) {
            LTRACEF("last thread left the process %p, entering DEAD state\n", this);
            SetStateLocked(State::DEAD);
            became_dead = true;
        }
    }

    if (became_dead)
        FinishDeadTransition();
}

zx_koid_t ProcessDispatcher::get_related_koid() const {
    return job_->get_koid();
}

ProcessDispatcher::State ProcessDispatcher::state() const {
    Guard<fbl::Mutex> guard{get_lock()};
    return state_;
}

fbl::RefPtr<JobDispatcher> ProcessDispatcher::job() {
    return job_;
}

void ProcessDispatcher::SetStateLocked(State s) {
    LTRACEF("process %p: state %u (%s)\n", this, static_cast<unsigned int>(s), StateToString(s));

    DEBUG_ASSERT(get_lock()->lock().IsHeld());

    // look for some invalid state transitions
    if (state_ == State::DEAD && s != State::DEAD) {
        panic("ProcessDispatcher::SetStateLocked invalid state transition from DEAD to !DEAD\n");
        return;
    }

    // transitions to your own state are okay
    if (s == state_)
        return;

    state_ = s;

    if (s == State::DYING) {
        // send kill to all of our threads
        KillAllThreadsLocked();
    }
}

// Finish processing of the transition to State::DEAD. Some things need to be
// done outside of holding |get_lock()|. Beware this is called from several
// places including on_zero_handles().
void ProcessDispatcher::FinishDeadTransition() {
    DEBUG_ASSERT(!completely_dead_);
    completely_dead_ = true;

    // clean up the handle table
    LTRACEF_LEVEL(2, "cleaning up handle table on proc %p\n", this);

    fbl::DoublyLinkedList<Handle*> to_clean;
    {
        Guard<fbl::Mutex> guard{&handle_table_lock_};
        for (auto& handle : handles_) {
            handle.set_process_id(ZX_KOID_INVALID);
        }
        to_clean.swap(handles_);
    }

    // ZX-1544: If we are the last holder of a handle to one of our exception
    // ports, then ResetExceptionPort will get called here (by
    // ExceptionPort::OnPortZeroHandles) and will need to grab |get_lock()|.
    // This needs to be done outside of |get_lock()|.
    while (!to_clean.is_empty()) {
        // Delete handle via HandleOwner dtor.
        HandleOwner ho(to_clean.pop_front());
    }

    LTRACEF_LEVEL(2, "done cleaning up handle table on proc %p\n", this);

    // tear down the address space
    aspace_->Destroy();

    // signal waiters
    LTRACEF_LEVEL(2, "signaling waiters\n");
    UpdateState(0u, ZX_TASK_TERMINATED);

    // The PROC_CREATE record currently emits a uint32_t koid.
    uint32_t koid = static_cast<uint32_t>(get_koid());
    ktrace(TAG_PROC_EXIT, koid, 0, 0, 0);

    // Call job_->RemoveChildProcess(this) outside of |get_lock()|. Otherwise
    // we risk a deadlock as we have |get_lock()| and RemoveChildProcess grabs
    // the job's |lock_|, whereas JobDispatcher::EnumerateChildren obtains the
    // locks in the opposite order. We want to keep lock acquisition order
    // consistent, and JobDispatcher::EnumerateChildren's order makes
    // sense. We don't need |get_lock()| when calling RemoveChildProcess
    // here. ZX-880
    // RemoveChildProcess is called soon after releasing |get_lock()| so that
    // the semantics of signaling ZX_JOB_NO_PROCESSES match that of
    // ZX_TASK_TERMINATED.
    job_->RemoveChildProcess(this);
}


// process handle manipulation routines
zx_handle_t ProcessDispatcher::MapHandleToValue(const Handle* handle) const {
    return map_handle_to_value(handle, handle_rand_);
}

zx_handle_t ProcessDispatcher::MapHandleToValue(const HandleOwner& handle) const {
    return map_handle_to_value(handle.get(), handle_rand_);
}

Handle* ProcessDispatcher::GetHandleLocked(zx_handle_t handle_value,
                                           bool skip_policy) {
    auto handle = map_value_to_handle(handle_value, handle_rand_);
    if (handle && handle->process_id() == get_koid())
        return handle;

    // Handle lookup failed. We potentially generate an exception,
    // depending on the job policy. Note that we don't use the return
    // value from QueryBasicPolicy() here: ZX_POL_ACTION_ALLOW and
    // ZX_POL_ACTION_DENY are equivalent for ZX_POL_BAD_HANDLE.
    if (likely(!skip_policy))
        QueryBasicPolicy(ZX_POL_BAD_HANDLE);
    return nullptr;
}

void ProcessDispatcher::AddHandle(HandleOwner handle) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    AddHandleLocked(ktl::move(handle));
}

void ProcessDispatcher::AddHandleLocked(HandleOwner handle) {
    handle->set_process_id(get_koid());
    handles_.push_front(handle.release());
}

HandleOwner ProcessDispatcher::RemoveHandle(zx_handle_t handle_value) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    return RemoveHandleLocked(handle_value);
}

HandleOwner ProcessDispatcher::RemoveHandleLocked(zx_handle_t handle_value) {
    auto handle = GetHandleLocked(handle_value);
    if (!handle)
        return nullptr;

    handle->set_process_id(ZX_KOID_INVALID);
    handles_.erase(*handle);

    return HandleOwner(handle);
}

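// Removes the handles named in |user_handles| from this process's handle
// table, copying the user array in chunks of at most kMaxMessageHandles.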
zx_status_t ProcessDispatcher::RemoveHandles(user_in_ptr<const zx_handle_t> user_handles,
                                             size_t num_handles) {
    zx_status_t status = ZX_OK;
    size_t offset = 0;
    while (offset < num_handles) {
        // We process |num_handles| in chunks of |kMaxMessageHandles|
        // because we don't have a limit on how large |num_handles|
        // can be.
        auto chunk_size = fbl::min<size_t>(num_handles - offset, kMaxMessageHandles);

        zx_handle_t handles[kMaxMessageHandles];

        // If we fail |copy_array_from_user|, then we might discard some, but
        // not all, of the handles |user_handles| specified.
        if (user_handles.copy_array_from_user(handles, chunk_size, offset) != ZX_OK)
            return status;

        {
            Guard<fbl::Mutex> guard{handle_table_lock()};
            for (size_t ix = 0; ix != chunk_size; ++ix) {
                if (handles[ix] == ZX_HANDLE_INVALID)
                    continue;
                auto handle = RemoveHandleLocked(handles[ix]);
                if (!handle)
                    status = ZX_ERR_BAD_HANDLE;
            }
        }

        offset += chunk_size;
    }

    return status;
}

zx_koid_t ProcessDispatcher::GetKoidForHandle(zx_handle_t handle_value) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    Handle* handle = GetHandleLocked(handle_value);
    if (!handle)
        return ZX_KOID_INVALID;
    return handle->dispatcher()->get_koid();
}

zx_status_t ProcessDispatcher::GetDispatcherInternal(zx_handle_t handle_value,
                                                     fbl::RefPtr<Dispatcher>* dispatcher,
                                                     zx_rights_t* rights) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    Handle* handle = GetHandleLocked(handle_value);
    if (!handle)
        return ZX_ERR_BAD_HANDLE;

    *dispatcher = handle->dispatcher();
    if (rights)
        *rights = handle->rights();
    return ZX_OK;
}

zx_status_t ProcessDispatcher::GetDispatcherWithRightsInternal(zx_handle_t handle_value,
                                                               zx_rights_t desired_rights,
                                                               fbl::RefPtr<Dispatcher>* dispatcher_out,
                                                               zx_rights_t* out_rights) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    Handle* handle = GetHandleLocked(handle_value);
    if (!handle)
        return ZX_ERR_BAD_HANDLE;

    if (!handle->HasRights(desired_rights))
        return ZX_ERR_ACCESS_DENIED;

    *dispatcher_out = handle->dispatcher();
    if (out_rights)
        *out_rights = handle->rights();
    return ZX_OK;
}

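// Fills |info| with a snapshot of the process's start/exit state, return
// code, and whether a debugger is attached.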
zx_status_t ProcessDispatcher::GetInfo(zx_info_process_t* info) {
    memset(info, 0, sizeof(*info));

    State state;
    // retcode_ depends on the state: make sure they're consistent.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        state = state_;
        info->return_code = retcode_;
        // TODO: Protect with rights if necessary.
        info->debugger_attached = debugger_exception_port_ != nullptr;
    }

    switch (state) {
    case State::DEAD:
    case State::DYING:
        info->exited = true;
        __FALLTHROUGH;
    case State::RUNNING:
        info->started = true;
        break;
    case State::INITIAL:
    default:
        break;
    }

    return ZX_OK;
}

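// Reports memory usage for the process's address space in bytes; fails once
// the process is dead and its address space has been destroyed.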
zx_status_t ProcessDispatcher::GetStats(zx_info_task_stats_t* stats) {
    DEBUG_ASSERT(stats != nullptr);
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ == State::DEAD) {
        return ZX_ERR_BAD_STATE;
    }
    VmAspace::vm_usage_t usage;
    zx_status_t s = aspace_->GetMemoryUsage(&usage);
    if (s != ZX_OK) {
        return s;
    }
    stats->mem_mapped_bytes = usage.mapped_pages * PAGE_SIZE;
    stats->mem_private_bytes = usage.private_pages * PAGE_SIZE;
    stats->mem_shared_bytes = usage.shared_pages * PAGE_SIZE;
    stats->mem_scaled_shared_bytes = usage.scaled_shared_bytes;
    return ZX_OK;
}

zx_status_t ProcessDispatcher::GetAspaceMaps(
    user_out_ptr<zx_info_maps_t> maps, size_t max,
    size_t* actual, size_t* available) {
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ == State::DEAD) {
        return ZX_ERR_BAD_STATE;
    }
    return GetVmAspaceMaps(aspace_, maps, max, actual, available);
}

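// Enumerates the VMOs reachable from this process: first those referenced by
// handles in the handle table, then those mapped into the address space.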
zx_status_t ProcessDispatcher::GetVmos(
    user_out_ptr<zx_info_vmo_t> vmos, size_t max,
    size_t* actual_out, size_t* available_out) {
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ != State::RUNNING) {
        return ZX_ERR_BAD_STATE;
    }
    size_t actual = 0;
    size_t available = 0;
    zx_status_t s = GetProcessVmosViaHandles(this, vmos, max, &actual, &available);
    if (s != ZX_OK) {
        return s;
    }
    size_t actual2 = 0;
    size_t available2 = 0;
    DEBUG_ASSERT(max >= actual);
    s = GetVmAspaceVmos(aspace_, vmos.element_offset(actual), max - actual,
                        &actual2, &available2);
    if (s != ZX_OK) {
        return s;
    }
    *actual_out = actual + actual2;
    *available_out = available + available2;
    return ZX_OK;
}

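// Returns the koids of all threads currently on the process's thread list.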
zx_status_t ProcessDispatcher::GetThreads(fbl::Array<zx_koid_t>* out_threads) {
    Guard<fbl::Mutex> guard{get_lock()};
    size_t n = thread_list_.size_slow();
    fbl::Array<zx_koid_t> threads;
    fbl::AllocChecker ac;
    threads.reset(new (&ac) zx_koid_t[n], n);
    if (!ac.check())
        return ZX_ERR_NO_MEMORY;
    size_t i = 0;
    for (auto& thread : thread_list_) {
        threads[i] = thread.get_koid();
        ++i;
    }
    DEBUG_ASSERT(i == n);
    *out_threads = ktl::move(threads);
    return ZX_OK;
}

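// Binds |eport| as the process or debugger exception port, depending on the
// port's type. Fails if the process is dead or a port of that type is
// already bound.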
zx_status_t ProcessDispatcher::SetExceptionPort(fbl::RefPtr<ExceptionPort> eport) {
    LTRACE_ENTRY_OBJ;
    bool debugger = false;
    switch (eport->type()) {
    case ExceptionPort::Type::DEBUGGER:
        debugger = true;
        break;
    case ExceptionPort::Type::PROCESS:
        break;
    default:
        DEBUG_ASSERT_MSG(false, "unexpected port type: %d",
                         static_cast<int>(eport->type()));
        break;
    }

    // Lock |get_lock()| to ensure the process doesn't transition to dead
    // while we're setting the exception handler.
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ == State::DEAD)
        return ZX_ERR_NOT_FOUND;
    if (debugger) {
        if (debugger_exception_port_)
            return ZX_ERR_ALREADY_BOUND;
        debugger_exception_port_ = eport;
    } else {
        if (exception_port_)
            return ZX_ERR_ALREADY_BOUND;
        exception_port_ = eport;
    }

    return ZX_OK;
}

bool ProcessDispatcher::ResetExceptionPort(bool debugger) {
    LTRACE_ENTRY_OBJ;
    fbl::RefPtr<ExceptionPort> eport;

    // Remove the exception handler first. As we resume threads we don't
    // want them to hit another exception and get back into
    // ExceptionHandlerExchange.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        if (debugger) {
            debugger_exception_port_.swap(eport);
        } else {
            exception_port_.swap(eport);
        }
        if (eport == nullptr) {
            // Attempted to unbind when no exception port is bound.
            return false;
        }
        // This method must guarantee that no caller will return until
        // OnTargetUnbind has been called on the port-to-unbind.
        // This becomes important when a manual unbind races with a
        // PortDispatcher::on_zero_handles auto-unbind.
        //
        // If OnTargetUnbind were called outside of the lock, it would lead to
        // a race (for threads A and B):
        //
        //   A: Calls ResetExceptionPort; acquires the lock
        //   A: Sees a non-null exception_port_, swaps it into the eport local.
        //      exception_port_ is now null.
        //   A: Releases the lock
        //
        //   B: Calls ResetExceptionPort; acquires the lock
        //   B: Sees a null exception_port_ and returns. But OnTargetUnbind()
        //      hasn't yet been called for the port.
        //
        // So, call it before releasing the lock.
        eport->OnTargetUnbind();
    }

    OnExceptionPortRemoval(eport);
    return true;
}


fbl::RefPtr<ExceptionPort> ProcessDispatcher::exception_port() {
    Guard<fbl::Mutex> guard{get_lock()};
    return exception_port_;
}

fbl::RefPtr<ExceptionPort> ProcessDispatcher::debugger_exception_port() {
    Guard<fbl::Mutex> guard{get_lock()};
    return debugger_exception_port_;
}

void ProcessDispatcher::OnExceptionPortRemoval(
    const fbl::RefPtr<ExceptionPort>& eport) {
    Guard<fbl::Mutex> guard{get_lock()};
    for (auto& thread : thread_list_) {
        thread.OnExceptionPortRemoval(eport);
    }
}

uint32_t ProcessDispatcher::ThreadCount() const {
    canary_.Assert();

    Guard<fbl::Mutex> guard{get_lock()};
    return static_cast<uint32_t>(thread_list_.size_slow());
}

size_t ProcessDispatcher::PageCount() const {
    canary_.Assert();

    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ != State::RUNNING) {
        return 0;
    }
    return aspace_->AllocatedPages();
}

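// JobEnumerator that walks the job tree looking for a process whose koid
// matches; used by LookupProcessById() below.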
class FindProcessByKoid final : public JobEnumerator {
public:
    FindProcessByKoid(zx_koid_t koid) : koid_(koid) {}
    FindProcessByKoid(const FindProcessByKoid&) = delete;

    // To be called after enumeration.
    fbl::RefPtr<ProcessDispatcher> get_pd() { return pd_; }

private:
    bool OnProcess(ProcessDispatcher* process) final {
        if (process->get_koid() == koid_) {
            pd_ = fbl::WrapRefPtr(process);
            // Stop the enumeration.
            return false;
        }
        // Keep looking.
        return true;
    }

    const zx_koid_t koid_;
    fbl::RefPtr<ProcessDispatcher> pd_ = nullptr;
};

// static
fbl::RefPtr<ProcessDispatcher> ProcessDispatcher::LookupProcessById(zx_koid_t koid) {
    FindProcessByKoid finder(koid);
    GetRootJobDispatcher()->EnumerateChildren(&finder, /* recurse */ true);
    return finder.get_pd();
}

fbl::RefPtr<ThreadDispatcher> ProcessDispatcher::LookupThreadById(zx_koid_t koid) {
    LTRACE_ENTRY_OBJ;
    Guard<fbl::Mutex> guard{get_lock()};

    auto iter = thread_list_.find_if(
        [koid](const ThreadDispatcher& t) { return t.get_koid() == koid; });
    return fbl::WrapRefPtr(iter.CopyPointer());
}

uintptr_t ProcessDispatcher::get_debug_addr() const {
    Guard<fbl::Mutex> guard{get_lock()};
    return debug_addr_;
}

zx_status_t ProcessDispatcher::set_debug_addr(uintptr_t addr) {
    if (addr == 0u)
        return ZX_ERR_INVALID_ARGS;
    Guard<fbl::Mutex> guard{get_lock()};
    // Only allow the value to be set once to something other than the magic
    // "break on set" value: once ld.so has set it, that's it.
    if (!(debug_addr_ == 0u || debug_addr_ == ZX_PROCESS_DEBUG_ADDR_BREAK_ON_SET))
        return ZX_ERR_ACCESS_DENIED;
    debug_addr_ = addr;
    return ZX_OK;
}

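// Applies the job policy inherited at creation time for |condition|: may
// raise a policy exception on the calling thread, and returns
// ZX_ERR_ACCESS_DENIED when the policy action is DENY.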
zx_status_t ProcessDispatcher::QueryBasicPolicy(uint32_t condition) const {
    auto action = policy_.QueryBasicPolicy(condition);
    if (action & ZX_POL_ACTION_EXCEPTION) {
        thread_signal_policy_exception();
    }
    // TODO(cpu): check for the ZX_POL_KILL bit and return an error code
    // that abigen understands as termination.
    return (action & ZX_POL_ACTION_DENY) ? ZX_ERR_ACCESS_DENIED : ZX_OK;
}

TimerSlack ProcessDispatcher::GetTimerSlackPolicy() const {
    return policy_.GetTimerSlack();
}

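// Looks up the vDSO code address in the process's address space, caches it
// under the lock, and returns the cached value.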
uintptr_t ProcessDispatcher::cache_vdso_code_address() {
    Guard<fbl::Mutex> guard{get_lock()};
    vdso_code_address_ = aspace_->vdso_code_address();
    return vdso_code_address_;
}

const char* StateToString(ProcessDispatcher::State state) {
    switch (state) {
    case ProcessDispatcher::State::INITIAL:
        return "initial";
    case ProcessDispatcher::State::RUNNING:
        return "running";
    case ProcessDispatcher::State::DYING:
        return "dying";
    case ProcessDispatcher::State::DEAD:
        return "dead";
    }
    return "unknown";
}

bool ProcessDispatcher::IsHandleValid(zx_handle_t handle_value) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    return (GetHandleLocked(handle_value) != nullptr);
}

bool ProcessDispatcher::IsHandleValidNoPolicyCheck(zx_handle_t handle_value) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    return (GetHandleLocked(handle_value, true) != nullptr);
}

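// Walks up the job chain looking for the nearest job debugger exception port
// and, if one is found, notifies it that thread |t| is starting.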
void ProcessDispatcher::OnProcessStartForJobDebugger(ThreadDispatcher* t) {
    auto job = job_;
    while (job) {
        auto port = job->debugger_exception_port();
        if (port) {
            port->OnProcessStartForDebugger(t);
            break;
        } else {
            job = job->parent();
        }
    }
}