// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <object/thread_dispatcher.h>

#include <assert.h>
#include <err.h>
#include <inttypes.h>
#include <platform.h>
#include <string.h>
#include <trace.h>

#include <arch/debugger.h>
#include <arch/exception.h>

#include <kernel/thread.h>
#include <vm/kstack.h>
#include <vm/vm.h>
#include <vm/vm_address_region.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object_paged.h>

#include <zircon/rights.h>
#include <zircon/syscalls/debug.h>
#include <zircon/types.h>

#include <object/excp_port.h>
#include <object/handle.h>
#include <object/job_dispatcher.h>
#include <object/process_dispatcher.h>

#include <fbl/algorithm.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>

#include <ktl/move.h>

#define LOCAL_TRACE 0

// static
zx_status_t ThreadDispatcher::Create(fbl::RefPtr<ProcessDispatcher> process, uint32_t flags,
                                     fbl::StringPiece name,
                                     fbl::RefPtr<Dispatcher>* out_dispatcher,
                                     zx_rights_t* out_rights) {
    fbl::AllocChecker ac;
    auto disp = fbl::AdoptRef(new (&ac) ThreadDispatcher(ktl::move(process), flags));
    if (!ac.check())
        return ZX_ERR_NO_MEMORY;

    auto result = disp->Initialize(name.data(), name.length());
    if (result != ZX_OK)
        return result;

    *out_rights = default_rights();
    *out_dispatcher = ktl::move(disp);
    return ZX_OK;
}

ThreadDispatcher::ThreadDispatcher(fbl::RefPtr<ProcessDispatcher> process,
                                   uint32_t flags)
    : process_(ktl::move(process)) {
    LTRACE_ENTRY_OBJ;
}

ThreadDispatcher::~ThreadDispatcher() {
    LTRACE_ENTRY_OBJ;

    DEBUG_ASSERT(&thread_ != get_current_thread());

    switch (state_.lifecycle()) {
    case ThreadState::Lifecycle::DEAD: {
        // join the LK thread before doing anything else to clean up LK state and ensure
        // the thread we're destroying has stopped.
        LTRACEF("joining LK thread to clean up state\n");
        __UNUSED auto ret = thread_join(&thread_, nullptr, ZX_TIME_INFINITE);
        LTRACEF("done joining LK thread\n");
        DEBUG_ASSERT_MSG(ret == ZX_OK, "thread_join returned something other than ZX_OK\n");
        break;
    }
    case ThreadState::Lifecycle::INITIAL:
        // this gets a pass, we can destruct a partially constructed thread
        break;
    case ThreadState::Lifecycle::INITIALIZED:
        // as we've been initialized previously, forget the LK thread.
        // note that thread_forget is not called for self since the thread is not running
        thread_forget(&thread_);
        break;
    default:
        DEBUG_ASSERT_MSG(false, "bad state %s, this %p\n",
                         ThreadLifecycleToString(state_.lifecycle()), this);
    }

    event_destroy(&exception_event_);
}

// complete initialization of the thread object outside of the constructor
zx_status_t ThreadDispatcher::Initialize(const char* name, size_t len) {
    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    // Make sure LK's max name length agrees with ours.
    static_assert(THREAD_NAME_LENGTH == ZX_MAX_NAME_LEN, "name length issue");
    if (len >= ZX_MAX_NAME_LEN)
        len = ZX_MAX_NAME_LEN - 1;

    char thread_name[THREAD_NAME_LENGTH];
    memcpy(thread_name, name, len);
    memset(thread_name + len, 0, ZX_MAX_NAME_LEN - len);

    // create an underlying LK thread
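    // (thread_ is embedded in this ThreadDispatcher, so thread_create_etc() initializes it
    // in place rather than allocating a new thread_t; the assert below checks that.)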
    thread_t* lkthread = thread_create_etc(
        &thread_, thread_name, StartRoutine, this, DEFAULT_PRIORITY, nullptr);

    if (!lkthread) {
        TRACEF("error creating thread\n");
        return ZX_ERR_NO_MEMORY;
    }
    DEBUG_ASSERT(lkthread == &thread_);

    // register an event handler with the LK kernel
    thread_set_user_callback(&thread_, &ThreadUserCallback);

    // set the per-thread pointer
    lkthread->user_thread = this;

    // associate the proc's address space with this thread
    process_->aspace()->AttachToThread(lkthread);

    // we've entered the initialized state
    SetStateLocked(ThreadState::Lifecycle::INITIALIZED);

    return ZX_OK;
}

zx_status_t ThreadDispatcher::set_name(const char* name, size_t len) {
    canary_.Assert();

    // ignore characters after the first NUL
    len = strnlen(name, len);

    if (len >= ZX_MAX_NAME_LEN)
        len = ZX_MAX_NAME_LEN - 1;

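    // thread_.name is also read by get_name(), potentially from another thread, so both the
    // read and the write below go through |name_lock_|.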
    Guard<SpinLock, IrqSave> guard{&name_lock_};
    memcpy(thread_.name, name, len);
    memset(thread_.name + len, 0, ZX_MAX_NAME_LEN - len);
    return ZX_OK;
}

void ThreadDispatcher::get_name(char out_name[ZX_MAX_NAME_LEN]) const {
    canary_.Assert();

    Guard<SpinLock, IrqSave> guard{&name_lock_};
    memset(out_name, 0, ZX_MAX_NAME_LEN);
    strlcpy(out_name, thread_.name, ZX_MAX_NAME_LEN);
}

// start a thread
zx_status_t ThreadDispatcher::Start(uintptr_t entry, uintptr_t sp,
                                    uintptr_t arg1, uintptr_t arg2,
                                    bool initial_thread) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    is_initial_thread_ = initial_thread;

    Guard<fbl::Mutex> guard{get_lock()};

    if (state_.lifecycle() != ThreadState::Lifecycle::INITIALIZED)
        return ZX_ERR_BAD_STATE;

    // save the user space entry state
    user_entry_ = entry;
    user_sp_ = sp;
    user_arg1_ = arg1;
    user_arg2_ = arg2;

    // add ourselves to the process, which may fail if the process is in a dead state
    bool suspended;
    auto ret = process_->AddThread(this, initial_thread, &suspended);
    if (ret < 0)
        return ret;

    // update our suspend count to account for our parent state
    if (suspended)
        suspend_count_++;

    // bump the ref on this object; the LK thread state now owns that reference until the LK
    // thread has exited
    AddRef();
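    // record our koid and our process's koid in the LK thread struct so that lower-level
    // kernel code can identify which user thread and process this LK thread belongs to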
    thread_.user_tid = get_koid();
    thread_.user_pid = process_->get_koid();

    // start the thread in RUNNING state; if we're starting suspended it will transition to
    // SUSPENDED when it checks thread signals before executing any user code
    SetStateLocked(ThreadState::Lifecycle::RUNNING);

    if (suspend_count_ == 0) {
        thread_resume(&thread_);
    } else {
        // thread_suspend() only fails if the underlying thread is already dead, which we should
        // ignore here to match the behavior of thread_resume(); our Exiting() callback will run
        // shortly to clean us up
        thread_suspend(&thread_);
    }

    return ZX_OK;
}

// called in the context of our thread
void ThreadDispatcher::Exit() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // only valid to call this on the current thread
    DEBUG_ASSERT(get_current_thread() == &thread_);

    {
        Guard<fbl::Mutex> guard{get_lock()};

        SetStateLocked(ThreadState::Lifecycle::DYING);
    }

    // exit here
    // this will recurse back to us in ::Exiting()
    thread_exit(0);

    __UNREACHABLE;
}

void ThreadDispatcher::Kill() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    switch (state_.lifecycle()) {
    case ThreadState::Lifecycle::INITIAL:
    case ThreadState::Lifecycle::INITIALIZED:
        // thread was never started, leave in this state
        break;
    case ThreadState::Lifecycle::RUNNING:
    case ThreadState::Lifecycle::SUSPENDED:
        // deliver a kernel kill signal to the thread
        thread_kill(&thread_);

        // enter the dying state
        SetStateLocked(ThreadState::Lifecycle::DYING);
        break;
    case ThreadState::Lifecycle::DYING:
    case ThreadState::Lifecycle::DEAD:
        // already going down
        break;
    }
}

zx_status_t ThreadDispatcher::Suspend() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    LTRACEF("%p: state %s\n", this, ThreadLifecycleToString(state_.lifecycle()));

    // Update |suspend_count_| in all cases so that we can always verify a sane value - it's
    // possible both Suspend() and Resume() get called while the thread is DYING.
    DEBUG_ASSERT(suspend_count_ >= 0);
    suspend_count_++;

    switch (state_.lifecycle()) {
    case ThreadState::Lifecycle::INITIAL:
        // Unreachable, thread leaves INITIAL state before Create() returns.
        DEBUG_ASSERT(false);
        __UNREACHABLE;
    case ThreadState::Lifecycle::INITIALIZED:
        // If the thread hasn't started yet, don't actually try to suspend it. We need to let
        // Start() run first to set up userspace entry data, which will then suspend if the count
        // is still >0 at that time.
        return ZX_OK;
    case ThreadState::Lifecycle::RUNNING:
    case ThreadState::Lifecycle::SUSPENDED:
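        // Only the first outstanding Suspend() actually suspends the underlying LK thread;
        // nested suspends are just counted and undone by matching Resume() calls.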
        if (suspend_count_ == 1)
            return thread_suspend(&thread_);
        return ZX_OK;
    case ThreadState::Lifecycle::DYING:
    case ThreadState::Lifecycle::DEAD:
        return ZX_ERR_BAD_STATE;
    }

    DEBUG_ASSERT(false);
    return ZX_ERR_BAD_STATE;
}

void ThreadDispatcher::Resume() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    LTRACEF("%p: state %s\n", this, ThreadLifecycleToString(state_.lifecycle()));

    DEBUG_ASSERT(suspend_count_ > 0);
    suspend_count_--;

    switch (state_.lifecycle()) {
    case ThreadState::Lifecycle::INITIAL:
        // Unreachable, thread leaves INITIAL state before Create() returns.
        DEBUG_ASSERT(false);
        __UNREACHABLE;
    case ThreadState::Lifecycle::INITIALIZED:
        break;
    case ThreadState::Lifecycle::RUNNING:
    case ThreadState::Lifecycle::SUSPENDED:
        // It's possible the thread never transitioned from RUNNING -> SUSPENDED.
        if (suspend_count_ == 0)
            thread_resume(&thread_);
        break;
    case ThreadState::Lifecycle::DYING:
    case ThreadState::Lifecycle::DEAD:
        // If it's dying or dead then bail.
        break;
    }
}

bool ThreadDispatcher::IsDyingOrDead() const {
    Guard<fbl::Mutex> guard{get_lock()};
    return state_.lifecycle() == ThreadState::Lifecycle::DYING ||
           state_.lifecycle() == ThreadState::Lifecycle::DEAD;
}

static void ThreadCleanupDpc(dpc_t* d) {
    LTRACEF("dpc %p\n", d);

    ThreadDispatcher* t = reinterpret_cast<ThreadDispatcher*>(d->arg);
    DEBUG_ASSERT(t);

    delete t;
}

void ThreadDispatcher::Exiting() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // Notify a debugger if attached. Do this before marking the thread as
    // dead: the debugger expects to see the thread in the DYING state, it may
    // try to read thread registers. The debugger still has to handle the case
    // where the process is also dying (and thus the thread could transition
    // DYING->DEAD from underneath it), but that's life (or death :-)).
    // N.B. OnThreadExitForDebugger will block in ExceptionHandlerExchange, so
    // don't hold the process's |state_lock_| across the call.
    {
        fbl::RefPtr<ExceptionPort> eport(process_->debugger_exception_port());
        if (eport) {
            eport->OnThreadExitForDebugger(this);
        }
    }

    // Mark the thread as dead. Do this before removing the thread from the
    // process because if this is the last thread then the process will be
    // marked dead, and we don't want to have a state where the process is
    // dead but one thread is not.
    {
        Guard<fbl::Mutex> guard{get_lock()};

        // put ourselves into the dead state
        SetStateLocked(ThreadState::Lifecycle::DEAD);
    }

    // remove ourselves from our parent process's view
    process_->RemoveThread(this);

    // drop LK's reference
    if (Release()) {
        // We're the last reference, so we would need to destruct ourselves while running, which
        // is not possible. Use a DPC to pull this off.
        cleanup_dpc_.func = ThreadCleanupDpc;
        cleanup_dpc_.arg = this;

        // disable interrupts before queuing the dpc to prevent starving the DPC thread if it
        // starts running before we've finished.
        // disabling interrupts effectively raises us to maximum priority on this cpu.
        // note this is only safe because we're about to exit the thread permanently so the context
        // switch will effectively reenable interrupts in the new thread.
        arch_disable_ints();

        // queue without reschedule since our exiting is already a reschedule event
        dpc_queue(&cleanup_dpc_, false);
    }

    // after this point the thread will stop permanently
    LTRACE_EXIT_OBJ;
}

void ThreadDispatcher::Suspending() {
    LTRACE_ENTRY_OBJ;

    // Update the state before sending any notifications out. We want the
    // receiver to see the new state.
    {
        Guard<fbl::Mutex> guard{get_lock()};

        // Don't suspend if we are racing with our own death.
        if (state_.lifecycle() != ThreadState::Lifecycle::DYING) {
            SetStateLocked(ThreadState::Lifecycle::SUSPENDED);
        }
    }

    LTRACE_EXIT_OBJ;
}

void ThreadDispatcher::Resuming() {
    LTRACE_ENTRY_OBJ;

    // Update the state before sending any notifications out. We want the
    // receiver to see the new state.
    {
        Guard<fbl::Mutex> guard{get_lock()};

        // Don't resume if we are racing with our own death.
        if (state_.lifecycle() != ThreadState::Lifecycle::DYING) {
            SetStateLocked(ThreadState::Lifecycle::RUNNING);
        }
    }

    LTRACE_EXIT_OBJ;
}

// low level LK callback in thread's context just before exiting
void ThreadDispatcher::ThreadUserCallback(enum thread_user_state_change new_state, thread_t* arg) {
    ThreadDispatcher* t = arg->user_thread;
    DEBUG_ASSERT(t != nullptr);

    switch (new_state) {
    case THREAD_USER_STATE_EXIT:
        t->Exiting();
        return;
    case THREAD_USER_STATE_SUSPEND:
        t->Suspending();
        return;
    case THREAD_USER_STATE_RESUME:
        t->Resuming();
        return;
    }
}

// low level LK entry point for the thread
int ThreadDispatcher::StartRoutine(void* arg) {
    LTRACE_ENTRY;

    ThreadDispatcher* t = (ThreadDispatcher*)arg;

    // Notify job debugger if attached.
    if (t->is_initial_thread_) {
        t->process_->OnProcessStartForJobDebugger(t);
    }

    // Notify debugger if attached.
    // This is done by first obtaining our own reference to the port so the
    // test can be done safely. Note that this function doesn't return so we
    // need the reference to go out of scope before then.
    {
        fbl::RefPtr<ExceptionPort> debugger_port(t->process_->debugger_exception_port());
        if (debugger_port) {
            debugger_port->OnThreadStartForDebugger(t);
        }
    }

    LTRACEF("arch_enter_uspace SP: %#" PRIxPTR " PC: %#" PRIxPTR
            ", ARG1: %#" PRIxPTR ", ARG2: %#" PRIxPTR "\n",
            t->user_sp_, t->user_entry_, t->user_arg1_, t->user_arg2_);

    // switch to user mode and start the process
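    // arch_enter_uspace() does not return; this thread continues in user mode and the kernel
    // is only re-entered via syscalls, exceptions, or interrupts.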
    arch_enter_uspace(t->user_entry_, t->user_sp_,
                      t->user_arg1_, t->user_arg2_);

    __UNREACHABLE;
}

void ThreadDispatcher::SetStateLocked(ThreadState::Lifecycle lifecycle) {
    canary_.Assert();

    LTRACEF("thread %p: state %u (%s)\n", this, static_cast<unsigned int>(lifecycle),
            ThreadLifecycleToString(lifecycle));

    DEBUG_ASSERT(get_lock()->lock().IsHeld());

    state_.set(lifecycle);

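    // Keep the observable zx thread signals in sync with the new lifecycle: the calls below
    // clear the signals for the state being left and set the signal for the state being entered.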
    switch (lifecycle) {
    case ThreadState::Lifecycle::RUNNING:
        UpdateStateLocked(ZX_THREAD_SUSPENDED, ZX_THREAD_RUNNING);
        break;
    case ThreadState::Lifecycle::SUSPENDED:
        UpdateStateLocked(ZX_THREAD_RUNNING, ZX_THREAD_SUSPENDED);
        break;
    case ThreadState::Lifecycle::DEAD:
        UpdateStateLocked(ZX_THREAD_RUNNING | ZX_THREAD_SUSPENDED, ZX_THREAD_TERMINATED);
        break;
    default:
        // Nothing to do.
        // In particular, for the DYING state we don't modify the SUSPENDED
        // or RUNNING signals: For observer purposes they'll only be interested
        // in the transition from {SUSPENDED,RUNNING} to DEAD.
        break;
    }
}

zx_status_t ThreadDispatcher::SetExceptionPort(fbl::RefPtr<ExceptionPort> eport) {
    canary_.Assert();

    DEBUG_ASSERT(eport->type() == ExceptionPort::Type::THREAD);

    // Lock |state_lock_| to ensure the thread doesn't transition to dead
    // while we're setting the exception handler.
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_.lifecycle() == ThreadState::Lifecycle::DEAD)
        return ZX_ERR_NOT_FOUND;
    if (exception_port_)
        return ZX_ERR_ALREADY_BOUND;
    exception_port_ = eport;

    return ZX_OK;
}

bool ThreadDispatcher::ResetExceptionPort() {
    canary_.Assert();

    fbl::RefPtr<ExceptionPort> eport;

    // Remove the exception handler first. If the thread resumes execution
    // we don't want it to hit another exception and get back into
    // ExceptionHandlerExchange.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        exception_port_.swap(eport);
        if (eport == nullptr) {
            // Attempted to unbind when no exception port is bound.
            return false;
        }
        // This method must guarantee that no caller will return until
        // OnTargetUnbind has been called on the port-to-unbind.
        // This becomes important when a manual unbind races with a
        // PortDispatcher::on_zero_handles auto-unbind.
        //
        // If OnTargetUnbind were called outside of the lock, it would lead to
        // a race (for threads A and B):
        //
        // A: Calls ResetExceptionPort; acquires the lock
        // A: Sees a non-null exception_port_, swaps it into the eport local.
        //    exception_port_ is now null.
        // A: Releases the lock
        //
        // B: Calls ResetExceptionPort; acquires the lock
        // B: Sees a null exception_port_ and returns. But OnTargetUnbind()
        //    hasn't yet been called for the port.
        //
        // So, call it before releasing the lock
        eport->OnTargetUnbind();
    }

    OnExceptionPortRemoval(eport);
    return true;
}

fbl::RefPtr<ExceptionPort> ThreadDispatcher::exception_port() {
    canary_.Assert();

    Guard<fbl::Mutex> guard{get_lock()};
    return exception_port_;
}

zx_status_t ThreadDispatcher::ExceptionHandlerExchange(
    fbl::RefPtr<ExceptionPort> eport,
    const zx_exception_report_t* report,
    const arch_exception_context_t* arch_context,
    ThreadState::Exception* out_estatus) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // Note: As far as userspace is concerned there is no state change that we would notify state
    // tracker observers of, currently.
    //
    // Send message, wait for reply. Note that there is a "race" that we need to handle: We need to
    // send the exception report before going to sleep, but what if the receiver of the report gets
    // it and processes it before we are asleep? This is handled by locking state_lock_ in places
    // where the handler can see/modify thread state.

    EnterException(eport, report, arch_context);

    zx_status_t status;

    {
        // The caller may have already done this, but do it again for the
        // one-off callers like the debugger synthetic exceptions.
        AutoBlocked by(Blocked::EXCEPTION);

        // There's no need to send the message under the lock, but we do need to make sure our
        // exception state and blocked state are up to date before sending the message. Otherwise, a
        // debugger could get the packet and observe them before we've updated them. Thus, send the
        // packet after updating both exception state and blocked state.
        status = eport->SendPacket(this, report->header.type);
        if (status != ZX_OK) {
            // Can't send the request to the exception handler. Report the error, which will
            // probably kill the process.
            LTRACEF("SendPacket returned %d\n", status);
            ExitException();
            return status;
        }

        // Continue to wait for the exception response if we get suspended.
        // If it is suspended, the suspension will be processed after the
        // exception response is received (requiring a second resume).
        // Exceptions and suspensions are essentially treated orthogonally.

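        // Retry the wait whenever it is interrupted (ZX_ERR_INTERNAL_INTR_RETRY); only a real
        // completion (ZX_OK) or a kill (ZX_ERR_INTERNAL_INTR_KILLED) ends the exchange.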
        do {
            status = event_wait_with_mask(&exception_event_, THREAD_SIGNAL_SUSPEND);
        } while (status == ZX_ERR_INTERNAL_INTR_RETRY);
    }

    Guard<fbl::Mutex> guard{get_lock()};

    // Note: If |status| != ZX_OK, then |state_| is still
    // ThreadState::Exception::UNPROCESSED.
    switch (status) {
    case ZX_OK:
        // It's critical that at this point the event no longer be armed.
        // Otherwise the next time we get an exception we'll fall right through
        // without waiting for an exception response.
        // Note: The event could be signaled after the wait returns
        // if the thread was killed while the event was signaled.
        DEBUG_ASSERT(!event_signaled(&exception_event_));
        // Fetch TRY_NEXT/RESUME status.
        *out_estatus = state_.exception();
        DEBUG_ASSERT(*out_estatus == ThreadState::Exception::TRY_NEXT ||
                     *out_estatus == ThreadState::Exception::RESUME);
        break;
    case ZX_ERR_INTERNAL_INTR_KILLED:
        // Thread was killed.
        break;
    default:
        ASSERT_MSG(false, "unexpected exception result: %d\n", status);
        __UNREACHABLE;
    }

    ExitExceptionLocked();

    LTRACEF("returning status %d, estatus %d\n",
            status, static_cast<int>(*out_estatus));
    return status;
}

void ThreadDispatcher::EnterException(fbl::RefPtr<ExceptionPort> eport,
                                      const zx_exception_report_t* report,
                                      const arch_exception_context_t* arch_context) {
    Guard<fbl::Mutex> guard{get_lock()};

    // Mark that we're in an exception.
    thread_.exception_context = arch_context;

    // For GetExceptionReport.
    exception_report_ = report;

    // For OnExceptionPortRemoval in case the port is unbound.
    DEBUG_ASSERT(exception_wait_port_ == nullptr);
    exception_wait_port_ = eport;

    state_.set(ThreadState::Exception::UNPROCESSED);
}

void ThreadDispatcher::ExitException() {
    Guard<fbl::Mutex> guard{get_lock()};
    ExitExceptionLocked();
}

void ThreadDispatcher::ExitExceptionLocked() {
    exception_wait_port_.reset();
    exception_report_ = nullptr;
    thread_.exception_context = nullptr;
    state_.set(ThreadState::Exception::IDLE);
}

zx_status_t ThreadDispatcher::MarkExceptionHandledWorker(PortDispatcher* eport,
                                                         ThreadState::Exception handled_state) {
    canary_.Assert();

    LTRACEF("obj %p\n", this);

    Guard<fbl::Mutex> guard{get_lock()};
    if (!InExceptionLocked())
        return ZX_ERR_BAD_STATE;

    // The exception port isn't used directly but is instead proof that the caller has
    // permission to resume from the exception. So validate that it corresponds to the
    // task being resumed.
    if (!exception_wait_port_->PortMatches(eport, /* allow_null */ false))
        return ZX_ERR_ACCESS_DENIED;

    // The thread can be in several states at this point. Alas this is a bit complicated because
    // there is a window in the middle of ExceptionHandlerExchange between the thread going to sleep
    // and after the thread waking up where we can obtain the lock. Things are further complicated
    // by the fact that OnExceptionPortRemoval could get there first, or we might get called a
    // second time for the same exception. It's critical that we don't re-arm the event after the
    // thread wakes up. To keep things simple we take a first-one-wins approach.
    if (state_.exception() != ThreadState::Exception::UNPROCESSED)
        return ZX_ERR_BAD_STATE;

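    // Record the handler's decision and wake the thread blocked in ExceptionHandlerExchange().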
    state_.set(handled_state);
    event_signal(&exception_event_, true);
    return ZX_OK;
}

zx_status_t ThreadDispatcher::MarkExceptionHandled(PortDispatcher* eport) {
    return MarkExceptionHandledWorker(eport, ThreadState::Exception::RESUME);
}

zx_status_t ThreadDispatcher::MarkExceptionNotHandled(PortDispatcher* eport) {
    return MarkExceptionHandledWorker(eport, ThreadState::Exception::TRY_NEXT);
}

void ThreadDispatcher::OnExceptionPortRemoval(const fbl::RefPtr<ExceptionPort>& eport) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;
    Guard<fbl::Mutex> guard{get_lock()};
    if (!InExceptionLocked())
        return;
    if (exception_wait_port_ == eport) {
        // Leave things alone if already processed. See MarkExceptionHandled.
        if (state_.exception() == ThreadState::Exception::UNPROCESSED) {
            state_.set(ThreadState::Exception::TRY_NEXT);
            event_signal(&exception_event_, true);
        }
    }
}

bool ThreadDispatcher::InExceptionLocked() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;
    DEBUG_ASSERT(get_lock()->lock().IsHeld());
    return thread_stopped_in_exception(&thread_);
}

zx_status_t ThreadDispatcher::GetInfoForUserspace(zx_info_thread_t* info) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    *info = {};

    ThreadState state;
    Blocked blocked_reason;
    ExceptionPort::Type excp_port_type;
    // We need to fetch all these values under lock, but once we have them
    // we no longer need the lock.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        state = state_;
        blocked_reason = blocked_reason_;
        if (InExceptionLocked() &&
            // A port type of !NONE here indicates to the caller that the
            // thread is waiting for an exception response. So don't return
            // !NONE if the thread just woke up but hasn't reacquired
            // |state_lock_|.
            state_.exception() == ThreadState::Exception::UNPROCESSED) {
            DEBUG_ASSERT(exception_wait_port_ != nullptr);
            excp_port_type = exception_wait_port_->type();
        } else {
            // Either we're not in an exception, or we're in the window where
            // the exception wait has woken up but |state_lock_| has
            // not been reacquired.
            DEBUG_ASSERT(exception_wait_port_ == nullptr ||
                         state_.exception() != ThreadState::Exception::UNPROCESSED);
            excp_port_type = ExceptionPort::Type::NONE;
        }
    }

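    // Translate the internal lifecycle/blocked-reason pair and the waiting exception port type
    // into the userspace zx_info_thread_t encoding.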
    switch (state.lifecycle()) {
    case ThreadState::Lifecycle::INITIAL:
    case ThreadState::Lifecycle::INITIALIZED:
        info->state = ZX_THREAD_STATE_NEW;
        break;
    case ThreadState::Lifecycle::RUNNING:
        // The thread may be "running" but be blocked in a syscall or
        // exception handler.
        switch (blocked_reason) {
        case Blocked::NONE:
            info->state = ZX_THREAD_STATE_RUNNING;
            break;
        case Blocked::EXCEPTION:
            info->state = ZX_THREAD_STATE_BLOCKED_EXCEPTION;
            break;
        case Blocked::SLEEPING:
            info->state = ZX_THREAD_STATE_BLOCKED_SLEEPING;
            break;
        case Blocked::FUTEX:
            info->state = ZX_THREAD_STATE_BLOCKED_FUTEX;
            break;
        case Blocked::PORT:
            info->state = ZX_THREAD_STATE_BLOCKED_PORT;
            break;
        case Blocked::CHANNEL:
            info->state = ZX_THREAD_STATE_BLOCKED_CHANNEL;
            break;
        case Blocked::WAIT_ONE:
            info->state = ZX_THREAD_STATE_BLOCKED_WAIT_ONE;
            break;
        case Blocked::WAIT_MANY:
            info->state = ZX_THREAD_STATE_BLOCKED_WAIT_MANY;
            break;
        case Blocked::INTERRUPT:
            info->state = ZX_THREAD_STATE_BLOCKED_INTERRUPT;
            break;
        default:
            DEBUG_ASSERT_MSG(false, "unexpected blocked reason: %d",
                             static_cast<int>(blocked_reason));
            break;
        }
        break;
    case ThreadState::Lifecycle::SUSPENDED:
        info->state = ZX_THREAD_STATE_SUSPENDED;
        break;
    case ThreadState::Lifecycle::DYING:
        info->state = ZX_THREAD_STATE_DYING;
        break;
    case ThreadState::Lifecycle::DEAD:
        info->state = ZX_THREAD_STATE_DEAD;
        break;
    default:
        DEBUG_ASSERT_MSG(false, "unexpected run state: %d",
                         static_cast<int>(state.lifecycle()));
        break;
    }

    switch (excp_port_type) {
    case ExceptionPort::Type::NONE:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_NONE;
        break;
    case ExceptionPort::Type::DEBUGGER:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_DEBUGGER;
        break;
    case ExceptionPort::Type::JOB_DEBUGGER:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_JOB_DEBUGGER;
        break;
    case ExceptionPort::Type::THREAD:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_THREAD;
        break;
    case ExceptionPort::Type::PROCESS:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_PROCESS;
        break;
    case ExceptionPort::Type::JOB:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_JOB;
        break;
    default:
        DEBUG_ASSERT_MSG(false, "unexpected exception port type: %d",
                         static_cast<int>(excp_port_type));
        break;
    }

    return ZX_OK;
}

zx_status_t ThreadDispatcher::GetStatsForUserspace(zx_info_thread_stats_t* info) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    *info = {};

    info->total_runtime = runtime_ns();
    return ZX_OK;
}

zx_status_t ThreadDispatcher::GetExceptionReport(zx_exception_report_t* report) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;
    Guard<fbl::Mutex> guard{get_lock()};
    if (!InExceptionLocked())
        return ZX_ERR_BAD_STATE;
    DEBUG_ASSERT(exception_report_ != nullptr);
    *report = *exception_report_;
    return ZX_OK;
}

// Note: buffer must be sufficiently aligned

zx_status_t ThreadDispatcher::ReadState(zx_thread_state_topic_t state_kind,
                                        void* buffer, size_t buffer_len) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // We can't be reading regs while the thread transitions from
    // SUSPENDED to RUNNING.
    Guard<fbl::Mutex> guard{get_lock()};

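    // Register access is only allowed while the thread is quiescent: either suspended or
    // stopped in an exception.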
    if (state_.lifecycle() != ThreadState::Lifecycle::SUSPENDED && !InExceptionLocked())
        return ZX_ERR_BAD_STATE;

    switch (state_kind) {
    case ZX_THREAD_STATE_GENERAL_REGS: {
        if (buffer_len != sizeof(zx_thread_state_general_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_get_general_regs(
            &thread_, static_cast<zx_thread_state_general_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_FP_REGS: {
        if (buffer_len != sizeof(zx_thread_state_fp_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_get_fp_regs(
            &thread_, static_cast<zx_thread_state_fp_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_VECTOR_REGS: {
        if (buffer_len != sizeof(zx_thread_state_vector_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_get_vector_regs(
            &thread_, static_cast<zx_thread_state_vector_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_DEBUG_REGS: {
        if (buffer_len != sizeof(zx_thread_state_debug_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_get_debug_regs(
            &thread_, static_cast<zx_thread_state_debug_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_SINGLE_STEP: {
        if (buffer_len != sizeof(zx_thread_state_single_step_t))
            return ZX_ERR_INVALID_ARGS;
        bool single_step;
        zx_status_t status = arch_get_single_step(&thread_, &single_step);
        if (status != ZX_OK)
            return status;
        *static_cast<zx_thread_state_single_step_t*>(buffer) =
            static_cast<zx_thread_state_single_step_t>(single_step);
        return ZX_OK;
    }
    default:
        return ZX_ERR_INVALID_ARGS;
    }
}

// Note: buffer must be sufficiently aligned

zx_status_t ThreadDispatcher::WriteState(zx_thread_state_topic_t state_kind,
                                         const void* buffer, size_t buffer_len) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // We can't be writing regs while the thread transitions from
    // SUSPENDED to RUNNING.
    Guard<fbl::Mutex> guard{get_lock()};

    if (state_.lifecycle() != ThreadState::Lifecycle::SUSPENDED && !InExceptionLocked())
        return ZX_ERR_BAD_STATE;

    switch (state_kind) {
    case ZX_THREAD_STATE_GENERAL_REGS: {
        if (buffer_len != sizeof(zx_thread_state_general_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_set_general_regs(
            &thread_, static_cast<const zx_thread_state_general_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_FP_REGS: {
        if (buffer_len != sizeof(zx_thread_state_fp_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_set_fp_regs(
            &thread_, static_cast<const zx_thread_state_fp_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_VECTOR_REGS: {
        if (buffer_len != sizeof(zx_thread_state_vector_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_set_vector_regs(
            &thread_, static_cast<const zx_thread_state_vector_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_DEBUG_REGS: {
        if (buffer_len != sizeof(zx_thread_state_debug_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_set_debug_regs(
            &thread_, static_cast<const zx_thread_state_debug_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_SINGLE_STEP: {
        if (buffer_len != sizeof(zx_thread_state_single_step_t))
            return ZX_ERR_INVALID_ARGS;
        const zx_thread_state_single_step_t* single_step =
            static_cast<const zx_thread_state_single_step_t*>(buffer);
        if (*single_step != 0 && *single_step != 1)
            return ZX_ERR_INVALID_ARGS;
        return arch_set_single_step(&thread_, !!*single_step);
    }
    default:
        return ZX_ERR_INVALID_ARGS;
    }
}

zx_status_t ThreadDispatcher::SetPriority(int32_t priority) {
    Guard<fbl::Mutex> guard{get_lock()};
    if ((state_.lifecycle() == ThreadState::Lifecycle::INITIAL) ||
        (state_.lifecycle() == ThreadState::Lifecycle::DYING) ||
        (state_.lifecycle() == ThreadState::Lifecycle::DEAD)) {
        return ZX_ERR_BAD_STATE;
    }
    // The priority was already validated by the Profile dispatcher.
    thread_set_priority(&thread_, priority);
    return ZX_OK;
}

const char* ThreadLifecycleToString(ThreadState::Lifecycle lifecycle) {
    switch (lifecycle) {
    case ThreadState::Lifecycle::INITIAL:
        return "initial";
    case ThreadState::Lifecycle::INITIALIZED:
        return "initialized";
    case ThreadState::Lifecycle::RUNNING:
        return "running";
    case ThreadState::Lifecycle::SUSPENDED:
        return "suspended";
    case ThreadState::Lifecycle::DYING:
        return "dying";
    case ThreadState::Lifecycle::DEAD:
        return "dead";
    }
    return "unknown";
}

zx_koid_t ThreadDispatcher::get_related_koid() const {
    canary_.Assert();

    return process_->get_koid();
}