// Copyright 2018 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT
#include <vm/kstack.h>

#include <assert.h>
#include <err.h>
#include <inttypes.h>
#include <string.h>
#include <trace.h>

#include <vm/vm.h>
#include <vm/vm_address_region.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object_paged.h>

#include <fbl/algorithm.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>
#include <fbl/ref_ptr.h>
#include <ktl/move.h>

#define LOCAL_TRACE 0

// Allocates and maps a kernel stack with one page of padding before and after the mapping.
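// The resulting sub-VMAR is laid out as
//
//   [ page of padding | stack (DEFAULT_STACK_SIZE) | page of padding ]
//
// and the padding pages are left unmapped, so running off either end of the
// stack faults instead of silently corrupting adjacent memory.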
static zx_status_t allocate_vmar(bool unsafe,
                                 fbl::RefPtr<VmMapping>* out_kstack_mapping,
                                 fbl::RefPtr<VmAddressRegion>* out_kstack_vmar) {
    LTRACEF("allocating %s stack\n", unsafe ? "unsafe" : "safe");

    // get a handle to the root vmar
    auto vmar = VmAspace::kernel_aspace()->RootVmar()->as_vm_address_region();
    DEBUG_ASSERT(!!vmar);

    // Create a VMO for our stack
    fbl::RefPtr<VmObject> stack_vmo;
    zx_status_t status = VmObjectPaged::Create(
        PMM_ALLOC_FLAG_ANY, 0u, DEFAULT_STACK_SIZE, &stack_vmo);
    if (status != ZX_OK) {
        TRACEF("error allocating %s stack for thread\n",
               unsafe ? "unsafe" : "safe");
        return status;
    }
    const char* name = unsafe ? "unsafe-stack" : "safe-stack";
    stack_vmo->set_name(name, strlen(name));

    // create a vmar with enough padding for a page before and after the stack
    const size_t padding_size = PAGE_SIZE;

    fbl::RefPtr<VmAddressRegion> kstack_vmar;
    status = vmar->CreateSubVmar(
        0, 2 * padding_size + DEFAULT_STACK_SIZE, 0,
        VMAR_FLAG_CAN_MAP_SPECIFIC |
            VMAR_FLAG_CAN_MAP_READ |
            VMAR_FLAG_CAN_MAP_WRITE,
        unsafe ? "unsafe_kstack_vmar" : "kstack_vmar",
        &kstack_vmar);
    if (status != ZX_OK) {
        return status;
    }

    // destroy the vmar if we early abort
    // this will also clean up any mappings that may get placed on the vmar
    auto vmar_cleanup = fbl::MakeAutoCall([&kstack_vmar]() {
        kstack_vmar->Destroy();
    });

    LTRACEF("%s stack vmar at %#" PRIxPTR "\n",
            unsafe ? "unsafe" : "safe", kstack_vmar->base());

    // create a mapping offset padding_size into the vmar we created
    fbl::RefPtr<VmMapping> kstack_mapping;
    status = kstack_vmar->CreateVmMapping(padding_size, DEFAULT_STACK_SIZE, 0,
                                          VMAR_FLAG_SPECIFIC,
                                          ktl::move(stack_vmo), 0,
                                          ARCH_MMU_FLAG_PERM_READ |
                                              ARCH_MMU_FLAG_PERM_WRITE,
                                          unsafe ? "unsafe_kstack" : "kstack",
                                          &kstack_mapping);
    if (status != ZX_OK) {
        return status;
    }

    LTRACEF("%s stack mapping at %#" PRIxPTR "\n",
            unsafe ? "unsafe" : "safe", kstack_mapping->base());

    // fault in all the pages so we don't demand fault in the stack
    status = kstack_mapping->MapRange(0, DEFAULT_STACK_SIZE, true);
    if (status != ZX_OK) {
        return status;
    }

    // Cancel the cleanup handler on the vmar since we're about to save a
    // reference to it.
    vmar_cleanup.cancel();
    *out_kstack_mapping = ktl::move(kstack_mapping);
    *out_kstack_vmar = ktl::move(kstack_vmar);

    return ZX_OK;
}

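// Typical call sequence (sketch; |stack| must start out zero-initialized):
//
//   kstack_t stack = {};
//   if (vm_allocate_kstack(&stack) == ZX_OK) {
//       // ... run the thread on [stack.base, stack.top) ...
//       vm_free_kstack(&stack);
//   }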
zx_status_t vm_allocate_kstack(kstack_t* stack) {
    DEBUG_ASSERT(stack->base == 0);
    DEBUG_ASSERT(stack->size == 0);
    DEBUG_ASSERT(stack->top == 0);
    DEBUG_ASSERT(stack->vmar == nullptr);
#if __has_feature(safe_stack)
    DEBUG_ASSERT(stack->unsafe_base == 0);
    DEBUG_ASSERT(stack->unsafe_vmar == nullptr);
#endif

    fbl::RefPtr<VmMapping> mapping;
    fbl::RefPtr<VmAddressRegion> vmar;
    zx_status_t status = allocate_vmar(false, &mapping, &vmar);
    if (status != ZX_OK) {
        return status;
    }
    stack->size = mapping->size();
    stack->base = mapping->base();
    stack->top = mapping->base() + DEFAULT_STACK_SIZE;

    // Stash address of VMAR so we can later free it in |vm_free_kstack|.
    stack->vmar = vmar.leak_ref();

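    // With the compiler's SafeStack instrumentation, each thread also needs a
    // second, "unsafe" stack for locals whose addresses may escape; the stack
    // allocated above then only holds data the compiler considers safe, such as
    // return addresses and register spills.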
#if __has_feature(safe_stack)
    status = allocate_vmar(true, &mapping, &vmar);
    if (status != ZX_OK) {
        vm_free_kstack(stack);
        return status;
    }
    stack->size = mapping->size();
    stack->unsafe_base = mapping->base();

    // Stash address of VMAR so we can later free it in |vm_free_kstack|.
    stack->unsafe_vmar = vmar.leak_ref();
#endif

    return ZX_OK;
}

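// Tears down the stack (and, with safe-stack, the unsafe stack) described by
// |stack|. Also used on the error path of vm_allocate_kstack(), so it must
// tolerate a partially-initialized kstack_t.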
zx_status_t vm_free_kstack(kstack_t* stack) {
    stack->base = 0;
    stack->size = 0;
    stack->top = 0;

    if (stack->vmar != nullptr) {
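        // Re-wrap the reference leaked in vm_allocate_kstack() without adding a
        // new one; dropping |vmar| at the end of this scope releases it.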
        fbl::RefPtr<VmAddressRegion> vmar =
            fbl::internal::MakeRefPtrNoAdopt(static_cast<VmAddressRegion*>(stack->vmar));
        zx_status_t status = vmar->Destroy();
        if (status != ZX_OK) {
            return status;
        }
        stack->vmar = nullptr;
    }

#if __has_feature(safe_stack)
    stack->unsafe_base = 0;

    if (stack->unsafe_vmar != nullptr) {
        fbl::RefPtr<VmAddressRegion> vmar =
            fbl::internal::MakeRefPtrNoAdopt(static_cast<VmAddressRegion*>(stack->unsafe_vmar));
        zx_status_t status = vmar->Destroy();
        if (status != ZX_OK) {
            return status;
        }
        stack->unsafe_vmar = nullptr;
    }
#endif

    return ZX_OK;
}