// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <pthread.h>
#include <stdio.h>

#include <zircon/syscalls.h>
#include <unittest/unittest.h>

#if defined(__x86_64__)

#include <cpuid.h>
#include <x86intrin.h>

static pthread_barrier_t g_barrier;

// Returns whether the CPU supports the {rd,wr}{fs,gs}base instructions.
static bool x86_feature_fsgsbase() {
    uint32_t eax, ebx, ecx, edx;
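    // CPUID leaf 7 is subleaf-indexed, so ECX must be zeroed; __cpuid_count()
    // does that, whereas plain __cpuid() leaves ECX undefined. bit_FSGSBASE
    // (bit 0 of EBX, defined in <cpuid.h>) reports fsgsbase support.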
    __cpuid_count(7, 0, eax, ebx, ecx, edx);
    return ebx & bit_FSGSBASE;
}

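// The "fsgsbase" target attribute allows the compiler to emit the fsgsbase
// instructions for the _read/_write intrinsics used below; the runtime
// x86_feature_fsgsbase() check guards against executing them on CPUs that
// lack the feature.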
__attribute__((target("fsgsbase")))
static void* gs_base_test_thread(void* thread_arg) {
    uintptr_t gs_base = (uintptr_t)thread_arg;
    uintptr_t fs_base = 0;
    if (x86_feature_fsgsbase()) {
        _writegsbase_u64(gs_base);
        // We don't want to modify fs_base because it is used by libc etc.,
        // but we might as well check that it is also preserved.
        fs_base = _readfsbase_u64();
    }

    // Wait until all the test threads reach this point.
    int rv = pthread_barrier_wait(&g_barrier);
    EXPECT_TRUE(rv == 0 || rv == PTHREAD_BARRIER_SERIAL_THREAD);

    if (x86_feature_fsgsbase()) {
        EXPECT_TRUE(_readgsbase_u64() == gs_base);
        EXPECT_TRUE(_readfsbase_u64() == fs_base);
    }

    return nullptr;
}

// This tests whether the gs_base register on x86 is preserved across
// context switches.
//
// We do this by launching multiple threads that set gs_base to different
// values. After all the threads have set gs_base, the threads wake up and
// check that gs_base was preserved.
bool TestContextSwitchOfGsBase() {
    BEGIN_TEST;

    // We run the rest of the test even if the fsgsbase instructions aren't
    // available, so that at least the test's threading logic gets
    // exercised.
    printf("fsgsbase available = %d\n", x86_feature_fsgsbase());

    // We launch more threads than there are CPUs, so at least one CPU
    // should have more than one of our threads scheduled on it, which
    // exercises saving and restoring gs_base between those threads.
    uint32_t thread_count = zx_system_get_num_cpus() * 2;
    ASSERT_GT(thread_count, 0);

    pthread_t tids[thread_count];
    ASSERT_EQ(pthread_barrier_init(&g_barrier, nullptr, thread_count), 0);
    for (uint32_t i = 0; i < thread_count; ++i) {
        // Give each thread a different test value for gs_base.
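        // (The exact multiplier is not significant; it just needs to give
        // each thread a distinct value.)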
        void* gs_base = (void*)(uintptr_t)(i * 0x10004);
        ASSERT_EQ(pthread_create(&tids[i], nullptr, gs_base_test_thread,
                                 gs_base), 0);
    }
    for (uint32_t i = 0; i < thread_count; ++i) {
        ASSERT_EQ(pthread_join(tids[i], nullptr), 0);
    }
    ASSERT_EQ(pthread_barrier_destroy(&g_barrier), 0);

    END_TEST;
}

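// Define set_<reg>()/get_<reg>() helpers that move a 16-bit selector value
// into and out of a segment register via inline assembly.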
#define DEFINE_REGISTER_ACCESSOR(REG)                           \
    static inline void set_##REG(uint16_t value) {              \
        __asm__ volatile("mov %0, %%" #REG : : "r"(value));     \
    }                                                           \
    static inline uint16_t get_##REG(void) {                    \
        uint16_t value;                                         \
        __asm__ volatile("mov %%" #REG ", %0" : "=r"(value));   \
        return value;                                           \
    }

DEFINE_REGISTER_ACCESSOR(ds)
DEFINE_REGISTER_ACCESSOR(es)
DEFINE_REGISTER_ACCESSOR(fs)
DEFINE_REGISTER_ACCESSOR(gs)

#undef DEFINE_REGISTER_ACCESSOR

// This test demonstrates that if the segment selector registers are set to
// 1, they will eventually be reset to 0 when an interrupt occurs. This is
// mostly a property of the x86 architecture rather than the kernel: the
// IRET instruction has the side effect of resetting these registers when
// returning from the kernel to userland (but not when returning to kernel
// code).
bool TestSegmentSelectorsZeroedOnInterrupt() {
    BEGIN_TEST;

    // This test is disabled because some versions of non-KVM QEMU don't
    // implement the IRET behavior described above.
    return true;

    // We skip setting %fs because that breaks libc's TLS.
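    // Note that selector value 1 has index 0, making it a null selector
    // (with RPL=1); in 64-bit mode, loading a null selector into a data
    // segment register does not fault.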
    set_ds(1);
    set_es(1);
    set_gs(1);

    // This loop could be interrupted by an interrupt that causes a context
    // switch, but on an unloaded machine it is more likely to be
    // interrupted by an interrupt whose handler returns without doing a
    // context switch.
    while (get_gs() == 1)
        __asm__ volatile("pause");

    EXPECT_EQ(get_ds(), 0);
    EXPECT_EQ(get_es(), 0);
    EXPECT_EQ(get_gs(), 0);

    END_TEST;
}

// Test that the kernel also resets the segment selector registers on a
// context switch, to avoid leaking their values and to match what happens
// on an interrupt.
bool TestSegmentSelectorsZeroedOnContextSwitch() {
    BEGIN_TEST;

    set_ds(1);
    set_es(1);
    set_gs(1);

    // Sleeping should cause a context switch away from this thread (to the
    // kernel's idle thread) and another context switch back.
    //
    // It is possible that this thread is interrupted by an interrupt, but
    // not very likely, because this thread does not execute for very long.
    EXPECT_EQ(zx_nanosleep(zx_deadline_after(ZX_MSEC(1))), ZX_OK);

    EXPECT_EQ(get_ds(), 0);
    EXPECT_EQ(get_es(), 0);
    EXPECT_EQ(get_gs(), 0);

    END_TEST;
}

BEGIN_TEST_CASE(register_state_tests)
RUN_TEST(TestContextSwitchOfGsBase)
RUN_TEST(TestSegmentSelectorsZeroedOnInterrupt)
RUN_TEST(TestSegmentSelectorsZeroedOnContextSwitch)
END_TEST_CASE(register_state_tests)

#endif  // defined(__x86_64__)

int main(int argc, char** argv) {
    bool success = unittest_run_all_tests(argc, argv);
    return success ? 0 : -1;
}