1 // Copyright 2016 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <assert.h>
6 #include <errno.h>
7 #include <inttypes.h>
8 #include <limits.h>
9 #include <new>
10 #include <pthread.h>
11 #include <stdint.h>
12 #include <stdio.h>
13 #include <threads.h>
14 #include <utility>
15
16 #include <fbl/alloc_checker.h>
17 #include <fbl/auto_lock.h>
18 #include <fbl/intrusive_double_list.h>
19 #include <fbl/intrusive_hash_table.h>
20 #include <fbl/intrusive_single_list.h>
21 #include <fbl/unique_ptr.h>
22 #include <hw/inout.h>
23 #include <pci/pio.h>
24 #include <zircon/assert.h>
25 #include <zircon/process.h>
26 #include <zircon/syscalls.h>
27 #include <zircon/thread_annotations.h>
28
29 #if !defined(__x86_64__) && !defined(__x86__)
30 #error "Unsupported architecture"
31 #endif
32
33 #include "acpi.h"
34
35 __WEAK zx_handle_t root_resource_handle;
36
37 #define _COMPONENT ACPI_OS_SERVICES
38 ACPI_MODULE_NAME("oszircon")
39
40 #define LOCAL_TRACE 0
41
42 #define TRACEF(str, x...) \
43 do { \
44 printf("%s:%d: " str, __FUNCTION__, __LINE__, ##x); \
45 } while (0)
46 #define LTRACEF(x...) \
47 do { \
48 if (LOCAL_TRACE) { \
49 TRACEF(x); \
50 } \
51 } while (0)
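// Example (illustrative): LTRACEF("mapped %zu bytes\n", len); prints nothing
// unless LOCAL_TRACE is non-zero, in which case TRACEF prefixes the message
// with the current function name and line number.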
52
53 /* Structures used for implementing AcpiOsExecute and
54 * AcpiOsWaitEventsComplete */
55 struct AcpiOsTaskCtx : public fbl::DoublyLinkedListable<fbl::unique_ptr<AcpiOsTaskCtx>> {
56 ACPI_OSD_EXEC_CALLBACK func;
57 void* ctx;
58 };
59
60 /* Thread function for implementing AcpiOsExecute */
61 static int AcpiOsExecuteTask(void* arg);
62 /* Tear down the OsExecuteTask thread */
63 static void ShutdownOsExecuteTask();
64
65 /* Data used for implementing AcpiOsExecute and
66 * AcpiOsWaitEventsComplete */
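// Summary of how the state below is used by the code that follows: AcpiOsExecute
// pushes a task under |lock| and signals |cond|; the worker thread pops tasks,
// clearing |idle| while running them and signaling |idle_cond| whenever the queue
// drains; AcpiOsWaitEventsComplete blocks on |idle_cond| until |idle| is set; and
// ShutdownOsExecuteTask sets |shutdown| so the worker exits once the queue is empty.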
67 static struct {
68 thrd_t thread;
69 cnd_t cond;
70 cnd_t idle_cond;
71 mtx_t lock = MTX_INIT;
72 bool shutdown = false;
73 bool idle = true;
74
75 fbl::DoublyLinkedList<fbl::unique_ptr<AcpiOsTaskCtx>> tasks;
76 } os_execute_state;
77
78 class AcpiOsMappingNode : public fbl::SinglyLinkedListable<fbl::unique_ptr<AcpiOsMappingNode>> {
79 public:
80 using HashTable =
81 fbl::HashTable<uintptr_t, fbl::unique_ptr<AcpiOsMappingNode>>;
82
83 // @param vaddr Virtual address returned to ACPI, used as key to the hashtable.
84 // @param vaddr_actual Actual virtual address of the mapping. May be different than
85 // vaddr if it is unaligned.
86 // @param length Length of the mapping
87 // @param vmo_handle Handle to the mapped VMO
88 AcpiOsMappingNode(uintptr_t vaddr, uintptr_t vaddr_actual,
89 size_t length, zx_handle_t vmo_handle);
90 ~AcpiOsMappingNode();
91
92 // Trait implementation for fbl::HashTable
uintptr_t GetKey() const { return vaddr_; }
static size_t GetHash(uintptr_t key) { return key; }
95
96 private:
97 uintptr_t vaddr_;
98 uintptr_t vaddr_actual_;
99 size_t length_;
100 zx_handle_t vmo_handle_;
101 };
102
103 fbl::Mutex os_mapping_lock;
104
105 AcpiOsMappingNode::HashTable os_mapping_tbl;
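// os_mapping_tbl is keyed by the exact pointer handed back to ACPICA (GetKey()
// returns vaddr_), so AcpiOsUnmapMemory() can erase an entry using the
// LogicalAddress it is given; the node's destructor then unmaps the region and
// closes the VMO handle.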
106
107 const size_t PCIE_MAX_DEVICES_PER_BUS = 32;
108 const size_t PCIE_MAX_FUNCTIONS_PER_DEVICE = 8;
109
AcpiOsMappingNode::AcpiOsMappingNode(uintptr_t vaddr, uintptr_t vaddr_actual,
111 size_t length, zx_handle_t vmo_handle)
112 : vaddr_(vaddr), vaddr_actual_(vaddr_actual),
113 length_(length), vmo_handle_(vmo_handle) {
114 }
115
AcpiOsMappingNode::~AcpiOsMappingNode() {
117 zx_vmar_unmap(zx_vmar_root_self(), (uintptr_t)vaddr_actual_, length_);
118 zx_handle_close(vmo_handle_);
119 }
120
static zx_status_t mmap_physical(zx_paddr_t phys, size_t size, uint32_t cache_policy,
122 zx_handle_t* out_vmo, zx_vaddr_t* out_vaddr) {
123 zx_handle_t vmo;
124 zx_vaddr_t vaddr;
125 zx_status_t st = zx_vmo_create_physical(root_resource_handle, phys, size, &vmo);
126 if (st != ZX_OK) {
127 return st;
128 }
129 st = zx_vmo_set_cache_policy(vmo, cache_policy);
130 if (st != ZX_OK) {
131 zx_handle_close(vmo);
132 return st;
133 }
134 st = zx_vmar_map(zx_vmar_root_self(),
135 ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_MAP_RANGE,
136 0, vmo, 0, size, &vaddr);
137 if (st != ZX_OK) {
138 zx_handle_close(vmo);
139 return st;
140 } else {
141 *out_vmo = vmo;
142 *out_vaddr = vaddr;
143 return ZX_OK;
144 }
145 }
146
static ACPI_STATUS thrd_status_to_acpi_status(int status) {
148 switch (status) {
149 case thrd_success:
150 return AE_OK;
151 case thrd_nomem:
152 return AE_NO_MEMORY;
153 case thrd_timedout:
154 return AE_TIME;
155 default:
156 return AE_ERROR;
157 }
158 }
159
static void timeout_to_timespec(UINT16 Timeout, struct timespec* timespec) {
zx_time_t now = zx_clock_get(ZX_CLOCK_UTC);
timespec->tv_sec = static_cast<time_t>(now / ZX_SEC(1));
timespec->tv_nsec = static_cast<long>(now % ZX_SEC(1));
timespec->tv_nsec += ZX_MSEC(Timeout);
if (timespec->tv_nsec >= static_cast<long>(ZX_SEC(1))) {
timespec->tv_sec += timespec->tv_nsec / ZX_SEC(1);
timespec->tv_nsec %= ZX_SEC(1);
}
}
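// Worked example of the carry handling above (illustrative): if the clock is
// 900 ms past a second boundary and Timeout is 1500 ms, tv_nsec reaches
// 2,400,000,000, so the fixup adds 2 to tv_sec and leaves tv_nsec at
// 400,000,000 (400 ms).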
170
171 // The |acpi_spinlock_lock| is used to guarantee that all spinlock acquisitions will
172 // be uncontested in certain circumstances. This allows us to ensure that
173 // the codepaths for entering an S-state will not need to wait for some other thread
174 // to finish processing. The scheme works with the following protocol:
175 //
176 // Normal operational threads: If attempting to acquire a lock, and the thread
177 // holds no spinlock yet, then acquire |acpi_spinlock_lock| in READ mode before
178 // acquiring the desired lock. For all other lock acquisitions behave normally.
179 // If a thread is releasing its last held lock, release the |acpi_spinlock_lock|.
180 //
181 // Non-contested thread: To enter non-contested mode, call
182 // |acpica_enable_noncontested_mode| while not holding any ACPI spinlock. This will
183 // acquire the |acpi_spinlock_lock| in WRITE mode. Call
184 // |acpica_disable_noncontested_mode| while not holding any ACPI spinlock to release
185 // the |acpi_spinlock_lock|.
186 //
187 // Non-contested mode needs to apply to both spin locks and mutexes to prevent deadlock.
188 static pthread_rwlock_t acpi_spinlock_lock = PTHREAD_RWLOCK_INITIALIZER;
189 static thread_local uint64_t acpi_spinlocks_held = 0;
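// Example of the bookkeeping (illustrative): a normal thread calling
// AcpiOsAcquireMutex() with no ACPI locks held first takes |acpi_spinlock_lock|
// in READ mode and bumps |acpi_spinlocks_held| to 1; nested acquisitions only
// increment the counter, and the rwlock is released again when the counter
// drops back to 0 in AcpiOsReleaseMutex().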
190
void acpica_enable_noncontested_mode() {
192 ZX_ASSERT(acpi_spinlocks_held == 0);
193 int ret = pthread_rwlock_wrlock(&acpi_spinlock_lock);
194 ZX_ASSERT(ret == 0);
195 acpi_spinlocks_held++;
196 }
197
void acpica_disable_noncontested_mode() {
199 ZX_ASSERT(acpi_spinlocks_held == 1);
200 int ret = pthread_rwlock_unlock(&acpi_spinlock_lock);
201 ZX_ASSERT(ret == 0);
202 acpi_spinlocks_held--;
203 }
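// Typical usage sketch (illustrative, not called from this file): bracket the
// ACPICA calls that must not block on other threads, e.g. when entering an
// S-state:
//
//   acpica_enable_noncontested_mode();
//   ... issue the ACPICA calls used to enter the S-state ...
//   acpica_disable_noncontested_mode();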
204
205 /**
206 * @brief Initialize the OSL subsystem.
207 *
208 * This function allows the OSL to initialize itself. It is called during
* initialization of the ACPICA subsystem.
210 *
211 * @return Initialization status
212 */
ACPI_STATUS AcpiOsInitialize() {
214 ACPI_STATUS status = thrd_status_to_acpi_status(
215 cnd_init(&os_execute_state.cond));
216 if (status != AE_OK) {
217 return status;
218 }
219 status = thrd_status_to_acpi_status(cnd_init(&os_execute_state.idle_cond));
220 if (status != AE_OK) {
221 cnd_destroy(&os_execute_state.cond);
222 return status;
223 }
224
225 status = thrd_status_to_acpi_status(thrd_create(&os_execute_state.thread, AcpiOsExecuteTask,
226 nullptr));
227 if (status != AE_OK) {
228 return status;
229 }
230
231 /* TODO(teisenbe): be less permissive */
232 zx_ioports_request(root_resource_handle, 0, 65536);
233 return AE_OK;
234 }
235
236 /**
237 * @brief Terminate the OSL subsystem.
238 *
239 * This function allows the OSL to cleanup and terminate. It is called during
240 * termination of the ACPICA subsystem.
241 *
242 * @return Termination status
243 */
ACPI_STATUS AcpiOsTerminate() {
245 ShutdownOsExecuteTask();
246 cnd_destroy(&os_execute_state.cond);
247 cnd_destroy(&os_execute_state.idle_cond);
248
249 return AE_OK;
250 }
251
252 /**
253 * @brief Obtain the Root ACPI table pointer (RSDP).
254 *
255 * @return The physical address of the RSDP
256 */
ACPI_PHYSICAL_ADDRESS AcpiOsGetRootPointer() {
258 zx_paddr_t acpi_rsdp, smbios;
259 zx_status_t zx_status = zx_pc_firmware_tables(root_resource_handle, &acpi_rsdp, &smbios);
260 if (zx_status == ZX_OK && acpi_rsdp != 0) {
261 return acpi_rsdp;
262 }
263
264 ACPI_PHYSICAL_ADDRESS TableAddress = 0;
265 ACPI_STATUS status = AcpiFindRootPointer(&TableAddress);
266 if (status != AE_OK) {
267 return 0;
268 }
269 return TableAddress;
270 }
271
272 /**
273 * @brief Allow the host OS to override a predefined ACPI object.
274 *
* @param PredefinedObject A pointer to a predefined object (name and initial
276 * value)
277 * @param NewValue Where a new value for the predefined object is returned.
278 * NULL if there is no override for this object.
279 *
280 * @return Exception code that indicates success or reason for failure.
281 */
ACPI_STATUS AcpiOsPredefinedOverride(
283 const ACPI_PREDEFINED_NAMES* PredefinedObject,
284 ACPI_STRING* NewValue) {
285 *NewValue = NULL;
286 return AE_OK;
287 }
288
289 /**
290 * @brief Allow the host OS to override a firmware ACPI table via a logical
291 * address.
292 *
293 * @param ExistingTable A pointer to the header of the existing ACPI table
* @param NewTable Where the pointer to the replacement table is returned. The
295 * OSL returns NULL if no replacement is provided.
296 *
297 * @return Exception code that indicates success or reason for failure.
298 */
ACPI_STATUS AcpiOsTableOverride(
300 ACPI_TABLE_HEADER* ExistingTable,
301 ACPI_TABLE_HEADER** NewTable) {
302 *NewTable = NULL;
303 return AE_OK;
304 }
305
306 /**
307 * @brief Allow the host OS to override a firmware ACPI table via a physical
308 * address.
309 *
310 * @param ExistingTable A pointer to the header of the existing ACPI table
* @param NewAddress Where the physical address of the replacement table is
312 * returned. The OSL returns NULL if no replacement is provided.
313 * @param NewLength Where the length of the replacement table is returned.
314 *
315 * @return Exception code that indicates success or reason for failure.
316 */
ACPI_STATUS AcpiOsPhysicalTableOverride(
318 ACPI_TABLE_HEADER* ExistingTable,
319 ACPI_PHYSICAL_ADDRESS* NewAddress,
320 UINT32* NewTableLength) {
321 *NewAddress = 0;
322 return AE_OK;
323 }
324
325 // If we decide to make use of a more Zircon specific cache mechanism,
326 // remove the ACPI_USE_LOCAL_CACHE define from the header and implement these
327 // functions.
328 #if 0
329 /**
330 * @brief Create a memory cache object.
331 *
* @param CacheName An ASCII identifier for the cache.
333 * @param ObjectSize The size of each object in the cache.
334 * @param MaxDepth Maximum number of objects in the cache.
335 * @param ReturnCache Where a pointer to the cache object is returned.
336 *
337 * @return AE_OK The cache was successfully created.
338 * @return AE_BAD_PARAMETER The ReturnCache pointer is NULL or ObjectSize < 16.
339 * @return AE_NO_MEMORY Insufficient dynamic memory to complete the operation.
340 */
341 ACPI_STATUS AcpiOsCreateCache(
342 char *CacheName,
343 UINT16 ObjectSize,
344 UINT16 MaxDepth,
345 ACPI_CACHE_T **ReturnCache) {
346 PANIC_UNIMPLEMENTED;
347 return AE_NO_MEMORY;
348 }
349
350 /**
351 * @brief Delete a memory cache object.
352 *
353 * @param Cache The cache object to be deleted.
354 *
355 * @return AE_OK The cache was successfully deleted.
356 * @return AE_BAD_PARAMETER The Cache pointer is NULL.
357 */
358 ACPI_STATUS AcpiOsDeleteCache(ACPI_CACHE_T *Cache) {
359 PANIC_UNIMPLEMENTED;
360 return AE_OK;
361 }
362
363 /**
364 * @brief Free all objects currently within a cache object.
365 *
366 * @param Cache The cache object to purge.
367 *
368 * @return AE_OK The cache was successfully purged.
369 * @return AE_BAD_PARAMETER The Cache pointer is NULL.
370 */
371 ACPI_STATUS AcpiOsPurgeCache(ACPI_CACHE_T *Cache) {
372 PANIC_UNIMPLEMENTED;
373 return AE_OK;
374 }
375
376
377 /**
378 * @brief Acquire an object from a cache.
379 *
380 * @param Cache The cache object from which to acquire an object.
381 *
382 * @return A pointer to a cache object. NULL if the object could not be
383 * acquired.
384 */
385 void *AcpiOsAcquireObject(ACPI_CACHE_T *Cache) {
386 PANIC_UNIMPLEMENTED;
387 return NULL;
388 }
389
390 /**
391 * @brief Release an object to a cache.
392 *
393 * @param Cache The cache object to which the object will be released.
394 * @param Object The object to be released.
395 *
396 * @return AE_OK The object was successfully released.
397 * @return AE_BAD_PARAMETER The Cache or Object pointer is NULL.
398 */
399 ACPI_STATUS AcpiOsReleaseObject(ACPI_CACHE_T *Cache, void *Object) {
400 PANIC_UNIMPLEMENTED;
401 return AE_OK;
402 }
403 #endif
404
405 /**
406 * @brief Map physical memory into the caller's address space.
407 *
408 * @param PhysicalAddress A full physical address of the memory to be mapped
409 * into the caller's address space
410 * @param Length The amount of memory to mapped starting at the given physical
411 * address
412 *
* @return Logical pointer to the mapped memory. A NULL pointer indicates failure.
414 */
void* AcpiOsMapMemory(
416 ACPI_PHYSICAL_ADDRESS PhysicalAddress,
417 ACPI_SIZE Length) {
418
419 fbl::AutoLock lock(&os_mapping_lock);
420
// Caution: PhysicalAddress might not be page-aligned, and Length might not
// be a page multiple.
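// For example (illustrative, assuming 4 KiB pages): PhysicalAddress 0x1234 with
// Length 0x10 rounds down to aligned_address 0x1000 and up to end 0x2000, so a
// full page is mapped and the returned pointer is offset by 0x234 into it.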
423
424 ACPI_PHYSICAL_ADDRESS aligned_address = PhysicalAddress & ~(PAGE_SIZE - 1);
425 ACPI_PHYSICAL_ADDRESS end = (PhysicalAddress + Length + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
426
427 uintptr_t vaddr;
428 size_t length = end - aligned_address;
429 zx_handle_t vmo;
430 zx_status_t status = mmap_physical(aligned_address, end - aligned_address,
431 ZX_CACHE_POLICY_CACHED, &vmo, &vaddr);
432 if (status != ZX_OK) {
433 return NULL;
434 }
435
436 void* out_addr = (void*)(vaddr + (PhysicalAddress - aligned_address));
437 fbl::unique_ptr<AcpiOsMappingNode> mn(
438 new AcpiOsMappingNode(reinterpret_cast<uintptr_t>(out_addr),
439 vaddr, length, vmo));
440 os_mapping_tbl.insert(std::move(mn));
441
442 return out_addr;
443 }
444
445 /**
446 * @brief Remove a physical to logical memory mapping.
447 *
448 * @param LogicalAddress The logical address that was returned from a previous
449 * call to AcpiOsMapMemory.
450 * @param Length The amount of memory that was mapped. This value must be
451 * identical to the value used in the call to AcpiOsMapMemory.
452 */
void AcpiOsUnmapMemory(void* LogicalAddress, ACPI_SIZE Length) {
454 fbl::AutoLock lock(&os_mapping_lock);
455 fbl::unique_ptr<AcpiOsMappingNode> mn = os_mapping_tbl.erase((uintptr_t)LogicalAddress);
456 if (mn == NULL) {
457 printf("AcpiOsUnmapMemory nonexisting mapping %p\n", LogicalAddress);
458 }
459 }
460
461 /**
462 * @brief Allocate memory from the dynamic memory pool.
463 *
464 * @param Size Amount of memory to allocate.
465 *
466 * @return A pointer to the allocated memory. A NULL pointer is returned on
467 * error.
468 */
void* AcpiOsAllocate(ACPI_SIZE Size) {
470 return malloc(Size);
471 }
472
473 /**
474 * @brief Free previously allocated memory.
475 *
476 * @param Memory A pointer to the memory to be freed.
477 */
void AcpiOsFree(void* Memory) {
479 free(Memory);
480 }
481
482 /**
483 * @brief Obtain the ID of the currently executing thread.
484 *
485 * @return A unique non-zero value that represents the ID of the currently
486 * executing thread. The value -1 is reserved and must not be returned
487 * by this interface.
488 */
489 static_assert(sizeof(ACPI_THREAD_ID) >= sizeof(zx_handle_t), "tid size");
ACPI_THREAD_ID AcpiOsGetThreadId() {
491 return (uintptr_t)thrd_current();
492 }
493
static int AcpiOsExecuteTask(void* arg) {
495 while (1) {
496 fbl::unique_ptr<AcpiOsTaskCtx> task;
497
498 mtx_lock(&os_execute_state.lock);
499 while ((task = os_execute_state.tasks.pop_front()) == nullptr) {
500 os_execute_state.idle = true;
501 // If anything is waiting for the queue to empty, notify it.
502 cnd_signal(&os_execute_state.idle_cond);
503
// If we're waiting to shut down, do it now that there's no more work
505 if (os_execute_state.shutdown) {
506 mtx_unlock(&os_execute_state.lock);
507 return 0;
508 }
509
510 cnd_wait(&os_execute_state.cond, &os_execute_state.lock);
511 }
512 os_execute_state.idle = false;
513 mtx_unlock(&os_execute_state.lock);
514
515 task->func(task->ctx);
516 }
517
518 return 0;
519 }
520
static void ShutdownOsExecuteTask() {
522 mtx_lock(&os_execute_state.lock);
523 os_execute_state.shutdown = true;
524 mtx_unlock(&os_execute_state.lock);
525 cnd_broadcast(&os_execute_state.cond);
526 thrd_join(os_execute_state.thread, nullptr);
527 }
528
529 /**
530 * @brief Schedule a procedure for deferred execution.
531 *
532 * @param Type Type of the callback function.
533 * @param Function Address of the procedure to execute.
534 * @param Context A context value to be passed to the called procedure.
535 *
536 * @return AE_OK The procedure was successfully queued for execution.
537 * @return AE_BAD_PARAMETER The Type is invalid or the Function pointer
538 * is NULL.
539 */
ACPI_STATUS AcpiOsExecute(
541 ACPI_EXECUTE_TYPE Type,
542 ACPI_OSD_EXEC_CALLBACK Function,
543 void* Context) {
544
545 if (Function == NULL) {
546 return AE_BAD_PARAMETER;
547 }
548
549 switch (Type) {
550 case OSL_GLOBAL_LOCK_HANDLER:
551 case OSL_NOTIFY_HANDLER:
552 case OSL_GPE_HANDLER:
553 case OSL_DEBUGGER_MAIN_THREAD:
554 case OSL_DEBUGGER_EXEC_THREAD:
555 case OSL_EC_POLL_HANDLER:
556 case OSL_EC_BURST_HANDLER:
557 break;
558 default:
559 return AE_BAD_PARAMETER;
560 }
561
562 fbl::AllocChecker ac;
563 fbl::unique_ptr<AcpiOsTaskCtx> task(new (&ac) AcpiOsTaskCtx);
564 if (!ac.check()) {
565 return AE_NO_MEMORY;
566 }
567 task->func = Function;
568 task->ctx = Context;
569
570 mtx_lock(&os_execute_state.lock);
571 os_execute_state.tasks.push_back(std::move(task));
572 mtx_unlock(&os_execute_state.lock);
573 cnd_signal(&os_execute_state.cond);
574
575 return AE_OK;
576 }
577
578 /**
579 * @brief Wait for completion of asynchronous events.
580 *
581 * This function blocks until all asynchronous events initiated by
582 * AcpiOsExecute have completed.
583 */
void AcpiOsWaitEventsComplete(void) {
585 mtx_lock(&os_execute_state.lock);
586 while (!os_execute_state.idle) {
587 cnd_wait(&os_execute_state.idle_cond, &os_execute_state.lock);
588 }
589 mtx_unlock(&os_execute_state.lock);
590 }
591
592 /**
* @brief Suspend the running task (coarse granularity).
594 *
595 * @param Milliseconds The amount of time to sleep, in milliseconds.
596 */
void AcpiOsSleep(UINT64 Milliseconds) {
598 if (Milliseconds > UINT32_MAX) {
599 // If we're asked to sleep for a long time (>1.5 months), shorten it
600 Milliseconds = UINT32_MAX;
601 }
602 zx_nanosleep(zx_deadline_after(ZX_MSEC(Milliseconds)));
603 }
604
605 /**
606 * @brief Wait for a short amount of time (fine granularity).
607 *
608 * Execution of the running thread is not suspended for this time.
609 *
610 * @param Microseconds The amount of time to delay, in microseconds.
611 */
void AcpiOsStall(UINT32 Microseconds) {
613 zx_nanosleep(zx_deadline_after(ZX_USEC(Microseconds)));
614 }
615
616 /**
617 * @brief Create a semaphore.
618 *
619 * @param MaxUnits The maximum number of units this semaphore will be required
620 * to accept
621 * @param InitialUnits The initial number of units to be assigned to the
622 * semaphore.
* @param OutHandle A pointer to a location where a handle to the semaphore is
624 * to be returned.
625 *
626 * @return AE_OK The semaphore was successfully created.
627 * @return AE_BAD_PARAMETER The InitialUnits is invalid or the OutHandle
628 * pointer is NULL.
629 * @return AE_NO_MEMORY Insufficient memory to create the semaphore.
630 */
ACPI_STATUS AcpiOsCreateSemaphore(
632 UINT32 MaxUnits,
633 UINT32 InitialUnits,
634 ACPI_SEMAPHORE* OutHandle) {
635 sem_t* sem = (sem_t*)malloc(sizeof(sem_t));
636 if (!sem) {
637 return AE_NO_MEMORY;
638 }
639 if (sem_init(sem, 0, InitialUnits) < 0) {
640 free(sem);
641 return AE_ERROR;
642 }
643 *OutHandle = sem;
644 return AE_OK;
645 }
646
647 /**
648 * @brief Delete a semaphore.
649 *
* @param Handle A handle to a semaphore object that was returned by a
651 * previous call to AcpiOsCreateSemaphore.
652 *
653 * @return AE_OK The semaphore was successfully deleted.
654 */
ACPI_STATUS AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle) {
656 free(Handle);
657 return AE_OK;
658 }
659
660 /**
661 * @brief Wait for units from a semaphore.
662 *
* @param Handle A handle to a semaphore object that was returned by a
664 * previous call to AcpiOsCreateSemaphore.
665 * @param Units The number of units the caller is requesting.
666 * @param Timeout How long the caller is willing to wait for the requested
667 * units, in milliseconds. A value of -1 indicates that the caller
668 * is willing to wait forever. Timeout may be 0.
669 *
670 * @return AE_OK The requested units were successfully received.
671 * @return AE_BAD_PARAMETER The Handle is invalid.
672 * @return AE_TIME The units could not be acquired within the specified time.
673 */
ACPI_STATUS AcpiOsWaitSemaphore(
675 ACPI_SEMAPHORE Handle,
676 UINT32 Units,
677 UINT16 Timeout) {
678
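// ACPICA passes ACPI_WAIT_FOREVER (0xFFFF) to request an untimed wait, which
// is what the UINT16_MAX comparison below matches.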
679 if (Timeout == UINT16_MAX) {
680 if (sem_wait(Handle) < 0) {
681 ZX_ASSERT_MSG(false, "sem_wait failed %d", errno);
682 }
683 return AE_OK;
684 }
685
686 struct timespec then;
687 timeout_to_timespec(Timeout, &then);
688 if (sem_timedwait(Handle, &then) < 0) {
689 ZX_ASSERT_MSG(errno == ETIMEDOUT, "sem_timedwait failed unexpectedly %d", errno);
690 return AE_TIME;
691 }
692 return AE_OK;
693 }
694
695 /**
696 * @brief Send units to a semaphore.
697 *
* @param Handle A handle to a semaphore object that was returned by a
699 * previous call to AcpiOsCreateSemaphore.
700 * @param Units The number of units to send to the semaphore.
701 *
702 * @return AE_OK The semaphore was successfully signaled.
703 * @return AE_BAD_PARAMETER The Handle is invalid.
704 */
ACPI_STATUS AcpiOsSignalSemaphore(
706 ACPI_SEMAPHORE Handle,
707 UINT32 Units) {
708 // TODO: Implement support for Units > 1
709 assert(Units == 1);
710
711 sem_post(Handle);
712 return AE_OK;
713 }
714
715 /**
716 * @brief Create a mutex.
717 *
* @param OutHandle A pointer to a location where a handle to the mutex is
719 * to be returned.
720 *
721 * @return AE_OK The mutex was successfully created.
722 * @return AE_BAD_PARAMETER The OutHandle pointer is NULL.
723 * @return AE_NO_MEMORY Insufficient memory to create the mutex.
724 */
ACPI_STATUS AcpiOsCreateMutex(ACPI_MUTEX* OutHandle) {
726 mtx_t* lock = (mtx_t*)malloc(sizeof(mtx_t));
727 if (!lock) {
728 return AE_NO_MEMORY;
729 }
730
731 ACPI_STATUS status = thrd_status_to_acpi_status(
732 mtx_init(lock, mtx_plain));
733 if (status != AE_OK) {
734 return status;
735 }
736 *OutHandle = lock;
737 return AE_OK;
738 }
739
740 /**
741 * @brief Delete a mutex.
742 *
* @param Handle A handle to a mutex object that was returned by a
744 * previous call to AcpiOsCreateMutex.
745 */
void AcpiOsDeleteMutex(ACPI_MUTEX Handle) {
747 mtx_destroy(Handle);
748 free(Handle);
749 }
750
751 /**
752 * @brief Acquire a mutex.
753 *
* @param Handle A handle to a mutex object that was returned by a
755 * previous call to AcpiOsCreateMutex.
756 * @param Timeout How long the caller is willing to wait for the requested
757 * units, in milliseconds. A value of -1 indicates that the caller
758 * is willing to wait forever. Timeout may be 0.
759 *
760 * @return AE_OK The requested units were successfully received.
761 * @return AE_BAD_PARAMETER The Handle is invalid.
762 * @return AE_TIME The mutex could not be acquired within the specified time.
763 */
ACPI_STATUS AcpiOsAcquireMutex(
765 ACPI_MUTEX Handle,
766 UINT16 Timeout) TA_TRY_ACQ(AE_OK, Handle) TA_NO_THREAD_SAFETY_ANALYSIS {
767 if (Timeout == UINT16_MAX) {
768 if (acpi_spinlocks_held == 0) {
769 int ret = pthread_rwlock_rdlock(&acpi_spinlock_lock);
770 ZX_ASSERT(ret == 0);
771 }
772
773 int res = mtx_lock(Handle);
774 ZX_ASSERT(res == thrd_success);
775 } else {
776 struct timespec then;
777 timeout_to_timespec(Timeout, &then);
778
779 if (acpi_spinlocks_held == 0) {
780 int ret = pthread_rwlock_timedrdlock(&acpi_spinlock_lock, &then);
781 if (ret == ETIMEDOUT)
782 return AE_TIME;
783 ZX_ASSERT(ret == 0);
784 }
785
786 int res = mtx_timedlock(Handle, &then);
787 if (res == thrd_timedout) {
788 if (acpi_spinlocks_held == 0) {
789 int res = pthread_rwlock_unlock(&acpi_spinlock_lock);
790 ZX_ASSERT(res == 0);
791 }
792 return AE_TIME;
793 }
794 ZX_ASSERT(res == thrd_success);
795 }
796
797 acpi_spinlocks_held++;
798 return AE_OK;
799 }
800
801 /**
802 * @brief Release a mutex.
803 *
* @param Handle A handle to a mutex object that was returned by a
805 * previous call to AcpiOsCreateMutex.
806 */
void AcpiOsReleaseMutex(ACPI_MUTEX Handle) TA_REL(Handle) {
808 mtx_unlock(Handle);
809
810 acpi_spinlocks_held--;
811 if (acpi_spinlocks_held == 0) {
812 int ret = pthread_rwlock_unlock(&acpi_spinlock_lock);
813 ZX_ASSERT(ret == 0);
814 }
815 }
816
817 /**
818 * @brief Create a spin lock.
819 *
* @param OutHandle A pointer to a location where a handle to the lock is
821 * to be returned.
822 *
823 * @return AE_OK The lock was successfully created.
824 * @return AE_BAD_PARAMETER The OutHandle pointer is NULL.
825 * @return AE_NO_MEMORY Insufficient memory to create the lock.
826 */
ACPI_STATUS AcpiOsCreateLock(ACPI_SPINLOCK* OutHandle) {
// Since we don't have a notion of interrupt context in usermode, just make
829 // these mutexes.
830 return AcpiOsCreateMutex(OutHandle);
831 }
832
833 /**
834 * @brief Delete a spin lock.
835 *
* @param Handle A handle to a lock object that was returned by a
837 * previous call to AcpiOsCreateLock.
838 */
void AcpiOsDeleteLock(ACPI_SPINLOCK Handle) {
840 AcpiOsDeleteMutex(Handle);
841 }
842
843 /**
844 * @brief Acquire a spin lock.
845 *
* @param Handle A handle to a lock object that was returned by a
847 * previous call to AcpiOsCreateLock.
848 *
849 * @return Platform-dependent CPU flags. To be used when the lock is released.
850 */
ACPI_CPU_FLAGS AcpiOsAcquireLock(ACPI_SPINLOCK Handle) TA_ACQ(Handle) TA_NO_THREAD_SAFETY_ANALYSIS {
852 int ret = AcpiOsAcquireMutex(Handle, UINT16_MAX);
853 // The thread safety analysis doesn't seem to handle the noreturn inside of the assert
854 ZX_ASSERT(ret == AE_OK);
855 return 0;
856 }
857
858 /**
859 * @brief Release a spin lock.
860 *
* @param Handle A handle to a lock object that was returned by a
862 * previous call to AcpiOsCreateLock.
863 * @param Flags CPU Flags that were returned from AcpiOsAcquireLock.
864 */
void AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags) TA_REL(Handle) {
866 AcpiOsReleaseMutex(Handle);
867 }
868
869 // Wrapper structs for interfacing between our interrupt handler convention and
870 // ACPICA's
871 struct AcpiIrqThread {
872 thrd_t thread;
873 ACPI_OSD_HANDLER handler;
874 zx_handle_t irq_handle;
875 void* context;
876 };
static int acpi_irq_thread(void* arg) {
878 auto real_arg = static_cast<AcpiIrqThread*>(arg);
879 while (1) {
880 zx_status_t status = zx_interrupt_wait(real_arg->irq_handle, NULL);
881 if (status != ZX_OK) {
882 break;
883 }
884 // TODO: Should we do something with the return value from the handler?
885 real_arg->handler(real_arg->context);
886 }
887 return 0;
888 }
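// The loop above exits when zx_interrupt_wait() returns an error; that is how
// AcpiOsRemoveInterruptHandler() (below) stops an interrupt thread: it calls
// zx_interrupt_destroy() and then joins the thread.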
889
890 static fbl::unique_ptr<AcpiIrqThread> sci_irq;
891
892 /**
893 * @brief Install a handler for a hardware interrupt.
894 *
895 * @param InterruptLevel Interrupt level that the handler will service.
896 * @param Handler Address of the handler.
897 * @param Context A context value that is passed to the handler when the
898 * interrupt is dispatched.
899 *
900 * @return AE_OK The handler was successfully installed.
901 * @return AE_BAD_PARAMETER The InterruptNumber is invalid or the Handler
902 * pointer is NULL.
903 * @return AE_ALREADY_EXISTS A handler for this interrupt level is already
904 * installed.
905 */
ACPI_STATUS AcpiOsInstallInterruptHandler(
907 UINT32 InterruptLevel,
908 ACPI_OSD_HANDLER Handler,
909 void* Context) {
// Note that InterruptLevel here is an ISA IRQ (or a global IRQ if the legacy
// PIC doesn't exist), not a system exception.
912
913 // TODO: Clean this up to be less x86 centric.
914
915 if (InterruptLevel == 0) {
916 /* Some buggy firmware fails to populate the SCI_INT field of the FADT
917 * properly. 0 is a known bad value, since the legacy PIT uses it and
918 * cannot be remapped. Just lie and say we installed a handler; this
919 * system will just never receive an SCI. If we return an error here,
920 * ACPI init will fail completely, and the system will be unusable. */
921 return AE_OK;
922 }
923
924 assert(InterruptLevel == 0x9); // SCI
925
926 fbl::AllocChecker ac;
927 fbl::unique_ptr<AcpiIrqThread> arg(new (&ac) AcpiIrqThread());
928 if (!ac.check()) {
929 return AE_NO_MEMORY;
930 }
931
932 zx_handle_t handle;
933 zx_status_t status = zx_interrupt_create(root_resource_handle, InterruptLevel,
934 ZX_INTERRUPT_REMAP_IRQ, &handle);
935 if (status != ZX_OK) {
936 return AE_ERROR;
937 }
938 arg->handler = Handler;
939 arg->context = Context;
940 arg->irq_handle = handle;
941
942 int ret = thrd_create(&arg->thread, acpi_irq_thread, arg.get());
943 if (ret != 0) {
944 return AE_ERROR;
945 }
946
947 sci_irq = std::move(arg);
948 return AE_OK;
949 }
950
951 /**
952 * @brief Remove an interrupt handler.
953 *
954 * @param InterruptNumber Interrupt number that the handler is currently
955 * servicing.
956 * @param Handler Address of the handler that was previously installed.
957 *
958 * @return AE_OK The handler was successfully removed.
959 * @return AE_BAD_PARAMETER The InterruptNumber is invalid, the Handler
* pointer is NULL, or the Handler address is not the same as the one
961 * currently installed.
962 * @return AE_NOT_EXIST There is no handler installed for this interrupt level.
963 */
ACPI_STATUS AcpiOsRemoveInterruptHandler(
965 UINT32 InterruptNumber,
966 ACPI_OSD_HANDLER Handler) {
967 assert(InterruptNumber == 0x9); // SCI
968 assert(sci_irq);
969 zx_interrupt_destroy(sci_irq->irq_handle);
970 thrd_join(sci_irq->thread, nullptr);
971 sci_irq.reset();
972 return AE_OK;
973 }
974
975 /**
976 * @brief Read a value from a memory location.
977 *
978 * @param Address Memory address to be read.
979 * @param Value A pointer to a location where the data is to be returned.
980 * @param Width The memory width in bits, either 8, 16, 32, or 64.
981 *
982 * @return Exception code that indicates success or reason for failure.
983 */
ACPI_STATUS AcpiOsReadMemory(
985 ACPI_PHYSICAL_ADDRESS Address,
986 UINT64* Value,
987 UINT32 Width) {
988 assert(false);
989 return AE_OK;
990 }
991
992 /**
993 * @brief Write a value to a memory location.
994 *
995 * @param Address Memory address where data is to be written.
996 * @param Value Data to be written to the memory location.
997 * @param Width The memory width in bits, either 8, 16, 32, or 64.
998 *
999 * @return Exception code that indicates success or reason for failure.
1000 */
ACPI_STATUS AcpiOsWriteMemory(
1002 ACPI_PHYSICAL_ADDRESS Address,
1003 UINT64 Value,
1004 UINT32 Width) {
1005 assert(false);
1006 return AE_OK;
1007 }
1008
1009 /**
1010 * @brief Read a value from an input port.
1011 *
1012 * @param Address Hardware I/O port address to be read.
1013 * @param Value A pointer to a location where the data is to be returned.
1014 * @param Width The port width in bits, either 8, 16, or 32.
1015 *
1016 * @return Exception code that indicates success or reason for failure.
1017 */
ACPI_STATUS AcpiOsReadPort(
1019 ACPI_IO_ADDRESS Address,
1020 UINT32* Value,
1021 UINT32 Width) {
1022 if (Address > 0xffff) {
1023 return AE_BAD_PARAMETER;
1024 }
1025
1026 switch (Width) {
1027 case 8:
1028 *Value = inp((uint16_t)Address);
1029 break;
1030 case 16:
1031 *Value = inpw((uint16_t)Address);
1032 break;
1033 case 32:
1034 *Value = inpd((uint16_t)Address);
1035 break;
1036 default:
1037 return AE_BAD_PARAMETER;
1038 }
1039 return AE_OK;
1040 }
1041
1042 /**
1043 * @brief Write a value to an output port.
1044 *
1045 * @param Address Hardware I/O port address where data is to be written.
1046 * @param Value The value to be written.
1047 * @param Width The port width in bits, either 8, 16, or 32.
1048 *
1049 * @return Exception code that indicates success or reason for failure.
1050 */
ACPI_STATUS AcpiOsWritePort(
1052 ACPI_IO_ADDRESS Address,
1053 UINT32 Value,
1054 UINT32 Width) {
1055 if (Address > 0xffff) {
1056 return AE_BAD_PARAMETER;
1057 }
1058
1059 switch (Width) {
1060 case 8:
1061 outp((uint16_t)Address, (uint8_t)Value);
1062 break;
1063 case 16:
1064 outpw((uint16_t)Address, (uint16_t)Value);
1065 break;
1066 case 32:
1067 outpd((uint16_t)Address, (uint32_t)Value);
1068 break;
1069 default:
1070 return AE_BAD_PARAMETER;
1071 }
1072 return AE_OK;
1073 }
1074
1075 /**
1076 * @brief Read/Write a value from a PCI configuration register.
1077 *
1078 * @param PciId The full PCI configuration space address, consisting of a
1079 * segment number, bus number, device number, and function number.
1080 * @param Register The PCI register address to be read from.
1081 * @param Value A pointer to a location where the data is to be returned.
1082 * @param Width The register width in bits, either 8, 16, 32, or 64.
1083 * @param Write Write or Read.
1084 *
1085 * @return Exception code that indicates success or reason for failure.
1086 */
static ACPI_STATUS AcpiOsReadWritePciConfiguration(
1088 ACPI_PCI_ID* PciId,
1089 UINT32 Register,
1090 UINT64* Value,
1091 UINT32 Width,
1092 bool Write) {
1093
1094 if (LOCAL_TRACE) {
1095 printf("ACPIOS: %s PCI Config %x:%x:%x:%x register %#x width %u\n",
1096 Write ? "write" : "read", PciId->Segment, PciId->Bus, PciId->Device, PciId->Function, Register, Width);
1097 }
1098
1099 // Only segment 0 is supported for now
1100 if (PciId->Segment != 0) {
1101 printf("ACPIOS: read/write config, segment != 0 not supported.\n");
1102 return AE_ERROR;
1103 }
1104
1105 // Check bounds of device and function offsets
1106 if (PciId->Device >= PCIE_MAX_DEVICES_PER_BUS || PciId->Function >= PCIE_MAX_FUNCTIONS_PER_DEVICE) {
1107 printf("ACPIOS: device out of reasonable bounds.\n");
1108 return AE_ERROR;
1109 }
1110
1111 // PCI config only supports up to 32 bit values
1112 if (Write && (*Value > UINT_MAX)) {
1113 printf("ACPIOS: read/write config, Value passed does not fit confg registers.\n");
1114 }
1115
1116 // Clear higher bits before a read
1117 if (!Write) {
1118 *Value = 0;
1119 }
1120
1121 #if __x86_64__
1122 uint8_t bus = static_cast<uint8_t>(PciId->Bus);
1123 uint8_t dev = static_cast<uint8_t>(PciId->Device);
1124 uint8_t func = static_cast<uint8_t>(PciId->Function);
1125 uint8_t offset = static_cast<uint8_t>(Register);
1126 zx_status_t st;
1127 #ifdef ENABLE_USER_PCI
1128 pci_bdf_t addr = {bus, dev, func};
1129 switch (Width) {
1130 case 8u:
1131 (Write) ? st = pci_pio_write8(addr, offset, static_cast<uint8_t>(*Value))
1132 : st = pci_pio_read8(addr, offset, reinterpret_cast<uint8_t*>(Value));
1133 break;
1134 case 16u:
1135 (Write) ? st = pci_pio_write16(addr, offset, static_cast<uint16_t>(*Value))
1136 : st = pci_pio_read16(addr, offset, reinterpret_cast<uint16_t*>(Value));
1137 break;
// Assume 32-bit by default, since 64-bit reads on I/O ports are not supported by the spec.
1139 default:
1140 (Write) ? st = pci_pio_write32(addr, offset, static_cast<uint32_t>(*Value))
1141 : st = pci_pio_read32(addr, offset, reinterpret_cast<uint32_t*>(Value));
1142 }
1143 #else
1144 st = zx_pci_cfg_pio_rw(root_resource_handle, bus, dev, func, offset,
1145 reinterpret_cast<uint32_t*>(Value), static_cast<uint8_t>(Width), Write);
1146
1147 #endif // ENABLE_USER_PCI
1148 #ifdef ACPI_DEBUG_OUTPUT
1149 if (st != ZX_OK) {
1150 printf("ACPIOS: pci rw error: %d\n", st);
1151 }
1152 #endif // ACPI_DEBUG_OUTPUT
1153 return (st == ZX_OK) ? AE_OK : AE_ERROR;
1154 #endif // __x86_64__
1155
1156 return AE_NOT_IMPLEMENTED;
1157 }
1158 /**
1159 * @brief Read a value from a PCI configuration register.
1160 *
1161 * @param PciId The full PCI configuration space address, consisting of a
1162 * segment number, bus number, device number, and function number.
1163 * @param Register The PCI register address to be read from.
1164 * @param Value A pointer to a location where the data is to be returned.
1165 * @param Width The register width in bits, either 8, 16, 32, or 64.
1166 *
1167 * @return Exception code that indicates success or reason for failure.
1168 */
ACPI_STATUS AcpiOsReadPciConfiguration(
1170 ACPI_PCI_ID* PciId,
1171 UINT32 Register,
1172 UINT64* Value,
1173 UINT32 Width) {
1174
1175 return AcpiOsReadWritePciConfiguration(PciId, Register, Value, Width, false);
1176 }
1177
1178 /**
1179 * @brief Write a value to a PCI configuration register.
1180 *
1181 * @param PciId The full PCI configuration space address, consisting of a
1182 * segment number, bus number, device number, and function number.
1183 * @param Register The PCI register address to be written to.
1184 * @param Value Data to be written.
1185 * @param Width The register width in bits, either 8, 16, or 32.
1186 *
1187 * @return Exception code that indicates success or reason for failure.
1188 */
ACPI_STATUS AcpiOsWritePciConfiguration(
1190 ACPI_PCI_ID* PciId,
1191 UINT32 Register,
1192 UINT64 Value,
1193 UINT32 Width) {
1194
1195 return AcpiOsReadWritePciConfiguration(PciId, Register, &Value, Width, true);
1196 }
1197
1198 /**
1199 * @brief Formatted stream output.
1200 *
1201 * @param Format A standard print format string.
1202 * @param ...
1203 */
void ACPI_INTERNAL_VAR_XFACE AcpiOsPrintf(const char* Format, ...) {
1205 va_list argp;
1206 va_start(argp, Format);
1207 AcpiOsVprintf(Format, argp);
1208 va_end(argp);
1209 }
1210
1211 /**
1212 * @brief Formatted stream output.
1213 *
1214 * @param Format A standard print format string.
1215 * @param Args A variable parameter list
1216 */
void AcpiOsVprintf(const char* Format, va_list Args) {
1218 // Only implement if ACPI_DEBUG_OUTPUT is defined, otherwise this causes
1219 // excess boot spew.
1220 #ifdef ACPI_DEBUG_OUTPUT
1221 vprintf(Format, Args);
1222 #endif
1223 }
1224
1225 /**
1226 * @brief Get current value of the system timer
1227 *
1228 * @return The current value of the system timer in 100-ns units.
1229 */
UINT64 AcpiOsGetTimer() {
1231 assert(false);
1232 return 0;
1233 }
1234
1235 /**
1236 * @brief Break to the debugger or display a breakpoint message.
1237 *
1238 * @param Function Signal to be sent to the host operating system. Either
1239 * ACPI_SIGNAL_FATAL or ACPI_SIGNAL_BREAKPOINT
1240 * @param Info Data associated with the signal; type depends on signal type.
1241 *
1242 * @return Exception code that indicates success or reason for failure.
1243 */
ACPI_STATUS AcpiOsSignal(
1245 UINT32 Function,
1246 void* Info) {
1247 assert(false);
1248 return AE_OK;
1249 }
1250
1251 /* @brief Acquire the ACPI global lock
1252 *
1253 * Implementation for ACPI_ACQUIRE_GLOBAL_LOCK
1254 *
1255 * @param FacsPtr pointer to the FACS ACPI structure
1256 *
1257 * @return True if the lock was successfully acquired
1258 */
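// The GlobalLock field lives in the FACS and is shared with firmware. Per the
// ACPI spec, bit 0 is the "pending" flag and bit 1 is the "owned" flag (which
// is what the ACPI_GLOCK_PENDING/ACPI_GLOCK_OWNED masks are assumed to encode
// here). The compare-exchange loop below always sets the owned bit, and leaves
// the pending bit set when the lock is already owned so that the current owner
// signals when it releases the lock.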
bool _acpica_acquire_global_lock(void* FacsPtr) {
1260 ACPI_TABLE_FACS* table = (ACPI_TABLE_FACS*)FacsPtr;
1261 uint32_t old_val, new_val, test_val;
1262 do {
1263 old_val = test_val = table->GlobalLock;
1264 new_val = old_val & ~ACPI_GLOCK_PENDING;
1265 // If the lock is owned, we'll mark it pending
1266 if (new_val & ACPI_GLOCK_OWNED) {
1267 new_val |= ACPI_GLOCK_PENDING;
1268 }
1269 new_val |= ACPI_GLOCK_OWNED;
1270 __atomic_compare_exchange_n(&table->GlobalLock, &old_val, new_val, false,
1271 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
1272 } while (old_val != test_val);
1273
1274 /* If we're here, we either acquired the lock or marked it pending */
1275 return !(new_val & ACPI_GLOCK_PENDING);
1276 }
1277
1278 /* @brief Release the ACPI global lock
1279 *
1280 * Implementation for ACPI_RELEASE_GLOBAL_LOCK
1281 *
1282 * @param FacsPtr pointer to the FACS ACPI structure
1283 *
1284 * @return True if there is someone waiting to acquire the lock
1285 */
bool _acpica_release_global_lock(void* FacsPtr) {
1287 ACPI_TABLE_FACS* table = (ACPI_TABLE_FACS*)FacsPtr;
1288 uint32_t old_val, new_val, test_val;
1289 do {
1290 old_val = test_val = table->GlobalLock;
1291 new_val = old_val & ~(ACPI_GLOCK_PENDING | ACPI_GLOCK_OWNED);
1292 __atomic_compare_exchange_n(&table->GlobalLock, &old_val, new_val, false,
1293 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
1294 } while (old_val != test_val);
1295
1296 return !!(old_val & ACPI_GLOCK_PENDING);
1297 }
1298