/*
 * Copyright (c) 2021 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <lk/trace.h>
#include <lk/debug.h>
#include <stdint.h>
#include <arch/ops.h>
#include <arch/m68k.h>
#include <kernel/spinlock.h>

#define LOCAL_TRACE 0

void arch_early_init(void) {
    LTRACE;

    arch_disable_ints();

#if M68K_CPU >= 68010
    // set the exception vector base register (VBR) to point at our vector table
    extern uint32_t exc_vectors[256];
    asm volatile("movec %0, %%vbr" :: "r"(exc_vectors));
#endif
}

void arch_init(void) {
    LTRACE;
}

void arch_idle(void) {
    // load the SR with 0x2000 (supervisor bit set, no ints masked) and halt
    // the cpu until the next interrupt or exception arrives
    asm("stop #0x2000" ::: "cc");
}

void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
    PANIC_UNIMPLEMENTED;
}

/* unimplemented cache operations */
void arch_disable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
void arch_enable_cache(uint flags) { PANIC_UNIMPLEMENTED; }

void arch_clean_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_clean_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_sync_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }

/* atomics that may need to be implemented */
// from https://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary
unsigned int __atomic_fetch_add_4(volatile void *mem, unsigned int val, int model) {
    // emulate the read-modify-write by briefly masking interrupts, which is
    // sufficient on a uniprocessor; the memory model argument is ignored
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    unsigned int old = *(volatile unsigned int *)mem;
    *(volatile unsigned int *)mem = old + val;
    arch_interrupt_restore(state, 0);
    return old;
}
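
// The compiler may also emit a call to __atomic_exchange_4 when it cannot inline
// an atomic swap. A minimal sketch in the same disable-interrupts style as above
// (not currently wired up or required by this port; the memory model argument is
// ignored here as well):
unsigned int __atomic_exchange_4(volatile void *mem, unsigned int val, int model) {
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);

    // swap in the new value and return the previous contents to the caller
    unsigned int old = *(volatile unsigned int *)mem;
    *(volatile unsigned int *)mem = val;

    arch_interrupt_restore(state, 0);
    return old;
}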