// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2009 Corey Tabaka
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <arch/ops.h>
#include <arch/x86.h>
#include <arch/x86/feature.h>

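// x86 reports a single cache line size via cpuid: the CLFLUSH line size in
// leaf 1, EBX bits 15:8, in units of 8 bytes. The data and instruction
// cache line sizes below are therefore the same value.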
uint32_t arch_dcache_line_size(void) {
    return x86_get_clflush_line_size();
}

uint32_t arch_icache_line_size(void) {
    return x86_get_clflush_line_size();
}

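// Makes newly written instruction bytes visible to the instruction fetch
// unit. Illustrative caller pattern (names hypothetical):
//
//   memcpy(patch_site, new_insn, insn_len);
//   arch_sync_cache_range((addr_t)patch_site, insn_len);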
void arch_sync_cache_range(addr_t start, size_t len) {
    // Invoke cpuid to act as a serializing instruction.  This will ensure we
    // see modifications to future parts of the instruction stream.  See
    // Intel Volume 3, 8.1.3 "Handling Self- and Cross-Modifying Code".  cpuid
    // is the more conservative approach suggested in this section.
    uint32_t v;
    cpuid(0, &v, &v, &v, &v);
}

void arch_invalidate_cache_range(addr_t start, size_t len) {
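    // Intentionally empty: x86 maintains cache coherency with DMA, and the
    // only architectural invalidate-without-writeback (invd) is unsafe to
    // use on live data, so there is nothing useful to do here. (Rationale
    // inferred from the architecture; not stated in the original source.)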
}

void arch_clean_cache_range(addr_t start, size_t len) {
    // TODO: consider wiring up clwb if present
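    // clwb writes a dirty line back to memory without invalidating it, so a
    // clwb-based clean would keep the data hot in cache for subsequent use.
    // A sketch of that path, assuming a hypothetical X86_FEATURE_CLWB bit in
    // the feature table:
    //
    //   if (x86_feature_test(X86_FEATURE_CLWB)) {
    //       const vaddr_t clsize = x86_get_clflush_line_size();
    //       for (addr_t p = ROUNDDOWN(start, clsize); p < start + len; p += clsize) {
    //           __asm__ volatile("clwb %0" ::"m"(*(char*)p));
    //       }
    //       __asm__ volatile("mfence");
    //       return;
    //   }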
    arch_clean_invalidate_cache_range(start, len);
}

void arch_clean_invalidate_cache_range(addr_t start, size_t len) {
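    // Without clflush the only fallback is wbinvd, which writes back and
    // invalidates every cache on the executing CPU regardless of the
    // requested range; correct, but very expensive.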
    if (unlikely(!x86_feature_test(X86_FEATURE_CLFLUSH))) {
        __asm__ volatile("wbinvd");
        return;
    }

    // clflush/clflushopt is present
    const vaddr_t clsize = x86_get_clflush_line_size();
    addr_t end = start + len;
    addr_t ptr = ROUNDDOWN(start, clsize);

    // TODO: use run time patching to merge these two paths
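    // clflushopt is preferred when present: unlike clflush, its executions
    // are not ordered against each other, so flushes of distinct lines can
    // proceed in parallel. Both are ordered by the trailing mfence below.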
    if (likely(x86_feature_test(X86_FEATURE_CLFLUSHOPT))) {
        while (ptr < end) {
            __asm__ volatile("clflushopt %0" ::"m"(*(char*)ptr));
            ptr += clsize;
        }
    } else {
        while (ptr < end) {
            __asm__ volatile("clflush %0" ::"m"(*(char*)ptr));
            ptr += clsize;
        }
    }

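    // Fence so the (weakly ordered) flushes above are complete before any
    // subsequent memory operations execute.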
    __asm__ volatile("mfence");
}