/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>

#include "hf/arch/mm.h"

#include "hf/addr.h"
#include "hf/mpool.h"
#include "hf/static_assert.h"

/* Keep macro alignment */
/* clang-format off */

#define PAGE_SIZE (1 << PAGE_BITS)
#define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t))

/* The following are arch-independent page mapping modes. */
#define MM_MODE_R UINT32_C(0x0001) /* read */
#define MM_MODE_W UINT32_C(0x0002) /* write */
#define MM_MODE_X UINT32_C(0x0004) /* execute */
#define MM_MODE_D UINT32_C(0x0008) /* device */

/*
 * Memory in stage-1 is either valid (present) or invalid (absent).
 *
 * Memory in stage-2 has more states to track sharing, borrowing and giving of
 * memory. The states are made up of three parts:
 *
 *  1. V = valid/invalid    : Whether the memory is part of the VM's address
 *                            space. A fault will be generated if accessed when
 *                            invalid.
 *  2. O = owned/unowned    : Whether the memory is owned by the VM.
 *  3. X = exclusive/shared : Whether access is exclusive to the VM or shared
 *                            with at most one other VM.
 *
 * These parts compose to form the following states:
 *
 *  -  V  O  X : Owner of memory with exclusive access.
 *  -  V  O !X : Owner of memory with access shared with at most one other VM.
 *  -  V !O  X : Borrower of memory with exclusive access.
 *  -  V !O !X : Borrower of memory where access is shared with the owner.
 *  - !V  O  X : Owner of memory lent to a VM that has exclusive access.
 *
 *  - !V  O !X : Unused. Owner of shared memory always has access.
 *  - !V !O  X : Unused. Next entry is used for invalid memory.
 *
 *  - !V !O !X : Invalid memory. Memory is unrelated to the VM.
 *
 *  Modes are selected so that the owner of exclusive memory is the default;
 *  see the composition sketch after the definitions below.
 */
#define MM_MODE_INVALID UINT32_C(0x0010)
#define MM_MODE_UNOWNED UINT32_C(0x0020)
#define MM_MODE_SHARED  UINT32_C(0x0040)
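
/*
 * Illustrative sketch (not part of the API): with none of the three state
 * bits set, a mapping is valid, owned and exclusive, so the owner-exclusive
 * state is just the access bits. The other states are composed by or-ing in
 * the bits above, e.g.:
 *
 *   uint32_t owner_excl = MM_MODE_R | MM_MODE_W;        (  V  O  X )
 *   uint32_t lent = owner_excl | MM_MODE_INVALID;       ( !V  O  X )
 *   uint32_t borrowed = owner_excl | MM_MODE_UNOWNED;   (  V !O  X )
 *   uint32_t shared = owner_excl | MM_MODE_SHARED;      (  V  O !X )
 */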

/* Specifies whether a mapping will be a user (EL0) mapping. */
#define MM_MODE_USER    UINT32_C(0x0200)

/* Map page as non-global. */
#define MM_MODE_NG UINT32_C(0x0100)

/* The mask for a mode that is considered unmapped. */
#define MM_MODE_UNMAPPED_MASK (MM_MODE_INVALID | MM_MODE_UNOWNED)
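
/*
 * Reading of the state table above (an assumption, not a quoted invariant):
 * a mode with both mask bits set is in the !V !O state, i.e. the memory is
 * unrelated to the VM:
 *
 *   bool unmapped = (mode & MM_MODE_UNMAPPED_MASK) == MM_MODE_UNMAPPED_MASK;
 */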

#define MM_FLAG_COMMIT  0x01
#define MM_FLAG_UNMAP   0x02
#define MM_FLAG_STAGE1  0x04

/* clang-format on */

#define MM_PPOOL_ENTRY_SIZE sizeof(struct mm_page_table)

struct mm_page_table {
	alignas(PAGE_SIZE) pte_t entries[MM_PTE_PER_PAGE];
};
static_assert(sizeof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must take exactly one page.");
static_assert(alignof(struct mm_page_table) == PAGE_SIZE,
	      "A page table must be page aligned.");
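
/*
 * Usage sketch (assuming the mpool_init() declared in "hf/mpool.h"): pools
 * that back page-table allocations are initialised with page-table-sized
 * entries:
 *
 *   struct mpool ppool;
 *   mpool_init(&ppool, MM_PPOOL_ENTRY_SIZE);
 */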

struct mm_ptable {
	/**
	 * VMID/ASID associated with a page table. ASID 0 is reserved for use by
	 * the hypervisor.
	 */
	uint16_t id;
	/** Address of the root of the page table. */
	paddr_t root;
};

/** The type of addresses stored in the page table. */
typedef uintvaddr_t ptable_addr_t;

/** Represents the currently locked stage-1 page table of the hypervisor. */
struct mm_stage1_locked {
	struct mm_ptable *ptable;
};

void mm_vm_enable_invalidation(void);

bool mm_ptable_init(struct mm_ptable *t, uint16_t id, int flags,
		    struct mpool *ppool);
ptable_addr_t mm_ptable_addr_space_end(int flags);

bool mm_vm_init(struct mm_ptable *t, uint16_t id, struct mpool *ppool);
void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool);

bool mm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);
void *mm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			 uint32_t mode, struct mpool *ppool);
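
/*
 * Usage sketch (hypothetical begin/end/mode values): the prepare/commit split
 * lets a caller reserve all page-table memory up front so that the commit
 * step cannot fail partway through a mapping:
 *
 *   if (mm_identity_prepare(&ptable, begin, end, mode, &ppool)) {
 *           mm_identity_commit(&ptable, begin, end, mode, &ppool);
 *   }
 */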

bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
			uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
			    uint32_t mode, struct mpool *ppool);
void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
			   uint32_t mode, struct mpool *ppool, ipaddr_t *ipa);
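
/*
 * Usage sketch (placeholder addresses; pa_init() is from "hf/addr.h"):
 * identity-mapping a range into a VM's stage-2 table. On success, ipa is
 * expected to hold the IPA of the start of the range:
 *
 *   ipaddr_t ipa;
 *   bool ok = mm_vm_identity_map(&vm_ptable, pa_init(0x80000000),
 *                                pa_init(0x80200000),
 *                                MM_MODE_R | MM_MODE_W | MM_MODE_X,
 *                                &ppool, &ipa);
 */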
bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
		 struct mpool *ppool);
void mm_stage1_defrag(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_defrag(struct mm_ptable *t, struct mpool *ppool);
void mm_vm_dump(struct mm_ptable *t);
bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
		    uint32_t *mode);
bool mm_get_mode(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
		 uint32_t *mode);
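
/*
 * Usage sketch (assuming mm_vm_get_mode() succeeds only when the whole range
 * shares one mode): checking that an IPA range is mapped and will not fault
 * when accessed by the VM:
 *
 *   uint32_t mode;
 *   bool accessible = mm_vm_get_mode(&vm_ptable, begin, end, &mode) &&
 *                     (mode & MM_MODE_INVALID) == 0;
 */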

struct mm_stage1_locked mm_lock_ptable_unsafe(struct mm_ptable *ptable);
struct mm_stage1_locked mm_lock_stage1(void);
void mm_unlock_stage1(struct mm_stage1_locked *lock);
void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin,
		      paddr_t end, uint32_t mode, struct mpool *ppool);
bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,
	      struct mpool *ppool);
void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool);
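
/*
 * Usage sketch (placeholder MMIO addresses): mapping a device page into the
 * hypervisor's own stage-1 address space. mm_identity_map() is assumed to
 * return NULL when page-table memory cannot be allocated:
 *
 *   struct mm_stage1_locked locked = mm_lock_stage1();
 *   void *va = mm_identity_map(locked, pa_init(0x09000000),
 *                              pa_init(0x09001000),
 *                              MM_MODE_R | MM_MODE_W | MM_MODE_D, &ppool);
 *   mm_unlock_stage1(&locked);
 */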

bool mm_init(struct mpool *ppool);
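
/*
 * Boot-time sketch (hypothetical error handling): initialise the backing
 * pool, then the hypervisor's own memory management:
 *
 *   struct mpool ppool;
 *   mpool_init(&ppool, MM_PPOOL_ENTRY_SIZE);
 *   if (!mm_init(&ppool)) {
 *           ... handle failure, e.g. panic() ...
 *   }
 */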