#include <console.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stdbool.h>
#include <mini-os/byteorder.h>

#include "vtpm_manager.h"
#include "log.h"
#include "uuid.h"

#include "vtpmmgr.h"
#include "vtpm_disk.h"
#include "disk_crypto.h"
#include "disk_format.h"
#include "disk_io.h"
#include "disk_tpm.h"

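/* In-memory view of the manager's on-disk state; shared by the functions in
 * this file and, since it is not static, by the rest of vtpmmgr.
 */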
struct mem_tpm_mgr *g_mgr;

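/*
 * Write the manager's full state to the inactive root and make it the active
 * one. 'depth' selects how much TPM-backed state is refreshed along with the
 * write: SEQ_UPDATE only bumps the sequence number, CTR_UPDATE also
 * increments the rollback counter, MGR_KEY_UPDATE regenerates the manager
 * keys, and CTR_AUTH_UPDATE/NV_AUTH_UPDATE rotate the counter or NVRAM
 * authdata. Returns 0 on success or a TPM error code.
 */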
int vtpm_sync_disk(struct mem_tpm_mgr *mgr, int depth)
{
    int old_active_root = mgr->active_root;
    int new_active_root = !old_active_root;
    int rc = 0;
    struct tpm_authdata prev;
    struct mem_group *group0 = mgr->groups[0].v;

    // don't bother writing if we will never be able to read
    if (!group0 || !group0->nr_seals)
        return 0;

    mgr->sequence++;
    mgr->active_root = new_active_root;

    switch (depth) {
    case CTR_UPDATE:
    {
        uint32_t ctr = be32_native(mgr->counter_value);
        mgr->counter_value = native_be32(ctr + 1);
        break;
    }
    case MGR_KEY_UPDATE:
    {
        int i;
        mgr->root_seals_valid = 0;
        do_random(&mgr->tm_key, 16);
        aes_setup(&mgr->tm_key_e, &mgr->tm_key);
        do_random(&mgr->nv_key, 16);
        for (i = 0; i < mgr->nr_groups; i++) {
            abort(); // TODO use raw re-encryption to handle unopened groups
        }
        break;
    }
    case CTR_AUTH_UPDATE:
        mgr->root_seals_valid = 0;
        memcpy(&prev, &mgr->counter_auth, 20);
        do_random(&mgr->counter_auth, 20);
        break;
    case NV_AUTH_UPDATE:
        mgr->root_seals_valid = 0;
        memcpy(&prev, &mgr->nvram_auth, 20);
        do_random(&mgr->nvram_auth, 20);
        break;
    }

    disk_write_all(mgr);

    switch (depth) {
    case SEQ_UPDATE:
        break;

    case CTR_UPDATE:
        rc = TPM_disk_incr_counter(mgr->counter_index, mgr->counter_auth);
        if (rc) {
            uint32_t ctr = be32_native(mgr->counter_value);
            mgr->counter_value = native_be32(ctr - 1);
            mgr->active_root = old_active_root;
            return rc;
        }
        break;

    case MGR_KEY_UPDATE:
        rc = TPM_disk_nvwrite(&mgr->nv_key, 16, mgr->nvram_slot, mgr->nvram_auth);
        if (rc)
            abort();
        break;

    case CTR_AUTH_UPDATE:
        rc = TPM_disk_change_counter(mgr->counter_index, prev, mgr->counter_auth);
        if (rc)
            abort();
        break;

    case NV_AUTH_UPDATE:
        rc = TPM_disk_nvchange(mgr->nvram_slot, prev, mgr->nvram_auth);
        if (rc)
            abort();
        break;
    }

    return rc;
}

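/* Map an open in-memory group back to its header slot in the manager's group
 * table; returns NULL if the group is not found.
 */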
static struct mem_group_hdr* find_mem_group_hdr(struct mem_tpm_mgr *mgr, struct mem_group *group)
{
    int i;
    for (i = 0; i < mgr->nr_groups; i++) {
        struct mem_group_hdr *hdr = mgr->groups + i;
        if (hdr->v == group)
            return hdr;
    }
    return NULL;
}

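/*
 * Mark a group's header dirty, bump its sequence number, and flush the
 * manager state to disk. GROUP_KEY_UPDATE additionally regenerates the
 * group's keys, invalidates its seals, dirties every vTPM page, and is
 * escalated to a CTR_UPDATE of the manager.
 */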
int vtpm_sync_group(struct mem_group *group, int depth)
{
    struct mem_group_hdr* hdr = find_mem_group_hdr(g_mgr, group);
    uint64_t seq = be64_native(group->details.sequence);

    if (!hdr)
        abort();

    hdr->disk_loc.value = 0;
    group->details.sequence = native_be64(1 + seq);

    if (depth == GROUP_KEY_UPDATE) {
        int i;
        do_random(&group->group_key, 16);
        do_random(&group->rollback_mac_key, 16);
        group->flags &= ~MEM_GROUP_FLAG_SEAL_VALID;
        for (i = 0; i < group->nr_pages; i++)
            group->data[i].disk_loc.value = 0;
        depth = CTR_UPDATE;
    }

    return vtpm_sync_disk(g_mgr, depth);
}

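/* Locate the sector-sized page that holds a vTPM's on-disk record, based on
 * the vTPM's index within its group.
 */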
static struct mem_vtpm_page* find_mem_vtpm_page(struct mem_group *group, struct mem_vtpm *vtpm)
{
    int pgidx = vtpm->index_in_parent / VTPMS_PER_SECTOR;
    return group->data + pgidx;
}

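/* Flush a single vTPM's state: dirty the page that holds it and sync the
 * owning group (and manager) with a plain sequence update.
 */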
int vtpm_sync(struct mem_group *group, struct mem_vtpm *vtpm)
{
    struct mem_vtpm_page *pg = find_mem_vtpm_page(group, vtpm);
    if (!pg)
        return 1;
    pg->disk_loc.value = 0;
    return vtpm_sync_group(group, SEQ_UPDATE);
}

/************************************************************************/

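/*
 * Allocate a new vTPM with the given UUID and append it to the group,
 * growing the group's page array when the last page is full. Nothing is
 * written to disk here; the caller is expected to sync the group afterwards.
 */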
int create_vtpm(struct mem_group *group, struct mem_vtpm **vtpmp, const uuid_t uuid)
{
    int pgidx = group->nr_vtpms / VTPMS_PER_SECTOR;
    int vtidx = group->nr_vtpms % VTPMS_PER_SECTOR;
    struct mem_vtpm *vtpm = calloc(1, sizeof(*vtpm));

    struct mem_vtpm_page *page = group->data + pgidx;
    if (pgidx >= group->nr_pages) {
        if (pgidx != group->nr_pages)
            abort(); // nr_vtpms inconsistent with nr_pages
        group->nr_pages++;
        group->data = realloc(group->data, group->nr_pages * sizeof(*page));
        page = group->data + pgidx;
        memset(page, 0, sizeof(*page));
    }
    if (page->size != vtidx)
        abort(); // nr_vtpms inconsistent with page->size
    page->size++;

    page->vtpms[vtidx] = vtpm;
    vtpm->index_in_parent = group->nr_vtpms;
    vtpm->flags = 0;

    group->nr_vtpms++;

    memcpy(vtpm->uuid, uuid, 16);
    *vtpmp = vtpm;
    return 0;
}

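/*
 * Remove a closed vTPM from its group by swapping the group's last vTPM into
 * its slot, shrinking the final page, and dirtying both affected pages.
 * Fails (returns 1) if the vTPM is still open.
 */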
int delete_vtpm(struct mem_group *group, struct mem_vtpm *vtpm)
{
    struct mem_vtpm_page *pg = find_mem_vtpm_page(group, vtpm);
    struct mem_vtpm_page *last_pg = group->data + (group->nr_pages - 1);
    struct mem_vtpm *last = last_pg->vtpms[last_pg->size - 1];
    int vtidx = vtpm->index_in_parent % VTPMS_PER_SECTOR;

    if (vtpm->flags & VTPM_FLAG_OPEN)
        return 1;

    last->index_in_parent = vtpm->index_in_parent;
    pg->vtpms[vtidx] = last;
    pg->disk_loc.value = 0;

    last_pg->vtpms[last_pg->size - 1] = NULL;
    last_pg->disk_loc.value = 0;
    last_pg->size--;

    if (last_pg->size == 0)
        group->nr_pages--;
    group->nr_vtpms--;
    free(vtpm);
    return 0;
}

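/* Search every open group for a vTPM with the given UUID; on a match, fill in
 * the group and vTPM pointers and return 0, otherwise return 1.
 */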
int find_vtpm(struct mem_group **groupp, struct mem_vtpm **vtpmp, const uuid_t uuid)
{
    struct mem_group *group;
    int i, j, k;

    for (i = 0; i < g_mgr->nr_groups; i++) {
        group = g_mgr->groups[i].v;
        if (!group)
            continue;
        for (j = 0; j < group->nr_pages; j++) {
            struct mem_vtpm_page *pg = &group->data[j];
            for (k = 0; k < pg->size; k++) {
                struct mem_vtpm *vt = pg->vtpms[k];
                if (!memcmp(uuid, vt->uuid, 16)) {
                    *groupp = group;
                    *vtpmp = vt;
                    return 0;
                }
            }
        }
    }

    return 1;
}