1 /*
2 * (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3 * Alexander Warg <warg@os.inf.tu-dresden.de>
4 * economic rights: Technische Universität Dresden (Germany)
5 *
6 * This file is part of TUD:OS and distributed under the terms of the
7 * GNU General Public License 2.
8 * Please see the COPYING-GPL-2 file for details.
9 */
10 #include <l4/sys/types.h>
11 #include <l4/sys/ipc.h>
12 #include <l4/sys/assert.h>
13 #include <l4/sys/factory.h>
14 #include <l4/sys/capability>
15 #include <l4/sys/cxx/ipc_epiface>
16 #include <l4/sys/factory>
17
18 #include <l4/sigma0/sigma0.h>
19
20 #include <l4/cxx/iostream>
21 #include <l4/cxx/l4iostream>
22 #include <l4/cxx/l4types.h>
23
24 #include "globals.h"
25 #include "page_alloc.h"
26 #include "mem_man.h"
27 #include "memmap.h"
28 #include "memmap_internal.h"
29 #include "ioports.h"
30
31
// Pointer to the kernel info page (KIP); mapped read-only/executable to
// clients on request (see map_kip()). Presumably initialized during
// startup code outside this file — TODO confirm.
l4_kernel_info_t *l4_info;

// Resource map for I/O memory regions; queried (never first-come
// allocated) by map_mem(). Populated elsewhere.
Mem_man iomem;

// Kinds of memory a client may request from sigma0.
enum Memory_type { Ram, Io_mem, Io_mem_cached };
37
dump_all()38 void dump_all()
39 {
40 L4::cout << PROG_NAME": Dump of all resource maps\n"
41 << "RAM:------------------------\n";
42 Mem_man::ram()->dump();
43 L4::cout << "IOMEM:----------------------\n";
44 iomem.dump();
45 dump_io_ports();
46 }
47
48 static
map_kip(Answer * a)49 void map_kip(Answer *a)
50 {
51 a->snd_fpage((l4_umword_t) l4_info, L4_LOG2_PAGESIZE, L4_FPAGE_RX, true);
52 }
53
54 static
new_client(l4_umword_t,Answer * a)55 void new_client(l4_umword_t, Answer *a)
56 {
57 static l4_cap_idx_t _next_gate = L4_BASE_CAPS_LAST + L4_CAP_OFFSET;
58
59 if ((_next_gate >> L4_CAP_SHIFT) & ~Region::Owner_mask)
60 {
61 a->error(L4_ENOMEM);
62 return;
63 }
64
65 l4_factory_create_gate_u(L4_BASE_FACTORY_CAP, _next_gate,
66 L4_BASE_THREAD_CAP, (_next_gate >> L4_CAP_SHIFT) << 4, a->utcb);
67 a->snd_fpage(l4_obj_fpage(_next_gate, 0, L4_CAP_FPAGE_RWS));
68 _next_gate += L4_CAP_SIZE;
69 return;
70 }
71
72 static
map_free_page(unsigned size,l4_umword_t t,Answer * a)73 void map_free_page(unsigned size, l4_umword_t t, Answer *a)
74 {
75 unsigned long addr;
76
77 if (size < L4_PAGESHIFT)
78 {
79 a->error(L4_EINVAL);
80 return;
81 }
82
83 addr = Mem_man::ram()->alloc_first(1UL << size, t);
84 if (addr != ~0UL)
85 a->snd_fpage(addr, size, L4_FPAGE_RWX, true);
86 else
87 a->error(L4_ENOMEM);
88 }
89
90
/**
 * Map RAM or I/O memory to a client.
 *
 * \param fp  Requested flexpage (send address and order) from the client.
 * \param fn  Requested memory type: Ram, Io_mem or Io_mem_cached.
 * \param t   Client owner label; RAM allocations are accounted to it.
 * \param an  Answer buffer; receives the mapping, or L4_EINVAL /
 *            L4_ENOMEM on failure.
 */
static
void map_mem(l4_fpage_t fp, Memory_type fn, l4_umword_t t, Answer *an)
{
  Mem_man *m;
  L4_fpage_rights mem_flags;   // only valid when addr != ~0UL below
  bool cached = true;
  unsigned long addr = ~0UL;   // ~0UL == "nothing found/allocated"
  Region const *p;
  Region r;
  unsigned long send_addr = l4_fpage_memaddr(fp);
  unsigned send_order = l4_fpage_size(fp);

  // Check if send_addr is correctly aligned to send_order since the kernel
  // will otherwise truncate the send address. Fail in case it is not aligned.
  if (l4_trunc_size(send_addr, send_order) != send_addr)
    {
      an->error(L4_EINVAL);
      return;
    }

  // Isolation is only enforced at page granularity. Deny smaller requests.
  if (send_order < L4_PAGESHIFT)
    {
      an->error(L4_EINVAL);
      return;
    }

  switch (fn)
    {
    case Ram:
      // RAM is first-come, first-served: alloc() marks the region as
      // owned by client 't' and yields its start address.
      m = Mem_man::ram();
      mem_flags = L4_FPAGE_RWX;
      addr = m->alloc(Region::bs(send_addr, 1UL << send_order, t));
      break;
    case Io_mem:
      cached = false;
      /* fall through */
    case Io_mem_cached:
      // there is no first-come, first-serve for IO memory
      r = Region::bs(send_addr, 1UL << send_order);
      p = iomem.find(r);
      if (p)
        {
          addr = r.start();
          // Mapping rights are taken from the matching iomem region.
          mem_flags = p->rights();
        }
      break;
    default:
      an->error(L4_EINVAL);
      return;
    }

  if (addr == ~0UL)
    {
      an->error(L4_ENOMEM);
      return;
    }

  an->snd_fpage(addr, send_order, mem_flags, cached);

  return;
}
153
154 /* handler for page fault requests */
155 static
156 void
handle_page_fault(l4_umword_t t,l4_utcb_t * utcb,Answer * answer)157 handle_page_fault(l4_umword_t t, l4_utcb_t *utcb, Answer *answer)
158 {
159 unsigned long pfa = l4_utcb_mr_u(utcb)->mr[0] & ~7UL;
160 bool inst_fetch = l4_utcb_mr_u(utcb)->mr[0] & 4;
161 bool write = l4_utcb_mr_u(utcb)->mr[0] & 2;
162
163 L4_fpage_rights dr = inst_fetch ? (write ? L4_FPAGE_RWX : L4_FPAGE_RX)
164 : (write ? L4_FPAGE_RW : L4_FPAGE_RO);
165
166 L4_fpage_rights rights;
167 Region r = Region::bs(l4_trunc_page(pfa), L4_PAGESIZE, t, dr);
168 if (Mem_man::ram()->alloc_get_rights(r, &rights))
169 {
170 answer->snd_fpage(r.start(), L4_LOG2_PAGESIZE, rights, true);
171 return;
172 }
173
174 if (debug_warnings)
175 L4::cout << PROG_NAME ": Page fault, did not find page " << r << "\n";
176
177 answer->error(L4_ENOMEM);
178 }
179
180 static
handle_service_request(l4_umword_t t,l4_utcb_t * utcb,Answer * answer)181 void handle_service_request(l4_umword_t t, l4_utcb_t *utcb, Answer *answer)
182 {
183 if ((long)l4_utcb_mr_u(utcb)->mr[0] != L4_PROTO_SIGMA0)
184 {
185 answer->error(L4_ENODEV);
186 return;
187 }
188 new_client(t, answer);
189 }
190
/**
 * Dispatch a sigma0-protocol request according to its SIGMA0_REQ_ID_*.
 *
 * \param t       Client owner label, forwarded to the handlers.
 * \param utcb    UTCB; mr[0] holds the request id, mr[1] (where used)
 *                holds the request flexpage.
 * \param answer  Answer buffer filled by the selected handler.
 */
static
void handle_sigma0_request(l4_umword_t t, l4_utcb_t *utcb, Answer *answer)
{
  // Reject messages that do not carry the sigma0 magic request word.
  if (!SIGMA0_IS_MAGIC_REQ(l4_utcb_mr_u(utcb)->mr[0]))
    {
      answer->error(L4_ENOSYS);
      return;
    }

  switch (l4_utcb_mr_u(utcb)->mr[0] & SIGMA0_REQ_ID_MASK)
    {
    case SIGMA0_REQ_ID_DEBUG_DUMP:
      // Debug aid: print allocator statistics plus all resource maps.
      {
        Mem_man::Tree::Node_allocator alloc;
        L4::cout << PROG_NAME": Memory usage: a total of "
                 << Page_alloc_base::total()
                 << " bytes are in the memory pool\n"
                 << "  allocated "
                 << alloc.total_objects() - alloc.free_objects()
                 << " of " << alloc.total_objects() << " objects\n"
                 << "  these are "
                 << (alloc.total_objects() - alloc.free_objects())
                    * alloc.object_size
                 << " of " << alloc.total_objects() * alloc.object_size
                 << " bytes\n";
        dump_all();
        answer->error(0);
      }
      break;
    case SIGMA0_REQ_ID_FPAGE_RAM:
      // mr[1] is reinterpreted in place as the requested flexpage.
      map_mem((l4_fpage_t&)l4_utcb_mr_u(utcb)->mr[1], Ram, t, answer);
      break;
    case SIGMA0_REQ_ID_FPAGE_IOMEM:
      map_mem((l4_fpage_t&)l4_utcb_mr_u(utcb)->mr[1], Io_mem, t, answer);
      break;
    case SIGMA0_REQ_ID_FPAGE_IOMEM_CACHED:
      map_mem((l4_fpage_t&)l4_utcb_mr_u(utcb)->mr[1], Io_mem_cached, t, answer);
      break;
    case SIGMA0_REQ_ID_KIP:
      map_kip(answer);
      break;
    case SIGMA0_REQ_ID_FPAGE_ANY:
      // Only the order (size) of the requested flexpage matters here.
      map_free_page(l4_fpage_size(*(l4_fpage_t*)(&l4_utcb_mr_u(utcb)->mr[1])),
                    t, answer);
      break;
    case SIGMA0_REQ_ID_NEW_CLIENT:
      new_client(t, answer);
      break;
    default:
      answer->error(L4_ENOSYS);
      break;
    }
}
244
namespace {

// Interface stub: declares sigma0 as an L4::Factory-derived kernel
// object with protocol id L4_PROTO_SIGMA0. Only used below to drive
// the meta-protocol dispatch in pager().
class Sigma0 :
  public L4::Kobject_t<Sigma0, L4::Factory, L4_PROTO_SIGMA0>
{};

}
252
/* PAGER dispatch loop */
/**
 * Main sigma0 server loop: wait for IPC, dispatch by protocol label,
 * reply with the prepared answer, and wait for the next message.
 * Never returns.
 */
void
pager(void)
{
  l4_umword_t t;       // sender label of the current message
  l4_msgtag_t tag;     // message tag of the current message

  l4_utcb_t *utcb = l4_utcb();
  Answer answer(utcb);

  /* now start serving the subtasks */
  for (;;)
    {
      tag = l4_ipc_wait(utcb, &t, L4_IPC_NEVER);
      if (0)  // disabled receive trace; flip to 1 for debugging
        L4::cout << PROG_NAME << ": rcv: " << tag << "\n";
      // Inner loop: keep serving via reply-and-wait until an IPC error
      // (e.g. send timeout) forces us back to a plain open wait.
      while (!l4_msgtag_has_error(tag))
        {
          l4_umword_t pfa;
          // pfa is only assigned (and only read) under debug_warnings;
          // it snapshots mr[0] before the handlers may overwrite it.
          if (debug_warnings)
            pfa = l4_utcb_mr_u(utcb)->mr[0];
          // Strip the low label bits to recover the client id encoded
          // as (cap_nr << 4) when the gate was created (see new_client()).
          t >>= 4;
          /* we received a paging request here */
          /* handle the sigma0 protocol */

          if (debug_ipc)
            {
              l4_umword_t d1 = l4_utcb_mr_u(utcb)->mr[0];
              l4_umword_t d2 = l4_utcb_mr_u(utcb)->mr[1];
              L4::cout << PROG_NAME": received " << tag << " d1=" << L4::hex
                       << d1 << " d2=" << d2 << L4::dec << " from thread="
                       << t << '\n';
            }

          // Route by protocol label; each handler fills 'answer'.
          switch (tag.label())
            {
            case L4_PROTO_SIGMA0:
              handle_sigma0_request(t, utcb, &answer);
              break;
            case L4::Meta::Protocol:
              // Generic meta protocol (interface queries) answered by a
              // stub server for the Sigma0 interface declared above.
              {
                L4::Ipc::Detail::Meta_svr<Sigma0> dummy;
                answer.tag
                  = L4::Ipc::Msg::dispatch_call<L4::Meta::Rpcs>(&dummy, utcb,
                                                                tag, t);
              }
              break;
            case L4::Factory::Protocol:
              handle_service_request(t, utcb, &answer);
              break;
            case L4_PROTO_PAGE_FAULT:
              handle_page_fault(t, utcb, &answer);
              break;
            case L4_PROTO_IO_PAGE_FAULT:
              handle_io_page_fault(t, utcb, &answer);
              break;
            default:
              answer.error(L4_EBADPROTO);
              break;
            }

          if (answer.failed())
            {
              if (debug_warnings)
                {
                  L4::cout << PROG_NAME": can't handle label=" << L4::dec
                           << l4_msgtag_label(tag)
                           << " d1=" << L4::hex << pfa
                           << " d2=" << l4_utcb_mr_u(utcb)->mr[1]
                           << " from thread=" << L4::dec << t << '\n';
                  if (tag.is_page_fault())
                    Mem_man::ram()->dump();
                }

              // Failing an exception IPC would kill the faulting thread.
              l4_assert(!tag.is_exception());
            }

          if (debug_ipc)
            L4::cout << PROG_NAME": sending d1="
                     << L4::hex << l4_utcb_mr_u(utcb)->mr[0]
                     << " d2=" << l4_utcb_mr_u(utcb)->mr[1]
                     << " msg=" << answer.tag << L4::dec
                     << " to thread=" << t << '\n';

          /* send reply and wait for next message */
          // Zero send timeout: never block on an unresponsive client.
          tag = l4_ipc_reply_and_wait(utcb, answer.tag, &t,
                                      L4_IPC_SEND_TIMEOUT_0);
        }
    }
}
343