/*
 * (c) 2008-2009 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
 *               Alexander Warg <warg@os.inf.tu-dresden.de>
 *     economic rights: Technische Universität Dresden (Germany)
 *
 * This file is part of TUD:OS and distributed under the terms of the
 * GNU General Public License 2.
 * Please see the COPYING-GPL-2 file for details.
 */
#include "dataspace_noncont.h"
#include "quota.h"
#include "pages.h"

#include <l4/sys/task.h>
#include <l4/sys/cache.h>

#include <l4/cxx/iostream>
#include <l4/cxx/minmax>
#include <l4/cxx/exceptions>
#include <cassert>
#include <cstring>
#include <climits>

using cxx::min;

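/*
 * Overview: a non-contiguous dataspace backs its memory page by page
 * instead of with one contiguous region.  Each page is tracked by a Page
 * entry; the three backing-store variants defined further below
 * (Mem_one_page, Mem_small, Mem_big) differ only in how these entries are
 * stored, and Dataspace_noncont::create() picks one based on the
 * dataspace size.
 */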
// Revoke mappings of page 'p' from all other address spaces.  With 'ro'
// set only write rights are revoked, otherwise the page is unmapped
// completely.
void
Moe::Dataspace_noncont::unmap_page(Page const &p, bool ro) const throw()
{
  if (p.valid())
    l4_task_unmap(L4_BASE_TASK_CAP,
                  l4_fpage((unsigned long)*p, page_shift(),
                           ro ? L4_FPAGE_W : L4_FPAGE_RWX),
                  L4_FP_OTHER_SPACES);
}

// Unmap a page, drop our reference to it, and release the backing memory
// once the last reference is gone.
void
Moe::Dataspace_noncont::free_page(Page &p) const throw()
{
  unmap_page(p);
  if (p.valid() && !Moe::Pages::unshare(*p))
    {
      //L4::cout << "free page @" << *p << '\n';
      qalloc()->free_pages(*p, page_size());
    }

  p.set(0, 0);
}

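/*
 * Return the backing address for 'offset', allocating the page on demand.
 * A page marked Page_cow is resolved here on a write request: if we hold
 * the only reference, the copy-on-write flag is simply dropped, otherwise
 * the contents are copied into a freshly allocated page.  Pages that are
 * still unbacked are allocated and zero-filled.
 */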
Moe::Dataspace::Address
Moe::Dataspace_noncont::map_address(l4_addr_t offset, Flags flags) const
{
  // XXX: There may be a problem with dataspaces whose
  //      page_size() is larger than L4_PAGESIZE -- this must be reviewed!
  if (!check_limit(offset))
    return Address(-L4_ERANGE);

  Page &p = alloc_page(offset);

  flags &= map_flags();

  if (flags.w() && (p.flags() & Page_cow))
    {
      if (Moe::Pages::ref_count(*p) == 1)
        p.set(*p, p.flags() & ~Page_cow);
      else
        {
          void *np = qalloc()->alloc_pages(page_size(), page_size());
          Moe::Pages::share(np);

          // L4::cout << "copy on write for " << *p << " to " << np << '\n';
          memcpy(np, *p, page_size());
          // FIXME: we should pass information on whether this page is to be
          // mapped executable or not and conditionally make the I-cache
          // coherent.  And we should provide a single API with opcode bits
          // to allow a combination of cache clean and I-cache coherency in
          // a single operation.
          l4_cache_coherent((l4_addr_t)np, (l4_addr_t)np + page_size());
          l4_cache_clean_data((l4_addr_t)np, (l4_addr_t)np + page_size());
          unmap_page(p);
          Moe::Pages::unshare(*p);
          p.set(np, 0);
        }
    }

  if (!*p)
    {
      p.set(qalloc()->alloc_pages(page_size(), page_size()), 0);
      Moe::Pages::share(*p);
      memset(*p, 0, page_size());
      // No need for I-cache coherence, we just zero-fill and assume that
      // this is not executable code.
      l4_cache_clean_data((l4_addr_t)*p, (l4_addr_t)(*p) + page_size());
    }

  return Address(l4_addr_t(*p), page_shift(), flags, offset & (page_size() - 1));
}

Moe::Dataspace::Address
Moe::Dataspace_noncont::address(l4_addr_t offset, Flags flags, l4_addr_t,
                                l4_addr_t, l4_addr_t) const
{ return map_address(offset, flags); }

int
Moe::Dataspace_noncont::copy_address(l4_addr_t offset, Flags flags,
                                     l4_addr_t *addr, unsigned long *size) const
{
  auto a = map_address(offset, flags);
  if (a.is_nil())
    return -L4_ERANGE;

  *addr = (l4_addr_t)a.adr();
  *size = a.sz() - a.of();
  return 0;
}

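/*
 * Fault in all pages touched by [offset, offset + size): each page in the
 * range is passed through address(), which allocates the backing page if
 * necessary.
 */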
int
Moe::Dataspace_noncont::pre_allocate(l4_addr_t offset, l4_size_t size,
                                     unsigned rights)
{
  if (!check_range(offset, size))
    return -L4_ERANGE;

  l4_addr_t end_off = l4_round_size(offset + size, page_shift());

  l4_size_t ps = page_size();
  for (l4_addr_t o = l4_trunc_size(offset, page_shift()); o < end_off; o += ps)
    {
      Address a = address(o, map_flags(rights));
      if (a.is_nil())
        return a.error();
    }
  return 0;
}

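/*
 * Clear 'size' bytes starting at 'offs'.  An unaligned head and tail are
 * zeroed via the generic Moe::Dataspace::clear(), while pages that are
 * covered completely are simply freed, so they read back as zeros once
 * they are faulted in again.
 */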
long
Moe::Dataspace_noncont::clear(unsigned long offs, unsigned long size) const throw()
{
  if (!check_limit(offs))
    return -L4_ERANGE;

  unsigned long sz = min(size, round_size() - offs);
  unsigned long pg_sz = page_size();
  unsigned long pre_sz = offs & (pg_sz - 1);
  if (pre_sz)
    {
      pre_sz = min(pg_sz - pre_sz, sz);
      Moe::Dataspace::clear(offs, pre_sz);
      sz -= pre_sz;
      offs += pre_sz;
    }

  unsigned long u_sz = sz & ~(pg_sz - 1);

  while (u_sz)
    {
      // printf("ds free page offs %lx\n", offs);
      free_page(page(offs));
      offs += pg_sz;
      u_sz -= pg_sz;
    }

  sz &= (pg_sz - 1);

  if (sz)
    Moe::Dataspace::clear(offs, sz);

  return 0;
}

namespace {

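// Smallest variant: a dataspace that fits into a single page.  The one
// and only Page entry is stored inline in the object, so no separate
// metadata needs to be allocated.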
class Mem_one_page : public Moe::Dataspace_noncont
{
public:
  Mem_one_page(unsigned long size, Flags flags) throw()
  : Moe::Dataspace_noncont(size, flags)
  {}

  ~Mem_one_page() throw()
  { free_page(page(0)); }

  Page &page(unsigned long /*offs*/) const throw() override
  { return const_cast<Page &>(_page); }

  Page &alloc_page(unsigned long /*offs*/) const throw() override
  { return const_cast<Page &>(_page); }
};

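// Medium variant: one flat array with a Page entry per page of the
// dataspace.  The array is allocated from the quota allocator and its
// size is rounded up to a multiple of Meta_align bytes.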
class Mem_small : public Moe::Dataspace_noncont
{
  enum
  {
    Meta_align_bits = 10,
    Meta_align = 1UL << Meta_align_bits,
  };

public:
  unsigned long meta_size() const throw()
  { return l4_round_size(num_pages() * sizeof(unsigned long), Meta_align_bits); }

  Mem_small(unsigned long size, Flags flags)
  : Moe::Dataspace_noncont(size, flags)
  {
    _pages = (Page *)qalloc()->alloc_pages(meta_size(), Meta_align);
    memset((void *)_pages, 0, meta_size());
  }

  ~Mem_small() throw()
  {
    for (unsigned long i = num_pages(); i > 0; --i)
      free_page(page((i - 1) << page_shift()));

    qalloc()->free_pages(_pages, meta_size());
  }

  Page &page(unsigned long offs) const throw() override
  { return _pages[offs >> page_shift()]; }

  Page &alloc_page(unsigned long offs) const throw() override
  { return _pages[offs >> page_shift()]; }
};

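// Large variant: a two-level page table.  The first level holds L1
// entries, each pointing to a 4KB second-level table of Page entries
// that is allocated lazily in alloc_page().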
class Mem_big : public Moe::Dataspace_noncont
{
public:
  // use a 4KB second level for page management
  static unsigned long meta2_size() throw()
  { return 1UL << 12; }

  static unsigned long entries2() throw()
  { return meta2_size() / sizeof(Page *); }

private:
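  // A first-level entry: the pointer to the 4KB-aligned second-level table
  // is kept in the upper bits, while the low 12 bits are used as a small
  // counter (maintained via inc()/dec()).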
  class L1
  {
  private:
    unsigned long p;

  public:
    Page *l2() const throw() { return (Page *)(p & ~0xfffUL); }
    Page &operator [] (unsigned idx) throw()
    { return l2()[idx]; }
    Page *operator * () const throw() { return l2(); }
    unsigned long cnt() const throw() { return p & 0xfffUL; }
    void inc() throw() { p = (p & ~0xfffUL) | (((p & 0xfffUL) + 1) & 0xfffUL); }
    void dec() throw() { p = (p & ~0xfffUL) | (((p & 0xfffUL) - 1) & 0xfffUL); }
    void set(void *_p) throw() { p = (unsigned long)_p; }
  };

  L1 &__p(unsigned long offs) const throw()
  { return ((L1 *)_pages)[(offs >> page_shift()) / entries2()]; }

  unsigned l2_idx(unsigned long offs) const
  { return (offs >> page_shift()) & (entries2() - 1); }

public:
  unsigned long entries1() const throw()
  { return (num_pages() + entries2() - 1) / entries2(); }

  long meta1_size() const throw()
  { return l4_round_size(entries1() * sizeof(L1 *), 10); }

  Mem_big(unsigned long size, Flags flags)
  : Moe::Dataspace_noncont(size, flags)
  {
    _pages = (Page *)qalloc()->alloc_pages(meta1_size(), 1024);
    memset((void *)_pages, 0, meta1_size());
  }

  ~Mem_big() throw()
  {
    for (unsigned long i = 0; i < size(); i += page_size())
      free_page(page(i));

    for (L1 *p = (L1 *)_pages; p != (L1 *)_pages + entries1(); ++p)
      {
        if (**p)
          qalloc()->free_pages(**p, meta2_size());
        p->set(0);
      }

    qalloc()->free_pages(_pages, meta1_size());
  }

  Page &page(unsigned long offs) const throw() override
  {
    static Page invalid_page;
    if (!*__p(offs))
      return invalid_page;

    return __p(offs)[l2_idx(offs)];
  }

  Page &alloc_page(unsigned long offs) const override
  {
    L1 &_p = __p(offs);
    if (!*_p)
      {
        void *a = qalloc()->alloc_pages(meta2_size(), meta2_size());
        assert(((l4_addr_t)a & 0xfff) == 0);
        _p.set(a);
        memset(a, 0, meta2_size());
      }

    return _p[l2_idx(offs)];
  }
};
}

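/*
 * Select the backing-store variant by size: a single page is enough for
 * dataspaces up to L4_PAGESIZE, the flat table of Mem_small covers sizes
 * for which that table itself still fits into a single page of entries,
 * and everything larger uses the two-level Mem_big layout.
 */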
Moe::Dataspace_noncont *
Moe::Dataspace_noncont::create(Moe::Q_alloc *q, unsigned long size,
                               Flags flags)
{
  if (size <= L4_PAGESIZE)
    return q->make_obj<Mem_one_page>(size, flags);
  else if (size <= L4_PAGESIZE * (L4_PAGESIZE / sizeof(unsigned long)))
    return q->make_obj<Mem_small>(size, flags);
  else
    return q->make_obj<Mem_big>(size, flags);
}
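
/*
 * Illustrative sketch only (not part of this file): a caller holding a
 * quota allocator 'q' and suitable dataspace 'flags' -- both assumed here,
 * not defined in this file -- might create and fault in a non-contiguous
 * dataspace roughly like this:
 *
 *   Moe::Dataspace_noncont *ds
 *     = Moe::Dataspace_noncont::create(q, 5 * L4_PAGESIZE, flags);
 *   ds->pre_allocate(0, 5 * L4_PAGESIZE, L4_FPAGE_RW);
 */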