1// vi:set ft=cpp: -*- Mode: C++ -*-
2/*
3 * (c) 2008-2009 Alexander Warg <warg@os.inf.tu-dresden.de>,
4 *               Alexander Warg <warg@os.inf.tu-dresden.de>
5 *     economic rights: Technische Universität Dresden (Germany)
6 *
7 * This file is part of TUD:OS and distributed under the terms of the
8 * GNU General Public License 2.
9 * Please see the COPYING-GPL-2 file for details.
10 *
11 * As a special exception, you may use this file as part of a free software
12 * library without restriction.  Specifically, if other files instantiate
13 * templates or use macros or inline functions from this file, or you compile
14 * this file and link it with other files to produce an executable, this
15 * file does not by itself cause the resulting executable to be covered by
16 * the GNU General Public License.  This exception does not however
17 * invalidate any other reasons why the executable file might be covered by
18 * the GNU General Public License.
19 */
20
21#pragma once
22
23#include <l4/cxx/std_alloc>
24#include <l4/cxx/hlist>
25#include <l4/sys/consts.h>
26
27namespace cxx {
28
29/**
30 * \ingroup cxx_api
31 * Basic slab allocator.
32 *
33 * \tparam Obj_size  The size of the objects managed by the allocator (in bytes).
34 * \tparam Slab_size The size of a slab (in bytes).
35 * \tparam Max_free  The maximum number of free slabs. When this limit is reached
36 *                   slabs are freed, provided that the backend allocator
37 *                   supports allocated memory to be freed.
38 * \tparam Alloc     The backend allocator used to allocate slabs.
39 */
40template< int Obj_size, int Slab_size = L4_PAGESIZE,
41  int Max_free = 2, template<typename A> class Alloc = New_allocator >
42class Base_slab
43{
44private:
45  struct Free_o
46  {
47    Free_o *next;
48  };
49
50protected:
51  struct Slab_i;
52
53private:
54  struct Slab_head : public H_list_item
55  {
56    /// Number of free objects in the slab.
57    unsigned num_free;
58    /// Pointer to the first free object in the slab.
59    Free_o *free;
60    /// Pointer to the slab cache (instance of the slab allocator).
61    Base_slab<Obj_size, Slab_size, Max_free, Alloc> *cache;
62
63    inline Slab_head() throw() : num_free(0), free(0), cache(0)
64    {}
65  };
66
67  // In an empty or partially filled slab, each free object stores a pointer to
68  // the next free object. Thus, the size of an object needs to be at least the
69  // size of a pointer.
70  static_assert(Obj_size >= sizeof(void *),
71                "Object size must be at least the size of a pointer.");
72  static_assert(Obj_size <= Slab_size - sizeof(Slab_head),
73                "Object_size exceeds slab capability.");
74
75public:
76  enum
77  {
78    /// Size of an object.
79    object_size      = Obj_size,
80    /// Size of a slab.
81    slab_size        = Slab_size,
82    /// Objects per slab.
83    objects_per_slab = (Slab_size - sizeof(Slab_head)) / object_size,
84    /// Maximum number of free slabs.
85    max_free_slabs   = Max_free,
86  };
87
88protected:
89  struct Slab_store
90  {
91    char _o[slab_size - sizeof(Slab_head)];
92    Free_o *object(unsigned obj) throw()
93    { return reinterpret_cast<Free_o*>(_o + object_size * obj); }
94  };
95
96  /// Type of a slab
97  struct Slab_i : public Slab_store, public Slab_head
98  {};
99
100public:
101  /// Type of the backend allocator.
102  typedef Alloc<Slab_i> Slab_alloc;
103
104  typedef void Obj_type;
105
106private:
107  /// Allocator used for slabs.
108  Slab_alloc _alloc;
109  /// Number of empty slabs.
110  unsigned _num_free;
111  /// Total number of slabs.
112  unsigned _num_slabs;
113  /// List of full slabs.
114  H_list<Slab_i> _full_slabs;
115  /// List of partial slabs.
116  H_list<Slab_i> _partial_slabs;
117  /// List of empty slabs.
118  H_list<Slab_i> _empty_slabs;
119
120  /// Add a new slab.
121  void add_slab(Slab_i *s) throw()
122  {
123    s->num_free = objects_per_slab;
124    s->cache = this;
125
126    //L4::cerr << "Slab: " << this << "->add_slab(" << s << ", size="
127    //  << slab_size << "):" << " f=" << s->object(0) << '\n';
128
129    // initialize free list
130    Free_o *f = s->free = s->object(0);
131    for (unsigned i = 1; i < objects_per_slab; ++i)
132      {
133	f->next = s->object(i);
134	f = f->next;
135      }
136    f->next = 0;
137
138    // insert slab into cache's list
139    _empty_slabs.push_front(s);
140    ++_num_slabs;
141    ++_num_free;
142  }
143
144  /// Grow the slab cache, by adding a new slab.
145  bool grow() throw()
146  {
147    Slab_i *s = _alloc.alloc();
148    if (!s)
149      return false;
150
151    new (s, cxx::Nothrow()) Slab_i();
152
153    add_slab(s);
154    return true;
155  }
156
157  /**
158   * Shrink the slab cache by freeing empty slabs.
159   *
160   * The flow of memory from the slab cache back to the system is regulated via
161   * the backend allocator's flag `can_free`. If this flag is set to true, the
162   * slab cache retains at maximum Max_free empty slabs; empty slabs exceeding
163   * this limit are freed. If `can_free` is set to false, the shrink operation
164   * does nothing.
165   */
166  void shrink() throw()
167  {
168    if (!_alloc.can_free)
169      return;
170
171    while (!_empty_slabs.empty() && _num_free > max_free_slabs)
172      {
173	Slab_i *s = _empty_slabs.front();
174	_empty_slabs.remove(s);
175	--_num_free;
176	--_num_slabs;
177	_alloc.free(s);
178      }
179  }
180
181public:
182  Base_slab(Slab_alloc const &alloc = Slab_alloc()) throw()
183    : _alloc(alloc), _num_free(0), _num_slabs(0), _full_slabs(),
184      _partial_slabs(), _empty_slabs()
185  {}
186
187  ~Base_slab() throw()
188  {
189    while (!_empty_slabs.empty())
190      {
191	Slab_i *o = _empty_slabs.front();
192	_empty_slabs.remove(o);
193	_alloc.free(o);
194      }
195    while (!_partial_slabs.empty())
196      {
197	Slab_i *o = _partial_slabs.front();
198	_partial_slabs.remove(o);
199	_alloc.free(o);
200      }
201    while (!_full_slabs.empty())
202      {
203	Slab_i *o = _full_slabs.front();
204	_full_slabs.remove(o);
205	_alloc.free(o);
206      }
207  }
208
209  /**
210   * Allocate a new object.
211   *
212   * \return A pointer to the new object if the allocation succeeds, or 0 on
213   *         failure to acquire memory from the backend allocator when the slab
214   *         cache memory is already exhausted.
215   *
216   * \note The user is responsible for initializing the object.
217   */
218  void *alloc() throw()
219  {
220    H_list<Slab_i> *free = &_partial_slabs;
221    if (free->empty())
222      free = &_empty_slabs;
223
224    if (free->empty() && !grow())
225      return 0;
226
227    Slab_i *s = free->front();
228    Free_o *o = s->free;
229    s->free = o->next;
230
231    if (free == &_empty_slabs)
232      {
233	_empty_slabs.remove(s);
234        --_num_free;
235      }
236
237    --(s->num_free);
238
239    if (!s->free)
240      {
241	_partial_slabs.remove(s);
242        _full_slabs.push_front(s);
243      }
244    else if (free == &_empty_slabs)
245      _partial_slabs.push_front(s);
246
247    //L4::cerr << this << "->alloc(): " << o << ", of " << s << '\n';
248
249    return o;
250  }
251
252  /**
253   * Free the given object (`_o`).
254   *
255   * \pre The object must have been allocated with this allocator.
256   */
257  void free(void *_o) throw()
258  {
259    if (!_o)
260      return;
261
262    unsigned long addr = (unsigned long)_o;
263
264    // find out the slab the object is in
265    addr = (addr / slab_size) * slab_size;
266    Slab_i *s = (Slab_i*)addr;
267
268    if (s->cache != this)
269      return;
270
271    Free_o *o = reinterpret_cast<Free_o*>(_o);
272
273    o->next = s->free;
274    s->free = o;
275
276    bool was_full = false;
277
278    if (!s->num_free)
279      {
280	_full_slabs.remove(s);
281	was_full = true;
282      }
283
284    ++(s->num_free);
285
286    if (s->num_free == objects_per_slab)
287      {
288	if (!was_full)
289	  _partial_slabs.remove(s);
290
291	_empty_slabs.push_front(s);
292	++_num_free;
293	if (_num_free > max_free_slabs)
294	  shrink();
295
296	was_full = false;
297      }
298    else if (was_full)
299      _partial_slabs.push_front(s);
300
301    //L4::cerr << this << "->free(" << _o << "): of " << s << '\n';
302  }
303
304  /**
305   * Get the total number of objects managed by the slab allocator.
306   *
307   * \return The number of objects managed by the allocator (including the
308   *         free objects).
309   */
310  unsigned total_objects() const throw()
311  { return _num_slabs * objects_per_slab; }
312
313  /**
314   * Get the number of objects which can be allocated before a new empty slab
315   * needs to be added to the slab allocator.
316   *
317   * \return The number of free objects in the slab allocator.
318   */
319  unsigned free_objects() const throw()
320  {
321    unsigned count = 0;
322
323    /* count partial slabs first */
324    for (typename H_list<Slab_i>::Const_iterator s = _partial_slabs.begin();
325         s != _partial_slabs.end(); ++s)
326      count += s->num_free;
327
328    /* add empty slabs */
329    count += _num_free * objects_per_slab;
330
331    return count;
332  }
333};
334
335/**
336 * \ingroup cxx_api
337 * Slab allocator for object of type `Type`.
338 *
339 * \tparam Type       The type of the objects to manage.
340 * \tparam Slab_size  Size of a slab.
341 * \tparam Max_free   The maximum number of free slabs.
342 * \tparam Alloc      The allocator for the slabs.
343 */
344template<typename Type, int Slab_size = L4_PAGESIZE,
345  int Max_free = 2, template<typename A> class Alloc = New_allocator >
346class Slab : public Base_slab<sizeof(Type), Slab_size, Max_free, Alloc>
347{
348private:
349  typedef Base_slab<sizeof(Type), Slab_size, Max_free, Alloc> Base_type;
350public:
351
352  typedef Type Obj_type;
353
354  Slab(typename Base_type::Slab_alloc const &alloc
355      = typename Base_type::Slab_alloc()) throw()
356    : Base_slab<sizeof(Type), Slab_size, Max_free, Alloc>(alloc) {}
357
358
359  /**
360   * Allocate an object of type `Type`.
361   *
362   * \return A pointer to the object just allocated, or 0 on failure.
363   *
364   * \note The user is responsible for initializing the object.
365   */
366  Type *alloc() throw()
367  {
368    return reinterpret_cast<Type *>(Base_type::alloc());
369  }
370
371  /**
372   * Free the object addressed by `o`.
373   *
374   * \param o The pointer to the object to free.
375   * \pre The object must have been allocated with this allocator.
376   */
377  void free(Type *o) throw()
378  { Base_slab<sizeof(Type), Slab_size, Max_free, Alloc>::free(o); }
379};
380
381
382/**
383 * \ingroup cxx_api
384 * Merged slab allocator (allocators for objects of the same size are merged
385 * together).
386 *
387 * \tparam Obj_size   The size of an object managed by the slab allocator.
388 * \tparam Slab_size  The size of a slab.
389 * \tparam Max_free   The maximum number of free slabs.
390 * \tparam Alloc      The allocator for the slabs.
391 *
392 * This slab allocator class is useful for merging slab allocators with the
393 * same parameters (equal `Obj_size`, `Slab_size`, `Max_free`, and
394 * `Alloc` parameters) together and share the overhead for the slab caches
395 * among all equal-sized objects.
396 */
397template< int Obj_size, int Slab_size = L4_PAGESIZE,
398  int Max_free = 2, template<typename A> class Alloc = New_allocator >
399class Base_slab_static
400{
401private:
402  typedef Base_slab<Obj_size, Slab_size, Max_free, Alloc> _A;
403  static _A _a;
404public:
405  typedef void Obj_type;
406  enum
407  {
408    /// Size of an object.
409    object_size      = Obj_size,
410    /// Size of a slab.
411    slab_size        = Slab_size,
412    /// Number of objects per slab.
413    objects_per_slab = _A::objects_per_slab,
414    /// Maximum number of free slabs.
415    max_free_slabs   = Max_free,
416  };
417
418  /**
419   * Allocate an object.
420   *
421   * \note The user is responsible for initializing the object.
422   */
423  void *alloc() throw() { return _a.alloc(); }
424
425  /**
426   * Free the given object (`p`).
427   *
428   * \param p  The pointer to the object to free.
429   * \pre `p` must be a pointer to an object allocated by this allocator.
430   */
431  void free(void *p) throw() { _a.free(p); }
432
433  /**
434   * Get the total number of objects managed by the slab allocator.
435   *
436   * \return The number of objects managed by the allocator (including the
437   *         free objects).
438   * \note The value is the merged value for all equal parameterized
439   *       Base_slab_static instances.
440   */
441  unsigned total_objects() const throw() { return _a.total_objects(); }
442
443  /**
444   * Get the number of free objects in the slab allocator.
445   *
446   * \return The number of free objects in all free and partially used
447   *         slabs managed by this allocator.
448   * \note The value is the merged value for all equal parameterized
449   *       Base_slab_static instances.
450   */
451  unsigned free_objects() const throw() { return _a.free_objects(); }
452};
453
454
// Definition of the static cache instance: one shared Base_slab per distinct
// combination of template parameters, merged across all Base_slab_static
// users with equal parameters.
template< int _O, int _S, int _M, template<typename A> class Alloc >
typename Base_slab_static<_O,_S,_M,Alloc>::_A
  Base_slab_static<_O,_S,_M,Alloc>::_a;
458
459/**
460 * \ingroup cxx_api
461 * Merged slab allocator (allocators for objects of the same size are merged
462 * together).
463 *
464 * \tparam Type       The type of the objects to manage.
465 * \tparam Slab_size  The size of a slab.
466 * \tparam Max_free   The maximum number of free slabs.
467 * \tparam Alloc      The allocator for the slabs.
468 *
469 * This slab allocator class is useful for merging slab allocators with the
470 * same parameters (equal `sizeof(Type)`, `Slab_size`, `Max_free`, and
471 * `Alloc` parameters) together and share the overhead for the slab caches
472 * among all equal-sized objects.
473 */
474template<typename Type, int Slab_size = L4_PAGESIZE,
475  int Max_free = 2, template<typename A> class Alloc = New_allocator >
476class Slab_static
477: public Base_slab_static<sizeof(Type), Slab_size, Max_free, Alloc>
478{
479public:
480
481  typedef Type Obj_type;
482  /**
483   * Allocate an object of type `Type`.
484   *
485   * \return A pointer to the just allocated object, or 0 on failure.
486   *
487   * \note The object is not zeroed out by the slab allocator.
488   */
489  Type *alloc() throw()
490  {
491    return reinterpret_cast<Type *>(
492      Base_slab_static<sizeof(Type), Slab_size, Max_free, Alloc>::alloc());
493  }
494};
495
496}
497